[VOL-5486] Fix deprecated versions

Change-Id: Ic398322948171d8a1c5b4e866602805ec0ea396f
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/IBM/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore
new file mode 100644
index 0000000..f887957
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.gitignore
@@ -0,0 +1,31 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/bin
+/coverage.txt
+/profile.out
+/output.json
+
+.idea
diff --git a/vendor/github.com/IBM/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml
new file mode 100644
index 0000000..520e422
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.golangci.yml
@@ -0,0 +1,104 @@
+# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/golangci.jsonschema.json
+version: "2"
+linters:
+  default: none
+  enable:
+  - bodyclose
+  - copyloopvar
+  - depguard
+  - dogsled
+  - errcheck
+  - errorlint
+  - funlen
+  - gochecknoinits
+  - gocritic
+  - gocyclo
+  - gosec
+  - govet
+  - misspell
+  - nilerr
+  - unconvert
+  - unused
+  - whitespace
+  settings:
+    depguard:
+      rules:
+        main:
+          deny:
+          - pkg: io/ioutil
+            desc: Use the "io" and "os" packages instead.
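+            # e.g. prefer os.ReadFile / os.WriteFile and io.Discard over their io/ioutil equivalents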
+    dupl:
+      threshold: 100
+    funlen:
+      lines: 300
+      statements: 300
+    goconst:
+      min-len: 2
+      min-occurrences: 3
+    gocritic:
+      enabled-checks:
+      - importShadow
+      - nestingReduce
+      - stringsCompare
+      # - unnamedResult
+      # - whyNoLint
+      disabled-checks:
+      - assignOp
+      - appendAssign
+      - commentedOutCode
+      - hugeParam
+      - ifElseChain
+      - singleCaseSwitch
+      - sloppyReassign
+      enabled-tags:
+      - diagnostic
+      - performance
+      # - experimental
+      # - opinionated
+      # - style
+    gocyclo:
+      min-complexity: 99
+    govet:
+      disable:
+      - fieldalignment
+      - shadow
+      enable-all: true
+    misspell:
+      locale: US
+  # exclude some linters from running on certain files.
+  exclusions:
+    generated: lax
+    presets:
+    - comments
+    - common-false-positives
+    - legacy
+    - std-error-handling
+    rules:
+    - linters:
+      - paralleltest
+      path: functional.*_test\.go
+    - path: (.+)\.go$
+      text: 'G115: integer overflow conversion'
+    - path: (.+)\.go$
+      text: 'G404: Use of weak random number generator'
+    paths:
+    - third_party$
+    - builtin$
+    - examples$
+issues:
+  # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
+  max-same-issues: 0
+formatters:
+  enable:
+  - gofmt
+  - goimports
+  settings:
+    goimports:
+      local-prefixes:
+      - github.com/IBM/sarama
+  exclusions:
+    generated: lax
+    paths:
+    - third_party$
+    - builtin$
+    - examples$
diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
new file mode 100644
index 0000000..5387686
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
@@ -0,0 +1,41 @@
+fail_fast: false
+default_install_hook_types: [pre-commit, commit-msg]
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v6.0.0
+    hooks:
+      - id: check-merge-conflict
+      - id: check-yaml
+      - id: end-of-file-fixer
+      - id: fix-byte-order-marker
+      - id: mixed-line-ending
+      - id: trailing-whitespace
+  - repo: local
+    hooks:
+      - id: conventional-commit-msg-validation
+        name: commit message conventional validation
+        language: pygrep
+        entry: '^(?:fixup! )?(breaking|build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w `])+([\s\S]*)'
+        args: [--multiline, --negate]
+        stages: [commit-msg]
+      - id: commit-msg-needs-to-be-signed-off
+        name: commit message needs to be signed off
+        language: pygrep
+        entry: "^Signed-off-by:"
+        args: [--multiline, --negate]
+        stages: [commit-msg]
+      - id: gofmt
+        name: gofmt
+        description: Format files with gofmt.
+        entry: gofmt -l
+        language: golang
+        files: \.go$
+        args: []
+  - repo: https://github.com/gitleaks/gitleaks
+    rev: v8.28.0
+    hooks:
+      - id: gitleaks
+  - repo: https://github.com/golangci/golangci-lint
+    rev: v2.4.0
+    hooks:
+      - id: golangci-lint
diff --git a/vendor/github.com/IBM/sarama/.whitesource b/vendor/github.com/IBM/sarama/.whitesource
new file mode 100644
index 0000000..26e9c47
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.whitesource
@@ -0,0 +1,3 @@
+{
+  "settingsInheritedFrom": "ibm-mend-config/mend-config@main"
+}
\ No newline at end of file
diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md
new file mode 100644
index 0000000..99abeb3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CHANGELOG.md
@@ -0,0 +1,1760 @@
+# Changelog
+
+## Version 1.42.2 (2024-02-09)
+
+## What's Changed
+
+⚠️ The go.mod directive has been bumped to 1.18 as the minimum version of Go required for the module. This was necessary to continue receiving updates from some of the third-party dependencies that Sarama uses for compression.
+
+### :tada: New Features / Improvements
+* feat: update go directive to 1.18 by @dnwe in https://github.com/IBM/sarama/pull/2713
+* feat: return KError instead of errors in AlterConfigs and DescribeConfig by @zhuliquan in https://github.com/IBM/sarama/pull/2472
+### :bug: Fixes
+* fix: don't waste time for backoff on member id required error by @lzakharov in https://github.com/IBM/sarama/pull/2759
+* fix: prevent ConsumerGroup.Close infinitely locking by @maqdev in https://github.com/IBM/sarama/pull/2717
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/IBM/sarama/pull/2716
+* chore(deps): bump golang.org/x/sync to v0.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2718
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.18 to 4.1.19 by @dependabot in https://github.com/IBM/sarama/pull/2739
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2748
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2734
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2764
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.19 to 4.1.21 by @dependabot in https://github.com/IBM/sarama/pull/2763
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/exactly_once by @dependabot in https://github.com/IBM/sarama/pull/2749
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/consumergroup by @dependabot in https://github.com/IBM/sarama/pull/2750
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/sasl_scram_client by @dependabot in https://github.com/IBM/sarama/pull/2751
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2752
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/http_server by @dependabot in https://github.com/IBM/sarama/pull/2753
+* chore(deps): bump github.com/eapache/go-resiliency from 1.4.0 to 1.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2745
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/txn_producer by @dependabot in https://github.com/IBM/sarama/pull/2754
+* chore(deps): bump go.opentelemetry.io/otel/sdk from 1.19.0 to 1.22.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2767
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2793
+* chore(deps): bump go.opentelemetry.io/otel/exporters/stdout/stdoutmetric from 0.42.0 to 1.23.1 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2792
+### :wrench: Maintenance
+* fix(examples): housekeeping of code and deps by @dnwe in https://github.com/IBM/sarama/pull/2720
+### :heavy_plus_sign: Other Changes
+* fix(test): retry MockBroker Listen for EADDRINUSE by @dnwe in https://github.com/IBM/sarama/pull/2721
+
+## New Contributors
+* @maqdev made their first contribution in https://github.com/IBM/sarama/pull/2717
+* @zhuliquan made their first contribution in https://github.com/IBM/sarama/pull/2472
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.1...v1.42.2
+
+## Version 1.42.1 (2023-11-07)
+
+## What's Changed
+### :bug: Fixes
+* fix: make fetchInitialOffset use correct protocol by @dnwe in https://github.com/IBM/sarama/pull/2705
+* fix(config): relax ClientID validation after 1.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2706
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.0...v1.42.1
+
+## Version 1.42.0 (2023-11-02)
+
+## What's Changed
+### :bug: Fixes
+* Asynchronously close brokers during a RefreshBrokers by @bmassemin in https://github.com/IBM/sarama/pull/2693
+* Fix data race on Broker.done channel by @prestona in https://github.com/IBM/sarama/pull/2698
+* fix: data race in Broker.AsyncProduce by @lzakharov in https://github.com/IBM/sarama/pull/2678
+* Fix default retention time value in offset commit by @prestona in https://github.com/IBM/sarama/pull/2700
+* fix(txmgr): ErrOffsetsLoadInProgress is retriable by @dnwe in https://github.com/IBM/sarama/pull/2701
+### :wrench: Maintenance
+* chore(ci): improve ossf scorecard result by @dnwe in https://github.com/IBM/sarama/pull/2685
+* chore(ci): add kafka 3.6.0 to FVT and versions by @dnwe in https://github.com/IBM/sarama/pull/2692
+### :heavy_plus_sign: Other Changes
+* chore(ci): ossf scorecard.yml by @dnwe in https://github.com/IBM/sarama/pull/2683
+* fix(ci): always run CodeQL on every commit by @dnwe in https://github.com/IBM/sarama/pull/2689
+* chore(doc): add OpenSSF Scorecard badge by @dnwe in https://github.com/IBM/sarama/pull/2691
+
+## New Contributors
+* @bmassemin made their first contribution in https://github.com/IBM/sarama/pull/2693
+* @lzakharov made their first contribution in https://github.com/IBM/sarama/pull/2678
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.3...v1.42.0
+
+## Version 1.41.3 (2023-10-17)
+
+## What's Changed
+### :bug: Fixes
+* fix: pre-compile regex for parsing kafka version by @qshuai in https://github.com/IBM/sarama/pull/2663
+* fix(client): ignore empty Metadata responses when refreshing by @HaoSunUber in https://github.com/IBM/sarama/pull/2672
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2661
+* chore(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2671
+### :memo: Documentation
+* fix(docs): correct topic name in rebalancing strategy example by @maksadbek in https://github.com/IBM/sarama/pull/2657
+
+## New Contributors
+* @maksadbek made their first contribution in https://github.com/IBM/sarama/pull/2657
+* @qshuai made their first contribution in https://github.com/IBM/sarama/pull/2663
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.2...v1.41.3
+
+## Version 1.41.2 (2023-09-12)
+
+## What's Changed
+### :tada: New Features / Improvements
+* perf: Alloc records in batch by @ronanh in https://github.com/IBM/sarama/pull/2646
+### :bug: Fixes
+* fix(consumer): guard against nil client by @dnwe in https://github.com/IBM/sarama/pull/2636
+* fix(consumer): don't retry session if ctx canceled by @dnwe in https://github.com/IBM/sarama/pull/2642
+* fix: use least loaded broker to refresh metadata by @HaoSunUber in https://github.com/IBM/sarama/pull/2645
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2641
+
+## New Contributors
+* @HaoSunUber made their first contribution in https://github.com/IBM/sarama/pull/2645
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.1...v1.41.2
+
+## Version 1.41.1 (2023-08-30)
+
+## What's Changed
+### :bug: Fixes
+* fix(proto): handle V3 member metadata and empty owned partitions by @dnwe in https://github.com/IBM/sarama/pull/2618
+* fix: make clear that error is configuration issue not server error by @hindessm in https://github.com/IBM/sarama/pull/2628
+* fix(client): force Event Hubs to use V1_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2633
+* fix: add retries to alter user scram creds by @hindessm in https://github.com/IBM/sarama/pull/2632
+### :wrench: Maintenance
+* chore(lint): bump golangci-lint and tweak config by @dnwe in https://github.com/IBM/sarama/pull/2620
+### :memo: Documentation
+* fix(doc): add missing doc for mock consumer by @hsweif in https://github.com/IBM/sarama/pull/2386
+* chore(proto): doc CreateTopics/JoinGroup fields by @dnwe in https://github.com/IBM/sarama/pull/2627
+### :heavy_plus_sign: Other Changes
+* chore(gh): add new style issue templates by @dnwe in https://github.com/IBM/sarama/pull/2624
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.0...v1.41.1
+
+## Version 1.41.0 (2023-08-21)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+Note: this version of Sarama has had a big overhaul in its adherence to the correct Kafka protocol versions for the given Config Version. It has also bumped the default Version set in Config (where one is not supplied) to 2.1.0 (`V2_1_0_0`), in preparation for Kafka 4.0 dropping support for protocol versions older than 2.1. If you are using Sarama against Kafka clusters older than v2.1.0, or against Azure Event Hubs, you will likely have to change your application code to pin the appropriate Version (a minimal sketch follows the list below).
+
+* chore(config): make DefaultVersion V2_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2572
+* chore(config): make DefaultVersion V2_1_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2574
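+
+For example, an application that still talks to an older cluster or to Event Hubs can pin the version explicitly. A minimal sketch (`V1_0_0_0` is purely an illustrative choice, not a recommendation):
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+func main() {
+	config := sarama.NewConfig()
+	// Pin to the protocol version the cluster actually speaks instead of
+	// relying on the new V2_1_0_0 default.
+	config.Version = sarama.V1_0_0_0
+	_ = config // pass to sarama.NewClient / sarama.NewConsumerGroup etc.
+}
+```
+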
+### :tada: New Features / Improvements
+* Implement resolve_canonical_bootstrap_servers_only by @gebn in https://github.com/IBM/sarama/pull/2156
+* feat: sleep when throttled (KIP-219) by @hindessm in https://github.com/IBM/sarama/pull/2536
+* feat: add isValidVersion to protocol types by @dnwe in https://github.com/IBM/sarama/pull/2538
+* fix(consumer): use newer LeaveGroup as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2544
+* Add support for up to version 4 List Groups API by @prestona in https://github.com/IBM/sarama/pull/2541
+* fix(producer): use newer ProduceReq as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2546
+* fix(proto): ensure req+resp requiredVersion match by @dnwe in https://github.com/IBM/sarama/pull/2548
+* chore(proto): permit CreatePartitionsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2549
+* chore(proto): permit AlterConfigsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2550
+* chore(proto): permit DeleteGroupsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2551
+* fix(proto): correct JoinGroup usage for wider version range by @dnwe in https://github.com/IBM/sarama/pull/2553
+* fix(consumer): use full range of FetchRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2554
+* fix(proto): use range of OffsetCommitRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2555
+* fix(proto): use full range of MetadataRequest by @dnwe in https://github.com/IBM/sarama/pull/2556
+* fix(proto): use fuller ranges of supported proto by @dnwe in https://github.com/IBM/sarama/pull/2558
+* fix(proto): use full range of SyncGroupRequest by @dnwe in https://github.com/IBM/sarama/pull/2565
+* fix(proto): use full range of ListGroupsRequest by @dnwe in https://github.com/IBM/sarama/pull/2568
+* feat(proto): support for Metadata V6-V10 by @dnwe in https://github.com/IBM/sarama/pull/2566
+* fix(proto): use full ranges for remaining proto by @dnwe in https://github.com/IBM/sarama/pull/2570
+* feat(proto): add remaining protocol for V2.1 by @dnwe in https://github.com/IBM/sarama/pull/2573
+* feat: add new error for MockDeleteTopicsResponse by @javiercri in https://github.com/IBM/sarama/pull/2475
+* feat(gzip): switch to klauspost/compress gzip by @dnwe in https://github.com/IBM/sarama/pull/2600
+### :bug: Fixes
+* fix: correct unsupported version check by @hindessm in https://github.com/IBM/sarama/pull/2528
+* fix: avoiding burning cpu if all partitions are paused by @napallday in https://github.com/IBM/sarama/pull/2532
+* extend throttling metric scope by @hindessm in https://github.com/IBM/sarama/pull/2533
+* Fix printing of final metrics by @prestona in https://github.com/IBM/sarama/pull/2545
+* fix(consumer): cannot automatically fetch newly-added partitions unless restart by @napallday in https://github.com/IBM/sarama/pull/2563
+* bug: implement unsigned modulus for partitioning with crc32 hashing by @csm8118 in https://github.com/IBM/sarama/pull/2560
+* fix: avoid logging value of proxy.Dialer by @prestona in https://github.com/IBM/sarama/pull/2569
+* fix(test): add missing closes to admin client tests by @dnwe in https://github.com/IBM/sarama/pull/2594
+* fix(test): ensure some more clients are closed by @dnwe in https://github.com/IBM/sarama/pull/2595
+* fix(examples): sync exactly_once and consumergroup by @dnwe in https://github.com/IBM/sarama/pull/2614
+* fix(fvt): fresh metrics registry for each test by @dnwe in https://github.com/IBM/sarama/pull/2616
+* fix(test): flaky test TestFuncOffsetManager by @napallday in https://github.com/IBM/sarama/pull/2609
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2542
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2561
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.18 by @dnwe in https://github.com/IBM/sarama/pull/2589
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.4 by @dnwe in https://github.com/IBM/sarama/pull/2587
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to c322873 by @dnwe in https://github.com/IBM/sarama/pull/2586
+* chore(deps): bump module github.com/klauspost/compress to v1.16.7 by @dnwe in https://github.com/IBM/sarama/pull/2588
+* chore(deps): bump github.com/eapache/go-resiliency from 1.3.0 to 1.4.0 by @dependabot in https://github.com/IBM/sarama/pull/2598
+### :wrench: Maintenance
+* fix(fvt): ensure fully-replicated at test start by @hindessm in https://github.com/IBM/sarama/pull/2531
+* chore: rollup fvt kafka to latest three by @dnwe in https://github.com/IBM/sarama/pull/2537
+* Merge the two CONTRIBUTING.md's by @prestona in https://github.com/IBM/sarama/pull/2543
+* fix(test): test timing error by @hindessm in https://github.com/IBM/sarama/pull/2552
+* chore(ci): tidyup and improve actions workflows by @dnwe in https://github.com/IBM/sarama/pull/2557
+* fix(test): shutdown MockBroker by @dnwe in https://github.com/IBM/sarama/pull/2571
+* chore(proto): match HeartbeatResponse version by @dnwe in https://github.com/IBM/sarama/pull/2576
+* chore(test): ensure MockBroker closed within test by @dnwe in https://github.com/IBM/sarama/pull/2575
+* chore(test): ensure all mockresponses use version by @dnwe in https://github.com/IBM/sarama/pull/2578
+* chore(ci): use latest Go in actions by @dnwe in https://github.com/IBM/sarama/pull/2580
+* chore(test): speedup some slow tests by @dnwe in https://github.com/IBM/sarama/pull/2579
+* chore(test): use modern protocol versions in FVT by @dnwe in https://github.com/IBM/sarama/pull/2581
+* chore(test): fix a couple of leaks by @dnwe in https://github.com/IBM/sarama/pull/2591
+* feat(fvt): experiment with per-kafka-version image by @dnwe in https://github.com/IBM/sarama/pull/2592
+* chore(ci): replace toxiproxy client dep by @dnwe in https://github.com/IBM/sarama/pull/2593
+* feat(fvt): add healthcheck, depends_on and --wait by @dnwe in https://github.com/IBM/sarama/pull/2601
+* fix(fvt): handle msgset vs batchset by @dnwe in https://github.com/IBM/sarama/pull/2603
+* fix(fvt): Metadata version in ensureFullyReplicated by @dnwe in https://github.com/IBM/sarama/pull/2612
+* fix(fvt): versioned cfg for invalid topic producer by @dnwe in https://github.com/IBM/sarama/pull/2613
+* chore(fvt): tweak to work across more versions by @dnwe in https://github.com/IBM/sarama/pull/2615
+* feat(fvt): test wider range of kafkas by @dnwe in https://github.com/IBM/sarama/pull/2605
+### :memo: Documentation
+* fix(example): check if msg channel is closed by @ioanzicu in https://github.com/IBM/sarama/pull/2479
+* chore: use go install for installing sarama tools by @vigith in https://github.com/IBM/sarama/pull/2599
+
+## New Contributors
+* @gebn made their first contribution in https://github.com/IBM/sarama/pull/2156
+* @prestona made their first contribution in https://github.com/IBM/sarama/pull/2543
+* @ioanzicu made their first contribution in https://github.com/IBM/sarama/pull/2479
+* @csm8118 made their first contribution in https://github.com/IBM/sarama/pull/2560
+* @javiercri made their first contribution in https://github.com/IBM/sarama/pull/2475
+* @vigith made their first contribution in https://github.com/IBM/sarama/pull/2599
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.1...v1.41.0
+
+## Version 1.40.1 (2023-07-27)
+
+## What's Changed
+### :tada: New Features / Improvements
+* Use buffer pools for decompression by @ronanh in https://github.com/IBM/sarama/pull/2484
+* feat: support for Kerberos authentication with a credentials cache. by @mrogaski in https://github.com/IBM/sarama/pull/2457
+### :bug: Fixes
+* Fix some retry issues by @hindessm in https://github.com/IBM/sarama/pull/2517
+* fix: admin retry logic by @hindessm in https://github.com/IBM/sarama/pull/2519
+* Add some retry logic to more admin client functions by @hindessm in https://github.com/IBM/sarama/pull/2520
+* fix: concurrent issue on updateMetadataMs by @napallday in https://github.com/IBM/sarama/pull/2522
+* fix(test): allow testing of skipped test without IsTransactional panic by @hindessm in https://github.com/IBM/sarama/pull/2525
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2509
+* chore(deps): bump github.com/klauspost/compress from 1.15.14 to 1.16.6 by @dependabot in https://github.com/IBM/sarama/pull/2513
+* chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.3 by @dependabot in https://github.com/IBM/sarama/pull/2512
+### :wrench: Maintenance
+* chore(ci): migrate probot-stale to actions/stale by @dnwe in https://github.com/IBM/sarama/pull/2496
+* chore(ci): bump golangci version, cleanup, depguard config by @EladLeev in https://github.com/IBM/sarama/pull/2504
+* Clean up some typos and docs/help mistakes by @hindessm in https://github.com/IBM/sarama/pull/2514
+### :heavy_plus_sign: Other Changes
+* chore(ci): add simple apidiff workflow by @dnwe in https://github.com/IBM/sarama/pull/2497
+* chore(ci): bump actions/setup-go from 3 to 4 by @dependabot in https://github.com/IBM/sarama/pull/2508
+* fix(comments): PauseAll and ResumeAll by @napallday in https://github.com/IBM/sarama/pull/2523
+
+## New Contributors
+* @EladLeev made their first contribution in https://github.com/IBM/sarama/pull/2504
+* @hindessm made their first contribution in https://github.com/IBM/sarama/pull/2514
+* @ronanh made their first contribution in https://github.com/IBM/sarama/pull/2484
+* @mrogaski made their first contribution in https://github.com/IBM/sarama/pull/2457
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.0...v1.40.1
+
+## Version 1.40.0 (2023-07-17)
+
+## What's Changed
+
+Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461
+
+### :rotating_light: Breaking Changes
+
+- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492
+- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494
+
+### :bug: Fixes
+
+- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427
+- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428
+- fix: use version 4 of DescribeGroupsRequest only if kafka broker version is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451
+- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447
+- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453
+
+### :package: Dependency updates
+
+- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452
+
+### :wrench: Maintenance
+
+- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434
+- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489
+- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485
+
+## New Contributors
+
+- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452
+- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447
+- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0
+
+## Version 1.38.1 (2023-01-22)
+
+## What's Changed
+### :bug: Fixes
+* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420
+* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410
+* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413
+* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411
+* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412
+* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418
+
+## New Contributors
+* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420
+* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1
+
+## Version 1.38.0 (2023-01-08)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375
+* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388
+* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373
+### :bug: Fixes
+* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389
+* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385
+* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404
+* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387
+* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378
+* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403
+### :wrench: Maintenance
+* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390
+* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406
+
+## New Contributors
+* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385
+* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404
+* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387
+* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0
+
+## Version 1.37.2 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356
+### :heavy_plus_sign: Other Changes
+* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2
+
+## Version 1.37.1 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352
+* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353
+* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355
+
+## New Contributors
+* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1
+
+## Version 1.37.0 (2022-09-28)
+
+## What's Changed
+
+### :rotating_light: Breaking Changes
+* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward; unfortunately, due to an oversight, this wasn't reflected in the go.mod declaration at the time of release.
+
+### :tada: New Features / Improvements
+* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339
+* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295
+* feat(mocks): support key in MockFetchResponse. by @Skandalik in https://github.com/IBM/sarama/pull/2328
+### :bug: Fixes
+* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329
+* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317
+* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340
+* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331
+* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345
+* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327
+### :package: Dependency updates
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335
+* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333
+* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334
+* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348
+* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336
+* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350
+### :wrench: Maintenance
+* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346
+* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347
+
+## New Contributors
+* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329
+* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317
+* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328
+* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340
+* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0
+
+## Version 1.36.0 (2022-08-11)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252
+* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315
+* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299
+### :bug: Fixes
+* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304
+* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301
+* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311
+* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307
+* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313
+* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305
+* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303
+### :wrench: Maintenance
+* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300
+### :heavy_plus_sign: Other Changes
+* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294
+* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297
+* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312
+
+## New Contributors
+* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294
+* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252
+* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0
+
+## Version 1.35.0 (2022-07-22)
+
+## What's Changed
+### :bug: Fixes
+* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256
+* fix(balance): sort and de-duplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285
+* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269
+* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292
+### :package: Dependency updates
+* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284
+* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283
+* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281
+* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280
+### :wrench: Maintenance
+* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272
+* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288
+* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289
+### :heavy_plus_sign: Other Changes
+* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286
+* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287
+* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291
+
+## New Contributors
+* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0
+
+## Version 1.34.1 (2022-06-07)
+
+## What's Changed
+### :bug: Fixes
+* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240
+* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247
+* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248
+* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245
+### :wrench: Maintenance
+* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236
+* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242
+
+## New Contributors
+* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240
+* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1
+
+## Version 1.34.0 (2022-05-30)
+
+## What's Changed
+### :tada: New Features / Improvements
+* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230
+### :bug: Fixes
+* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234
+### :wrench: Maintenance
+* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231
+* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232
+
+## New Contributors
+* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0
+
+## Version 1.33.0 (2022-05-11)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+**Note: with this change, users of Sarama are required to use Go 1.13's `errors.Is` etc. (rather than `==`) when testing errors returned by this library; see the sketch below.**
+* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131
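+
+For example, a check for `ErrOutOfBrokers` must now use `errors.Is` rather than a direct `==` comparison. A minimal sketch (the broker address is illustrative):
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	_, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+	// ErrOutOfBrokers now wraps the underlying cause, so == no longer matches;
+	// errors.Is unwraps the chain and still does.
+	if errors.Is(err, sarama.ErrOutOfBrokers) {
+		log.Printf("no reachable brokers: %v", err)
+	}
+}
+```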
+
+
+### :tada: New Features / Improvements
+* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172
+* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197
+* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191
+### :bug: Fixes
+* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154
+* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144
+* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109
+* fix: remove "Is your cluster reachable?" from msg by @dnwe in https://github.com/IBM/sarama/pull/2165
+* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166
+* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164
+* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171
+* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185
+* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182
+* fix: prevent RefreshBrokers leaking old brokers  by @k-wall in https://github.com/IBM/sarama/pull/2203
+* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204
+* fix: prevent AsyncProducer retryBatch from leaking  by @k-wall in https://github.com/IBM/sarama/pull/2208
+* fix: prevent metrics leak when authenticate fails  by @Stephan14 in https://github.com/IBM/sarama/pull/2205
+* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194
+* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178
+* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213
+* fix: cope with OffsetsLoadInProgress on Join+Sync  by @dnwe in https://github.com/IBM/sarama/pull/2214
+* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227
+### :package: Dependency updates
+* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170
+### :wrench: Maintenance
+* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161
+* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162
+* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183
+* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210
+### :heavy_plus_sign: Other Changes
+* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200
+* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226
+
+## New Contributors
+* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154
+* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171
+* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185
+* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172
+* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182
+* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178
+* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0
+
+## Version 1.32.0 (2022-02-24)
+
+### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used.
+
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+
+---
+
+## What's Changed
+### :bug: Fixes
+* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133
+### :package: Dependency updates
+* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159
+### :wrench: Maintenance
+* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130
+* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939
+* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138
+### :heavy_plus_sign: Other Changes
+* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145
+* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146
+* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147
+* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0
+
+## Version 1.31.1 (2022-02-01)
+
+- #2126 - @bai - Populate missing kafka versions
+- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image
+- #2123 - @bai - Update klauspost/compress to 0.14
+- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy
+- #2119 - @bai - Add Kafka 3.1.0 version number
+- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption
+- #2051 - @seveas - Expose the TLS connection state of a broker connection
+- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys
+- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup
+- #2113 - @mosceo - Fix typo
+
+## Version 1.31.0 (2022-01-18)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088
+* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686
+* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094
+### :bug: Fixes
+* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080
+* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081
+* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082
+* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096
+* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107
+* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108
+* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078
+* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111
+### :wrench: Maintenance
+* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100
+### :memo: Documentation
+* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099
+### :heavy_plus_sign: Other Changes
+* Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084
+
+## New Contributors
+* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080
+* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088
+* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686
+* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0
+
+## Version 1.30.1 (2021-12-04)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045
+### :bug: Fixes
+* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048
+* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054
+* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057
+* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076
+### :wrench: Maintenance
+* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046
+* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070
+
+### Notes
+* ℹ️ from Sarama 1.30.x onward, the minimum version of the Go toolchain required is 1.16.x
+
+## New Contributors
+* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048
+* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045
+* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054
+* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1
+
+## Version 1.30.0 (2021-09-29)
+
+⚠️ This release has been superseded by v1.30.1 and should _not_ be used.
+
+**regression**: enabling rack awareness causes severe throughput drops (#2071); fixed in v1.30.1 via #2076
+
+---
+
+ℹ️ **Note: from Sarama 1.30.0 onward, the minimum version of the Go toolchain required is 1.16.x**
+
+---
+
+### New Features / Improvements
+
+- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh
+- #2000 - @matzew - Using xdg-go module for SCRAM
+- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures
+- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM
+- #2006 - @faillefer - Add support for DeleteOffsets operation
+- #1909 - @agriffaut - KIP-546 Client quota APIs
+- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state
+- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger
+- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log
+- #2019 - @dnwe - feat: add logging & a metric for producer throttle
+- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface
+- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol
+- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open
+- #2034 - @bai - Add support for kafka 3.0.0
+
+### Fixes
+
+- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest
+- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation
+- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls
+- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true
+- #2007 - @bai - Add support for Go 1.17
+- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks
+- #2010 - @dnwe - chore: enable exportloopref and misspell linters
+- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements
+- #2015 - @bai - Change default branch to main
+- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID()
+- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0
+- #2016 - @dnwe - chore: replace deprecated Go calls
+- #2017 - @dnwe - chore: delete legacy vagrant script
+- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test
+- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5
+- #2033 - @bai - Update dependencies
+- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method
+- #2035 - @dnwe - chore: populate the missing kafka versions
+- #2038 - @dnwe - feat: add a fuzzing workflow to github actions
+
+## New Contributors
+* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983
+* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990
+* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988
+* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001
+* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003
+* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973
+* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992
+* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006
+* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718
+* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0
+
+## Version 1.29.1 (2021-06-24)
+
+### New Features / Improvements
+
+- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API
+- #1964 - @ajanikow - Add DelegationToken ResourceType
+
+### Fixes
+
+- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expires
+- #1971 - @KerryJava - fix kafka-producer-performance throughput panic
+- #1968 - @dnwe - chore: bump golang.org/x versions
+- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers
+- #1963 - @dnwe - fix: ensure backoff timer is re-used
+- #1949 - @dnwe - fix: explicitly use uint64 for payload length
+
+## Version 1.29.0 (2021-05-07)
+
+### New Features / Improvements
+
+- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API
+- #1869 - @wyndhblb - zstd: encode+decode performance improvements
+- #1541 - @izolight - add String, (Un)MarshalText for acl types.
+- #1921 - @bai - Add support for Kafka 2.8.0
+
+### Fixes
+- #1936 - @dnwe - fix(consumer): follow preferred broker
+- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response)
+- #1926 - @dnwe - fix: correct initial CodeQL findings
+- #1925 - @bai - Test out CodeQL
+- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos
+- #1922 - @bai - Update go dependencies
+- #1898 - @mmaslankaprv - Parsing only known control batches value
+- #1887 - @withshubh - Fix: issues affecting code quality
+
+## Version 1.28.0 (2021-02-15)
+
+**Note that with this release we changed the `RoundRobinBalancer` strategy to match the Java client's behavior. See #1788 for details.**
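+
+A minimal sketch of opting in to the updated strategy via the consumer group config:
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+func main() {
+	config := sarama.NewConfig()
+	// Select round robin assignment, which now distributes partitions the
+	// same way the Java client's RoundRobinAssignor does.
+	config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRoundRobin
+	_ = config // pass to sarama.NewConsumerGroup(...)
+}
+```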
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+## Version 1.27.2 (2020-10-21)
+
+### Improvements
+
+- #1750 - @krantideep95 - Adds missing mock responses for mocking consumer group
+
+### Fixes
+
+- #1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+## Version 1.27.1 (2020-10-07)
+
+### Improvements
+
+- #1775 - @d1egoaz - Adds a Producer Interceptor example
+- #1781 - @justin-chen - Refresh brokers given list of seed brokers
+- #1784 - @justin-chen - Add randomize seed broker method
+- #1790 - @d1egoaz - remove example binary
+- #1798 - @bai - Test against Go 1.15
+- #1785 - @justin-chen - Add private method to Client interface to prevent implementation
+- #1802 - @uvw - Support Go 1.13 error unwrapping
+
+### Fixes
+
+- #1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+## Version 1.27.0 (2020-08-11)
+
+### Improvements
+
+- #1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
+- #1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+- #1699 - @wclaeys - Consumer group support for manually committing offsets
+- #1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+- #1726 - @d1egoaz - Include zstd on the functional tests
+- #1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+- #1738 - @varun06 - fixed variable names that are named the same as some std lib package names
+- #1741 - @varun06 - updated zstd dependency to latest v1.10.10
+- #1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+- #1763 - @alrs - remove deprecated tls options from test
+- #1769 - @bai - Add support for Kafka 2.6.0
+
+### Fixes
+
+- #1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1744 - @alrs - Fix isBalanced Function Signature
+
+## Version 1.26.4 (2020-05-19)
+
+### Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+## Version 1.26.3 (2020-05-07)
+
+### Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+## Version 1.26.2 (2020-05-06)
+
+### ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable; please use v1.26.4 instead.
+
+### Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+### Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+
+## Version 1.26.1 (2020-02-04)
+
+Improvements:
+- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539))
+- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595))
+- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573))
+- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592))
+
+Bug Fixes:
+- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590))
+- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589))
+
+## Version 1.26.0 (2020-01-24)
+
+New Features:
+- Enable zstd compression
+  ([1574](https://github.com/IBM/sarama/pull/1574),
+  [1582](https://github.com/IBM/sarama/pull/1582))
+- Support headers in tools kafka-console-producer
+  ([1549](https://github.com/IBM/sarama/pull/1549))
+
+Improvements:
+- Add SASL AuthIdentity to SASL frames (authzid)
+  ([1585](https://github.com/IBM/sarama/pull/1585)).
+
+Bug Fixes:
+- Sending messages with ZStd compression enabled fails in multiple ways
+  ([1252](https://github.com/IBM/sarama/issues/1252)).
+- Use the broker for any admin on BrokerConfig
+  ([1571](https://github.com/IBM/sarama/pull/1571)).
+- Set DescribeConfigRequest Version field
+  ([1576](https://github.com/IBM/sarama/pull/1576)).
+- ConsumerGroup flooding logs with client/metadata update req
+  ([1578](https://github.com/IBM/sarama/pull/1578)).
+- MetadataRequest version in DescribeCluster
+  ([1580](https://github.com/IBM/sarama/pull/1580)).
+- Fix deadlock in consumer group handleError
+  ([1581](https://github.com/IBM/sarama/pull/1581))
+- Fill in the Fetch{Request,Response} protocol
+  ([1582](https://github.com/IBM/sarama/pull/1582)).
+- Retry topic request on ControllerNotAvailable
+  ([1586](https://github.com/IBM/sarama/pull/1586)).
+
+## Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+  ([1538](https://github.com/IBM/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+  ([1552](https://github.com/IBM/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets
+  ([1164](https://github.com/IBM/sarama/pull/1164)).
+- Produce records with consistent timestamps
+  ([1455](https://github.com/IBM/sarama/pull/1455)).
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+  ([1534](https://github.com/IBM/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+  ([1535](https://github.com/IBM/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+  ([1531](https://github.com/IBM/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+  ([1545](https://github.com/IBM/sarama/pull/1545)).
+
+## Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+  ([1520](https://github.com/IBM/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+  ([1518](https://github.com/IBM/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+  ([1525](https://github.com/IBM/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+  ([1529](https://github.com/IBM/sarama/pull/1529)).
+
+## Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+  ([1416](https://github.com/IBM/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+  ([1477](https://github.com/IBM/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+  ([1415](https://github.com/IBM/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+  ([1452](https://github.com/IBM/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+  ([1458](https://github.com/IBM/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+  ([1461](https://github.com/IBM/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+  ([1464](https://github.com/IBM/sarama/pull/1464)).
+- Remove direct usage of gofork
+  ([1465](https://github.com/IBM/sarama/pull/1465)).
+- Add support for Go 1.13
+  ([1478](https://github.com/IBM/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+  ([1481](https://github.com/IBM/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+  ([1434](https://github.com/IBM/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+  ([1442](https://github.com/IBM/sarama/pull/1442)).
+- Use released version of lz4 library
+  ([1469](https://github.com/IBM/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+  ([1484](https://github.com/IBM/sarama/pull/1484)).
+- Fix CLI help message typo
+  ([1494](https://github.com/IBM/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+  See https://github.com/IBM/sarama/issues/1252
+
+## Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch delete bug record
+  ([1425](https://github.com/IBM/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+  ([1428](https://github.com/IBM/sarama/pull/1428)).
+
+## Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+  ([1418](https://github.com/IBM/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+  ([1374](https://github.com/IBM/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+  ([1417](https://github.com/IBM/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+  ([1410](https://github.com/IBM/sarama/pull/1410)).
+- Add kerberos support
+  ([1366](https://github.com/IBM/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+  ([1406](https://github.com/IBM/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+  ([1404](https://github.com/IBM/sarama/pull/1404)).
+- Add support for error codes 77-81
+  ([1397](https://github.com/IBM/sarama/pull/1397)).
+- Pool internal objects allocated per message
+  ([1385](https://github.com/IBM/sarama/pull/1385)).
+- Reduce packet decoder allocations
+  ([1373](https://github.com/IBM/sarama/pull/1373)).
+- Support timeout when fetching metadata
+  ([1359](https://github.com/IBM/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+  ([1376](https://github.com/IBM/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+  ([1383](https://github.com/IBM/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+  ([1368](https://github.com/IBM/sarama/pull/1368)).
+
+## Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+  ([1350](https://github.com/IBM/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+  ([1354](https://github.com/IBM/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+  ([1353](https://github.com/IBM/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+  ([1349](https://github.com/IBM/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+  ([1344](https://github.com/IBM/sarama/pull/1344)).
+
+## Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+  ([1318](https://github.com/IBM/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+  ([1326](https://github.com/IBM/sarama/pull/1326)).
+- Implement ReadCommitted
+  ([1307](https://github.com/IBM/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+  ([1331](https://github.com/IBM/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+  ([1295](https://github.com/IBM/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+  ([1232](https://github.com/IBM/sarama/pull/1232)).
+- Add SCRAM authentication example
+  ([1303](https://github.com/IBM/sarama/pull/1303)).
+- Add consumergroup examples
+  ([1304](https://github.com/IBM/sarama/pull/1304)).
+- Expose consumer batch size metric
+  ([1296](https://github.com/IBM/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+  ([1300](https://github.com/IBM/sarama/pull/1300)).
+- Reduce client close bookkeeping
+  ([1297](https://github.com/IBM/sarama/pull/1297)).
+- Satisfy error interface in create responses
+  ([1154](https://github.com/IBM/sarama/pull/1154)).
+- Please lint gods
+  ([1346](https://github.com/IBM/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+  ([1338](https://github.com/IBM/sarama/pull/1338)).
+- Update lz4 to latest version
+  ([1347](https://github.com/IBM/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+  ([1231](https://github.com/IBM/sarama/pull/1231)).
+- Fix cleanup error handler
+  ([1332](https://github.com/IBM/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+  ([1156](https://github.com/IBM/sarama/pull/1156)).
+
+## Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+  ([1236](https://github.com/IBM/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+  ([1178](https://github.com/IBM/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+  ([1240](https://github.com/IBM/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+  ([1282](https://github.com/IBM/sarama/pull/1282)).
+- Add error codes 73-76
+  ([1239](https://github.com/IBM/sarama/pull/1239)).
+- Add retry backoff function
+  ([1160](https://github.com/IBM/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+  ([1189](https://github.com/IBM/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+  ([1274](https://github.com/IBM/sarama/pull/1274)).
+- Add producer performance tool
+  ([1222](https://github.com/IBM/sarama/pull/1222)).
+- Add support for LogAppend timestamps
+  ([1258](https://github.com/IBM/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+  ([1286](https://github.com/IBM/sarama/pull/1286)).
+- Fix consuming compacted topic
+  ([1227](https://github.com/IBM/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+  ([1277](https://github.com/IBM/sarama/pull/1277)).
+- Update kafka test version
+  ([1273](https://github.com/IBM/sarama/pull/1273)).
+
+## Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+  ([1100](https://github.com/IBM/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+  ([1230](https://github.com/IBM/sarama/pull/1230)).
+- Reuse compression objects
+  ([1185](https://github.com/IBM/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+  ([1243](https://github.com/IBM/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+  ([1242](https://github.com/IBM/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+  ([1244](https://github.com/IBM/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+  ([1203](https://github.com/IBM/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+  ([1141](https://github.com/IBM/sarama/pull/1141)).
+
+## Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+   ([#1170](https://github.com/IBM/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+   ([#1152](https://github.com/IBM/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+   ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+   ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+   ([#1198](https://github.com/IBM/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+   ([#1173](https://github.com/IBM/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+   ([#1202](https://github.com/IBM/sarama/pull/1202)).
+ - Add error codes 61 to 72
+   ([#1195](https://github.com/IBM/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+   ([#1182](https://github.com/IBM/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+   ([#1209](https://github.com/IBM/sarama/pull/1209)).
+ - Fix typos in code and comments
+   ([#1228](https://github.com/IBM/sarama/pull/1228)).
+
+## Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+   ([#1099](https://github.com/IBM/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+   ([#1176](https://github.com/IBM/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+   ([#1174](https://github.com/IBM/sarama/pull/1174)).
+
+## Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+   ([#1112](https://github.com/IBM/sarama/pull/1112)).
+ - Add customizable partitioner
+   ([#1118](https://github.com/IBM/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+   ([#1055](https://github.com/IBM/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+   ([#1149](https://github.com/IBM/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+   ([#1123](https://github.com/IBM/sarama/pull/1123)).
+ - Simpler offset management
+   ([#1127](https://github.com/IBM/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+   ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+   expected topic/partition blocks
+   ([#1086](https://github.com/IBM/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+   ([#1115](https://github.com/IBM/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+   ([#1142](https://github.com/IBM/sarama/pull/1142)).
+ - Add version check when producing message with headers
+   ([#1117](https://github.com/IBM/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+   ([#1132](https://github.com/IBM/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+   ([#1125](https://github.com/IBM/sarama/pull/1125)).
+
+## Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+   ([#1044](https://github.com/IBM/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+   ([#1047](https://github.com/IBM/sarama/pull/1047),
+    [#1069](https://github.com/IBM/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+   ([#1098](https://github.com/IBM/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+   ([#1065](https://github.com/IBM/sarama/pull/1065),
+    [#1096](https://github.com/IBM/sarama/pull/1096),
+    [#1027](https://github.com/IBM/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+   ([#1063](https://github.com/IBM/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+   ([#1010](https://github.com/IBM/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+   ([#1049](https://github.com/IBM/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+   ([#1052](https://github.com/IBM/sarama/pull/1052)).
+ - Add support for Go 1.10
+   ([#1064](https://github.com/IBM/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+   ([#1073](https://github.com/IBM/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+   ([#1050](https://github.com/IBM/sarama/pull/1050),
+    [#1051](https://github.com/IBM/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+   ([#1033](https://github.com/IBM/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+   ([#1092](https://github.com/IBM/sarama/pull/1092)).
+
+## Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+   ([#1007](https://github.com/IBM/sarama/pull/1007),
+    [#1008](https://github.com/IBM/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+   ([#1009](https://github.com/IBM/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+   ([#1016](https://github.com/IBM/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+   ([#999](https://github.com/IBM/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+   ([#1019](https://github.com/IBM/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+   which results in much higher throughput in most cases
+   ([#1024](https://github.com/IBM/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+   reduce CPU and memory usage when processing many partitions
+   ([#1028](https://github.com/IBM/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+   recompression pass
+   ([#1002](https://github.com/IBM/sarama/pull/1002),
+    [#1015](https://github.com/IBM/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+   ([#1032](https://github.com/IBM/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+   ([#1005](https://github.com/IBM/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+   ([#1021](https://github.com/IBM/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+   ([#1022](https://github.com/IBM/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+   truncated before the magic value indicating its version
+   ([#1030](https://github.com/IBM/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+   ([#1035](https://github.com/IBM/sarama/pull/1035)).
+
+## Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+   ([#984](https://github.com/IBM/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+   ([#989](https://github.com/IBM/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+   ([#985](https://github.com/IBM/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+   ([#986](https://github.com/IBM/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+   ([#982](https://github.com/IBM/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+   ([#990](https://github.com/IBM/sarama/pull/990)).
+ - Fix producing with multiple headers
+   ([#996](https://github.com/IBM/sarama/pull/996)).
+ - Fix handling of truncated record batches
+   ([#998](https://github.com/IBM/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+   ([#991](https://github.com/IBM/sarama/pull/991)).
+
+## Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+   protocol and the necessary behavioural changes in the producer and consumer.
+   Transactions and idempotency are not yet supported, but producing and
+   consuming should work with all the existing bells and whistles (batching,
+   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+   of Arista Networks for this work. Part of
+   ([#901](https://github.com/IBM/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+   ([#970](https://github.com/IBM/sarama/pull/970)).
+ - Return partial replicas list when we have it
+   ([#975](https://github.com/IBM/sarama/pull/975)).
+
+## Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+   ([#905](https://github.com/IBM/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+   ([#939](https://github.com/IBM/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+   extremely large clusters
+   ([#937](https://github.com/IBM/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+   ([#932](https://github.com/IBM/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+   ([#885](https://github.com/IBM/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+   by the broker, which can be meaningful
+   ([#930](https://github.com/IBM/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+   variance in the actual timeout
+   ([#933](https://github.com/IBM/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+   ([#907](https://github.com/IBM/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+   ([#940](https://github.com/IBM/sarama/pull/940)).
+
+## Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+   version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note
+   that you still need to specify the Kafka version in the Sarama configuration
+   for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+   active brokers ([#813](https://github.com/IBM/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+   in-sync broker IDs for the given partition, now that the Kafka versions for
+   which this was misleading are no longer in our supported set
+   ([#872](https://github.com/IBM/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+   partitioner with a custom hash method in case the default (FNV-1a) is not
+   suitable
+   ([#837](https://github.com/IBM/sarama/pull/837),
+    [#841](https://github.com/IBM/sarama/pull/841)).
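+
+A minimal, hypothetical sketch of the new partitioner hook; the broker
+address and the choice of CRC32 are assumptions for illustration only:
+
+```go
+package main
+
+import (
+	"hash/crc32"
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	// Swap the default FNV-1a hash for CRC32 when partitioning keyed messages.
+	config := sarama.NewConfig()
+	config.Producer.Partitioner = sarama.NewCustomHashPartitioner(crc32.NewIEEE)
+	config.Producer.Return.Successes = true
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer producer.Close()
+}
+```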
+
+Improvements:
+ - Recognize more Kafka error codes
+   ([#859](https://github.com/IBM/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+   correct error ([#818](https://github.com/IBM/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+   transparent if you're using the `AddGroupProtocol` or
+   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+   ([#812](https://github.com/IBM/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+   ([#859](https://github.com/IBM/sarama/pull/859)).
+
+## Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
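+
+A minimal sketch of the required setting; the broker address and topic name
+are assumptions:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	// Required as of Sarama 1.11; the SyncProducer reads from the Successes
+	// channel internally and will not work without it.
+	config.Producer.Return.Successes = true
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "test",
+		Value: sarama.StringEncoder("hello"),
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("delivered to partition %d at offset %d", partition, offset)
+}
+```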
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+   ([#701](https://github.com/IBM/sarama/pull/701),
+    [#746](https://github.com/IBM/sarama/pull/746),
+    [#766](https://github.com/IBM/sarama/pull/766)).
+ - Add support for LZ4 compression
+   ([#786](https://github.com/IBM/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+   ([#775](https://github.com/IBM/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+   `HighWaterMarkOffset` values of its child topic/partitions
+   ([#769](https://github.com/IBM/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+   ([#759](https://github.com/IBM/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+   ([#756](https://github.com/IBM/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+   ([#787](https://github.com/IBM/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+   ([#790](https://github.com/IBM/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+   ([#795](https://github.com/IBM/sarama/pull/795)).
+
+## Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+   ([#717](https://github.com/IBM/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+   ([#722](https://github.com/IBM/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+   in v1.10.0. Go's timers are finicky
+   ([#730](https://github.com/IBM/sarama/pull/730),
+    [#733](https://github.com/IBM/sarama/pull/733),
+    [#734](https://github.com/IBM/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+   ([#735](https://github.com/IBM/sarama/pull/735)).
+
+## Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and
+[#713](https://github.com/IBM/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
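+
+A sketch combining both notes, assuming a broker at localhost:9092, a "test"
+topic, and a consumer group named "my-group":
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	// Declare the Kafka version in use; it defaults to 0.8.2, and newer
+	// features such as the offset manager fail without this.
+	config.Version = sarama.V0_10_0_0
+
+	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
+	if err != nil {
+		log.Fatal(err)
+	}
+	pom, err := om.ManagePartition("test", 0)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer pom.Close()
+
+	// Commit one *greater* than the last consumed offset, matching the
+	// upstream Java consumer and avoiding duplicate consumption.
+	var lastConsumed int64 = 41 // assumed last processed message offset
+	pom.MarkOffset(lastConsumed+1, "")
+}
+```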
+
+New Features:
+ - Support for Kafka 0.10
+   ([#672](https://github.com/IBM/sarama/pull/672),
+    [#678](https://github.com/IBM/sarama/pull/678),
+    [#681](https://github.com/IBM/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+   ([#676](https://github.com/IBM/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+   ([#677](https://github.com/IBM/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+   ([#667](https://github.com/IBM/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+   ([#634](https://github.com/IBM/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+   misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+   ([#707](https://github.com/IBM/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+   ([#664](https://github.com/IBM/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+   ([#685](https://github.com/IBM/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+   ([#693](https://github.com/IBM/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+   ([#705](https://github.com/IBM/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+   ([#706](https://github.com/IBM/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+   ([#709](https://github.com/IBM/sarama/pull/709)).
+
+## Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+   ([#602](https://github.com/IBM/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+   implementations ([#570](https://github.com/IBM/sarama/pull/570)).
+ - Declare support for Golang 1.6
+   ([#611](https://github.com/IBM/sarama/pull/611)).
+ - Support for SASL plain-text auth
+   ([#648](https://github.com/IBM/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+   ([#604](https://github.com/IBM/sarama/pull/604)).
+ - Documentation cleanup
+   ([#605](https://github.com/IBM/sarama/pull/605),
+    [#621](https://github.com/IBM/sarama/pull/621),
+    [#654](https://github.com/IBM/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+   ([#658](https://github.com/IBM/sarama/pull/658)).
+
+## Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+   - All protocol messages and fields
+   ([#586](https://github.com/IBM/sarama/pull/586),
+   [#588](https://github.com/IBM/sarama/pull/588),
+   [#590](https://github.com/IBM/sarama/pull/590)).
+   - Verified that TLS support works
+   ([#581](https://github.com/IBM/sarama/pull/581)).
+   - Fixed the OffsetManager compatibility
+   ([#585](https://github.com/IBM/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+   ([#584](https://github.com/IBM/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+   ([#589](https://github.com/IBM/sarama/pull/589)).
+
+## Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+   ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several
+   caveats:
+   - Protocol-layer support is mostly in place
+     ([#577](https://github.com/IBM/sarama/pull/577)), however Kafka 0.9
+     renamed some messages and fields, which we chose not to mirror in order to
+     preserve API compatibility.
+   - The producer and consumer work against 0.9, but the offset manager does
+     not ([#573](https://github.com/IBM/sarama/pull/573)).
+   - TLS support may or may not work
+     ([#581](https://github.com/IBM/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+   when the TCP connection is left hanging
+   ([#548](https://github.com/IBM/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+   solution to [#449](https://github.com/IBM/sarama/pull/449). It is also
+   slightly more efficient, and much more precise in calculating batch sizes
+   when compression is used
+   ([#549](https://github.com/IBM/sarama/pull/549),
+   [#550](https://github.com/IBM/sarama/pull/550),
+   [#551](https://github.com/IBM/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+   ([#553](https://github.com/IBM/sarama/pull/553)).
+
+## Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+   ([#449](https://github.com/IBM/sarama/pull/449)).
+
+## Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+   Kafka 0.8.2. The API is designed mainly for integration into a future
+   high-level consumer, not for direct use, although it is *possible* to use it
+   directly.
+   ([#461](https://github.com/IBM/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+   removing a major hotspot from most profiles
+   ([#255](https://github.com/IBM/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+   by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523),
+   [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways
+   ([#528](https://github.com/IBM/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+   ([#529](https://github.com/IBM/sarama/pull/529)).
+
+## Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+   to change when Kafka releases built-in TLS support, but for now this is
+   enough to work with TLS-terminating proxies
+   ([#154](https://github.com/IBM/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+   all other partitions will continue to consume normally
+   ([#485](https://github.com/IBM/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+   ([#495](https://github.com/IBM/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+   future work ([#300](https://github.com/IBM/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+   ([#475](https://github.com/IBM/sarama/pull/475)).
+
+## Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+   circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+   the producer ([#468](https://github.com/IBM/sarama/pull/468)).
+ - Update the import path for snappy-go; it has moved again and the API has
+   changed slightly ([#486](https://github.com/IBM/sarama/pull/486)).
+
+## Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go; it has moved from google code to github
+   ([#456](https://github.com/IBM/sarama/pull/456)).
+
+## Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+   ([#446](https://github.com/IBM/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+   ([#450](https://github.com/IBM/sarama/pull/450),
+   [#451](https://github.com/IBM/sarama/pull/451)).
+
+## Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+   users to dynamically choose what topics/partitions to consume without
+   instantiating a full client; see the sketch after this list
+   ([#431](https://github.com/IBM/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+   by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+   ([#439](https://github.com/IBM/sarama/pull/439),
+   [#442](https://github.com/IBM/sarama/pull/442)).
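+
+As referenced above, a minimal sketch of discovering topics and partitions
+straight from a consumer; the broker address is an assumption:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer consumer.Close()
+
+	topics, err := consumer.Topics()
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, topic := range topics {
+		partitions, err := consumer.Partitions(topic)
+		if err != nil {
+			log.Fatal(err)
+		}
+		log.Printf("topic %s has partitions %v", topic, partitions)
+	}
+}
+```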
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+   useful, and slightly less verbose
+   ([#429](https://github.com/IBM/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+   thundering herd on the first broker in the list
+   ([#441](https://github.com/IBM/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+   shutting down, fixing several instances of confusing behaviour and at least
+   one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+   making it much more resilient to specific user code ordering
+   ([#325](https://github.com/IBM/sarama/pull/325)).
+
+## Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+   ConsumerMetadataRequests similar to how it tracks partition leadership using
+   regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)).
+   This adds two methods to the client API:
+   - `Coordinator(consumerGroup string) (*Broker, error)`
+   - `RefreshCoordinator(consumerGroup string) error`
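+
+A minimal usage sketch of the new methods; the broker address and group name
+are assumptions:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer client.Close()
+
+	coordinator, err := client.Coordinator("my-group")
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("group coordinator: broker %d at %s", coordinator.ID(), coordinator.Addr())
+}
+```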
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+   ID/address/port combination for the Coordinator; accessing the fields
+   individually has been deprecated
+   ([#413](https://github.com/IBM/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+   Consumers will fail to start if the provided offset is out of range
+   ([#418](https://github.com/IBM/sarama/pull/418))
+   and they will automatically shut down if the offset falls out of range
+   ([#424](https://github.com/IBM/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+   ([#427](https://github.com/IBM/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+   it happens to be activated while the client is being closed
+   ([#422](https://github.com/IBM/sarama/pull/422)).
+
+## Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+   ([#389](https://github.com/IBM/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+   messages due to an improved queue implementation
+   ([#396](https://github.com/IBM/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+   changes ([#385](https://github.com/IBM/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+   retry once in the event of stale information or similar
+   ([#394](https://github.com/IBM/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+   ([#407](https://github.com/IBM/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+   API versions ([#390](https://github.com/IBM/sarama/pull/390),
+   [#400](https://github.com/IBM/sarama/pull/400)).
+
+## Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+   broken topics don't choke throughput
+   ([#373](https://github.com/IBM/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+   ([#367](https://github.com/IBM/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+   ([#369](https://github.com/IBM/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+   gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+   metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)).
+
+
+## Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences from previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package (see the sketch after this list).
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
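+
+As referenced above, a short sketch of testing against the mocks package;
+the topic name is an assumption:
+
+```go
+package main
+
+import (
+	"testing"
+
+	"github.com/IBM/sarama"
+	"github.com/IBM/sarama/mocks"
+)
+
+func TestPublish(t *testing.T) {
+	// The mock satisfies the sarama.SyncProducer interface, so code under
+	// test can receive it through dependency injection.
+	producer := mocks.NewSyncProducer(t, nil)
+	producer.ExpectSendMessageAndSucceed()
+
+	msg := &sarama.ProducerMessage{Topic: "test", Value: sarama.StringEncoder("hello")}
+	if _, _, err := producer.SendMessage(msg); err != nil {
+		t.Fatal(err)
+	}
+	if err := producer.Close(); err != nil {
+		t.Fatal(err)
+	}
+}
+```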
diff --git a/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..8470ec5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+  and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+  overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+  advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+  address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+  professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+dominic.evans@uk.ibm.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
new file mode 100644
index 0000000..bb88127
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+[fork]: https://github.com/IBM/sarama/fork
+[pr]: https://github.com/IBM/sarama/compare
+[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license
+
+Hi there! We are thrilled that you would like to contribute to Sarama.
+Contributions are always welcome, both reporting issues and submitting pull requests!
+
+## Reporting issues
+
+Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
+
+- What SHA of Sarama are you running? If this is not the latest SHA on the main branch, please check whether the problem persists with the latest version.
+- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output. Please include it in your issue description.
+- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
+
+Also, please include the following information about your environment, so we can help you faster:
+
+- What version of Kafka are you using?
+- What version of Go are you using?
+- What are the values of your Producer/Consumer/Client configuration?
+
+
+## Contributing a change
+
+Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md).
+By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO).
+The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes.
+
+Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author:
+
+```
+feat: this is my commit message
+
+Signed-off-by: Random J Developer <random@developer.example.org>
+```
+
+Git even has a `-s` command line option to append this automatically to your
+commit message:
+
+```
+$ git commit -s -m 'This is my commit message'
+```
+
+Because this library is in production use by many people and applications, we code review all additions.
+To make the review process go as smoothly as possible, please consider the following.
+
+- If you plan to work on something major, please open an issue to discuss the design first.
+- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
+- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
+- Run [go vet](https://golang.org/cmd/vet/) to detect any suspicious constructs in your code that could be bugs.
+- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
+- You may also want to run [golint](https://github.com/golang/lint) to detect style problems.
+- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
+- Make sure your code is supported by all the Go versions we support.
+  You can rely on GitHub Actions for testing older Go versions.
+
+## Submitting a pull request
+
+0. [Fork][fork] and clone the repository
+1. Create a new branch: `git checkout -b my-branch-name`
+2. Make your change, push to your fork and [submit a pull request][pr]
+3. Wait for your pull request to be reviewed and merged.
+
+Here are a few things you can do that will increase the likelihood of your pull request being accepted:
+
+- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
+- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
+
+## Further Reading
+
+- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/)
+- [The most powerful contributor agreement](https://lwn.net/Articles/592503/)
+- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
+- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
+- [GitHub Help](https://help.github.com)
diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka
new file mode 100644
index 0000000..bb18b6c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka
@@ -0,0 +1,55 @@
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6@sha256:7c5495d5fad59aaee12abc3cbbd2b283818ee1e814b00dbc7f25bf2d14fa4f0c
+
+USER root
+
+RUN microdnf update -y \
+ && microdnf install -y git gzip java-17-openjdk-headless tar tzdata-java \
+ && microdnf reinstall -y tzdata \
+ && microdnf clean all
+
+ENV JAVA_HOME=/usr/lib/jvm/jre-17
+
+# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html
+# Ensure Java doesn't cache any dns results
+RUN cd /etc/java/java-17-openjdk/*/conf/security \
+ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \
+ && echo 'networkaddress.cache.ttl=0' >> java.security \
+ && echo 'networkaddress.cache.negative.ttl=0' >> java.security
+
+ARG SCALA_VERSION="2.13"
+ARG KAFKA_VERSION="3.9.1"
+
+WORKDIR /tmp
+
+# https://github.com/apache/kafka/blob/2e2b0a58eda3e677763af974a44a6aaa3c280214/tests/docker/Dockerfile#L77-L105
+ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages"
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN --mount=type=bind,target=.,rw=true \
+    mkdir -p "/opt/kafka-${KAFKA_VERSION}" \
+ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \
+ && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}"
+
+# older kafka versions depend upon jaxb-api being bundled with the JDK, but it
+# was removed from Java 11 so work around that by including it in the kafka
+# libs dir regardless
+RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \
+ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \
+ && rm -f jaxb-api-2.3.0.jar
+
+# older kafka versions with the zookeeper 3.4.13/3.4.14 client aren't compatible with Java 17 so quietly bump them to 3.5.9
+RUN if ! stat /opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.*.jar; then exit 0; fi ; \
+    rm -f /opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.*.jar \
+ && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.9/zookeeper-3.5.9.jar" \
+ && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-jute-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.9/zookeeper-jute-3.5.9.jar"
+
+WORKDIR /opt/kafka-${KAFKA_VERSION}
+
+ENV JAVA_MAJOR_VERSION=17
+
+RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh
+
+COPY entrypoint.sh /
+
+USER 65534:65534
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/vendor/github.com/IBM/sarama/LICENSE.md b/vendor/github.com/IBM/sarama/LICENSE.md
new file mode 100644
index 0000000..f8f64d4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/LICENSE.md
@@ -0,0 +1,24 @@
+# MIT License
+
+Copyright (c) 2013 Shopify
+
+Copyright (c) 2023 IBM Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/IBM/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile
new file mode 100644
index 0000000..c41445c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/Makefile
@@ -0,0 +1,47 @@
+default: fmt get update test lint
+
+GO       := go
+GOBIN    := $(shell pwd)/bin
+GOBUILD  := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
+
+GOTESTSUM         := $(GOBIN)/gotestsum
+GOTESTSUM_VERSION := v1.13.0
+$(GOTESTSUM):
+	GOBIN=$(GOBIN) go install gotest.tools/gotestsum@$(GOTESTSUM_VERSION)
+
+TESTSTAT         := $(GOBIN)/teststat
+TESTSTAT_VERSION := v0.1.26
+$(TESTSTAT):
+	GOBIN=$(GOBIN) go install github.com/vearutop/teststat@$(TESTSTAT_VERSION)
+
+FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
+TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
+
+get:
+	$(GO) get ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+update:
+	$(GO) get -u -v ./...
+	$(GO) mod verify
+	$(GO) mod tidy
+
+fmt:
+	gofmt -s -l -w $(FILES) $(TESTS)
+
+lint:
+	GOFLAGS="-tags=functional" golangci-lint run
+
+test: $(GOTESTSUM) $(TESTSTAT)
+	@$(GOTESTSUM) $(if ${CI},--format github-actions,--format testdox) --jsonfile _test/unittests.json --junitfile _test/unittests.xml \
+		--rerun-fails --packages="./..." \
+		-- -v -race -coverprofile=profile.out -covermode=atomic -timeout 2m
+	@$(TESTSTAT) _test/unittests.json
+
+.PHONY: test_functional
+test_functional: $(GOTESTSUM) $(TESTSTAT)
+	@$(GOTESTSUM) $(if ${CI},--format github-actions,--format testdox) --jsonfile _test/fvt.json --junitfile _test/fvt.xml \
+		--rerun-fails --packages="./..." \
+		-- -v -race -coverprofile=profile.out -covermode=atomic -timeout 15m -tags=functional
+	@$(TESTSTAT) _test/fvt.json
diff --git a/vendor/github.com/IBM/sarama/README.md b/vendor/github.com/IBM/sarama/README.md
new file mode 100644
index 0000000..4534d7b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/README.md
@@ -0,0 +1,35 @@
+# sarama
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/IBM/sarama.svg)](https://pkg.go.dev/github.com/IBM/sarama)
+[![OpenSSF Scorecard](https://api.securityscorecards.dev/projects/github.com/IBM/sarama/badge?style=flat)](https://securityscorecards.dev/viewer/?uri=github.com/IBM/sarama)
+[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/7996/badge)](https://www.bestpractices.dev/projects/7996)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/).
+
+## Getting started
+
+- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions).
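+
+As a quick orientation, here is a minimal sketch of a synchronous producer; the
+broker address and topic name below are placeholders for your own cluster:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	config.Producer.Return.Successes = true // SyncProducer requires this
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "my-topic",
+		Value: sarama.StringEncoder("hello"),
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Printf("delivered to partition %d at offset %d", partition, offset)
+}
+```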
+
+## Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two-month
+grace period for older releases. However, older releases of Kafka are still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the standard Go
+[module version numbering](https://go.dev/doc/modules/version-numbers) scheme.
+
+A changelog is available [here](CHANGELOG.md).
+
+## Contributing
+
+- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a Google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
diff --git a/vendor/github.com/IBM/sarama/SECURITY.md b/vendor/github.com/IBM/sarama/SECURITY.md
new file mode 100644
index 0000000..b2f6e61
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/SECURITY.md
@@ -0,0 +1,11 @@
+# Security
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+The easiest way to report a security issue is privately through GitHub [here](https://github.com/IBM/sarama/security/advisories/new).
+
+See [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) for full instructions.
+
+Alternatively, you can report them via e-mail or an anonymous form to the IBM Product Security Incident Response Team (PSIRT) following the guidelines under the [IBM Security Vulnerability Management](https://www.ibm.com/support/pages/ibm-security-vulnerability-management) pages.
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile
similarity index 100%
rename from vendor/github.com/Shopify/sarama/Vagrantfile
rename to vendor/github.com/IBM/sarama/Vagrantfile
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/acl_bindings.go
rename to vendor/github.com/IBM/sarama/acl_bindings.go
diff --git a/vendor/github.com/IBM/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go
new file mode 100644
index 0000000..187cee0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_create_request.go
@@ -0,0 +1,97 @@
+package sarama
+
+// CreateAclsRequest is an acl creation request
+type CreateAclsRequest struct {
+	Version      int16
+	AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+		return err
+	}
+
+	for _, aclCreation := range c.AclCreations {
+		if err := aclCreation.encode(pe, c.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreations = make([]*AclCreation, n)
+
+	for i := 0; i < n; i++ {
+		c.AclCreations[i] = new(AclCreation)
+		if err := c.AclCreations[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsRequest) key() int16 {
+	return apiKeyCreateAcls
+}
+
+func (c *CreateAclsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (c *CreateAclsRequest) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 1
+}
+
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+// AclCreation is a wrapper around the Resource and Acl types
+type AclCreation struct {
+	Resource
+	Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder, version int16) error {
+	if err := a.Resource.encode(pe, version); err != nil {
+		return err
+	}
+	if err := a.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+	if err := a.Resource.decode(pd, version); err != nil {
+		return err
+	}
+	if err := a.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go
new file mode 100644
index 0000000..514fc64
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_create_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import "time"
+
+// CreateAclsResponse is the response to a CreateAclsRequest
+type CreateAclsResponse struct {
+	Version              int16
+	ThrottleTime         time.Duration
+	AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(c.ThrottleTime)
+
+	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+		return err
+	}
+
+	for _, aclCreationResponse := range c.AclCreationResponses {
+		if err := aclCreationResponse.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.ThrottleTime, err = pd.getDurationMs()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.AclCreationResponses = make([]*AclCreationResponse, n)
+	for i := 0; i < n; i++ {
+		c.AclCreationResponses[i] = new(AclCreationResponse)
+		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreateAclsResponse) key() int16 {
+	return apiKeyCreateAcls
+}
+
+func (c *CreateAclsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (c *CreateAclsResponse) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 1
+}
+
+func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *CreateAclsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+// AclCreationResponse is an acl creation response type
+type AclCreationResponse struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+	pe.putKError(a.Err)
+
+	if err := pe.putNullableString(a.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+	a.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if a.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go
new file mode 100644
index 0000000..ff40740
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_delete_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+// DeleteAclsRequest is a delete acl request
+type DeleteAclsRequest struct {
+	Version int
+	Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) setVersion(v int16) {
+	d.Version = int(v)
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Filters)); err != nil {
+		return err
+	}
+
+	for _, filter := range d.Filters {
+		filter.Version = d.Version
+		if err := filter.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.Filters = make([]*AclFilter, n)
+	for i := 0; i < n; i++ {
+		d.Filters[i] = new(AclFilter)
+		d.Filters[i].Version = int(version)
+		if err := d.Filters[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+	return apiKeyDeleteAcls
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DeleteAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DeleteAclsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go
new file mode 100644
index 0000000..bc6637e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_delete_response.go
@@ -0,0 +1,176 @@
+package sarama
+
+import "time"
+
+// DeleteAclsResponse is a delete acl response
+type DeleteAclsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	FilterResponses []*FilterResponse
+}
+
+func (d *DeleteAclsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(d.ThrottleTime)
+
+	if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
+		return err
+	}
+
+	for _, filterResponse := range d.FilterResponses {
+		if err := filterResponse.encode(pe, d.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.FilterResponses = make([]*FilterResponse, n)
+
+	for i := 0; i < n; i++ {
+		d.FilterResponses[i] = new(FilterResponse)
+		if err := d.FilterResponses[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+	return apiKeyDeleteAcls
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DeleteAclsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *DeleteAclsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+// FilterResponse is a filter response type
+type FilterResponse struct {
+	Err          KError
+	ErrMsg       *string
+	MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
+	pe.putKError(f.Err)
+	if err := pe.putNullableString(f.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+		return err
+	}
+	for _, matchingAcl := range f.MatchingAcls {
+		if err := matchingAcl.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+	f.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if f.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	f.MatchingAcls = make([]*MatchingAcl, n)
+	for i := 0; i < n; i++ {
+		f.MatchingAcls[i] = new(MatchingAcl)
+		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// MatchingAcl is a matching acl type
+type MatchingAcl struct {
+	Err    KError
+	ErrMsg *string
+	Resource
+	Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
+	pe.putKError(m.Err)
+	if err := pe.putNullableString(m.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := m.Resource.encode(pe, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+	m.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if m.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if err := m.Resource.decode(pd, version); err != nil {
+		return err
+	}
+
+	if err := m.Acl.decode(pd, version); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go
new file mode 100644
index 0000000..6eb218f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_describe_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+// DescribeAclsRequest is a describe acl request type
+type DescribeAclsRequest struct {
+	Version int
+	AclFilter
+}
+
+func (d *DescribeAclsRequest) setVersion(v int16) {
+	d.Version = int(v)
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+	d.AclFilter.Version = d.Version
+	return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = int(version)
+	d.AclFilter.Version = int(version)
+	return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+	return apiKeyDescribeAcls
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+	return int16(d.Version)
+}
+
+func (d *DescribeAclsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DescribeAclsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go
new file mode 100644
index 0000000..c9e2552
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_describe_response.go
@@ -0,0 +1,100 @@
+package sarama
+
+import "time"
+
+// DescribeAclsResponse is a describe acl response type
+type DescribeAclsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(d.ThrottleTime)
+	pe.putKError(d.Err)
+
+	if err := pe.putNullableString(d.ErrMsg); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+		return err
+	}
+
+	for _, resourceAcl := range d.ResourceAcls {
+		if err := resourceAcl.encode(pe, d.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	d.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	errmsg, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	if errmsg != "" {
+		d.ErrMsg = &errmsg
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	d.ResourceAcls = make([]*ResourceAcls, n)
+
+	for i := 0; i < n; i++ {
+		d.ResourceAcls[i] = new(ResourceAcls)
+		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+	return apiKeyDescribeAcls
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DescribeAclsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DescribeAclsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *DescribeAclsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/acl_filter.go
rename to vendor/github.com/IBM/sarama/acl_filter.go
diff --git a/vendor/github.com/IBM/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go
new file mode 100644
index 0000000..62bb534
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_types.go
@@ -0,0 +1,238 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+)
+
+type (
+	AclOperation int
+
+	AclPermissionType int
+
+	AclResourceType int
+
+	AclResourcePatternType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+	AclOperationUnknown AclOperation = iota
+	AclOperationAny
+	AclOperationAll
+	AclOperationRead
+	AclOperationWrite
+	AclOperationCreate
+	AclOperationDelete
+	AclOperationAlter
+	AclOperationDescribe
+	AclOperationClusterAction
+	AclOperationDescribeConfigs
+	AclOperationAlterConfigs
+	AclOperationIdempotentWrite
+)
+
+func (a *AclOperation) String() string {
+	mapping := map[AclOperation]string{
+		AclOperationUnknown:         "Unknown",
+		AclOperationAny:             "Any",
+		AclOperationAll:             "All",
+		AclOperationRead:            "Read",
+		AclOperationWrite:           "Write",
+		AclOperationCreate:          "Create",
+		AclOperationDelete:          "Delete",
+		AclOperationAlter:           "Alter",
+		AclOperationDescribe:        "Describe",
+		AclOperationClusterAction:   "ClusterAction",
+		AclOperationDescribeConfigs: "DescribeConfigs",
+		AclOperationAlterConfigs:    "AlterConfigs",
+		AclOperationIdempotentWrite: "IdempotentWrite",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclOperationUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclOperation (name without prefix)
+func (a *AclOperation) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
+func (a *AclOperation) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclOperation{
+		"unknown":         AclOperationUnknown,
+		"any":             AclOperationAny,
+		"all":             AclOperationAll,
+		"read":            AclOperationRead,
+		"write":           AclOperationWrite,
+		"create":          AclOperationCreate,
+		"delete":          AclOperationDelete,
+		"alter":           AclOperationAlter,
+		"describe":        AclOperationDescribe,
+		"clusteraction":   AclOperationClusterAction,
+		"describeconfigs": AclOperationDescribeConfigs,
+		"alterconfigs":    AclOperationAlterConfigs,
+		"idempotentwrite": AclOperationIdempotentWrite,
+	}
+	ao, ok := mapping[normalized]
+	if !ok {
+		*a = AclOperationUnknown
+		return fmt.Errorf("no acl operation with name %s", normalized)
+	}
+	*a = ao
+	return nil
+}
+
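+// A round-trip sketch of the text encoding above (illustrative only):
+//
+//	var op AclOperation
+//	if err := op.UnmarshalText([]byte("read")); err != nil {
+//		// unknown names leave op as AclOperationUnknown
+//	}
+//	text, _ := op.MarshalText() // text == []byte("Read")
+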
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+	AclPermissionUnknown AclPermissionType = iota
+	AclPermissionAny
+	AclPermissionDeny
+	AclPermissionAllow
+)
+
+func (a *AclPermissionType) String() string {
+	mapping := map[AclPermissionType]string{
+		AclPermissionUnknown: "Unknown",
+		AclPermissionAny:     "Any",
+		AclPermissionDeny:    "Deny",
+		AclPermissionAllow:   "Allow",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPermissionUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclPermissionType (name without prefix)
+func (a *AclPermissionType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
+func (a *AclPermissionType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclPermissionType{
+		"unknown": AclPermissionUnknown,
+		"any":     AclPermissionAny,
+		"deny":    AclPermissionDeny,
+		"allow":   AclPermissionAllow,
+	}
+
+	apt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPermissionUnknown
+		return fmt.Errorf("no acl permission with name %s", normalized)
+	}
+	*a = apt
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+	AclResourceUnknown AclResourceType = iota
+	AclResourceAny
+	AclResourceTopic
+	AclResourceGroup
+	AclResourceCluster
+	AclResourceTransactionalID
+	AclResourceDelegationToken
+)
+
+func (a *AclResourceType) String() string {
+	mapping := map[AclResourceType]string{
+		AclResourceUnknown:         "Unknown",
+		AclResourceAny:             "Any",
+		AclResourceTopic:           "Topic",
+		AclResourceGroup:           "Group",
+		AclResourceCluster:         "Cluster",
+		AclResourceTransactionalID: "TransactionalID",
+		AclResourceDelegationToken: "DelegationToken",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclResourceUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourceType (name without prefix)
+func (a *AclResourceType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
+func (a *AclResourceType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourceType{
+		"unknown":         AclResourceUnknown,
+		"any":             AclResourceAny,
+		"topic":           AclResourceTopic,
+		"group":           AclResourceGroup,
+		"cluster":         AclResourceCluster,
+		"transactionalid": AclResourceTransactionalID,
+		"delegationtoken": AclResourceDelegationToken,
+	}
+
+	art, ok := mapping[normalized]
+	if !ok {
+		*a = AclResourceUnknown
+		return fmt.Errorf("no acl resource with name %s", normalized)
+	}
+	*a = art
+	return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+const (
+	AclPatternUnknown AclResourcePatternType = iota
+	AclPatternAny
+	AclPatternMatch
+	AclPatternLiteral
+	AclPatternPrefixed
+)
+
+func (a *AclResourcePatternType) String() string {
+	mapping := map[AclResourcePatternType]string{
+		AclPatternUnknown:  "Unknown",
+		AclPatternAny:      "Any",
+		AclPatternMatch:    "Match",
+		AclPatternLiteral:  "Literal",
+		AclPatternPrefixed: "Prefixed",
+	}
+	s, ok := mapping[*a]
+	if !ok {
+		s = mapping[AclPatternUnknown]
+	}
+	return s
+}
+
+// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
+func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
+	return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
+func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
+	normalized := strings.ToLower(string(text))
+	mapping := map[string]AclResourcePatternType{
+		"unknown":  AclPatternUnknown,
+		"any":      AclPatternAny,
+		"match":    AclPatternMatch,
+		"literal":  AclPatternLiteral,
+		"prefixed": AclPatternPrefixed,
+	}
+
+	arpt, ok := mapping[normalized]
+	if !ok {
+		*a = AclPatternUnknown
+		return fmt.Errorf("no acl resource pattern with name %s", normalized)
+	}
+	*a = arpt
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 0000000..4361abb
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,75 @@
+package sarama
+
+// AddOffsetsToTxnRequest is a request to add offsets to a transaction
+type AddOffsetsToTxnRequest struct {
+	Version         int16
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	GroupID         string
+}
+
+func (a *AddOffsetsToTxnRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putString(a.GroupID); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+	return apiKeyAddOffsetsToTxn
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddOffsetsToTxnRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_7_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 0000000..17858a1
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,68 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (a *AddOffsetsToTxnResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+	pe.putKError(a.Err)
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	a.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+	return apiKeyAddOffsetsToTxn
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddOffsetsToTxnResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_7_0_0
+	}
+}
+
+func (r *AddOffsetsToTxnResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 0000000..492023b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,97 @@
+package sarama
+
+// AddPartitionsToTxnRequest is a request to add partitions to a transaction
+type AddPartitionsToTxnRequest struct {
+	Version         int16
+	TransactionalID string
+	ProducerID      int64
+	ProducerEpoch   int16
+	TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt64(a.ProducerID)
+	pe.putInt16(a.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+		return err
+	}
+	for topic, partitions := range a.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.TopicPartitions = make(map[string][]int32)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		a.TopicPartitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+	return apiKeyAddPartitionsToTxn
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AddPartitionsToTxnRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 0000000..0384263
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,132 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AddPartitionsToTxnResponse is the response to an AddPartitionsToTxnRequest, carrying any per-partition errors
+type AddPartitionsToTxnResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Errors       map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+	if err := pe.putArrayLength(len(a.Errors)); err != nil {
+		return err
+	}
+
+	for topic, e := range a.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	a.Version = version
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Errors = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		a.Errors[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			a.Errors[topic][j] = new(PartitionError)
+			if err := a.Errors[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+	return apiKeyAddPartitionsToTxn
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AddPartitionsToTxnResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *AddPartitionsToTxnResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+// PartitionError is a partition error type
+type PartitionError struct {
+	Partition int32
+	Err       KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+	pe.putInt32(p.Partition)
+	pe.putKError(p.Err)
+	return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	p.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go
new file mode 100644
index 0000000..dda0201
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/admin.go
@@ -0,0 +1,1395 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"maps"
+	"math/rand"
+	"strconv"
+	"sync"
+	"time"
+)
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks
+type ClusterAdmin interface {
+	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+	// It may take several seconds after CreateTopic returns success for all the brokers
+	// to become aware that the topic has been created. During this time, ListTopics
+	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+	// List the topics available in the cluster with the default options.
+	ListTopics() (map[string]TopicDetail, error)
+
+	// Describe some topics in the cluster.
+	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+	// Delete a topic. It may take several seconds after DeleteTopic returns success
+	// for all the brokers to become aware that the topic is gone.
+	// During this time, ListTopics may continue to return information about the deleted topic.
+	// If delete.topic.enable is false on the brokers, DeleteTopic will mark
+	// the topic for deletion, but not actually delete it.
+	// This operation is supported by brokers with version 0.10.1.0 or higher.
+	DeleteTopic(topic string) error
+
+	// Increase the number of partitions of the topic according to the corresponding values.
+	// If partitions are increased for a topic that has a key, the partition logic or ordering of
+	// the messages will be affected. It may take several seconds after this method returns
+	// success for all the brokers to become aware that the partitions have been created.
+	// During this time, ClusterAdmin#DescribeTopics may not return information about the
+	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+	// Alter the replica assignment for partitions.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+	// Provides info on ongoing partition replica reassignments.
+	// This operation is supported by brokers with version 2.4.0.0 or higher.
+	ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
+	// Delete records whose offset is smaller than the given offset of the corresponding partition.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+	// Get the configuration for the specified resources.
+	// The returned configuration includes default values, and the Default field
+	// (when true) can be used to distinguish them from user-supplied values.
+	// Config entries where ReadOnly is true cannot be updated.
+	// The value of config entries where Sensitive is true is always nil so
+	// sensitive information is not disclosed.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+	// Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	// The resources with their configs (topic is the only resource type with configs
+	// that can be updated currently) are updated in a single request. Updates are not transactional,
+	// so they may succeed for some resources while failing for others. The configs for a particular resource are updated atomically.
+	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+	// IncrementalAlterConfig Incrementally Update the configuration for the specified resources with the default options.
+	// This operation is supported by brokers with version 2.3.0.0 or higher.
+	// Updates are not transactional, so they may succeed for some resources while failing for others.
+	// The configs for a particular resource are updated atomically.
+	IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error
+
+	// Creates an access control list (ACL) which is bound to a specific resource.
+	// This operation is not transactional so it may succeed or fail.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	// Deprecated: Use CreateACLs instead.
+	CreateACL(resource Resource, acl Acl) error
+
+	// Creates access control lists (ACLs) which are bound to specific resources.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+	CreateACLs([]*ResourceAcls) error
+
+	// Lists access control lists (ACLs) according to the supplied filter.
+	// It may take some time for changes made by CreateACLs or DeleteACL to be reflected in the output of ListAcls.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+	// Deletes access control lists (ACLs) according to the supplied filters.
+	// This operation is not transactional so it may succeed for some ACLs while fail for others.
+	// This operation is supported by brokers with version 0.11.0.0 or higher.
+	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+	// ElectLeaders triggers the election of preferred leaders for a set of partitions.
+	ElectLeaders(ElectionType, map[string][]int32) (map[string]map[int32]*PartitionResult, error)
+
+	// List the consumer groups available in the cluster.
+	ListConsumerGroups() (map[string]string, error)
+
+	// Describe the given consumer groups.
+	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
+
+	// List the consumer group offsets available in the cluster.
+	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
+
+	// Deletes a consumer group offset
+	DeleteConsumerGroupOffset(group string, topic string, partition int32) error
+
+	// Delete a consumer group.
+	DeleteConsumerGroup(group string) error
+
+	// Get information about the nodes in the cluster
+	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
+
+	// Get information about all log directories on the given set of brokers
+	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
+	// Get information about SCRAM users
+	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
+
+	// Delete SCRAM users
+	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
+
+	// Upsert SCRAM users
+	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
+
+	// Get client quota configurations corresponding to the specified filter.
+	// This operation is supported by brokers with version 2.6.0.0 or higher.
+	DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error)
+
+	// Alters client quota configurations with the specified alterations.
+	// This operation is supported by brokers with version 2.6.0.0 or higher.
+	AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error
+
+	// Controller returns the cluster controller broker. It will return a
+	// locally cached value if it's available.
+	Controller() (*Broker, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available.
+	Coordinator(group string) (*Broker, error)
+
+	// Remove members from the consumer group by the given member identities.
+	// This operation is supported by brokers with version 2.3.0.0 or higher.
+	// This supports the static membership feature (KIP-345).
+	RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error)
+
+	// Close shuts down the admin and closes the underlying client.
+	Close() error
+}
+
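+// An illustrative sketch of typical ClusterAdmin usage from a caller's
+// perspective (the broker address, topic name and sizing are placeholders):
+//
+//	config := sarama.NewConfig()
+//	config.Version = sarama.V2_0_0_0
+//	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		log.Fatalln(err)
+//	}
+//	defer admin.Close()
+//	err = admin.CreateTopic("my-topic", &sarama.TopicDetail{
+//		NumPartitions:     1,
+//		ReplicationFactor: 1,
+//	}, false)
+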
+type clusterAdmin struct {
+	client Client
+	conf   *Config
+}
+
+// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
+func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	admin, err := NewClusterAdminFromClient(client)
+	if err != nil {
+		client.Close()
+	}
+	return admin, err
+}
+
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that the underlying client will also be closed on the admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+	// make sure we can retrieve the controller
+	_, err := client.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	ca := &clusterAdmin{
+		client: client,
+		conf:   client.Config(),
+	}
+	return ca, nil
+}
+
+func (ca *clusterAdmin) Close() error {
+	return ca.client.Close()
+}
+
+func (ca *clusterAdmin) Controller() (*Broker, error) {
+	return ca.client.Controller()
+}
+
+func (ca *clusterAdmin) Coordinator(group string) (*Broker, error) {
+	return ca.client.Coordinator(group)
+}
+
+func (ca *clusterAdmin) refreshController() (*Broker, error) {
+	return ca.client.RefreshController()
+}
+
+// isRetriableControllerError returns `true` if the given error type unwraps to
+// an `ErrNotController` or `EOF` response from Kafka
+func isRetriableControllerError(err error) bool {
+	return errors.Is(err, ErrNotController) || errors.Is(err, io.EOF)
+}
+
+// isRetriableGroupCoordinatorError returns `true` if the given error type
+// unwraps to an `ErrNotCoordinatorForConsumer`,
+// `ErrConsumerCoordinatorNotAvailable` or `EOF` response from Kafka
+func isRetriableGroupCoordinatorError(err error) bool {
+	return errors.Is(err, ErrNotCoordinatorForConsumer) || errors.Is(err, ErrConsumerCoordinatorNotAvailable) || errors.Is(err, io.EOF)
+}
+
+// retryOnError will repeatedly call the given (error-returning) func in the
+// case that its response is non-nil and retryable (as determined by the
+// provided retryable func) up to the maximum number of tries permitted by
+// the admin client configuration
+func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
+	for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; {
+		err := fn()
+		attemptsRemaining--
+		if err == nil || attemptsRemaining <= 0 || !retryable(err) {
+			return err
+		}
+		Logger.Printf(
+			"admin/request retrying after %dms... (%d attempts remaining)\n",
+			ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining)
+		time.Sleep(ca.conf.Admin.Retry.Backoff)
+	}
+}
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	if detail == nil {
+		return errors.New("you must specify topic details")
+	}
+
+	topicDetails := map[string]*TopicDetail{
+		topic: detail,
+	}
+
+	request := NewCreateTopicsRequest(
+		ca.conf.Version,
+		topicDetails,
+		ca.conf.Admin.Timeout,
+		validateOnly,
+	)
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.CreateTopics(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(topicErr.Err, ErrNoError) {
+			if isRetriableControllerError(topicErr.Err) {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
+	var response *MetadataResponse
+	err = ca.retryOnError(isRetriableControllerError, func() error {
+		controller, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+		request := NewMetadataRequest(ca.conf.Version, topics)
+		response, err = controller.GetMetadata(request)
+		if isRetriableControllerError(err) {
+			_, _ = ca.refreshController()
+		}
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+	return response.Topics, nil
+}
+
+func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
+	var response *MetadataResponse
+	err = ca.retryOnError(isRetriableControllerError, func() error {
+		controller, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		request := NewMetadataRequest(ca.conf.Version, nil)
+		response, err = controller.GetMetadata(request)
+		if isRetriableControllerError(err) {
+			_, _ = ca.refreshController()
+		}
+		return err
+	})
+	if err != nil {
+		return nil, int32(0), err
+	}
+
+	return response.Brokers, response.ControllerID, nil
+}
+
+func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
+	brokers := ca.client.Brokers()
+	for _, b := range brokers {
+		if b.ID() == id {
+			return b, nil
+		}
+	}
+	return nil, fmt.Errorf("could not find broker id %d", id)
+}
+
+func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
+	brokers := ca.client.Brokers()
+	if len(brokers) > 0 {
+		index := rand.Intn(len(brokers))
+		return brokers[index], nil
+	}
+	return nil, errors.New("no available broker")
+}
+
+func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
+	// In order to build TopicDetails we need to first get the list of all
+	// topics using a MetadataRequest and then get their configs using a
+	// DescribeConfigsRequest. To avoid sending many requests to the
+	// broker, we use a single DescribeConfigsRequest.
+
+	// Send the all-topic MetadataRequest
+	b, err := ca.findAnyBroker()
+	if err != nil {
+		return nil, err
+	}
+	_ = b.Open(ca.client.Config())
+
+	metadataReq := NewMetadataRequest(ca.conf.Version, nil)
+	metadataResp, err := b.GetMetadata(metadataReq)
+	if err != nil {
+		return nil, err
+	}
+
+	topicsDetailsMap := make(map[string]TopicDetail, len(metadataResp.Topics))
+
+	var describeConfigsResources []*ConfigResource
+
+	for _, topic := range metadataResp.Topics {
+		topicDetails := TopicDetail{
+			NumPartitions: int32(len(topic.Partitions)),
+		}
+		if len(topic.Partitions) > 0 {
+			topicDetails.ReplicaAssignment = make(map[int32][]int32, len(topic.Partitions))
+			for _, partition := range topic.Partitions {
+				topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
+			}
+			topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
+		}
+		topicsDetailsMap[topic.Name] = topicDetails
+
+		// we populate the resources we want to describe from the MetadataResponse
+		topicResource := ConfigResource{
+			Type: TopicResource,
+			Name: topic.Name,
+		}
+		describeConfigsResources = append(describeConfigsResources, &topicResource)
+	}
+
+	// Send the DescribeConfigsRequest
+	describeConfigsReq := &DescribeConfigsRequest{
+		Resources: describeConfigsResources,
+	}
+
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		describeConfigsReq.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		describeConfigsReq.Version = 2
+	}
+
+	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, resource := range describeConfigsResp.Resources {
+		topicDetails := topicsDetailsMap[resource.Name]
+		topicDetails.ConfigEntries = make(map[string]*string)
+
+		for _, entry := range resource.Configs {
+			// only include non-default non-sensitive config
+			// (don't actually think topic config will ever be sensitive)
+			if entry.Default || entry.Sensitive {
+				continue
+			}
+			topicDetails.ConfigEntries[entry.Name] = &entry.Value
+		}
+
+		topicsDetailsMap[resource.Name] = topicDetails
+	}
+
+	return topicsDetailsMap, nil
+}
+
+func (ca *clusterAdmin) DeleteTopic(topic string) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &DeleteTopicsRequest{
+		Topics:  []string{topic},
+		Timeout: ca.conf.Admin.Timeout,
+	}
+
+	// Versions 0, 1, 2, and 3 are the same.
+	// Version 4 is the first flexible version.
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 4
+	} else if ca.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 3
+	} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.DeleteTopics(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicErrorCodes[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(topicErr, ErrNoError) {
+			if errors.Is(topicErr, ErrNotController) {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	topicPartitions := map[string]*TopicPartition{
+		topic: {
+			Count:      count,
+			Assignment: assignment,
+		},
+	}
+
+	request := &CreatePartitionsRequest{
+		TopicPartitions: topicPartitions,
+		Timeout:         ca.conf.Admin.Timeout,
+		ValidateOnly:    validateOnly,
+	}
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err := b.CreatePartitions(request)
+		if err != nil {
+			return err
+		}
+
+		topicErr, ok := rsp.TopicPartitionErrors[topic]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(topicErr.Err, ErrNoError) {
+			if errors.Is(topicErr.Err, ErrNotController) {
+				_, _ = ca.refreshController()
+			}
+			return topicErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+
+	request := &AlterPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	for i := 0; i < len(assignment); i++ {
+		request.AddBlock(topic, int32(i), assignment[i])
+	}
+
+	return ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		errs := make([]error, 0)
+
+		rsp, err := b.AlterPartitionReassignments(request)
+
+		if err != nil {
+			errs = append(errs, err)
+		} else {
+			if rsp.ErrorCode > 0 {
+				errs = append(errs, rsp.ErrorCode)
+			}
+
+			for topic, topicErrors := range rsp.Errors {
+				for partition, partitionError := range topicErrors {
+					if !errors.Is(partitionError.errorCode, ErrNoError) {
+						errs = append(errs, fmt.Errorf("[%s-%d]: %w", topic, partition, partitionError.errorCode))
+					}
+				}
+			}
+		}
+
+		if len(errs) > 0 {
+			return Wrap(ErrReassignPartitions, errs...)
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
+	if topic == "" {
+		return nil, ErrInvalidTopic
+	}
+
+	request := &ListPartitionReassignmentsRequest{
+		TimeoutMs: int32(60000),
+		Version:   int16(0),
+	}
+
+	request.AddBlock(topic, partitions)
+
+	var rsp *ListPartitionReassignmentsResponse
+	err = ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+		_ = b.Open(ca.client.Config())
+
+		rsp, err = b.ListPartitionReassignments(request)
+		if isRetriableControllerError(err) {
+			_, _ = ca.refreshController()
+		}
+		return err
+	})
+
+	if err == nil && rsp != nil {
+		return rsp.TopicStatus, nil
+	}
+	return nil, err
+}
+
+func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
+	if topic == "" {
+		return ErrInvalidTopic
+	}
+	errs := make([]error, 0)
+	partitionPerBroker := make(map[*Broker][]int32)
+	for partition := range partitionOffsets {
+		broker, err := ca.client.Leader(topic, partition)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+		partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
+	}
+	for broker, partitions := range partitionPerBroker {
+		recordsToDelete := make(map[int32]int64, len(partitions))
+		for _, p := range partitions {
+			recordsToDelete[p] = partitionOffsets[p]
+		}
+		topics := map[string]*DeleteRecordsRequestTopic{
+			topic: {
+				PartitionOffsets: recordsToDelete,
+			},
+		}
+		request := &DeleteRecordsRequest{
+			Topics:  topics,
+			Timeout: ca.conf.Admin.Timeout,
+		}
+		if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+			request.Version = 1
+		}
+		rsp, err := broker.DeleteRecords(request)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		}
+
+		deleteRecordsResponseTopic, ok := rsp.Topics[topic]
+		if !ok {
+			errs = append(errs, ErrIncompleteResponse)
+			continue
+		}
+
+		for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
+			if !errors.Is(deleteRecordsResponsePartition.Err, ErrNoError) {
+				errs = append(errs, deleteRecordsResponsePartition.Err)
+				continue
+			}
+		}
+	}
+	if len(errs) > 0 {
+		return Wrap(ErrDeleteRecords, errs...)
+	}
+	// TODO: since we may be dealing with multiple partitions, it would be
+	// better to return a slice of errors, one per partition, instead of a
+	// single error
+	return nil
+}
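+
+// Illustrative usage sketch (hypothetical topic and offsets): delete all
+// records before offset 1000 in partition 0 and before offset 2500 in
+// partition 1.
+//
+//	err := admin.DeleteRecords("my-topic", map[int32]int64{0: 1000, 1: 2500})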
+
+// dependsOnSpecificNode returns a bool indicating whether the resource
+// request needs to go to a specific broker
+func dependsOnSpecificNode(resource ConfigResource) bool {
+	return (resource.Type == BrokerResource && resource.Name != "") ||
+		resource.Type == BrokerLoggerResource
+}
+
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+	var entries []ConfigEntry
+	var resources []*ConfigResource
+	resources = append(resources, &resource)
+
+	request := &DescribeConfigsRequest{
+		Resources: resources,
+	}
+
+	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 1
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 2
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// DescribeConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(resource) {
+		var id int64
+		id, err = strconv.ParseInt(resource.Name, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	_ = b.Open(ca.client.Config())
+	rsp, err := b.DescribeConfigs(request)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == resource.Name {
+			if rspResource.ErrorCode != 0 {
+				return nil, &DescribeConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg}
+			}
+			for _, cfgEntry := range rspResource.Configs {
+				entries = append(entries, *cfgEntry)
+			}
+		}
+	}
+	return entries, nil
+}
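+
+// Illustrative usage sketch (hypothetical topic): fetch the effective
+// configuration entries of a topic.
+//
+//	entries, err := admin.DescribeConfig(ConfigResource{Type: TopicResource, Name: "my-topic"})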
+
+func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
+	var resources []*AlterConfigsResource
+	resources = append(resources, &AlterConfigsResource{
+		Type:          resourceType,
+		Name:          name,
+		ConfigEntries: entries,
+	})
+
+	request := &AlterConfigsRequest{
+		Resources:    resources,
+		ValidateOnly: validateOnly,
+	}
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// AlterConfig of broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+		var id int64
+		id, err = strconv.ParseInt(name, 10, 32)
+		if err != nil {
+			return err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
+	if err != nil {
+		return err
+	}
+
+	_ = b.Open(ca.client.Config())
+	rsp, err := b.AlterConfigs(request)
+	if err != nil {
+		return err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == name {
+			if rspResource.ErrorCode != 0 {
+				return &AlterConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg}
+			}
+		}
+	}
+	return nil
+}
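+
+// Illustrative usage sketch (hypothetical topic and value): set retention.ms
+// on a topic; note that config values are passed as *string, which is
+// nullable in the protocol.
+//
+//	retention := "3600000"
+//	err := admin.AlterConfig(TopicResource, "my-topic", map[string]*string{"retention.ms": &retention}, false)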
+
+func (ca *clusterAdmin) IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error {
+	var resources []*IncrementalAlterConfigsResource
+	resources = append(resources, &IncrementalAlterConfigsResource{
+		Type:          resourceType,
+		Name:          name,
+		ConfigEntries: entries,
+	})
+
+	request := &IncrementalAlterConfigsRequest{
+		Resources:    resources,
+		ValidateOnly: validateOnly,
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 1
+	}
+
+	var (
+		b   *Broker
+		err error
+	)
+
+	// IncrementalAlterConfig of a broker/broker logger must be sent to the broker in question
+	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+		var id int64
+		id, err = strconv.ParseInt(name, 10, 32)
+		if err != nil {
+			return err
+		}
+		b, err = ca.findBroker(int32(id))
+	} else {
+		b, err = ca.findAnyBroker()
+	}
+	if err != nil {
+		return err
+	}
+
+	_ = b.Open(ca.client.Config())
+	rsp, err := b.IncrementalAlterConfigs(request)
+	if err != nil {
+		return err
+	}
+
+	for _, rspResource := range rsp.Resources {
+		if rspResource.Name == name {
+			if rspResource.ErrorMsg != "" {
+				return errors.New(rspResource.ErrorMsg)
+			}
+			if rspResource.ErrorCode != 0 {
+				return KError(rspResource.ErrorCode)
+			}
+		}
+	}
+	return nil
+}
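+
+// Illustrative usage sketch (hypothetical topic and value): incrementally set
+// a single config key without replacing the resource's other entries, which
+// is the advantage over AlterConfig.
+//
+//	retention := "3600000"
+//	entries := map[string]IncrementalAlterConfigsEntry{
+//		"retention.ms": {Operation: IncrementalAlterConfigsOperationSet, Value: &retention},
+//	}
+//	err := admin.IncrementalAlterConfig(TopicResource, "my-topic", entries, false)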
+
+func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
+	var acls []*AclCreation
+	acls = append(acls, &AclCreation{resource, acl})
+	request := &CreateAclsRequest{AclCreations: acls}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	_, err = b.CreateAcls(request)
+	return err
+}
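+
+// Illustrative usage sketch (hypothetical principal and topic): allow a user
+// to read a topic from any host.
+//
+//	err := admin.CreateACL(
+//		Resource{ResourceType: AclResourceTopic, ResourceName: "my-topic", ResourcePatternType: AclPatternLiteral},
+//		Acl{Principal: "User:alice", Host: "*", Operation: AclOperationRead, PermissionType: AclPermissionAllow},
+//	)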
+
+func (ca *clusterAdmin) CreateACLs(resourceACLs []*ResourceAcls) error {
+	var acls []*AclCreation
+	for _, resourceACL := range resourceACLs {
+		for _, acl := range resourceACL.Acls {
+			acls = append(acls, &AclCreation{resourceACL.Resource, *acl})
+		}
+	}
+	request := &CreateAclsRequest{AclCreations: acls}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	_, err = b.CreateAcls(request)
+	return err
+}
+
+func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
+	request := &DescribeAclsRequest{AclFilter: filter}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var lAcls []ResourceAcls
+	for _, rAcl := range rsp.ResourceAcls {
+		lAcls = append(lAcls, *rAcl)
+	}
+	return lAcls, nil
+}
+
+func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
+	var filters []*AclFilter
+	filters = append(filters, &filter)
+	request := &DeleteAclsRequest{Filters: filters}
+
+	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DeleteAcls(request)
+	if err != nil {
+		return nil, err
+	}
+
+	var mAcls []MatchingAcl
+	for _, fr := range rsp.FilterResponses {
+		for _, mACL := range fr.MatchingAcls {
+			mAcls = append(mAcls, *mACL)
+		}
+	}
+	return mAcls, nil
+}
+
+func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[string][]int32) (map[string]map[int32]*PartitionResult, error) {
+	request := &ElectLeadersRequest{
+		Type:            electionType,
+		TopicPartitions: partitions,
+		TimeoutMs:       int32(60000),
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 1
+	}
+
+	var res *ElectLeadersResponse
+	if err := ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+		_ = b.Open(ca.client.Config())
+
+		res, err = b.ElectLeaders(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(res.ErrorCode, ErrNoError) {
+			if isRetriableControllerError(res.ErrorCode) {
+				_, _ = ca.refreshController()
+			}
+			return res.ErrorCode
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return res.ReplicaElectionResults, nil
+}
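+
+// Illustrative usage sketch (hypothetical topic): trigger a preferred-leader
+// election for partition 0 of a topic.
+//
+//	results, err := admin.ElectLeaders(PreferredElection, map[string][]int32{"my-topic": {0}})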
+
+func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
+	groupsPerBroker := make(map[*Broker][]string)
+
+	for _, group := range groups {
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return nil, err
+		}
+		groupsPerBroker[coordinator] = append(groupsPerBroker[coordinator], group)
+	}
+
+	for broker, brokerGroups := range groupsPerBroker {
+		describeReq := &DescribeGroupsRequest{
+			Groups: brokerGroups,
+		}
+
+		if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+			// Starting in version 4, the response will include group.instance.id info for members.
+			// Starting in version 5, the response uses flexible encoding.
+			describeReq.Version = 5
+		} else if ca.conf.Version.IsAtLeast(V2_3_0_0) {
+			// Starting in version 3, authorized operations can be requested.
+			describeReq.Version = 3
+		} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+			// Version 2 is the same as version 0.
+			describeReq.Version = 2
+		} else if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+			// Version 1 is the same as version 0.
+			describeReq.Version = 1
+		}
+		response, err := broker.DescribeGroups(describeReq)
+		if err != nil {
+			return nil, err
+		}
+
+		result = append(result, response.Groups...)
+	}
+	return result, nil
+}
+
+func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
+	allGroups = make(map[string]string)
+
+	// Query brokers in parallel, since we have to query *all* brokers
+	brokers := ca.client.Brokers()
+	groupMaps := make(chan map[string]string, len(brokers))
+	errChan := make(chan error, len(brokers))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokers {
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			request := &ListGroupsRequest{}
+			if ca.conf.Version.IsAtLeast(V3_8_0_0) {
+				// Version 5 adds the TypesFilter field (KIP-848).
+				request.Version = 5
+			} else if ca.conf.Version.IsAtLeast(V2_6_0_0) {
+				// Version 4 adds the StatesFilter field (KIP-518).
+				request.Version = 4
+			} else if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+				// Version 3 is the first flexible version.
+				request.Version = 3
+			} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+				// Version 2 is the same as version 0.
+				request.Version = 2
+			} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+				// Version 1 is the same as version 0.
+				request.Version = 1
+			}
+
+			response, err := b.ListGroups(request)
+			if err != nil {
+				errChan <- err
+				return
+			}
+
+			groupMaps <- maps.Clone(response.Groups)
+		}(b, ca.conf)
+	}
+
+	wg.Wait()
+	close(groupMaps)
+	close(errChan)
+
+	for groupMap := range groupMaps {
+		maps.Copy(allGroups, groupMap)
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
+func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
+	var response *OffsetFetchResponse
+	request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions)
+	err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.FetchOffset(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(response.Err, ErrNoError) {
+			return response.Err
+		}
+
+		return nil
+	})
+
+	return response, err
+}
+
+func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error {
+	var response *DeleteOffsetsResponse
+	request := &DeleteOffsetsRequest{
+		Group: group,
+		partitions: map[string][]int32{
+			topic: {partition},
+		},
+	}
+
+	return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.DeleteOffsets(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(response.ErrorCode, ErrNoError) {
+			return response.ErrorCode
+		}
+		if !errors.Is(response.Errors[topic][partition], ErrNoError) {
+			return response.Errors[topic][partition]
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
+	var response *DeleteGroupsResponse
+	request := &DeleteGroupsRequest{
+		Groups: []string{group},
+	}
+
+	if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		request.Version = 2
+	} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 1
+	}
+
+	return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.DeleteGroups(request)
+		if err != nil {
+			return err
+		}
+
+		groupErr, ok := response.GroupErrorCodes[group]
+		if !ok {
+			return ErrIncompleteResponse
+		}
+
+		if !errors.Is(groupErr, ErrNoError) {
+			return groupErr
+		}
+
+		return nil
+	})
+}
+
+func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
+	type result struct {
+		id      int32
+		logdirs []DescribeLogDirsResponseDirMetadata
+	}
+	// Query brokers in parallel, since we may have to query multiple brokers
+	logDirsResults := make(chan result, len(brokerIds))
+	errChan := make(chan error, len(brokerIds))
+	wg := sync.WaitGroup{}
+
+	for _, b := range brokerIds {
+		broker, err := ca.findBroker(b)
+		if err != nil {
+			Logger.Printf("Unable to find broker with ID = %v\n", b)
+			continue
+		}
+		wg.Add(1)
+		go func(b *Broker, conf *Config) {
+			defer wg.Done()
+			_ = b.Open(conf) // Ensure that broker is opened
+
+			request := &DescribeLogDirsRequest{}
+			if ca.conf.Version.IsAtLeast(V3_3_0_0) {
+				request.Version = 4
+			} else if ca.conf.Version.IsAtLeast(V3_2_0_0) {
+				request.Version = 3
+			} else if ca.conf.Version.IsAtLeast(V2_6_0_0) {
+				request.Version = 2
+			} else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+				request.Version = 1
+			}
+			response, err := b.DescribeLogDirs(request)
+			if err != nil {
+				errChan <- err
+				return
+			}
+			if !errors.Is(response.ErrorCode, ErrNoError) {
+				errChan <- response.ErrorCode
+				return
+			}
+			logDirsResults <- result{id: b.ID(), logdirs: response.LogDirs}
+		}(broker, ca.conf)
+	}
+
+	wg.Wait()
+	close(logDirsResults)
+	close(errChan)
+
+	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
+	for logDirsResult := range logDirsResults {
+		allLogDirs[logDirsResult.id] = logDirsResult.logdirs
+	}
+
+	// Intentionally return only the first error for simplicity
+	err = <-errChan
+	return
+}
+
+func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
+	req := &DescribeUserScramCredentialsRequest{}
+	for _, u := range users {
+		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
+			Name: u,
+		})
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeUserScramCredentials(req)
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(upsert, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	res, err := ca.AlterUserScramCredentials(nil, delete)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+	req := &AlterUserScramCredentialsRequest{
+		Deletions:  d,
+		Upsertions: u,
+	}
+
+	var rsp *AlterUserScramCredentialsResponse
+	err := ca.retryOnError(isRetriableControllerError, func() error {
+		b, err := ca.Controller()
+		if err != nil {
+			return err
+		}
+
+		rsp, err = b.AlterUserScramCredentials(req)
+		return err
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return rsp.Results, nil
+}
+
+// Describe all: use an empty/nil components slice + strict = false
+// Contains components: strict = false
+// Contains only components: strict = true
+func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) {
+	request := NewDescribeClientQuotasRequest(
+		ca.conf.Version,
+		components,
+		strict,
+	)
+
+	b, err := ca.Controller()
+	if err != nil {
+		return nil, err
+	}
+
+	rsp, err := b.DescribeClientQuotas(request)
+	if err != nil {
+		return nil, err
+	}
+
+	if rsp.ErrorMsg != nil && len(*rsp.ErrorMsg) > 0 {
+		return nil, errors.New(*rsp.ErrorMsg)
+	}
+	if !errors.Is(rsp.ErrorCode, ErrNoError) {
+		return nil, rsp.ErrorCode
+	}
+
+	return rsp.Entries, nil
+}
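+
+// Illustrative usage sketches for the three filter modes described above
+// (the user name is hypothetical):
+//
+//	// describe all quota entries
+//	all, err := admin.DescribeClientQuotas(nil, false)
+//
+//	// entries whose entity contains user "alice" (other components may be present)
+//	user := QuotaFilterComponent{EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Match: "alice"}
+//	some, err := admin.DescribeClientQuotas([]QuotaFilterComponent{user}, false)
+//
+//	// entries whose entity consists of exactly the given components
+//	only, err := admin.DescribeClientQuotas([]QuotaFilterComponent{user}, true)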
+
+func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error {
+	entry := AlterClientQuotasEntry{
+		Entity: entity,
+		Ops:    []ClientQuotasOp{op},
+	}
+
+	request := &AlterClientQuotasRequest{
+		Entries:      []AlterClientQuotasEntry{entry},
+		ValidateOnly: validateOnly,
+	}
+
+	b, err := ca.Controller()
+	if err != nil {
+		return err
+	}
+
+	rsp, err := b.AlterClientQuotas(request)
+	if err != nil {
+		return err
+	}
+
+	for _, entry := range rsp.Entries {
+		if entry.ErrorMsg != nil && len(*entry.ErrorMsg) > 0 {
+			return errors.New(*entry.ErrorMsg)
+		}
+		if !errors.Is(entry.ErrorCode, ErrNoError) {
+			return entry.ErrorCode
+		}
+	}
+
+	return nil
+}
+
+func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(group string, groupInstanceIds []string) (*LeaveGroupResponse, error) {
+	if !ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		return nil, ConfigurationError("Removing members from a consumer group requires a Kafka version of at least v2.4.0")
+	}
+	var response *LeaveGroupResponse
+	request := &LeaveGroupRequest{
+		Version: 3,
+		GroupId: group,
+	}
+	for _, instanceId := range groupInstanceIds {
+		groupInstanceId := instanceId
+		request.Members = append(request.Members, MemberIdentity{
+			GroupInstanceId: &groupInstanceId,
+		})
+	}
+	err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+		defer func() {
+			if err != nil && isRetriableGroupCoordinatorError(err) {
+				_ = ca.client.RefreshCoordinator(group)
+			}
+		}()
+
+		coordinator, err := ca.client.Coordinator(group)
+		if err != nil {
+			return err
+		}
+
+		response, err = coordinator.LeaveGroup(request)
+		if err != nil {
+			return err
+		}
+		if !errors.Is(response.Err, ErrNoError) {
+			return response.Err
+		}
+
+		return nil
+	})
+
+	return response, err
+}
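+
+// Illustrative usage sketch (hypothetical group and static-member instance
+// ID): evict a static member from a consumer group by its group.instance.id.
+//
+//	resp, err := admin.RemoveMemberFromConsumerGroup("my-group", []string{"consumer-1"})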
diff --git a/vendor/github.com/IBM/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go
new file mode 100644
index 0000000..95554e3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go
@@ -0,0 +1,203 @@
+package sarama
+
+// AlterClientQuotas Request (Version: 0) => [entries] validate_only
+//   entries => [entity] [ops]
+//     entity => entity_type entity_name
+//       entity_type => STRING
+//       entity_name => NULLABLE_STRING
+//     ops => key value remove
+//       key => STRING
+//       value => FLOAT64
+//       remove => BOOLEAN
+//   validate_only => BOOLEAN
+
+type AlterClientQuotasRequest struct {
+	Version      int16
+	Entries      []AlterClientQuotasEntry // The quota configuration entries to alter.
+	ValidateOnly bool                     // Whether the alteration should be validated, but not performed.
+}
+
+func (a *AlterClientQuotasRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+type AlterClientQuotasEntry struct {
+	Entity []QuotaEntityComponent // The quota entity to alter.
+	Ops    []ClientQuotasOp       // The individual quota configuration entries to alter.
+}
+
+type ClientQuotasOp struct {
+	Key    string  // The quota configuration key.
+	Value  float64 // The value to set; ignored if the value is being removed.
+	Remove bool    // Whether the quota configuration value should be removed (true) or set (false).
+}
+
+func (a *AlterClientQuotasRequest) encode(pe packetEncoder) error {
+	// Entries
+	if err := pe.putArrayLength(len(a.Entries)); err != nil {
+		return err
+	}
+	for _, e := range a.Entries {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// ValidateOnly
+	pe.putBool(a.ValidateOnly)
+
+	return nil
+}
+
+func (a *AlterClientQuotasRequest) decode(pd packetDecoder, version int16) error {
+	// Entries
+	entryCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if entryCount > 0 {
+		a.Entries = make([]AlterClientQuotasEntry, entryCount)
+		for i := range a.Entries {
+			e := AlterClientQuotasEntry{}
+			if err = e.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entries[i] = e
+		}
+	} else {
+		a.Entries = []AlterClientQuotasEntry{}
+	}
+
+	// ValidateOnly
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntry) encode(pe packetEncoder) error {
+	// Entity
+	if err := pe.putArrayLength(len(a.Entity)); err != nil {
+		return err
+	}
+	for _, component := range a.Entity {
+		if err := component.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// Ops
+	if err := pe.putArrayLength(len(a.Ops)); err != nil {
+		return err
+	}
+	for _, o := range a.Ops {
+		if err := o.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntry) decode(pd packetDecoder, version int16) error {
+	// Entity
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		a.Entity = make([]QuotaEntityComponent, componentCount)
+		for i := 0; i < componentCount; i++ {
+			component := QuotaEntityComponent{}
+			if err := component.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entity[i] = component
+		}
+	} else {
+		a.Entity = []QuotaEntityComponent{}
+	}
+
+	// Ops
+	opCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if opCount > 0 {
+		a.Ops = make([]ClientQuotasOp, opCount)
+		for i := range a.Ops {
+			c := ClientQuotasOp{}
+			if err = c.decode(pd, version); err != nil {
+				return err
+			}
+			a.Ops[i] = c
+		}
+	} else {
+		a.Ops = []ClientQuotasOp{}
+	}
+
+	return nil
+}
+
+func (c *ClientQuotasOp) encode(pe packetEncoder) error {
+	// Key
+	if err := pe.putString(c.Key); err != nil {
+		return err
+	}
+
+	// Value
+	pe.putFloat64(c.Value)
+
+	// Remove
+	pe.putBool(c.Remove)
+
+	return nil
+}
+
+func (c *ClientQuotasOp) decode(pd packetDecoder, version int16) error {
+	// Key
+	key, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.Key = key
+
+	// Value
+	value, err := pd.getFloat64()
+	if err != nil {
+		return err
+	}
+	c.Value = value
+
+	// Remove
+	remove, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	c.Remove = remove
+
+	return nil
+}
+
+func (a *AlterClientQuotasRequest) key() int16 {
+	return apiKeyAlterClientQuotas
+}
+
+func (a *AlterClientQuotasRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AlterClientQuotasRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AlterClientQuotasRequest) isValidVersion() bool {
+	return a.Version == 0
+}
+
+func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion {
+	return V2_6_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go
new file mode 100644
index 0000000..2ca4974
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+	"time"
+)
+
+// AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries]
+//   throttle_time_ms => INT32
+//   entries => error_code error_message [entity]
+//     error_code => INT16
+//     error_message => NULLABLE_STRING
+//     entity => entity_type entity_name
+//       entity_type => STRING
+//       entity_name => NULLABLE_STRING
+
+type AlterClientQuotasResponse struct {
+	Version      int16
+	ThrottleTime time.Duration                    // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	Entries      []AlterClientQuotasEntryResponse // The quota configuration entries altered.
+}
+
+func (a *AlterClientQuotasResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+type AlterClientQuotasEntryResponse struct {
+	ErrorCode KError                 // The error code, or `0` if the quota alteration succeeded.
+	ErrorMsg  *string                // The error message, or `null` if the quota alteration succeeded.
+	Entity    []QuotaEntityComponent // The quota entity altered.
+}
+
+func (a *AlterClientQuotasResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+
+	// Entries
+	if err := pe.putArrayLength(len(a.Entries)); err != nil {
+		return err
+	}
+	for _, e := range a.Entries {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	// Entries
+	entryCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if entryCount > 0 {
+		a.Entries = make([]AlterClientQuotasEntryResponse, entryCount)
+		for i := range a.Entries {
+			e := AlterClientQuotasEntryResponse{}
+			if err = e.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entries[i] = e
+		}
+	} else {
+		a.Entries = []AlterClientQuotasEntryResponse{}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntryResponse) encode(pe packetEncoder) error {
+	// ErrorCode
+	pe.putKError(a.ErrorCode)
+
+	// ErrorMsg
+	if err := pe.putNullableString(a.ErrorMsg); err != nil {
+		return err
+	}
+
+	// Entity
+	if err := pe.putArrayLength(len(a.Entity)); err != nil {
+		return err
+	}
+	for _, component := range a.Entity {
+		if err := component.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasEntryResponse) decode(pd packetDecoder, version int16) (err error) {
+	// ErrorCode
+	a.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	// ErrorMsg
+	errMsg, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	a.ErrorMsg = errMsg
+
+	// Entity
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		a.Entity = make([]QuotaEntityComponent, componentCount)
+		for i := 0; i < componentCount; i++ {
+			component := QuotaEntityComponent{}
+			if err := component.decode(pd, version); err != nil {
+				return err
+			}
+			a.Entity[i] = component
+		}
+	} else {
+		a.Entity = []QuotaEntityComponent{}
+	}
+
+	return nil
+}
+
+func (a *AlterClientQuotasResponse) key() int16 {
+	return apiKeyAlterClientQuotas
+}
+
+func (a *AlterClientQuotasResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AlterClientQuotasResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AlterClientQuotasResponse) isValidVersion() bool {
+	return a.Version == 0
+}
+
+func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion {
+	return V2_6_0_0
+}
+
+func (r *AlterClientQuotasResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go
new file mode 100644
index 0000000..5293b49
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_configs_request.go
@@ -0,0 +1,142 @@
+package sarama
+
+// AlterConfigsRequest is an alter config request type
+type AlterConfigsRequest struct {
+	Version      int16
+	Resources    []*AlterConfigsResource
+	ValidateOnly bool
+}
+
+func (a *AlterConfigsRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+// AlterConfigsResource is an alter config resource type
+type AlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]*string
+}
+
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+	return nil
+}
+
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &AlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	return nil
+}
+
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range a.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+		}
+	}
+	return err
+}
+
+func (a *AlterConfigsRequest) key() int16 {
+	return apiKeyAlterConfigs
+}
+
+func (a *AlterConfigsRequest) version() int16 {
+	return a.Version
+}
+
+func (a *AlterConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *AlterConfigsRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go
new file mode 100644
index 0000000..cde2686
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_configs_response.go
@@ -0,0 +1,156 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+// AlterConfigsResponse is a response type for alter config
+type AlterConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+func (a *AlterConfigsResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+type AlterConfigError struct {
+	Err    KError
+	ErrMsg string
+}
+
+func (c *AlterConfigError) Error() string {
+	text := c.Err.Error()
+	if c.ErrMsg != "" {
+		text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
+	}
+	return text
+}
+
+// AlterConfigsResourceResponse is a response type for alter config resource
+type AlterConfigsResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+}
+
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
+	pe.putInt16(a.ErrorCode)
+	err := pe.putString(a.ErrorMsg)
+	if err != nil {
+		return err
+	}
+	pe.putInt8(int8(a.Type))
+	err = pe.putString(a.Name)
+	if err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
+	errCode, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	a.ErrorCode = errCode
+
+	e, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	if e == nil {
+		a.ErrorMsg = ""
+	} else {
+		a.ErrorMsg = *e
+	}
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *AlterConfigsResponse) key() int16 {
+	return apiKeyAlterConfigs
+}
+
+func (a *AlterConfigsResponse) version() int16 {
+	return a.Version
+}
+
+func (a *AlterConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *AlterConfigsResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *AlterConfigsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go
new file mode 100644
index 0000000..165cbcc
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go
@@ -0,0 +1,146 @@
+package sarama
+
+type alterPartitionReassignmentsBlock struct {
+	replicas []int32
+}
+
+func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
+	if err := pe.putNullableInt32Array(b.replicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
+	if b.replicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+type AlterPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
+	Version   int16
+}
+
+func (r *AlterPartitionReassignmentsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+				r.blocks[topic][partition] = block
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterPartitionReassignmentsRequest) key() int16 {
+	return apiKeyAlterPartitionReassignments
+}
+
+func (r *AlterPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterPartitionReassignmentsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterPartitionReassignmentsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterPartitionReassignmentsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+	}
+
+	r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
+}
diff --git a/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go
new file mode 100644
index 0000000..886c4f6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go
@@ -0,0 +1,178 @@
+package sarama
+
+import "time"
+
+type alterPartitionReassignmentsErrorBlock struct {
+	errorCode    KError
+	errorMessage *string
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
+	pe.putKError(b.errorCode)
+	if err := pe.putNullableString(b.errorMessage); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
+	b.errorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+	b.errorMessage, err = pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type AlterPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	Errors         map[string]map[int32]*alterPartitionReassignmentsErrorBlock
+}
+
+func (r *AlterPartitionReassignmentsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
+		r.Errors[topic] = partitions
+	}
+
+	partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
+}
+
+func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putKError(r.ErrorCode)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			ongoingPartitionReassignments, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
+
+			for j := 0; j < ongoingPartitionReassignments; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				block := &alterPartitionReassignmentsErrorBlock{}
+				if err := block.decode(pd); err != nil {
+					return err
+				}
+
+				r.Errors[topic][partition] = block
+			}
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterPartitionReassignmentsResponse) key() int16 {
+	return apiKeyAlterPartitionReassignments
+}
+
+func (r *AlterPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *AlterPartitionReassignmentsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterPartitionReassignmentsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterPartitionReassignmentsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go
new file mode 100644
index 0000000..d50a38d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go
@@ -0,0 +1,160 @@
+package sarama
+
+type AlterUserScramCredentialsRequest struct {
+	Version int16
+
+	// Deletions is the list of SCRAM credentials to remove
+	Deletions []AlterUserScramCredentialsDelete
+
+	// Upsertions is the list of SCRAM credentials to update or insert
+	Upsertions []AlterUserScramCredentialsUpsert
+}
+
+func (r *AlterUserScramCredentialsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+type AlterUserScramCredentialsDelete struct {
+	Name      string
+	Mechanism ScramMechanismType
+}
+
+type AlterUserScramCredentialsUpsert struct {
+	Name           string
+	Mechanism      ScramMechanismType
+	Iterations     int32
+	Salt           []byte
+	saltedPassword []byte
+
+	// Password is never transmitted over the wire; only the salted password
+	// derived from it is (see https://tools.ietf.org/html/rfc5802)
+	Password []byte
+}
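+
+// Illustrative usage sketch (hypothetical user, salt, and password): the
+// plaintext Password together with Salt and Iterations is salted client-side
+// during encoding, so only the salted password goes over the wire.
+//
+//	upsert := AlterUserScramCredentialsUpsert{
+//		Name:       "alice",
+//		Mechanism:  SCRAM_MECHANISM_SHA_256,
+//		Iterations: 8192,
+//		Salt:       salt, // e.g. 16 random bytes from crypto/rand
+//		Password:   []byte("secret"),
+//	}
+//	results, err := admin.UpsertUserScramCredentials([]AlterUserScramCredentialsUpsert{upsert})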
+
+func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Deletions)); err != nil {
+		return err
+	}
+	for _, d := range r.Deletions {
+		if err := pe.putString(d.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(d.Mechanism))
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	if err := pe.putArrayLength(len(r.Upsertions)); err != nil {
+		return err
+	}
+	for _, u := range r.Upsertions {
+		if err := pe.putString(u.Name); err != nil {
+			return err
+		}
+		pe.putInt8(int8(u.Mechanism))
+		pe.putInt32(u.Iterations)
+
+		if err := pe.putBytes(u.Salt); err != nil {
+			return err
+		}
+
+		// do not transmit the password over the wire
+		formatter := scramFormatter{mechanism: u.Mechanism}
+		salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
+		if err != nil {
+			return err
+		}
+
+		if err := pe.putBytes(salted); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	numDeletions, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
+	for i := 0; i < numDeletions; i++ {
+		r.Deletions[i] = AlterUserScramCredentialsDelete{}
+		if r.Deletions[i].Name, err = pd.getString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	numUpsertions, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
+	for i := 0; i < numUpsertions; i++ {
+		r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
+		if r.Upsertions[i].Name, err = pd.getString(); err != nil {
+			return err
+		}
+		mechanism, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
+		if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].Salt, err = pd.getBytes(); err != nil {
+			return err
+		}
+		if r.Upsertions[i].saltedPassword, err = pd.getBytes(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterUserScramCredentialsRequest) key() int16 {
+	return apiKeyAlterUserScramCredentials
+}
+
+func (r *AlterUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterUserScramCredentialsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterUserScramCredentialsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go
new file mode 100644
index 0000000..229ed54
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go
@@ -0,0 +1,111 @@
+package sarama
+
+import "time"
+
+type AlterUserScramCredentialsResponse struct {
+	Version int16
+
+	ThrottleTime time.Duration
+
+	Results []*AlterUserScramCredentialsResult
+}
+
+func (r *AlterUserScramCredentialsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type AlterUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+}
+
+func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+	if err := pe.putArrayLength(len(r.Results)); err != nil {
+		return err
+	}
+
+	for _, u := range r.Results {
+		if err := pe.putString(u.User); err != nil {
+			return err
+		}
+		pe.putKError(u.ErrorCode)
+		if err := pe.putNullableString(u.ErrorMessage); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	numResults, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numResults > 0 {
+		r.Results = make([]*AlterUserScramCredentialsResult, numResults)
+		for i := 0; i < numResults; i++ {
+			r.Results[i] = &AlterUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getString(); err != nil {
+				return err
+			}
+
+			r.Results[i].ErrorCode, err = pd.getKError()
+			if err != nil {
+				return err
+			}
+
+			if r.Results[i].ErrorMessage, err = pd.getNullableString(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *AlterUserScramCredentialsResponse) key() int16 {
+	return apiKeyAlterUserScramCredentials
+}
+
+func (r *AlterUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *AlterUserScramCredentialsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *AlterUserScramCredentialsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterUserScramCredentialsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
+
+func (r *AlterUserScramCredentialsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/api_versions.go b/vendor/github.com/IBM/sarama/api_versions.go
new file mode 100644
index 0000000..e993432
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions.go
@@ -0,0 +1,84 @@
+package sarama
+
+type apiVersionRange struct {
+	minVersion int16
+	maxVersion int16
+}
+
+type apiVersionMap map[int16]*apiVersionRange
+
+// restrictApiVersion selects the appropriate API version for a given protocol body according to
+// the client and broker version ranges. By default, it selects the maximum version supported by both
+// client and broker, capped by the maximum Kafka version from Config.
+// It then calls setVersion() on the protocol body.
+// If no valid version is found, an error is returned.
+func restrictApiVersion(pb protocolBody, brokerVersions apiVersionMap) error {
+	key := pb.key()
+	// Since message constructors take a Kafka version and select the maximum supported protocol version already, we can
+	// rely on pb.version() being the max version supported for the user-selected Kafka API version.
+	clientMax := pb.version()
+
+	if brokerVersionRange := brokerVersions[key]; brokerVersionRange != nil {
+		// Select the maximum version that both client and server support
+		// Clamp to the client max to respect user preference above broker advertised version range
+		pb.setVersion(min(clientMax, max(min(clientMax, brokerVersionRange.maxVersion), brokerVersionRange.minVersion)))
+		return nil
+	}
+
+	return nil // no version ranges available, no restriction
+}
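+
+// Worked example (illustrative): with a client maximum of 7, a broker
+// advertising versions 2..5 negotiates min(7, max(min(7, 5), 2)) = 5, while a
+// broker advertising only 9..10 clamps to the client maximum:
+// min(7, max(min(7, 10), 9)) = 7.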
+
+const (
+	apiKeyProduce                      = 0
+	apiKeyFetch                        = 1
+	apiKeyListOffsets                  = 2
+	apiKeyMetadata                     = 3
+	apiKeyLeaderAndIsr                 = 4
+	apiKeyStopReplica                  = 5
+	apiKeyUpdateMetadata               = 6
+	apiKeyControlledShutdown           = 7
+	apiKeyOffsetCommit                 = 8
+	apiKeyOffsetFetch                  = 9
+	apiKeyFindCoordinator              = 10
+	apiKeyJoinGroup                    = 11
+	apiKeyHeartbeat                    = 12
+	apiKeyLeaveGroup                   = 13
+	apiKeySyncGroup                    = 14
+	apiKeyDescribeGroups               = 15
+	apiKeyListGroups                   = 16
+	apiKeySaslHandshake                = 17
+	apiKeyApiVersions                  = 18
+	apiKeyCreateTopics                 = 19
+	apiKeyDeleteTopics                 = 20
+	apiKeyDeleteRecords                = 21
+	apiKeyInitProducerId               = 22
+	apiKeyOffsetForLeaderEpoch         = 23
+	apiKeyAddPartitionsToTxn           = 24
+	apiKeyAddOffsetsToTxn              = 25
+	apiKeyEndTxn                       = 26
+	apiKeyWriteTxnMarkers              = 27
+	apiKeyTxnOffsetCommit              = 28
+	apiKeyDescribeAcls                 = 29
+	apiKeyCreateAcls                   = 30
+	apiKeyDeleteAcls                   = 31
+	apiKeyDescribeConfigs              = 32
+	apiKeyAlterConfigs                 = 33
+	apiKeyAlterReplicaLogDirs          = 34
+	apiKeyDescribeLogDirs              = 35
+	apiKeySASLAuth                     = 36
+	apiKeyCreatePartitions             = 37
+	apiKeyCreateDelegationToken        = 38
+	apiKeyRenewDelegationToken         = 39
+	apiKeyExpireDelegationToken        = 40
+	apiKeyDescribeDelegationToken      = 41
+	apiKeyDeleteGroups                 = 42
+	apiKeyElectLeaders                 = 43
+	apiKeyIncrementalAlterConfigs      = 44
+	apiKeyAlterPartitionReassignments  = 45
+	apiKeyListPartitionReassignments   = 46
+	apiKeyOffsetDelete                 = 47
+	apiKeyDescribeClientQuotas         = 48
+	apiKeyAlterClientQuotas            = 49
+	apiKeyDescribeUserScramCredentials = 50
+	apiKeyAlterUserScramCredentials    = 51
+)
diff --git a/vendor/github.com/IBM/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go
new file mode 100644
index 0000000..593b0f0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+const defaultClientSoftwareName = "sarama"
+
+type ApiVersionsRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ClientSoftwareName contains the name of the client.
+	ClientSoftwareName string
+	// ClientSoftwareVersion contains the version of the client.
+	ClientSoftwareVersion string
+}
+
+func (r *ApiVersionsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) (err error) {
+	if r.Version >= 3 {
+		if err := pe.putString(r.ClientSoftwareName); err != nil {
+			return err
+		}
+		if err := pe.putString(r.ClientSoftwareVersion); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 3 {
+		if r.ClientSoftwareName, err = pd.getString(); err != nil {
+			return err
+		}
+		if r.ClientSoftwareVersion, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+	return apiKeyApiVersions
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ApiVersionsRequest) headerVersion() int16 {
+	if r.Version >= 3 {
+		return 2
+	}
+	return 1
+}
+
+func (r *ApiVersionsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 3
+}
+
+func (r *ApiVersionsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ApiVersionsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
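+
+// A minimal in-package sketch of the gating above (illustrative only): a v3
+// request is flexible, carries the client software name/version, and needs
+// brokers at Kafka 2.4.0 or newer.
+//
+//	req := &ApiVersionsRequest{
+//		Version:               3,
+//		ClientSoftwareName:    defaultClientSoftwareName,
+//		ClientSoftwareVersion: "1.0.0",
+//	}
+//	_ = req.headerVersion()   // 2: flexible requests use the v2 request header
+//	_ = req.requiredVersion() // V2_4_0_0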
diff --git a/vendor/github.com/IBM/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go
new file mode 100644
index 0000000..3f3c11a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions_response.go
@@ -0,0 +1,171 @@
+package sarama
+
+import (
+	"time"
+)
+
+// ApiVersionsResponseKey describes one API supported by the broker, with its version range.
+type ApiVersionsResponseKey struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ApiKey contains the API index.
+	ApiKey int16
+	// MinVersion contains the minimum supported version, inclusive.
+	MinVersion int16
+	// MaxVersion contains the maximum supported version, inclusive.
+	MaxVersion int16
+}
+
+func (a *ApiVersionsResponseKey) encode(pe packetEncoder, version int16) (err error) {
+	a.Version = version
+	pe.putInt16(a.ApiKey)
+
+	pe.putInt16(a.MinVersion)
+
+	pe.putInt16(a.MaxVersion)
+
+	if version >= 3 {
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (a *ApiVersionsResponseKey) decode(pd packetDecoder, version int16) (err error) {
+	a.Version = version
+	if a.ApiKey, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if a.MinVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if a.MaxVersion, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type ApiVersionsResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ErrorCode contains the top-level error code.
+	ErrorCode int16
+	// ApiKeys contains the APIs supported by the broker.
+	ApiKeys []ApiVersionsResponseKey
+	// ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTimeMs int32
+}
+
+func (r *ApiVersionsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err := pe.putArrayLength(len(r.ApiKeys)); err != nil {
+		return err
+	}
+	for _, block := range r.ApiKeys {
+		if err := block.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	if r.Version >= 3 {
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ErrorCode, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	// KIP-511: if the broker didn't understand the ApiVersionsRequest version then
+	// it replies with a v0 non-flexible ApiVersionsResponse, where its supported
+	// ApiVersionsRequest version is available in ApiKeys
+	if r.ErrorCode == int16(ErrUnsupportedVersion) {
+		// drop version to 0 and revert the packetDecoder to non-flexible for the remaining decoding
+		r.Version = 0
+		pd = downgradeFlexibleDecoder(pd)
+	}
+
+	numApiKeys, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.ApiKeys = make([]ApiVersionsResponseKey, numApiKeys)
+	for i := 0; i < numApiKeys; i++ {
+		var block ApiVersionsResponseKey
+		if err = block.decode(pd, r.Version); err != nil {
+			return err
+		}
+		r.ApiKeys[i] = block
+	}
+
+	if r.Version >= 1 {
+		if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+	return apiKeyApiVersions
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ApiVersionsResponse) headerVersion() int16 {
+	// ApiVersionsResponse always includes a v0 header.
+	// See KIP-511 for details
+	return 0
+}
+
+func (r *ApiVersionsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 3
+}
+
+func (r *ApiVersionsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ApiVersionsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *ApiVersionsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
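+
+// A minimal in-package sketch (hypothetical helper, not part of the API) of
+// inspecting a decoded response for the version range a broker supports for a
+// given API key:
+//
+//	func supportedRange(res *ApiVersionsResponse, apiKey int16) (minV, maxV int16, ok bool) {
+//		for _, k := range res.ApiKeys {
+//			if k.ApiKey == apiKey {
+//				return k.MinVersion, k.MaxVersion, true
+//			}
+//		}
+//		return 0, 0, false
+//	}
+//
+//	// e.g. minV, maxV, ok := supportedRange(resp, apiKeyApiVersions)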
diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go
new file mode 100644
index 0000000..e34eed5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/async_producer.go
@@ -0,0 +1,1427 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"time"
+
+	"github.com/eapache/go-resiliency/breaker"
+	"github.com/eapache/queue"
+	"github.com/rcrowley/go-metrics"
+)
+
+// ErrProducerRetryBufferOverflow is returned when the bridging retry buffer is full and OOM prevention needs to be applied.
+var ErrProducerRetryBufferOverflow = errors.New("retry buffer full: message discarded to prevent buffer overflow")
+
+const (
+	// minFunctionalRetryBufferLength defines the minimum number of messages the retry buffer must support.
+	// If Producer.Retry.MaxBufferLength is set to a non-zero value below this limit, it will be adjusted to this value.
+	// This ensures the retry buffer remains functional under typical workloads.
+	minFunctionalRetryBufferLength = 4 * 1024
+	// minFunctionalRetryBufferBytes defines the minimum total byte size the retry buffer must support.
+	// If Producer.Retry.MaxBufferBytes is set to a non-zero value below this limit, it will be adjusted to this value.
+	// A 32 MB lower limit ensures sufficient capacity for retrying larger messages without exhausting resources.
+	minFunctionalRetryBufferBytes = 32 * 1024 * 1024
+)
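+
+// A hedged configuration sketch: both limits are opt-in (zero disables them),
+// and non-zero values below the floors above are raised to those floors by the
+// retry handler.
+//
+//	conf := NewConfig()
+//	conf.Producer.Retry.MaxBufferLength = 1024            // below 4096, so raised to 4096
+//	conf.Producer.Retry.MaxBufferBytes = 64 * 1024 * 1024 // 64 MiB cap on buffered retry bytes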
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks and message loss: it will not be garbage-collected automatically when it passes
+// out of scope, and buffered messages may not be flushed.
+type AsyncProducer interface {
+	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
+	// when both the Errors and Successes channels have been closed. When calling
+	// AsyncClose, you *must* continue to read from those channels in order to
+	// drain the results of any messages in flight.
+	AsyncClose()
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before the
+	// process shuts down, or you may lose messages. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+
+	// Input is the input channel for the user to write messages to that they
+	// wish to send.
+	Input() chan<- *ProducerMessage
+
+	// Successes is the success output channel back to the user when Return.Successes is
+	// enabled. If Return.Successes is true, you MUST read from this channel or the
+	// Producer will deadlock. It is suggested that you send and read messages
+	// together in a single select statement.
+	Successes() <-chan *ProducerMessage
+
+	// Errors is the error output channel back to the user. You MUST read from this
+	// channel or the Producer will deadlock when the channel is full. Alternatively,
+	// you can set Producer.Return.Errors in your config to false, which prevents
+	// errors from being returned.
+	Errors() <-chan *ProducerError
+
+	// IsTransactional returns true when the current producer is transactional.
+	IsTransactional() bool
+
+	// TxnStatus returns the current producer transaction status.
+	TxnStatus() ProducerTxnStatusFlag
+
+	// BeginTxn marks the current transaction as ready.
+	BeginTxn() error
+
+	// CommitTxn commits the current transaction.
+	CommitTxn() error
+
+	// AbortTxn aborts the current transaction.
+	AbortTxn() error
+
+	// AddOffsetsToTxn adds the given offsets to the current transaction.
+	AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
+
+	// AddMessageToTxn adds a consumed message's offset to the current transaction.
+	AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
+}
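+
+// A minimal usage sketch for the interface above (assumes a broker at
+// localhost:9092 and the default config, where Return.Errors is enabled;
+// fmt is assumed to be imported):
+//
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("hello")}
+//	select {
+//	case producer.Input() <- msg:
+//	case perr := <-producer.Errors():
+//		fmt.Println("produce error:", perr)
+//	}
+//	if err := producer.Close(); err != nil { // flushes buffers and drains errors
+//		fmt.Println("close error:", err)
+//	}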
+
+type asyncProducer struct {
+	client Client
+	conf   *Config
+
+	errors                    chan *ProducerError
+	input, successes, retries chan *ProducerMessage
+	inFlight                  sync.WaitGroup
+
+	brokers    map[*Broker]*brokerProducer
+	brokerRefs map[*brokerProducer]int
+	brokerLock sync.Mutex
+
+	txnmgr *transactionManager
+	txLock sync.Mutex
+
+	metricsRegistry metrics.Registry
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+	client, err := NewClient(addrs, conf)
+	if err != nil {
+		return nil, err
+	}
+	return newAsyncProducer(client)
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+	// For clients passed in by the caller, ensure we don't
+	// call Close() on them.
+	cli := &nopCloserClient{client}
+	return newAsyncProducer(cli)
+}
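+
+// A brief sketch of the shared-client pattern; deferred closes run in LIFO
+// order, so the producer closes before the client it borrows:
+//
+//	client, err := NewClient([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer client.Close()
+//	producer, err := NewAsyncProducerFromClient(client)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()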
+
+func newAsyncProducer(client Client) (AsyncProducer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	txnmgr, err := newTransactionManager(client.Config(), client)
+	if err != nil {
+		return nil, err
+	}
+
+	p := &asyncProducer{
+		client:          client,
+		conf:            client.Config(),
+		errors:          make(chan *ProducerError),
+		input:           make(chan *ProducerMessage),
+		successes:       make(chan *ProducerMessage),
+		retries:         make(chan *ProducerMessage),
+		brokers:         make(map[*Broker]*brokerProducer),
+		brokerRefs:      make(map[*brokerProducer]int),
+		txnmgr:          txnmgr,
+		metricsRegistry: newCleanupRegistry(client.Config().MetricRegistry),
+	}
+
+	// launch our singleton dispatchers
+	go withRecover(p.dispatcher)
+	go withRecover(p.retryHandler)
+
+	return p, nil
+}
+
+type flagSet int8
+
+const (
+	syn       flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+	fin                           // final message from partitionProducer to brokerProducer and back
+	shutdown                      // start the shutdown process
+	endtxn                        // mark the end of a transaction
+	committxn                     // commit the ending transaction
+	aborttxn                      // abort the ending transaction
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+	Topic string // The Kafka topic for this message.
+	// The partitioning key for this message. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Key Encoder
+	// The actual message to store in Kafka. Pre-existing Encoders include
+	// StringEncoder and ByteEncoder.
+	Value Encoder
+
+	// The headers are key-value pairs that are transparently passed
+	// by Kafka between producers and consumers.
+	Headers []RecordHeader
+
+	// This field is used to hold arbitrary data you wish to include so it
+	// will be available when receiving on the Successes and Errors channels.
+	// Sarama completely ignores this field; it is only to be used for
+	// pass-through data.
+	Metadata interface{}
+
+	// Below this point are filled in by the producer as the message is processed
+
+	// Offset is the offset of the message stored on the broker. This is only
+	// guaranteed to be defined if the message was successfully delivered and
+	// RequiredAcks is not NoResponse.
+	Offset int64
+	// Partition is the partition that the message was sent to. This is only
+	// guaranteed to be defined if the message was successfully delivered.
+	Partition int32
+	// Timestamp can vary in behavior depending on broker configuration, being
+	// in either one of the CreateTime or LogAppendTime modes (default CreateTime),
+	// and requiring version at least 0.10.0.
+	//
+	// When configured to CreateTime, the timestamp is specified by the producer
+	// either by explicitly setting this field, or when the message is added
+	// to a produce set.
+	//
+	// When configured to LogAppendTime, the timestamp is assigned to the message
+	// by the broker. This is only guaranteed to be defined if the message was
+	// successfully delivered and RequiredAcks is not NoResponse.
+	Timestamp time.Time
+
+	retries        int
+	flags          flagSet
+	expectation    chan *ProducerError
+	sequenceNumber int32
+	producerEpoch  int16
+	hasSequence    bool
+}
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) ByteSize(version int) int {
+	var size int
+	if version >= 2 {
+		size = maximumRecordOverhead
+		for _, h := range m.Headers {
+			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+		}
+	} else {
+		size = producerMessageOverhead
+	}
+	if m.Key != nil {
+		size += m.Key.Length()
+	}
+	if m.Value != nil {
+		size += m.Value.Length()
+	}
+	return size
+}
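+
+// A small worked example (a StringEncoder's Length is its byte count;
+// maximumRecordOverhead is defined elsewhere in this package):
+//
+//	msg := &ProducerMessage{Key: StringEncoder("key"), Value: StringEncoder("hello")}
+//	_ = msg.ByteSize(2) // maximumRecordOverhead + 3 + 5
+//	_ = msg.ByteSize(1) // producerMessageOverhead (26) + 3 + 5 = 34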
+
+func (m *ProducerMessage) clear() {
+	m.flags = 0
+	m.retries = 0
+	m.sequenceNumber = 0
+	m.producerEpoch = 0
+	m.hasSequence = false
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+	Msg *ProducerMessage
+	Err error
+}
+
+func (pe ProducerError) Error() string {
+	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+func (pe ProducerError) Unwrap() error {
+	return pe.Err
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
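+
+// A hedged sketch of recovering the per-message failures from a failed Close
+// (errors and fmt are assumed to be imported):
+//
+//	if err := producer.Close(); err != nil {
+//		var perrs ProducerErrors
+//		if errors.As(err, &perrs) {
+//			for _, pe := range perrs {
+//				fmt.Println("undelivered:", pe.Msg.Topic, pe.Err)
+//			}
+//		}
+//	}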
+
+func (p *asyncProducer) IsTransactional() bool {
+	return p.txnmgr.isTransactional()
+}
+
+func (p *asyncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
+	offsets := make(map[string][]*PartitionOffsetMetadata)
+	offsets[msg.Topic] = []*PartitionOffsetMetadata{
+		{
+			Partition: msg.Partition,
+			Offset:    msg.Offset + 1,
+			Metadata:  metadata,
+		},
+	}
+	return p.AddOffsetsToTxn(offsets, groupId)
+}
+
+func (p *asyncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Printf("producer/txnmgr [%s] attempt to call AddOffsetsToTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+		return ErrNonTransactedProducer
+	}
+
+	DebugLogger.Printf("producer/txnmgr [%s] add offsets to transaction\n", p.txnmgr.transactionalID)
+	return p.txnmgr.addOffsetsToTxn(offsets, groupId)
+}
+
+func (p *asyncProducer) TxnStatus() ProducerTxnStatusFlag {
+	return p.txnmgr.currentTxnStatus()
+}
+
+func (p *asyncProducer) BeginTxn() error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Println("producer/txnmgr attempt to call BeginTxn on a non-transactional producer")
+		return ErrNonTransactedProducer
+	}
+
+	return p.txnmgr.transitionTo(ProducerTxnFlagInTransaction, nil)
+}
+
+func (p *asyncProducer) CommitTxn() error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Printf("producer/txnmgr [%s] attempt to call CommitTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+		return ErrNonTransactedProducer
+	}
+
+	DebugLogger.Printf("producer/txnmgr [%s] committing transaction\n", p.txnmgr.transactionalID)
+	err := p.finishTransaction(true)
+	if err != nil {
+		return err
+	}
+	DebugLogger.Printf("producer/txnmgr [%s] transaction committed\n", p.txnmgr.transactionalID)
+	return nil
+}
+
+func (p *asyncProducer) AbortTxn() error {
+	p.txLock.Lock()
+	defer p.txLock.Unlock()
+
+	if !p.IsTransactional() {
+		DebugLogger.Printf("producer/txnmgr [%s] attempt to call AbortTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+		return ErrNonTransactedProducer
+	}
+	DebugLogger.Printf("producer/txnmgr [%s] aborting transaction\n", p.txnmgr.transactionalID)
+	err := p.finishTransaction(false)
+	if err != nil {
+		return err
+	}
+	DebugLogger.Printf("producer/txnmgr [%s] transaction aborted\n", p.txnmgr.transactionalID)
+	return nil
+}
+
+func (p *asyncProducer) finishTransaction(commit bool) error {
+	p.inFlight.Add(1)
+	if commit {
+		p.input <- &ProducerMessage{flags: endtxn | committxn}
+	} else {
+		p.input <- &ProducerMessage{flags: endtxn | aborttxn}
+	}
+	p.inFlight.Wait()
+	return p.txnmgr.finishTransaction(commit)
+}
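+
+// A hedged sketch of the transactional round-trip built on the methods above
+// (assumes the producer was configured as transactional, e.g. via the Config's
+// transactional ID and idempotence settings):
+//
+//	if err := producer.BeginTxn(); err != nil {
+//		panic(err)
+//	}
+//	producer.Input() <- &ProducerMessage{Topic: "my_topic", Value: StringEncoder("in-txn")}
+//	if err := producer.CommitTxn(); err != nil {
+//		_ = producer.AbortTxn() // roll back on a failed commit
+//	}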
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+	return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+	return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+	return p.input
+}
+
+func (p *asyncProducer) Close() error {
+	p.AsyncClose()
+
+	if p.conf.Producer.Return.Successes {
+		go withRecover(func() {
+			for range p.successes {
+			}
+		})
+	}
+
+	var pErrs ProducerErrors
+	if p.conf.Producer.Return.Errors {
+		for event := range p.errors {
+			pErrs = append(pErrs, event)
+		}
+	} else {
+		<-p.errors
+	}
+
+	if len(pErrs) > 0 {
+		return pErrs
+	}
+	return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+	go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+	handlers := make(map[string]chan<- *ProducerMessage)
+	shuttingDown := false
+
+	for msg := range p.input {
+		if msg == nil {
+			Logger.Println("Something tried to send a nil message, it was ignored.")
+			continue
+		}
+
+		if msg.flags&endtxn != 0 {
+			var err error
+			if msg.flags&committxn != 0 {
+				err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagCommittingTransaction, nil)
+			} else {
+				err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagAbortingTransaction, nil)
+			}
+			if err != nil {
+				Logger.Printf("producer/txnmgr unable to end transaction %s", err)
+			}
+			p.inFlight.Done()
+			continue
+		}
+
+		if msg.flags&shutdown != 0 {
+			shuttingDown = true
+			p.inFlight.Done()
+			continue
+		}
+
+		if msg.retries == 0 {
+			if shuttingDown {
+				// we can't just call returnError here because that decrements the wait group,
+				// which hasn't been incremented yet for this message, and shouldn't be
+				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+				if p.conf.Producer.Return.Errors {
+					p.errors <- pErr
+				} else {
+					Logger.Println(pErr)
+				}
+				continue
+			}
+			p.inFlight.Add(1)
+			// Ignore retried msgs, they are already in the txn.
+			// Can't produce new record when transaction is not started.
+			if p.IsTransactional() && p.txnmgr.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
+				Logger.Printf("attempt to send message when transaction is not started or is in ending state, got %d, expect %d\n", p.txnmgr.currentTxnStatus(), ProducerTxnFlagInTransaction)
+				p.returnError(msg, ErrTransactionNotReady)
+				continue
+			}
+		}
+
+		for _, interceptor := range p.conf.Producer.Interceptors {
+			msg.safelyApplyInterceptor(interceptor)
+		}
+
+		version := 1
+		if p.conf.Version.IsAtLeast(V0_11_0_0) {
+			version = 2
+		} else if msg.Headers != nil {
+			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+			continue
+		}
+
+		size := msg.ByteSize(version)
+		if size > p.conf.Producer.MaxMessageBytes {
+			p.returnError(msg, ConfigurationError(fmt.Sprintf("Attempt to produce message larger than configured Producer.MaxMessageBytes: %d > %d", size, p.conf.Producer.MaxMessageBytes)))
+			continue
+		}
+
+		handler := handlers[msg.Topic]
+		if handler == nil {
+			handler = p.newTopicProducer(msg.Topic)
+			handlers[msg.Topic] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range handlers {
+		close(handler)
+	}
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+	parent *asyncProducer
+	topic  string
+	input  <-chan *ProducerMessage
+
+	breaker     *breaker.Breaker
+	handlers    map[int32]chan<- *ProducerMessage
+	partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	tp := &topicProducer{
+		parent:      p,
+		topic:       topic,
+		input:       input,
+		breaker:     breaker.New(3, 1, 10*time.Second),
+		handlers:    make(map[int32]chan<- *ProducerMessage),
+		partitioner: p.conf.Producer.Partitioner(topic),
+	}
+	go withRecover(tp.dispatch)
+	return input
+}
+
+func (tp *topicProducer) dispatch() {
+	for msg := range tp.input {
+		if msg.retries == 0 {
+			if err := tp.partitionMessage(msg); err != nil {
+				tp.parent.returnError(msg, err)
+				continue
+			}
+		}
+
+		handler := tp.handlers[msg.Partition]
+		if handler == nil {
+			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+			tp.handlers[msg.Partition] = handler
+		}
+
+		handler <- msg
+	}
+
+	for _, handler := range tp.handlers {
+		close(handler)
+	}
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+	var partitions []int32
+
+	err := tp.breaker.Run(func() (err error) {
+		requiresConsistency := false
+		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+			requiresConsistency = ep.MessageRequiresConsistency(msg)
+		} else {
+			requiresConsistency = tp.partitioner.RequiresConsistency()
+		}
+
+		if requiresConsistency {
+			partitions, err = tp.parent.client.Partitions(msg.Topic)
+		} else {
+			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+		}
+		return
+	})
+	if err != nil {
+		return err
+	}
+
+	numPartitions := int32(len(partitions))
+
+	if numPartitions == 0 {
+		return ErrLeaderNotAvailable
+	}
+
+	choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+	if err != nil {
+		return err
+	} else if choice < 0 || choice >= numPartitions {
+		return ErrInvalidPartition
+	}
+
+	msg.Partition = partitions[choice]
+
+	return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+	parent    *asyncProducer
+	topic     string
+	partition int32
+	input     <-chan *ProducerMessage
+
+	leader         *Broker
+	breaker        *breaker.Breaker
+	brokerProducer *brokerProducer
+
+	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
+	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
+	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+	// therefore whether our buffer is complete and safe to flush)
+	highWatermark int
+	retryState    []partitionRetryState
+}
+
+type partitionRetryState struct {
+	buf          []*ProducerMessage
+	expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+	pp := &partitionProducer{
+		parent:    p,
+		topic:     topic,
+		partition: partition,
+		input:     input,
+
+		breaker:    breaker.New(3, 1, 10*time.Second),
+		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+	}
+	go withRecover(pp.dispatch)
+	return input
+}
+
+func (pp *partitionProducer) backoff(retries int) {
+	var backoff time.Duration
+	if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
+		maxRetries := pp.parent.conf.Producer.Retry.Max
+		backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
+	} else {
+		backoff = pp.parent.conf.Producer.Retry.Backoff
+	}
+	if backoff > 0 {
+		time.Sleep(backoff)
+	}
+}
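+
+// A hedged sketch of a custom backoff supplied through the config hook used
+// above, growing exponentially with the retry count:
+//
+//	conf.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//		return time.Duration(100<<retries) * time.Millisecond
+//	}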
+
+func (pp *partitionProducer) updateLeaderIfBrokerProducerIsNil(msg *ProducerMessage) error {
+	if pp.brokerProducer == nil {
+		if err := pp.updateLeader(); err != nil {
+			pp.parent.returnError(msg, err)
+			pp.backoff(msg.retries)
+			return err
+		}
+		Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	}
+	return nil
+}
+
+func (pp *partitionProducer) dispatch() {
+	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+	// on the first message
+	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+	if pp.leader != nil {
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+	}
+
+	defer func() {
+		if pp.brokerProducer != nil {
+			pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+		}
+	}()
+
+	for msg := range pp.input {
+		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
+			select {
+			case <-pp.brokerProducer.abandoned:
+				// a message on the abandoned channel means that our current broker selection is out of date
+				Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+				pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+				pp.brokerProducer = nil
+				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+			default:
+				// producer connection is still open.
+			}
+		}
+
+		if msg.retries > pp.highWatermark {
+			if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil {
+				continue
+			}
+			// a new, higher, retry level; handle it and then back off
+			pp.newHighWatermark(msg.retries)
+			pp.backoff(msg.retries)
+		} else if pp.highWatermark > 0 {
+			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+			if msg.retries < pp.highWatermark {
+				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+				if msg.flags&fin == fin {
+					pp.retryState[msg.retries].expectChaser = false
+					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				} else {
+					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+				}
+				continue
+			} else if msg.flags&fin == fin {
+				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+				// meaning this retry level is done and we can go down (at least) one level and flush that
+				pp.retryState[pp.highWatermark].expectChaser = false
+				pp.flushRetryBuffers()
+				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+				continue
+			}
+		}
+
+		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+		// without breaking any of our ordering guarantees
+		if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil {
+			continue
+		}
+
+		// Now that we know we have a broker to actually try and send this message to, generate the sequence
+		// number for it.
+		// All messages being retried (sent or not) have already had their retry count updated
+		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
+		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+			msg.hasSequence = true
+		}
+
+		if pp.parent.IsTransactional() {
+			pp.parent.txnmgr.maybeAddPartitionToCurrentTxn(pp.topic, pp.partition)
+		}
+
+		pp.brokerProducer.input <- msg
+	}
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+	pp.highWatermark = hwm
+
+	// send off a fin so that we know when everything "in between" has made it
+	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+	pp.retryState[pp.highWatermark].expectChaser = true
+	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+	// a new HWM means that our current broker selection is out of date
+	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+	pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+	for {
+		pp.highWatermark--
+
+		if pp.brokerProducer == nil {
+			if err := pp.updateLeader(); err != nil {
+				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+				goto flushDone
+			}
+			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+		}
+
+		for _, msg := range pp.retryState[pp.highWatermark].buf {
+			pp.brokerProducer.input <- msg
+		}
+
+	flushDone:
+		pp.retryState[pp.highWatermark].buf = nil
+		if pp.retryState[pp.highWatermark].expectChaser {
+			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+			break
+		} else if pp.highWatermark == 0 {
+			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+			break
+		}
+	}
+}
+
+func (pp *partitionProducer) updateLeader() error {
+	return pp.breaker.Run(func() (err error) {
+		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+			return err
+		}
+
+		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+			return err
+		}
+
+		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+		return nil
+	})
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+	var (
+		input     = make(chan *ProducerMessage)
+		bridge    = make(chan *produceSet)
+		pending   = make(chan *brokerProducerResponse)
+		responses = make(chan *brokerProducerResponse)
+	)
+
+	bp := &brokerProducer{
+		parent:         p,
+		broker:         broker,
+		input:          input,
+		output:         bridge,
+		responses:      responses,
+		buffer:         newProduceSet(p),
+		currentRetries: make(map[string]map[int32]error),
+	}
+	go withRecover(bp.run)
+
+	// minimal bridge to make the network response `select`able
+	go withRecover(func() {
+		// Use a wait group to know if we still have in flight requests
+		var wg sync.WaitGroup
+
+		for set := range bridge {
+			request := set.buildRequest()
+
+			// Count the in flight requests to know when we can close the pending channel safely
+			wg.Add(1)
+			// Capture the current set to forward in the callback
+			sendResponse := func(set *produceSet) ProduceCallback {
+				return func(response *ProduceResponse, err error) {
+					// Forward the response to make sure we do not block the responseReceiver
+					pending <- &brokerProducerResponse{
+						set: set,
+						err: err,
+						res: response,
+					}
+					wg.Done()
+				}
+			}(set)
+
+			if p.IsTransactional() {
+				// Add partition to tx before sending current batch
+				err := p.txnmgr.publishTxnPartitions()
+				if err != nil {
+					// Request failed to be sent
+					sendResponse(nil, err)
+					continue
+				}
+			}
+
+			// Use AsyncProduce vs Produce to not block waiting for the response
+			// so that we can pipeline multiple produce requests and achieve higher throughput, see:
+			// https://kafka.apache.org/protocol#protocol_network
+			err := broker.AsyncProduce(request, sendResponse)
+			if err != nil {
+				// Request failed to be sent
+				sendResponse(nil, err)
+				continue
+			}
+			// Callback is not called when using NoResponse
+			if p.conf.Producer.RequiredAcks == NoResponse {
+				// Provide the expected nil response
+				sendResponse(nil, nil)
+			}
+		}
+		// Wait for all in flight requests to close the pending channel safely
+		wg.Wait()
+		close(pending)
+	})
+
+	// In order to avoid a deadlock when closing the broker on network or malformed response error
+	// we use an intermediate channel to buffer and send pending responses in order
+	// This is because the AsyncProduce callback inside the bridge is invoked from the broker
+// responseReceiver goroutine, and closing the broker requires that goroutine to have finished.
+	go withRecover(func() {
+		buf := queue.New()
+		for {
+			if buf.Length() == 0 {
+				res, ok := <-pending
+				if !ok {
+					// We are done forwarding the last pending response
+					close(responses)
+					return
+				}
+				buf.Add(res)
+			}
+			// Send the head pending response or buffer another one
+			// so that we never block the callback
+			headRes := buf.Peek().(*brokerProducerResponse)
+			select {
+			case res, ok := <-pending:
+				if !ok {
+					continue
+				}
+				buf.Add(res)
+				continue
+			case responses <- headRes:
+				buf.Remove()
+				continue
+			}
+		}
+	})
+
+	if p.conf.Producer.Retry.Max <= 0 {
+		bp.abandoned = make(chan struct{})
+	}
+
+	return bp
+}
+
+type brokerProducerResponse struct {
+	set *produceSet
+	err error
+	res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+	parent *asyncProducer
+	broker *Broker
+
+	input     chan *ProducerMessage
+	output    chan<- *produceSet
+	responses <-chan *brokerProducerResponse
+	abandoned chan struct{}
+
+	buffer     *produceSet
+	timer      *time.Timer
+	timerFired bool
+
+	closing        error
+	currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+	var output chan<- *produceSet
+	var timerChan <-chan time.Time
+	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+	for {
+		select {
+		case msg, ok := <-bp.input:
+			if !ok {
+				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
+				bp.shutdown()
+				return
+			}
+
+			if msg == nil {
+				continue
+			}
+
+			if msg.flags&syn == syn {
+				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+					bp.broker.ID(), msg.Topic, msg.Partition)
+				if bp.currentRetries[msg.Topic] == nil {
+					bp.currentRetries[msg.Topic] = make(map[int32]error)
+				}
+				bp.currentRetries[msg.Topic][msg.Partition] = nil
+				bp.parent.inFlight.Done()
+				continue
+			}
+
+			if reason := bp.needsRetry(msg); reason != nil {
+				bp.parent.retryMessage(msg, reason)
+
+				if bp.closing == nil && msg.flags&fin == fin {
+					// we were retrying this partition but we can start processing again
+					delete(bp.currentRetries[msg.Topic], msg.Partition)
+					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+						bp.broker.ID(), msg.Topic, msg.Partition)
+				}
+
+				continue
+			}
+
+			if msg.flags&fin == fin {
+				// New broker producer that was caught up by the retry loop
+				bp.parent.retryMessage(msg, ErrShuttingDown)
+				DebugLogger.Printf("producer/broker/%d state change to [dying-%d] on %s/%d\n",
+					bp.broker.ID(), msg.retries, msg.Topic, msg.Partition)
+				continue
+			}
+
+			if bp.buffer.wouldOverflow(msg) {
+				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, false); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+
+			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+				// The epoch was reset, need to roll the buffer over
+				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+				if err := bp.waitForSpace(msg, true); err != nil {
+					bp.parent.retryMessage(msg, err)
+					continue
+				}
+			}
+			if err := bp.buffer.add(msg); err != nil {
+				bp.parent.returnError(msg, err)
+				continue
+			}
+
+			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+				bp.timer = time.NewTimer(bp.parent.conf.Producer.Flush.Frequency)
+				timerChan = bp.timer.C
+			}
+		case <-timerChan:
+			bp.timerFired = true
+		case output <- bp.buffer:
+			bp.rollOver()
+			timerChan = nil
+		case response, ok := <-bp.responses:
+			if ok {
+				bp.handleResponse(response)
+			}
+		}
+
+		if bp.timerFired || bp.buffer.readyToFlush() {
+			output = bp.output
+		} else {
+			output = nil
+		}
+	}
+}
+
+func (bp *brokerProducer) shutdown() {
+	for !bp.buffer.empty() {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+		}
+	}
+	close(bp.output)
+	// Drain responses from the bridge goroutine
+	for response := range bp.responses {
+		bp.handleResponse(response)
+	}
+	// No more brokerProducer related goroutine should be running
+	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+	if bp.closing != nil {
+		return bp.closing
+	}
+
+	return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
+	for {
+		select {
+		case response := <-bp.responses:
+			bp.handleResponse(response)
+			// handling a response can change our state, so re-check some things
+			if reason := bp.needsRetry(msg); reason != nil {
+				return reason
+			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
+				return nil
+			}
+		case bp.output <- bp.buffer:
+			bp.rollOver()
+			return nil
+		}
+	}
+}
+
+func (bp *brokerProducer) rollOver() {
+	if bp.timer != nil {
+		bp.timer.Stop()
+	}
+	bp.timer = nil
+	bp.timerFired = false
+	bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+	if response.err != nil {
+		bp.handleError(response.set, response.err)
+	} else {
+		bp.handleSuccess(response.set, response.res)
+	}
+
+	if bp.buffer.empty() {
+		bp.rollOver() // this can happen if the response invalidated our buffer
+	}
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+	// we iterate through the blocks in the request set, not the response, so that we notice
+	// if the response is missing a block completely
+	var retryTopics []string
+	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+		if response == nil {
+			// this only happens when RequiredAcks is NoResponse, so we have to assume success
+			bp.parent.returnSuccesses(pSet.msgs)
+			return
+		}
+
+		block := response.GetBlock(topic, partition)
+		if block == nil {
+			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+			return
+		}
+
+		switch block.Err {
+		// Success
+		case ErrNoError:
+			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+				for _, msg := range pSet.msgs {
+					msg.Timestamp = block.Timestamp
+				}
+			}
+			for i, msg := range pSet.msgs {
+				msg.Offset = block.Offset + int64(i)
+			}
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Duplicate
+		case ErrDuplicateSequenceNumber:
+			bp.parent.returnSuccesses(pSet.msgs)
+		// Retriable errors
+		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+				bp.parent.returnErrors(pSet.msgs, block.Err)
+			} else {
+				retryTopics = append(retryTopics, topic)
+			}
+		// Other non-retriable errors
+		default:
+			if bp.parent.conf.Producer.Retry.Max <= 0 {
+				bp.parent.abandonBrokerConnection(bp.broker)
+			}
+			bp.parent.returnErrors(pSet.msgs, block.Err)
+		}
+	})
+
+	if len(retryTopics) > 0 {
+		if bp.parent.conf.Producer.Idempotent {
+			err := bp.parent.client.RefreshMetadata(retryTopics...)
+			if err != nil {
+				Logger.Printf("Failed refreshing metadata because of %v\n", err)
+			}
+		}
+
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			block := response.GetBlock(topic, partition)
+			if block == nil {
+				// handled in the previous "eachPartition" loop
+				return
+			}
+
+			switch block.Err {
+			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
+				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+					bp.broker.ID(), topic, partition, block.Err)
+				if bp.currentRetries[topic] == nil {
+					bp.currentRetries[topic] = make(map[int32]error)
+				}
+				bp.currentRetries[topic][partition] = block.Err
+				if bp.parent.conf.Producer.Idempotent {
+					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+				} else {
+					bp.parent.retryMessages(pSet.msgs, block.Err)
+				}
+				// dropping the following messages has the side effect of incrementing their retry count
+				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+			}
+		})
+	}
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+	produceSet := newProduceSet(p)
+	produceSet.msgs[topic] = make(map[int32]*partitionSet)
+	produceSet.msgs[topic][partition] = pSet
+	produceSet.bufferBytes += pSet.bufferBytes
+	produceSet.bufferCount += len(pSet.msgs)
+	for _, msg := range pSet.msgs {
+		if msg.retries >= p.conf.Producer.Retry.Max {
+			p.returnErrors(pSet.msgs, kerr)
+			return
+		}
+		msg.retries++
+	}
+
+	// it's expected that a metadata refresh has been requested prior to calling retryBatch
+	leader, err := p.client.Leader(topic, partition)
+	if err != nil {
+		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
+		for _, msg := range pSet.msgs {
+			p.returnError(msg, kerr)
+		}
+		return
+	}
+	bp := p.getBrokerProducer(leader)
+	bp.output <- produceSet
+	p.unrefBrokerProducer(leader, bp)
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+	var target PacketEncodingError
+	if errors.As(err, &target) {
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.returnErrors(pSet.msgs, err)
+		})
+	} else {
+		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+		bp.parent.abandonBrokerConnection(bp.broker)
+		_ = bp.broker.Close()
+		bp.closing = err
+		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+			bp.parent.retryMessages(pSet.msgs, err)
+		})
+		bp.rollOver()
+	}
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+	maxBufferLength := p.conf.Producer.Retry.MaxBufferLength
+	if 0 < maxBufferLength && maxBufferLength < minFunctionalRetryBufferLength {
+		maxBufferLength = minFunctionalRetryBufferLength
+	}
+
+	maxBufferBytes := p.conf.Producer.Retry.MaxBufferBytes
+	if 0 < maxBufferBytes && maxBufferBytes < minFunctionalRetryBufferBytes {
+		maxBufferBytes = minFunctionalRetryBufferBytes
+	}
+
+	version := 1
+	if p.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	var currentByteSize int64
+	var msg *ProducerMessage
+	buf := queue.New()
+
+	for {
+		if buf.Length() == 0 {
+			msg = <-p.retries
+		} else {
+			select {
+			case msg = <-p.retries:
+			case p.input <- buf.Peek().(*ProducerMessage):
+				msgToRemove := buf.Remove().(*ProducerMessage)
+				currentByteSize -= int64(msgToRemove.ByteSize(version))
+				continue
+			}
+		}
+
+		if msg == nil {
+			return
+		}
+
+		buf.Add(msg)
+		currentByteSize += int64(msg.ByteSize(version))
+
+		if (maxBufferLength <= 0 || buf.Length() < maxBufferLength) && (maxBufferBytes <= 0 || currentByteSize < maxBufferBytes) {
+			continue
+		}
+
+		msgToHandle := buf.Peek().(*ProducerMessage)
+		if msgToHandle.flags == 0 {
+			select {
+			case p.input <- msgToHandle:
+				buf.Remove()
+				currentByteSize -= int64(msgToHandle.ByteSize(version))
+			default:
+				buf.Remove()
+				currentByteSize -= int64(msgToHandle.ByteSize(version))
+				p.returnError(msgToHandle, ErrProducerRetryBufferOverflow)
+			}
+		}
+	}
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+	Logger.Println("Producer shutting down.")
+	p.inFlight.Add(1)
+	p.input <- &ProducerMessage{flags: shutdown}
+
+	p.inFlight.Wait()
+
+	err := p.client.Close()
+	if err != nil {
+		Logger.Println("producer/shutdown failed to close the embedded client:", err)
+	}
+
+	close(p.input)
+	close(p.retries)
+	close(p.errors)
+	close(p.successes)
+
+	p.metricsRegistry.UnregisterAll()
+}
+
+func (p *asyncProducer) bumpIdempotentProducerEpoch() {
+	_, epoch := p.txnmgr.getProducerID()
+	if epoch == math.MaxInt16 {
+		Logger.Println("producer/txnmanager epoch exhausted, requesting new producer ID")
+		txnmgr, err := newTransactionManager(p.conf, p.client)
+		if err != nil {
+			Logger.Println(err)
+			return
+		}
+
+		p.txnmgr = txnmgr
+	} else {
+		p.txnmgr.bumpEpoch()
+	}
+}
+
+func (p *asyncProducer) maybeTransitionToErrorState(err error) error {
+	if errors.Is(err, ErrClusterAuthorizationFailed) ||
+		errors.Is(err, ErrProducerFenced) ||
+		errors.Is(err, ErrUnsupportedVersion) ||
+		errors.Is(err, ErrTransactionalIDAuthorizationFailed) {
+		return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
+	}
+	if p.txnmgr.coordinatorSupportsBumpingEpoch && p.txnmgr.currentTxnStatus()&ProducerTxnFlagEndTransaction == 0 {
+		p.txnmgr.epochBumpRequired = true
+	}
+	return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+	if p.IsTransactional() {
+		_ = p.maybeTransitionToErrorState(err)
+	}
+	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+	// will never see a message with this number, so we can never continue the sequence.
+	if !p.IsTransactional() && msg.hasSequence {
+		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+		p.bumpIdempotentProducerEpoch()
+	}
+
+	msg.clear()
+	pErr := &ProducerError{Msg: msg, Err: err}
+	if p.conf.Producer.Return.Errors {
+		p.errors <- pErr
+	} else {
+		Logger.Println(pErr)
+	}
+	p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.returnError(msg, err)
+	}
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+	for _, msg := range batch {
+		if p.conf.Producer.Return.Successes {
+			msg.clear()
+			p.successes <- msg
+		}
+		p.inFlight.Done()
+	}
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+	if msg.retries >= p.conf.Producer.Retry.Max {
+		p.returnError(msg, err)
+	} else {
+		msg.retries++
+		p.retries <- msg
+	}
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+	for _, msg := range batch {
+		p.retryMessage(msg, err)
+	}
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bp := p.brokers[broker]
+
+	if bp == nil {
+		bp = p.newBrokerProducer(broker)
+		p.brokers[broker] = bp
+		p.brokerRefs[bp] = 0
+	}
+
+	p.brokerRefs[bp]++
+
+	return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	p.brokerRefs[bp]--
+	if p.brokerRefs[bp] == 0 {
+		close(bp.input)
+		delete(p.brokerRefs, bp)
+
+		if p.brokers[broker] == bp {
+			delete(p.brokers, broker)
+		}
+	}
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+	p.brokerLock.Lock()
+	defer p.brokerLock.Unlock()
+
+	bc, ok := p.brokers[broker]
+	if ok && bc.abandoned != nil {
+		close(bc.abandoned)
+	}
+
+	delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go
new file mode 100644
index 0000000..59f8948
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/balance_strategy.go
@@ -0,0 +1,1127 @@
+package sarama
+
+import (
+	"container/heap"
+	"errors"
+	"fmt"
+	"maps"
+	"math"
+	"slices"
+	"sort"
+	"strings"
+)
+
+const (
+	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+	RangeBalanceStrategyName = "range"
+
+	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+	RoundRobinBalanceStrategyName = "roundrobin"
+
+	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+	StickyBalanceStrategyName = "sticky"
+
+	defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a topic with a number of partitions to a member.
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+	if len(partitions) == 0 {
+		return
+	}
+	if _, ok := p[memberID]; !ok {
+		p[memberID] = make(map[string][]int32, 1)
+	}
+	p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
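+
+// A small sketch of assembling a plan by hand with Add:
+//
+//	plan := make(BalanceStrategyPlan)
+//	plan.Add("member-1", "T1", 0, 1, 2)
+//	plan.Add("member-2", "T1", 3, 4, 5)
+//	// plan["member-1"]["T1"] == []int32{0, 1, 2}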
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+	// Name uniquely identifies the strategy.
+	Name() string
+
+	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+	// and returns a distribution plan.
+	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+	// AssignmentData returns the serialized assignment data for the specified
+	// memberID
+	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
+}
+
+// --------------------------------------------------------------------
+
+// NewBalanceStrategyRange returns a range balance strategy,
+// which is the default and assigns partitions as ranges to consumer group members.
+// This follows the same logic as
+// https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html
+//
+// Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2):
+//
+//	M1: {T1: [0, 1, 2], T2: [0, 1, 2]}
+//	M2: {T1: [3, 4, 5], T2: [3, 4, 5]}
+func NewBalanceStrategyRange() BalanceStrategy {
+	return &balanceStrategy{
+		name: RangeBalanceStrategyName,
+		coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+			partitionsPerConsumer := len(partitions) / len(memberIDs)
+			consumersWithExtraPartition := len(partitions) % len(memberIDs)
+
+			sort.Strings(memberIDs)
+
+			for i, memberID := range memberIDs {
+				min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i)))
+				extra := 0
+				if i < consumersWithExtraPartition {
+					extra = 1
+				}
+				max := min + partitionsPerConsumer + extra
+				plan.Add(memberID, topic, partitions[min:max]...)
+			}
+		},
+	}
+}
+
+// Deprecated: use NewBalanceStrategyRange to avoid data race issue
+var BalanceStrategyRange = NewBalanceStrategyRange()
+
+// NewBalanceStrategySticky returns a sticky balance strategy,
+// which assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//
+//	M1: {T: [0, 2, 4]}
+//	M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//
+//	M1: {T: [0, 2]}
+//	M2: {T: [1, 3]}
+//	M3: {T: [4, 5]}
+func NewBalanceStrategySticky() BalanceStrategy {
+	return &stickyBalanceStrategy{}
+}
+
+// Deprecated: use NewBalanceStrategySticky to avoid data race issue
+var BalanceStrategySticky = NewBalanceStrategySticky()
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+	name   string
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// Build members by topic map
+	mbt := make(map[string][]string)
+	for memberID, meta := range members {
+		for _, topic := range meta.Topics {
+			mbt[topic] = append(mbt[topic], memberID)
+		}
+	}
+
+	// func to sort and de-duplicate a StringSlice
+	uniq := func(ss sort.StringSlice) []string {
+		if ss.Len() < 2 {
+			return ss
+		}
+		sort.Sort(ss)
+		var i, j int
+		for i = 1; i < ss.Len(); i++ {
+			if ss[i] == ss[j] {
+				continue
+			}
+			j++
+			ss.Swap(i, j)
+		}
+		return ss[:j+1]
+	}
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(members))
+	for topic, memberIDs := range mbt {
+		s.coreFn(plan, uniq(memberIDs), topic, topics[topic])
+	}
+	return plan, nil
+}
+
+// AssignmentData returns nil; simple strategies do not require any shared assignment data.
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil
+}
+
+type stickyBalanceStrategy struct {
+	movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	// track partition movements during generation of the partition assignment plan
+	s.movements = partitionMovements{
+		Movements:                 make(map[topicPartitionAssignment]consumerPair),
+		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+	}
+
+	// prepopulate the current assignment state from userdata on the consumer group members
+	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+	if err != nil {
+		return nil, err
+	}
+
+	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+	isFreshAssignment := len(currentAssignment) == 0
+
+	// create a mapping of all current topic partitions and the consumers that can be assigned to them
+	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+		}
+	}
+
+	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
+	// also, populate the mapping of partitions to potential consumers
+	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+	for memberID, meta := range members {
+		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+		for _, topicSubscription := range meta.Topics {
+			// only evaluate topic subscriptions that are present in the supplied topics map
+			if _, found := topics[topicSubscription]; found {
+				for _, partition := range topics[topicSubscription] {
+					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+				}
+			}
+		}
+
+		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+		if _, exists := currentAssignment[memberID]; !exists {
+			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+		}
+	}
+
+	// create a mapping of each partition to its current consumer, where possible
+	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unvisitedPartitions[partition] = true
+	}
+	var unassignedPartitions []topicPartitionAssignment
+	for memberID, partitions := range currentAssignment {
+		var keepPartitions []topicPartitionAssignment
+		for _, partition := range partitions {
+			// If this partition no longer exists at all, likely due to the
+			// topic being deleted, we remove the partition from the member.
+			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+				continue
+			}
+			delete(unvisitedPartitions, partition)
+			currentPartitionConsumers[partition] = memberID
+
+			if !slices.Contains(members[memberID].Topics, partition.Topic) {
+				unassignedPartitions = append(unassignedPartitions, partition)
+				continue
+			}
+			keepPartitions = append(keepPartitions, partition)
+		}
+		currentAssignment[memberID] = keepPartitions
+	}
+	for unvisited := range unvisitedPartitions {
+		unassignedPartitions = append(unassignedPartitions, unvisited)
+	}
+
+	// sort the topic partitions in order of priority for reassignment
+	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+	// at this point we have preserved all valid topic partition to consumer assignments and removed
+	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+	// to consumers so that the topic partition assignments are as balanced as possible.
+
+	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+	// Assemble plan
+	plan := make(BalanceStrategyPlan, len(currentAssignment))
+	for memberID, assignments := range currentAssignment {
+		if len(assignments) == 0 {
+			plan[memberID] = make(map[string][]int32)
+		} else {
+			for _, assignment := range assignments {
+				plan.Add(memberID, assignment.Topic, assignment.Partition)
+			}
+		}
+	}
+	return plan, nil
+}
+
+// AssignmentData serializes the set of topics currently assigned to the
+// specified member as part of the supplied balance plan
+func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return encode(&StickyAssignorUserDataV1{
+		Topics:     topics,
+		Generation: generationID,
+	}, nil)
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+	initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0
+
+	// assign all unassigned partitions
+	for _, partition := range unassignedPartitions {
+		// skip if there is no potential consumer for the partition
+		if len(partition2AllPotentialConsumers[partition]) == 0 {
+			continue
+		}
+		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+	}
+
+	// narrow down the reassignment scope to only those partitions that can actually be reassigned
+	for partition := range partition2AllPotentialConsumers {
+		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+		}
+	}
+
+	// narrow down the reassignment scope to only those consumers that are subject to reassignment
+	fixedAssignments := make(map[string][]topicPartitionAssignment)
+	for memberID := range consumer2AllPotentialPartitions {
+		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+			fixedAssignments[memberID] = currentAssignment[memberID]
+			delete(currentAssignment, memberID)
+			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+		}
+	}
+
+	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+	preBalanceAssignment := deepCopyAssignment(currentAssignment)
+	preBalancePartitionConsumers := maps.Clone(currentPartitionConsumer)
+
+	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if we are not initializing and we have made changes to the current assignment,
+	// only keep them if they produced a more balanced assignment; otherwise, revert to the previous assignment
+	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+		currentAssignment = deepCopyAssignment(preBalanceAssignment)
+		clear(currentPartitionConsumer)
+		maps.Copy(currentPartitionConsumer, preBalancePartitionConsumers)
+	}
+
+	// add the fixed assignments (those that could not change) back
+	maps.Copy(currentAssignment, fixedAssignments)
+}
+
+// NewBalanceStrategyRoundRobin returns a round-robin balance strategy,
+// which assigns partitions to members in alternating order.
+// For example, given two topics (t0, t1) and two consumers (M0, M1), where each topic has three partitions (p0, p1, p2):
+//
+//	M0: [t0p0, t0p2, t1p1]
+//	M1: [t0p1, t1p0, t1p2]
+func NewBalanceStrategyRoundRobin() BalanceStrategy {
+	return new(roundRobinBalancer)
+}
+
+// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issues
+var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin()
+
+type roundRobinBalancer struct{}
+
+func (b *roundRobinBalancer) Name() string {
+	return RoundRobinBalanceStrategyName
+}
+
+func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+	if len(memberAndMetadata) == 0 || len(topics) == 0 {
+		return nil, errors.New("members and topics are not provided")
+	}
+	// sort partitions
+	var topicPartitions []topicAndPartition
+	for topic, partitions := range topics {
+		for _, partition := range partitions {
+			topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
+		}
+	}
+	sort.SliceStable(topicPartitions, func(i, j int) bool {
+		pi := topicPartitions[i]
+		pj := topicPartitions[j]
+		return pi.comparedValue() < pj.comparedValue()
+	})
+
+	// sort members
+	var members []memberAndTopic
+	for memberID, meta := range memberAndMetadata {
+		m := memberAndTopic{
+			memberID: memberID,
+			topics:   make(map[string]struct{}),
+		}
+		for _, t := range meta.Topics {
+			m.topics[t] = struct{}{}
+		}
+		members = append(members, m)
+	}
+	sort.SliceStable(members, func(i, j int) bool {
+		mi := members[i]
+		mj := members[j]
+		return mi.memberID < mj.memberID
+	})
+
+	// assign partitions
+	plan := make(BalanceStrategyPlan, len(members))
+	i := 0
+	n := len(members)
+	for _, tp := range topicPartitions {
+		m := members[i%n]
+		for !m.hasTopic(tp.topic) {
+			i++
+			m = members[i%n]
+		}
+		plan.Add(m.memberID, tp.topic, tp.partition)
+		i++
+	}
+	return plan, nil
+}
+
+func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+	return nil, nil // do nothing for now
+}
+
+type topicAndPartition struct {
+	topic     string
+	partition int32
+}
+
+func (tp *topicAndPartition) comparedValue() string {
+	return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
+}
+
+type memberAndTopic struct {
+	topics   map[string]struct{}
+	memberID string
+}
+
+func (m *memberAndTopic) hasTopic(topic string) bool {
+	_, isExist := m.topics[topic]
+	return isExist
+}
+
+// Calculate the balance score of the given assignment, as the sum of the differences in the
+// number of assigned partitions across all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// A lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+	consumer2AssignmentSize := make(map[string]int, len(assignment))
+	for memberID, partitions := range assignment {
+		consumer2AssignmentSize[memberID] = len(partitions)
+	}
+
+	var score float64
+	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+		delete(consumer2AssignmentSize, memberID)
+		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+		}
+	}
+	return int(score)
+}
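+
+// Worked example (illustrative): for assignment sizes m1=1, m2=2 and m3=3 the
+// score is |1-2| + |1-3| + |2-3| = 4, whereas a perfectly even split scores 0.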
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+	if min >= max-1 {
+		// if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
+		return true
+	}
+
+	// create a mapping from partitions to the consumer assigned to them
+	allPartitions := make(map[topicPartitionAssignment]string)
+	for memberID, partitions := range currentAssignment {
+		for _, partition := range partitions {
+			if _, exists := allPartitions[partition]; exists {
+				Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition)
+			}
+			allPartitions[partition] = memberID
+		}
+	}
+
+	// for each consumer that does not have all the topic partitions it can get, make sure none of the
+	// topic partitions it could have but did not get can be moved to it (because that would break the balance)
+	for _, memberID := range sortedCurrentSubscriptions {
+		consumerPartitions := currentAssignment[memberID]
+		consumerPartitionCount := len(consumerPartitions)
+
+		// skip if this consumer already has all the topic partitions it can get
+		if consumerPartitionCount == len(allSubscriptions[memberID]) {
+			continue
+		}
+
+		// otherwise make sure it cannot get any more
+		potentialTopicPartitions := allSubscriptions[memberID]
+		for _, partition := range potentialTopicPartitions {
+			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+				otherConsumer := allPartitions[partition]
+				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+				if consumerPartitionCount < otherConsumerPartitionCount {
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+	reassignmentPerformed := false
+	modified := false
+
+	// repeat reassignment until no partition can be moved to improve the balance
+	for {
+		modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential
+		// consumers) until the full list is processed or a balance is achieved
+		for _, partition := range reassignablePartitions {
+			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+				break
+			}
+
+			// the partition must have at least two consumers
+			if len(partition2AllPotentialConsumers[partition]) <= 1 {
+				Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition)
+			}
+
+			// the partition must have a consumer
+			consumer := currentPartitionConsumer[partition]
+			if consumer == "" {
+				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+			}
+
+			if _, exists := prevAssignment[partition]; exists {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+					reassignmentPerformed = true
+					modified = true
+					continue
+				}
+			}
+
+			// check if a better-suited consumer exists for the partition; if so, reassign it
+			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+					reassignmentPerformed = true
+					modified = true
+					break
+				}
+			}
+		}
+		if !modified {
+			return reassignmentPerformed
+		}
+	}
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+	for _, anotherConsumer := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+		}
+	}
+	return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+	consumer := currentPartitionConsumer[partition]
+	// find the correct partition movement considering the stickiness requirement
+	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	oldConsumer := currentPartitionConsumer[partition]
+	s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+	currentPartitionConsumer[partition] = newConsumer
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	currentPartitions := currentAssignment[memberID]
+	currentAssignmentSize := len(currentPartitions)
+	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+	if currentAssignmentSize > maxAssignmentSize {
+		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+	}
+	if currentAssignmentSize < maxAssignmentSize {
+		// if a consumer is not assigned all its potential partitions it is subject to reassignment
+		return true
+	}
+	for _, partition := range currentPartitions {
+		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+			return true
+		}
+	}
+	return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+	return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// The assignment should improve the overall balance of the partition assignments to consumers.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+	for _, memberID := range sortedCurrentSubscriptions {
+		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+			currentPartitionConsumer[partition] = memberID
+			break
+		}
+	}
+	return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+	userDataV1 := &StickyAssignorUserDataV1{}
+	if err := decode(userDataBytes, userDataV1, nil); err != nil {
+		userDataV0 := &StickyAssignorUserDataV0{}
+		if err := decode(userDataBytes, userDataV0, nil); err != nil {
+			return nil, err
+		}
+		return userDataV0, nil
+	}
+	return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+	assignments := deepCopyAssignment(currentAssignment)
+	for memberID, partitions := range assignments {
+		// perform in-place filtering
+		i := 0
+		for _, partition := range partitions {
+			if _, exists := partition2AllPotentialConsumers[partition]; exists {
+				partitions[i] = partition
+				i++
+			}
+		}
+		assignments[memberID] = partitions[:i]
+	}
+	return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+	for i, assignment := range assignments {
+		if assignment == topic {
+			return append(assignments[:i], assignments[i+1:]...)
+		}
+	}
+	return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+	return slices.Contains(assignments, topic)
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+	for partition := range partition2AllPotentialConsumers {
+		unassignedPartitions[partition] = true
+	}
+
+	sortedPartitions := make([]topicPartitionAssignment, 0)
+	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics),
+		// then we simply list partitions in a round-robin fashion (from consumers with the most
+		// assigned partitions to those with the least)
+		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use priority-queue to evaluate consumer group members in descending-order based on
+		// the number of topic partition assignments (i.e. consumers with most assignments first)
+		pq := make(assignmentPriorityQueue, len(assignments))
+		i := 0
+		for consumerID, consumerAssignments := range assignments {
+			pq[i] = &consumerGroupMember{
+				id:          consumerID,
+				assignments: consumerAssignments,
+			}
+			i++
+		}
+		heap.Init(&pq)
+
+		// loop until no consumer-group members remain
+		for pq.Len() != 0 {
+			member := pq[0]
+
+			// partitions that were assigned to a different consumer last time
+			var prevPartitionIndex int
+			for i, partition := range member.assignments {
+				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+					prevPartitionIndex = i
+					break
+				}
+			}
+
+			if len(member.assignments) > 0 {
+				partition := member.assignments[prevPartitionIndex]
+				sortedPartitions = append(sortedPartitions, partition)
+				delete(unassignedPartitions, partition)
+				if prevPartitionIndex == 0 {
+					member.assignments = member.assignments[1:]
+				} else {
+					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+				}
+				heap.Fix(&pq, 0)
+			} else {
+				heap.Pop(&pq)
+			}
+		}
+
+		for partition := range unassignedPartitions {
+			sortedPartitions = append(sortedPartitions, partition)
+		}
+	} else {
+		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
+		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+	}
+	return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+	// sort the members by the number of partition assignments in ascending order
+	sortedMemberIDs := make([]string, 0, len(assignments))
+	for memberID := range assignments {
+		sortedMemberIDs = append(sortedMemberIDs, memberID)
+	}
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return ret < 0
+	})
+	return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the members by the number of partition assignments in descending order
+	sortedPartionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartionIDs[j]]) {
+			ret := strings.Compare(sortedPartionIDs[i].Topic, sortedPartionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartionIDs[i].Partition < sortedPartionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartionIDs[j]])
+	})
+	return sortedPartionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+	m := make(map[string][]topicPartitionAssignment, len(assignment))
+	for memberID, subscriptions := range assignment {
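+		// subscriptions[:0:0] is an empty slice with zero capacity, so append is
+		// forced to allocate a fresh backing array, yielding a true deep copy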
+		m[memberID] = append(subscriptions[:0:0], subscriptions...)
+	}
+	return m
+}
+
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+	curMembers := make(map[string]int)
+	for _, cur := range partition2AllPotentialConsumers {
+		if len(curMembers) == 0 {
+			for _, curMembersElem := range cur {
+				curMembers[curMembersElem]++
+			}
+			continue
+		}
+
+		if len(curMembers) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[string]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curMembers {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+
+	curPartitions := make(map[topicPartitionAssignment]int)
+	for _, cur := range consumer2AllPotentialPartitions {
+		if len(curPartitions) == 0 {
+			for _, curPartitionElem := range cur {
+				curPartitions[curPartitionElem]++
+			}
+			continue
+		}
+
+		if len(curPartitions) != len(cur) {
+			return false
+		}
+
+		yMap := make(map[topicPartitionAssignment]int)
+		for _, yElem := range cur {
+			yMap[yElem]++
+		}
+
+		for curMembersMapKey, curMembersMapVal := range curPartitions {
+			if yMap[curMembersMapKey] != curMembersMapVal {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// We need to process subscriptions' user data with each consumer's reported generation in mind:
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict could exist only if the user data is for different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+	currentAssignment := make(map[string][]topicPartitionAssignment)
+	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+	// for each partition we create a map of its consumers keyed by generation (sorted when consulted below)
+	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+	for memberID, meta := range members {
+		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+		if err != nil {
+			return nil, nil, err
+		}
+		for _, partition := range consumerUserData.partitions() {
+			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+				if consumerUserData.hasGeneration() {
+					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+						// same partition is assigned to two consumers during the same rebalance.
+						// log a warning and skip this record
+						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+						continue
+					} else {
+						consumers[consumerUserData.generation()] = memberID
+					}
+				} else {
+					consumers[defaultGeneration] = memberID
+				}
+			} else {
+				generation := defaultGeneration
+				if consumerUserData.hasGeneration() {
+					generation = consumerUserData.generation()
+				}
+				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+			}
+		}
+	}
+
+	// prevAssignment holds the prior ConsumerGenerationPair (before the current one) of each partition;
+	// the current and previous consumers are those with the two highest generations for each partition in the above map
+	for partition, consumers := range sortedPartitionConsumersByGeneration {
+		// sort consumers by generation in decreasing order
+		var generations []int
+		for generation := range consumers {
+			generations = append(generations, generation)
+		}
+		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+		consumer := consumers[generations[0]]
+		if _, exists := currentAssignment[consumer]; !exists {
+			currentAssignment[consumer] = []topicPartitionAssignment{partition}
+		} else {
+			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+		}
+
+		// check for previous assignment, if any
+		if len(generations) > 1 {
+			prevAssignment[partition] = consumerGenerationPair{
+				MemberID:   consumers[generations[1]],
+				Generation: generations[1],
+			}
+		}
+	}
+	return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+	MemberID   string
+	Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
+type consumerPair struct {
+	SrcMemberID string
+	DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+	Movements                 map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+	pair := p.Movements[partition]
+	delete(p.Movements, partition)
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	delete(partitionMovementsForThisTopic[pair], partition)
+	if len(partitionMovementsForThisTopic[pair]) == 0 {
+		delete(partitionMovementsForThisTopic, pair)
+	}
+	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+		delete(p.PartitionMovementsByTopic, partition.Topic)
+	}
+	return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+	p.Movements[partition] = pair
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+	}
+	partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+	pair := consumerPair{
+		SrcMemberID: oldConsumer,
+		DstMemberID: newConsumer,
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		existingPair := p.removeMovementRecordOfPartition(partition)
+		if existingPair.DstMemberID != oldConsumer {
+			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+		}
+		if existingPair.SrcMemberID != newConsumer {
+			// the partition is not moving back to its previous consumer
+			p.addPartitionMovementRecord(partition, consumerPair{
+				SrcMemberID: existingPair.SrcMemberID,
+				DstMemberID: newConsumer,
+			})
+		}
+	} else {
+		p.addPartitionMovementRecord(partition, pair)
+	}
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+		return partition
+	}
+	if _, exists := p.Movements[partition]; exists {
+		// this partition has previously moved
+		if oldConsumer != p.Movements[partition].DstMemberID {
+			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+		}
+		oldConsumer = p.Movements[partition].SrcMemberID
+	}
+
+	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+	reversePair := consumerPair{
+		SrcMemberID: newConsumer,
+		DstMemberID: oldConsumer,
+	}
+	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+		return partition
+	}
+	var reversePairPartition topicPartitionAssignment
+	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+		reversePairPartition = otherPartition
+	}
+	return reversePairPartition
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+	if src == dst {
+		return currentPath, false
+	}
+	if len(pairs) == 0 {
+		return currentPath, false
+	}
+	for _, pair := range pairs {
+		if src == pair.SrcMemberID && dst == pair.DstMemberID {
+			currentPath = append(currentPath, src, dst)
+			return currentPath, true
+		}
+	}
+
+	for _, pair := range pairs {
+		if pair.SrcMemberID != src {
+			continue
+		}
+		// create a deep copy of the pairs, excluding the current pair
+		reducedSet := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedSet[i] = p
+				i++
+			}
+		}
+
+		currentPath = append(currentPath, pair.SrcMemberID)
+		return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+	}
+	return currentPath, false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+	superCycle := make([]string, len(cycle)-1)
+	for i := 0; i < len(cycle)-1; i++ {
+		superCycle[i] = cycle[i]
+	}
+	superCycle = append(superCycle, cycle...)
+	for _, foundCycle := range cycles {
+		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+			return true
+		}
+	}
+	return false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+	cycles := make([][]string, 0)
+	for _, pair := range pairs {
+		// create a deep copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, p := range pairs {
+			if p != pair {
+				reducedPairs[i] = p
+				i++
+			}
+		}
+		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+			if !p.in(path, cycles) {
+				cycles = append(cycles, path)
+				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+			}
+		}
+	}
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem so low (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+	for _, cycle := range cycles {
+		if len(cycle) == 3 {
+			return true
+		}
+	}
+	return false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) isSticky() bool {
+	for topic, movements := range p.PartitionMovementsByTopic {
+		movementPairs := make([]consumerPair, len(movements))
+		i := 0
+		for pair := range movements {
+			movementPairs[i] = pair
+			i++
+		}
+		if p.hasCycles(movementPairs) {
+			Logger.Printf("Stickiness is violated for topic %s", topic)
+			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+			return false
+		}
+	}
+	return true
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func indexOfSubList(source []string, target []string) int {
+	targetSize := len(target)
+	maxCandidate := len(source) - targetSize
+nextCand:
+	for candidate := 0; candidate <= maxCandidate; candidate++ {
+		j := candidate
+		for i := 0; i < targetSize; i++ {
+			if target[i] != source[j] {
+				// Element mismatch, try the next candidate
+				continue nextCand
+			}
+			j++
+		}
+		// All elements of candidate matched target
+		return candidate
+	}
+	return -1
+}
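+
+// Worked examples (illustrative):
+//
+//	indexOfSubList([]string{"a", "b", "c", "d"}, []string{"b", "c"}) // 1
+//	indexOfSubList([]string{"a", "b"}, []string{"b", "a"})           // -1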
+
+type consumerGroupMember struct {
+	id          string
+	assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+	// order assignment priority queue in descending order using assignment-count/member-id
+	if len(pq[i].assignments) == len(pq[j].assignments) {
+		return pq[i].id > pq[j].id
+	}
+	return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+	pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+	member := x.(*consumerGroupMember)
+	*pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+	old := *pq
+	n := len(old)
+	member := old[n-1]
+	*pq = old[0 : n-1]
+	return member
+}
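+
+// A minimal sketch of driving the queue via container/heap (illustrative;
+// mirrors its use in sortPartitions above):
+//
+//	pq := assignmentPriorityQueue{
+//		&consumerGroupMember{id: "m1", assignments: make([]topicPartitionAssignment, 2)},
+//		&consumerGroupMember{id: "m2", assignments: make([]topicPartitionAssignment, 5)},
+//	}
+//	heap.Init(&pq)
+//	next := heap.Pop(&pq).(*consumerGroupMember) // "m2": most assignments first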
diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go
new file mode 100644
index 0000000..7c559cf
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/broker.go
@@ -0,0 +1,1869 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math/rand"
+	"net"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+	conf *Config
+	rack *string
+
+	id            int32
+	addr          string
+	correlationID int32
+	conn          net.Conn
+	connErr       error
+	lock          sync.Mutex
+	opened        atomic.Bool
+	responses     chan *responsePromise
+	done          chan bool
+
+	metricRegistry             metrics.Registry
+	incomingByteRate           metrics.Meter
+	requestRate                metrics.Meter
+	fetchRate                  metrics.Meter
+	requestSize                metrics.Histogram
+	requestLatency             metrics.Histogram
+	outgoingByteRate           metrics.Meter
+	responseRate               metrics.Meter
+	responseSize               metrics.Histogram
+	requestsInFlight           metrics.Counter
+	protocolRequestsRate       map[int16]metrics.Meter
+	brokerIncomingByteRate     metrics.Meter
+	brokerRequestRate          metrics.Meter
+	brokerFetchRate            metrics.Meter
+	brokerRequestSize          metrics.Histogram
+	brokerRequestLatency       metrics.Histogram
+	brokerOutgoingByteRate     metrics.Meter
+	brokerResponseRate         metrics.Meter
+	brokerResponseSize         metrics.Histogram
+	brokerRequestsInFlight     metrics.Counter
+	brokerThrottleTime         metrics.Histogram
+	brokerProtocolRequestsRate map[int16]metrics.Meter
+	brokerAPIVersions          apiVersionMap
+
+	kerberosAuthenticator               GSSAPIKerberosAuth
+	clientSessionReauthenticationTimeMs int64
+
+	throttleTimer     *time.Timer
+	throttleTimerLock sync.Mutex
+}
+
+// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
+type SASLMechanism string
+
+const (
+	// SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
+	SASLTypeOAuth = "OAUTHBEARER"
+	// SASLTypePlaintext represents the SASL/PLAIN mechanism
+	SASLTypePlaintext = "PLAIN"
+	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
+	SASLTypeGSSAPI      = "GSSAPI"
+	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL auth using opaque packets.
+	SASLHandshakeV0 = int16(0)
+	// SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
+	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
+	SASLHandshakeV1 = int16(1)
+	// SASLExtKeyAuth is the reserved extension key name sent as part of the
+	// SASL/OAUTHBEARER initial client response
+	SASLExtKeyAuth = "auth"
+)
+
+// AccessToken contains an access token used to authenticate a
+// SASL/OAUTHBEARER client along with associated metadata.
+type AccessToken struct {
+	// Token is the access token payload.
+	Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+	// sent with the SASL/OAUTHBEARER initial client response. These values are
+	// ignored by the SASL server if they are unexpected. This feature is only
+	// supported by Kafka >= 2.1.0.
+	Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+	// Token returns an access token. The implementation should ensure token
+	// reuse so that multiple calls at connect time do not create multiple
+	// tokens. The implementation should also periodically refresh the token in
+	// order to guarantee that each call returns an unexpired token.  This
+	// method should not block indefinitely--a timeout error should be returned
+	// after a short period of inactivity so that the broker connection logic
+	// can log debugging information and retry.
+	Token() (*AccessToken, error)
+}
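+
+// A minimal static provider sketch (illustrative only; a real implementation
+// should cache and refresh tokens as described above). It is assumed to be
+// wired in via the config's Net.SASL.TokenProvider field.
+//
+//	type staticTokenProvider struct{ token string }
+//
+//	func (s *staticTokenProvider) Token() (*AccessToken, error) {
+//		return &AccessToken{Token: s.token}, nil
+//	}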
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+	// Begin prepares the client for the SCRAM exchange
+	// with the server with a user name and a password
+	Begin(userName, password, authzID string) error
+	// Step steps client through the SCRAM exchange. It is
+	// called repeatedly until it errors or `Done` returns true.
+	Step(challenge string) (response string, err error)
+	// Done should return true when the SCRAM conversation
+	// is over.
+	Done() bool
+}
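+
+// The conversation driven through this interface has roughly the following
+// shape (sketch; readChallenge is a hypothetical helper that returns each
+// server challenge):
+//
+//	if err := client.Begin(user, pass, ""); err != nil {
+//		return err
+//	}
+//	msg, err := client.Step("") // client-first message
+//	for err == nil && !client.Done() {
+//		msg, err = client.Step(readChallenge(msg))
+//	}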
+
+type responsePromise struct {
+	requestTime   time.Time
+	correlationID int32
+	response      protocolBody
+	handler       func([]byte, error)
+	packets       chan []byte
+	errors        chan error
+}
+
+func (p *responsePromise) handle(packets []byte, err error) {
+	// Use callback when provided
+	if p.handler != nil {
+		p.handler(packets, err)
+		return
+	}
+	// Otherwise fallback to using channels
+	if err != nil {
+		p.errors <- err
+		return
+	}
+	p.packets <- packets
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+	return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+	if !b.opened.CompareAndSwap(false, true) {
+		return ErrAlreadyConnected
+	}
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	err := conf.Validate()
+	if err != nil {
+		return err
+	}
+
+	b.lock.Lock()
+
+	if b.metricRegistry == nil {
+		b.metricRegistry = newCleanupRegistry(conf.MetricRegistry)
+	}
+
+	go withRecover(func() {
+		defer b.lock.Unlock()
+
+		dialer := conf.getDialer()
+		b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+		if b.connErr != nil {
+			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+			b.conn = nil
+			b.opened.Store(false)
+			return
+		}
+		if conf.Net.TLS.Enable {
+			b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
+		}
+
+		b.conn = newBufConn(b.conn)
+		b.conf = conf
+
+		// Create or reuse the global metrics shared between brokers
+		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", b.metricRegistry)
+		b.requestRate = metrics.GetOrRegisterMeter("request-rate", b.metricRegistry)
+		b.fetchRate = metrics.GetOrRegisterMeter("consumer-fetch-rate", b.metricRegistry)
+		b.requestSize = getOrRegisterHistogram("request-size", b.metricRegistry)
+		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", b.metricRegistry)
+		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", b.metricRegistry)
+		b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry)
+		b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry)
+		b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry)
+		b.protocolRequestsRate = map[int16]metrics.Meter{}
+		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
+		// the same id (-1) and are already exposed through the global metrics above
+		if b.id >= 0 && !metrics.UseNilMetrics {
+			b.registerMetrics()
+		}
+
+		// Send an ApiVersionsRequest to identify the client (KIP-511).
+		// Store the response in the brokerAPIVersions map.
+		// It will be used to determine the supported API versions for each request.
+		// This should happen before SASL authentication: https://kafka.apache.org/26/protocol.html#api_versions
+		if conf.ApiVersionsRequest {
+			apiVersionsResponse, err := b.sendAndReceiveApiVersions(3)
+			if err != nil {
+				Logger.Printf("Error while sending ApiVersionsRequest V3 to broker %s: %s\n", b.addr, err)
+				// send a lower version request in case the remote cluster is <= 2.4.0.0
+				maxVersion := int16(0)
+				if apiVersionsResponse != nil {
+					for _, k := range apiVersionsResponse.ApiKeys {
+						if k.ApiKey == apiKeyApiVersions {
+							maxVersion = k.MaxVersion
+							break
+						}
+					}
+				}
+				apiVersionsResponse, err = b.sendAndReceiveApiVersions(maxVersion)
+				if err != nil {
+					Logger.Printf("Error while sending ApiVersionsRequest V%d to broker %s: %s\n", maxVersion, b.addr, err)
+				}
+			}
+			if apiVersionsResponse != nil {
+				b.brokerAPIVersions = make(apiVersionMap, len(apiVersionsResponse.ApiKeys))
+				for _, key := range apiVersionsResponse.ApiKeys {
+					b.brokerAPIVersions[key.ApiKey] = &apiVersionRange{
+						minVersion: key.MinVersion,
+						maxVersion: key.MaxVersion,
+					}
+				}
+			}
+		}
+
+		if conf.Net.SASL.Mechanism == SASLTypeOAuth && conf.Net.SASL.Version == SASLHandshakeV0 {
+			conf.Net.SASL.Version = SASLHandshakeV1
+		}
+
+		useSaslV0 := conf.Net.SASL.Version == SASLHandshakeV0
+		if conf.Net.SASL.Enable && useSaslV0 {
+			b.connErr = b.authenticateViaSASLv0()
+
+			if b.connErr != nil {
+				err = b.conn.Close()
+				if err == nil {
+					DebugLogger.Printf("Closed connection to broker %s due to SASL v0 auth error: %s\n", b.addr, b.connErr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s (due to SASL v0 auth error: %s): %s\n", b.addr, b.connErr, err)
+				}
+				b.conn = nil
+				b.opened.Store(false)
+				return
+			}
+		}
+
+		b.done = make(chan bool)
+		b.responses = make(chan *responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+		go withRecover(b.responseReceiver)
+		if conf.Net.SASL.Enable && !useSaslV0 {
+			b.connErr = b.authenticateViaSASLv1()
+			if b.connErr != nil {
+				close(b.responses)
+				<-b.done
+				err = b.conn.Close()
+				if err == nil {
+					DebugLogger.Printf("Closed connection to broker %s due to SASL v1 auth error: %s\n", b.addr, b.connErr)
+				} else {
+					Logger.Printf("Error while closing connection to broker %s (due to SASL v1 auth error: %s): %s\n", b.addr, b.connErr, err)
+				}
+				b.conn = nil
+				b.opened.Store(false)
+				return
+			}
+		}
+		if b.id >= 0 {
+			DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+		} else {
+			DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+		}
+	})
+
+	return nil
+}
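+
+// A minimal sketch of a fully synchronous open (illustrative):
+//
+//	broker := NewBroker("localhost:9092")
+//	if err := broker.Open(nil); err != nil {
+//		return err
+//	}
+//	if ok, err := broker.Connected(); !ok {
+//		return err // the connection error, if any
+//	}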
+
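+// ResponseSize returns the number of response promises currently buffered on
+// the broker's responses channel.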
+func (b *Broker) ResponseSize() int {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return len(b.responses)
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	return b.conn != nil, b.connErr
+}
+
+// TLSConnectionState returns the client's TLS connection state. The second return value is false if this is not a tls connection or the connection has not yet been established.
+func (b *Broker) TLSConnectionState() (state tls.ConnectionState, ok bool) {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return state, false
+	}
+	conn := b.conn
+	if bconn, ok := b.conn.(*bufConn); ok {
+		conn = bconn.Conn
+	}
+	if tc, ok := conn.(*tls.Conn); ok {
+		return tc.ConnectionState(), true
+	}
+	return state, false
+}
+
+// Close closes the broker resources
+func (b *Broker) Close() error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	if b.conn == nil {
+		return ErrNotConnected
+	}
+
+	close(b.responses)
+	<-b.done
+
+	err := b.conn.Close()
+
+	b.conn = nil
+	b.connErr = nil
+	b.done = nil
+	b.responses = nil
+
+	b.metricRegistry.UnregisterAll()
+
+	if err == nil {
+		DebugLogger.Printf("Closed connection to broker %s\n", b.addr)
+	} else {
+		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+	}
+	b.opened.Store(false)
+
+	return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+	return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+	return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known.  The returned value corresponds to the
+// broker's broker.rack configuration setting.  Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+	if b.rack == nil {
+		return ""
+	}
+	return *b.rack
+}
+
+// GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+	response := new(MetadataResponse)
+	response.Version = request.Version // Required to ensure use of the correct response header version
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+	response := new(ConsumerMetadataResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+	response := new(FindCoordinatorResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+	response := new(OffsetResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ProduceCallback function is called once the produce response has been parsed
+// or could not be read.
+type ProduceCallback func(*ProduceResponse, error)
+
+// AsyncProduce sends a produce request and eventually calls the provided callback
+// with a produce response or an error.
+//
+// Unlike Produce, waiting for the response is generally not blocking.
+// If the configured maximum number of in-flight requests is reached, then
+// the request will block until a previous response has been received.
+//
+// When configured with RequiredAcks == NoResponse, the callback will not be invoked.
+// If an error is returned because the request could not be sent then the callback
+// will not be invoked either.
+//
+// Make sure not to Close the broker in the callback as it will lead to a deadlock.
+func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	needAcks := request.RequiredAcks != NoResponse
+	// Use a nil promise when no acks are required
+	var promise *responsePromise
+
+	if needAcks {
+		metricRegistry := b.metricRegistry
+
+		// Create ProduceResponse early to provide the header version
+		res := new(ProduceResponse)
+		promise = &responsePromise{
+			response: res,
+			// Packets will be converted to a ProduceResponse in the responseReceiver goroutine
+			handler: func(packets []byte, err error) {
+				if err != nil {
+					// Failed request
+					cb(nil, err)
+					return
+				}
+
+				if err := versionedDecode(packets, res, request.version(), metricRegistry); err != nil {
+					// Malformed response
+					cb(nil, err)
+					return
+				}
+
+				// Well-formed response
+				b.handleThrottledResponse(res)
+				cb(res, nil)
+			},
+		}
+	}
+
+	return b.sendWithPromise(request, promise)
+}
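+
+// A minimal callback sketch (illustrative; req is assumed to be an
+// already-built *ProduceRequest with RequiredAcks != NoResponse):
+//
+//	err := broker.AsyncProduce(req, func(res *ProduceResponse, err error) {
+//		if err != nil {
+//			Logger.Printf("produce failed: %v", err)
+//			return
+//		}
+//		// inspect res.Blocks for per-partition results
+//	})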
+
+// Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+	var (
+		response *ProduceResponse
+		err      error
+	)
+
+	if request.RequiredAcks == NoResponse {
+		err = b.sendAndReceive(request, nil)
+	} else {
+		response = new(ProduceResponse)
+		err = b.sendAndReceive(request, response)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+	defer func() {
+		if b.fetchRate != nil {
+			b.fetchRate.Mark(1)
+		}
+		if b.brokerFetchRate != nil {
+			b.brokerFetchRate.Mark(1)
+		}
+	}()
+
+	response := new(FetchResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+	response := new(OffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+	response := new(OffsetFetchResponse)
+	response.Version = request.Version // needed to handle the two header versions
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+	response := new(JoinGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+	response := new(SyncGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+	response := new(LeaveGroupResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+	response := new(HeartbeatResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListGroups returns a list groups response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+	response := new(ListGroupsResponse)
+	response.Version = request.Version // Required to ensure use of the correct response header version
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+	response := new(DescribeGroupsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ApiVersions returns an api versions response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+	response := new(ApiVersionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreateTopics sends a create topics request and returns a create topics response or error
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+	response := new(CreateTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteTopics sends a delete topics request and returns a delete topics response or error
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+	response := new(DeleteTopicsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreatePartitions sends a create partitions request and returns a create
+// partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+	response := new(CreatePartitionsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterPartitionReassignments sends an alter partition reassignments request and
+// returns an alter partition reassignments response or error
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+	response := new(AlterPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ListPartitionReassignments sends a list partition reassignments request and
+// returns a list partition reassignments response or error
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+	response := new(ListPartitionReassignmentsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// ElectLeaders sends an elect leaders request and returns an elect leaders response or error
+func (b *Broker) ElectLeaders(request *ElectLeadersRequest) (*ElectLeadersResponse, error) {
+	response := new(ElectLeadersResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteRecords sends a request to delete records and returns a delete records
+// response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+	response := new(DeleteRecordsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+	response := new(DescribeAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+	response := new(CreateAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	errs := make([]error, 0)
+	for _, res := range response.AclCreationResponses {
+		if !errors.Is(res.Err, ErrNoError) {
+			errs = append(errs, res.Err)
+		}
+	}
+
+	if len(errs) > 0 {
+		return response, Wrap(ErrCreateACLs, errs...)
+	}
+
+	return response, nil
+}
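+
+// Because ACL creation can partially succeed, CreateAcls returns the response
+// even when some creations failed, wrapping the individual failures in
+// ErrCreateACLs. A short sketch of how a caller might handle this
+// (illustrative only):
+//
+//	res, err := b.CreateAcls(req)
+//	if errors.Is(err, ErrCreateACLs) {
+//		// res still carries the per-ACL results in res.AclCreationResponses
+//	}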
+
+// DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+	response := new(DeleteAclsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+	response := new(InitProducerIDResponse)
+	response.Version = request.version()
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AddPartitionsToTxn sends a request to add partitions to a transaction and returns
+// a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+	response := new(AddPartitionsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AddOffsetsToTxn sends a request to add offsets to a transaction and returns a response
+// or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+	response := new(AddOffsetsToTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// EndTxn sends a request to end a transaction and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+	response := new(EndTxnResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// TxnOffsetCommit sends a request to commit transaction offsets and returns
+// a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+	response := new(TxnOffsetCommitResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeConfigs sends a request to describe configs and returns a response or
+// error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+	response := new(DescribeConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterConfigs sends a request to alter configs and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+	response := new(AlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// IncrementalAlterConfigs sends a request to incrementally alter configs and returns a response or error
+func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
+	response := new(IncrementalAlterConfigsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteGroups sends a request to delete groups and returns a response or error
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+	response := new(DeleteGroupsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DeleteOffsets sends a request to delete group offsets and returns a response or error
+func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) {
+	response := new(DeleteOffsetsResponse)
+
+	if err := b.sendAndReceive(request, response); err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+	response := new(DescribeLogDirsResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// DescribeUserScramCredentials sends a request to get SCRAM users
+func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+	res := new(DescribeUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// AlterUserScramCredentials sends a request to alter SCRAM credentials and returns a response or error
+func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+	res := new(AlterUserScramCredentialsResponse)
+
+	err := b.sendAndReceive(req, res)
+	if err != nil {
+		return nil, err
+	}
+
+	return res, nil
+}
+
+// DescribeClientQuotas sends a request to get the broker's quotas
+func (b *Broker) DescribeClientQuotas(request *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) {
+	response := new(DescribeClientQuotasResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// AlterClientQuotas sends a request to alter the broker's quotas
+func (b *Broker) AlterClientQuotas(request *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+	response := new(AlterClientQuotasResponse)
+
+	err := b.sendAndReceive(request, response)
+	if err != nil {
+		return nil, err
+	}
+
+	return response, nil
+}
+
+// readFull ensures the conn ReadDeadline has been set up before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+		return 0, err
+	}
+
+	return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn WriteDeadline has been set up before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+	now := time.Now()
+	if err := b.conn.SetWriteDeadline(now.Add(b.conf.Net.WriteTimeout)); err != nil {
+		return 0, err
+	}
+	// TLS connections require both read and write deadlines to be set
+	// to avoid the handshake blocking indefinitely
+	// see https://github.com/golang/go/blob/go1.23.0/src/crypto/tls/conn.go#L1192-L1195
+	if b.conf.Net.TLS.Enable {
+		if err := b.conn.SetReadDeadline(now.Add(b.conf.Net.ReadTimeout)); err != nil {
+			return 0, err
+		}
+	}
+
+	return b.conn.Write(buf)
+}
+
+// b.lock must be held by caller
+//
+// a non-nil res results in a response promise being created
+func (b *Broker) send(req, res protocolBody) (*responsePromise, error) {
+	var promise *responsePromise
+	if res != nil {
+		// Packets or error will be sent to the following channels
+		// once the response is received
+		promise = makeResponsePromise(res)
+	}
+
+	if err := b.sendWithPromise(req, promise); err != nil {
+		return nil, err
+	}
+
+	return promise, nil
+}
+
+func makeResponsePromise(res protocolBody) *responsePromise {
+	promise := &responsePromise{
+		response: res,
+		packets:  make(chan []byte),
+		errors:   make(chan error),
+	}
+	return promise
+}
+
+// b.lock must be held by caller
+func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error {
+	if b.conn == nil {
+		if b.connErr != nil {
+			return b.connErr
+		}
+		return ErrNotConnected
+	}
+
+	if b.clientSessionReauthenticationTimeMs > 0 && currentUnixMilli() > b.clientSessionReauthenticationTimeMs {
+		err := b.authenticateViaSASLv1()
+		if err != nil {
+			return err
+		}
+	}
+
+	return b.sendInternal(rb, promise)
+}
+
+// b.lock must be held by caller
+func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error {
+	// try restricting API version to ranges advertised by the broker
+	if err := restrictApiVersion(rb, b.brokerAPIVersions); err != nil {
+		return err
+	}
+
+	// response versions must always match their corresponding request's
+	if promise != nil && promise.response != nil {
+		promise.response.setVersion(rb.version())
+	}
+
+	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+		return ErrUnsupportedVersion
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.metricRegistry)
+	if err != nil {
+		return err
+	}
+
+	// check and wait if throttled
+	b.waitIfThrottled()
+
+	requestTime := time.Now()
+	// Will be decremented in responseReceiver (except error or request with NoResponse)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	b.updateProtocolMetrics(rb)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		return err
+	}
+	b.correlationID++
+
+	if promise == nil {
+		// Record request latency without the response
+		b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
+		return nil
+	}
+
+	promise.requestTime = requestTime
+	promise.correlationID = req.correlationID
+	b.responses <- promise
+
+	return nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	promise, err := b.send(req, res)
+	if err != nil {
+		return err
+	}
+
+	if promise == nil {
+		return nil
+	}
+
+	err = handleResponsePromise(req, res, promise, b.metricRegistry)
+	if err != nil {
+		return err
+	}
+	if res != nil {
+		b.handleThrottledResponse(res)
+	}
+	return nil
+}
+
+func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, metricRegistry metrics.Registry) error {
+	select {
+	case buf := <-promise.packets:
+		return versionedDecode(buf, res, req.version(), metricRegistry)
+	case err := <-promise.errors:
+		return err
+	}
+}
+
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
+	b.id, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	host, err := pd.getString()
+	if err != nil {
+		return err
+	}
+
+	port, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		b.rack, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+	if _, _, err := net.SplitHostPort(b.addr); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
+	host, portstr, err := net.SplitHostPort(b.addr)
+	if err != nil {
+		return err
+	}
+
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(b.id)
+
+	err = pe.putString(host)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt32(int32(port))
+
+	if version >= 1 {
+		err = pe.putNullableString(b.rack)
+		if err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *Broker) responseReceiver() {
+	var dead error
+
+	for promise := range b.responses {
+		if dead != nil {
+			// This was previously incremented in send() and
+			// we are not calling updateIncomingCommunicationMetrics()
+			b.addRequestInFlightMetrics(-1)
+			promise.handle(nil, dead)
+			continue
+		}
+
+		headerLength := getHeaderLength(promise.response.headerVersion())
+		header := make([]byte, headerLength)
+
+		bytesReadHeader, err := b.readFull(header)
+		requestLatency := time.Since(promise.requestTime)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			promise.handle(nil, err)
+			continue
+		}
+
+		decodedHeader := responseHeader{}
+		err = versionedDecode(header, &decodedHeader, promise.response.headerVersion(), b.metricRegistry)
+		if err != nil {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			dead = err
+			promise.handle(nil, err)
+			continue
+		}
+		if decodedHeader.correlationID != promise.correlationID {
+			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+			// TODO if decoded ID < cur ID, discard until we catch up
+			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", promise.correlationID, decodedHeader.correlationID)}
+			promise.handle(nil, dead)
+			continue
+		}
+
+		buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
+		bytesReadBody, err := b.readFull(buf)
+		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+		if err != nil {
+			dead = err
+			promise.handle(nil, err)
+			continue
+		}
+
+		promise.handle(buf, nil)
+	}
+	close(b.done)
+}
+
+func getHeaderLength(headerVersion int16) int8 {
+	if headerVersion < 1 {
+		return 8
+	} else {
+		// header contains an additional tagged field length byte (0); we don't support actual tags yet.
+		return 9
+	}
+}
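+
+// For reference, the header lengths above break down as follows:
+//
+//	v0: 4-byte message length + 4-byte correlation ID                     = 8 bytes
+//	v1: 4-byte message length + 4-byte correlation ID + 1-byte tag buffer = 9 bytes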
+
+func (b *Broker) sendAndReceiveApiVersions(v int16) (*ApiVersionsResponse, error) {
+	rb := &ApiVersionsRequest{
+		Version:               v,
+		ClientSoftwareName:    defaultClientSoftwareName,
+		ClientSoftwareVersion: version(),
+	}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.metricRegistry)
+	if err != nil {
+		return nil, err
+	}
+
+	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to send ApiVersionsRequest V%d to %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+	b.correlationID++
+
+	// Kafka protocol response structure:
+	// - Message length (4 bytes): Total length of the response excluding this field
+	// - ResponseHeader v0 (4 bytes): Contains correlation ID for request-response matching
+	header := make([]byte, 8)
+	_, err = b.readFull(header)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read ApiVersionsResponse V%d header from %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+
+	length := binary.BigEndian.Uint32(header[:4])
+	// we're not using the correlation ID here, but it is part of the response header
+	// correlationID := binary.BigEndian.Uint32(header[4:])
+
+	payload := make([]byte, length-4)
+	n, err := b.readFull(payload)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read ApiVersionsResponse V%d payload from %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &ApiVersionsResponse{Version: rb.version()}
+	err = versionedDecode(payload, res, rb.version(), b.metricRegistry)
+	if err != nil {
+		Logger.Printf("Failed to parse ApiVersionsResponse V%d from %s: %s\n", v, b.addr, err)
+		return nil, err
+	}
+
+	kerr := KError(res.ErrorCode)
+	if kerr != ErrNoError {
+		return res, fmt.Errorf("Error in ApiVersionsResponse V%d from %s: %w", res.Version, b.addr, kerr)
+	}
+
+	DebugLogger.Printf("Completed ApiVersionsRequest V%d to %s. Broker supports %d APIs\n", v, b.addr, len(res.ApiKeys))
+	return res, nil
+}
+
+func (b *Broker) authenticateViaSASLv0() error {
+	switch b.conf.Net.SASL.Mechanism {
+	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+		return b.sendAndReceiveSASLSCRAMv0()
+	case SASLTypeGSSAPI:
+		return b.sendAndReceiveKerberos()
+	default:
+		return b.sendAndReceiveSASLPlainAuthV0()
+	}
+}
+
+func (b *Broker) authenticateViaSASLv1() error {
+	metricRegistry := b.metricRegistry
+	if b.conf.Net.SASL.Handshake {
+		handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version}
+		handshakeResponse := new(SaslHandshakeResponse)
+		prom := makeResponsePromise(handshakeResponse)
+
+		handshakeErr := b.sendInternal(handshakeRequest, prom)
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr)
+			return handshakeErr
+		}
+		handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry)
+		if handshakeErr != nil {
+			Logger.Printf("Error while handling SASL handshake response %s: %s\n", b.addr, handshakeErr)
+			return handshakeErr
+		}
+
+		if !errors.Is(handshakeResponse.Err, ErrNoError) {
+			return handshakeResponse.Err
+		}
+	}
+
+	authSendReceiver := func(authBytes []byte) (*SaslAuthenticateResponse, error) {
+		authenticateRequest := b.createSaslAuthenticateRequest(authBytes)
+		authenticateResponse := new(SaslAuthenticateResponse)
+		prom := makeResponsePromise(authenticateResponse)
+		authErr := b.sendInternal(authenticateRequest, prom)
+		if authErr != nil {
+			Logger.Printf("Error while performing SASL Auth %s\n", b.addr)
+			return nil, authErr
+		}
+		authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry)
+		if authErr != nil {
+			Logger.Printf("Error while performing SASL Auth %s: %s\n", b.addr, authErr)
+			return nil, authErr
+		}
+
+		if !errors.Is(authenticateResponse.Err, ErrNoError) {
+			var err error = authenticateResponse.Err
+			if authenticateResponse.ErrorMessage != nil {
+				err = Wrap(authenticateResponse.Err, errors.New(*authenticateResponse.ErrorMessage))
+			}
+			return nil, err
+		}
+
+		b.computeSaslSessionLifetime(authenticateResponse)
+		return authenticateResponse, nil
+	}
+
+	switch b.conf.Net.SASL.Mechanism {
+	case SASLTypeGSSAPI:
+		b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+		if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+			b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+		}
+		return b.kerberosAuthenticator.AuthorizeV2(b, authSendReceiver)
+	case SASLTypeOAuth:
+		provider := b.conf.Net.SASL.TokenProvider
+		return b.sendAndReceiveSASLOAuth(authSendReceiver, provider)
+	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+		return b.sendAndReceiveSASLSCRAMv1(authSendReceiver, b.conf.Net.SASL.SCRAMClientGeneratorFunc())
+	default:
+		return b.sendAndReceiveSASLPlainAuthV1(authSendReceiver)
+	}
+}
+
+func (b *Broker) sendAndReceiveKerberos() error {
+	b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+	if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+		b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+	}
+	return b.kerberosAuthenticator.Authorize(b)
+}
+
+func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
+	rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
+
+	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+	buf, err := encode(req, b.metricRegistry)
+	if err != nil {
+		return err
+	}
+
+	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytes, err := b.write(buf)
+	b.updateOutgoingCommunicationMetrics(bytes)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+		return err
+	}
+	b.correlationID++
+
+	header := make([]byte, 8) // response header
+	_, err = b.readFull(header)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+		return err
+	}
+
+	length := binary.BigEndian.Uint32(header[:4])
+	payload := make([]byte, length-4)
+	n, err := b.readFull(payload)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+		return err
+	}
+
+	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+	res := &SaslHandshakeResponse{}
+
+	err = versionedDecode(payload, res, 0, b.metricRegistry)
+	if err != nil {
+		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+		return err
+	}
+
+	if !errors.Is(res.Err, ErrNoError) {
+		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+		return res.Err
+	}
+
+	DebugLogger.Print("Completed pre-auth SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
+	return nil
+}
+
+// In SASL Plain, Kafka expects the auth header to be in the following message
+// format (from https://tools.ietf.org/html/rfc4616):
+//
+//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
+//   authcid   = 1*SAFE ; MUST accept up to 255 octets
+//   authzid   = 1*SAFE ; MUST accept up to 255 octets
+//   passwd    = 1*SAFE ; MUST accept up to 255 octets
+//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
+//
+//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
+//                  ;; any UTF-8 encoded Unicode character except NUL
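+//
+// For example, user "alice" with password "secret" and an empty authzid is
+// encoded as the NUL-separated byte string "\x00alice\x00secret".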
+
+// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
+// sendAndReceiveSASLPlainAuthV0 performs the v0 SASL auth flow, which is NOT
+// wrapped in the Kafka protocol.
+//
+// With the SASL v0 handshake and auth:
+// when credentials are valid, Kafka returns a 4 byte array of null characters;
+// when credentials are invalid, Kafka closes the connection.
+func (b *Broker) sendAndReceiveSASLPlainAuthV0() error {
+	// default to V0 to allow for backward compatibility when SASL is enabled
+	// but not the handshake
+	if b.conf.Net.SASL.Handshake {
+		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
+		if handshakeErr != nil {
+			Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr)
+			return handshakeErr
+		}
+	}
+
+	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+	authBytes := make([]byte, length+4) // 4 byte length header + auth data
+	binary.BigEndian.PutUint32(authBytes, uint32(length))
+	copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
+
+	requestTime := time.Now()
+	// Will be decremented in updateIncomingCommunicationMetrics (except error)
+	b.addRequestInFlightMetrics(1)
+	bytesWritten, err := b.write(authBytes)
+	b.updateOutgoingCommunicationMetrics(bytesWritten)
+	if err != nil {
+		b.addRequestInFlightMetrics(-1)
+		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	header := make([]byte, 4)
+	n, err := b.readFull(header)
+	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+	// If the credentials are valid, we would get a 4 byte response filled with null characters.
+	// Otherwise, the broker closes the connection and we get an EOF
+	if err != nil {
+		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+		return err
+	}
+
+	DebugLogger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+	return nil
+}
+
+// Kafka 1.x.x onward added a SaslAuthenticate request/response message that
+// wraps the SASL flow in the Kafka protocol, allowing meaningful errors to be
+// returned on authentication failure.
+func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error {
+	authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+	_, err := authSendReceiver(authBytes)
+	return err
+}
+
+func currentUnixMilli() int64 {
+	return time.Now().UnixNano() / int64(time.Millisecond)
+}
+
+// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
+// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
+func (b *Broker) sendAndReceiveSASLOAuth(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), provider AccessTokenProvider) error {
+	token, err := provider.Token()
+	if err != nil {
+		return err
+	}
+
+	message, err := buildClientFirstMessage(token)
+	if err != nil {
+		return err
+	}
+
+	res, err := authSendReceiver(message)
+	if err != nil {
+		return err
+	}
+	isChallenge := len(res.SaslAuthBytes) > 0
+
+	if isChallenge {
+		// Abort the token exchange. The broker returns the failure code.
+		_, err = authSendReceiver([]byte(`\x01`))
+	}
+	return err
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv0() error {
+	if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil {
+		return err
+	}
+
+	scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
+	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+		return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err)
+	}
+
+	msg, err := scramClient.Step("")
+	if err != nil {
+		return fmt.Errorf("failed to advance the SCRAM exchange: %w", err)
+	}
+
+	for !scramClient.Done() {
+		requestTime := time.Now()
+		// Will be decremented in updateIncomingCommunicationMetrics (except error)
+		b.addRequestInFlightMetrics(1)
+		length := len(msg)
+		authBytes := make([]byte, length+4) // 4 byte length header + auth data
+		binary.BigEndian.PutUint32(authBytes, uint32(length))
+		copy(authBytes[4:], msg)
+		_, err := b.write(authBytes)
+		b.updateOutgoingCommunicationMetrics(length + 4)
+		if err != nil {
+			b.addRequestInFlightMetrics(-1)
+			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+		b.correlationID++
+		header := make([]byte, 4)
+		_, err = b.readFull(header)
+		if err != nil {
+			b.addRequestInFlightMetrics(-1)
+			Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+		payload := make([]byte, int32(binary.BigEndian.Uint32(header)))
+		n, err := b.readFull(payload)
+		if err != nil {
+			b.addRequestInFlightMetrics(-1)
+			Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+			return err
+		}
+		b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime))
+		msg, err = scramClient.Step(string(payload))
+		if err != nil {
+			Logger.Println("SASL authentication failed", err)
+			return err
+		}
+	}
+
+	DebugLogger.Println("SASL authentication succeeded")
+	return nil
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), scramClient SCRAMClient) error {
+	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+		return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err)
+	}
+
+	msg, err := scramClient.Step("")
+	if err != nil {
+		return fmt.Errorf("failed to advance the SCRAM exchange: %w", err)
+	}
+
+	for !scramClient.Done() {
+		res, err := authSendReceiver([]byte(msg))
+		if err != nil {
+			return err
+		}
+
+		msg, err = scramClient.Step(string(res.SaslAuthBytes))
+		if err != nil {
+			Logger.Println("SASL authentication failed", err)
+			return err
+		}
+	}
+
+	DebugLogger.Println("SASL authentication succeeded")
+
+	return nil
+}
+
+func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequest {
+	authenticateRequest := SaslAuthenticateRequest{SaslAuthBytes: msg}
+	if b.conf.Version.IsAtLeast(V2_2_0_0) {
+		authenticateRequest.Version = 1
+	}
+
+	return &authenticateRequest
+}
+
+// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
+// https://tools.ietf.org/html/rfc7628
+func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
+	var ext string
+
+	if token == nil {
+		return []byte{}, fmt.Errorf("failed to build client first message: token is nil")
+	}
+
+	if len(token.Extensions) > 0 {
+		if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
+			return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
+		}
+		ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
+	}
+
+	resp := fmt.Appendf(nil, "n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)
+
+	return resp, nil
+}
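+
+// For example, a token "abc123" with no extensions yields the bytes
+// "n,,\x01auth=Bearer abc123\x01\x01": the GS2 header "n,," followed by the
+// auth key/value pair, with \x01 as separator and terminator.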
+
+// mapToString returns a list of key-value pairs ordered by key.
+// keyValSep separates the key from the value. elemSep separates each pair.
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
+	buf := make([]string, 0, len(extensions))
+
+	for k, v := range extensions {
+		buf = append(buf, k+keyValSep+v)
+	}
+
+	sort.Strings(buf)
+
+	return strings.Join(buf, elemSep)
+}
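+
+// For example (keys are sorted before joining):
+//
+//	mapToString(map[string]string{"b": "2", "a": "1"}, "=", ",") // "a=1,b=2"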
+
+func (b *Broker) computeSaslSessionLifetime(res *SaslAuthenticateResponse) {
+	if res.SessionLifetimeMs > 0 {
+		// Follows the Java Kafka implementation from SaslClientAuthenticator.ReauthInfo#setAuthenticationEndAndSessionReauthenticationTimes
+		// pick a random percentage between 85% and 95% for session re-authentication
+		positiveSessionLifetimeMs := res.SessionLifetimeMs
+		authenticationEndMs := currentUnixMilli()
+		pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount := 0.85
+		pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously := 0.10
+		pctToUse := pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + rand.Float64()*pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously
+		sessionLifetimeMsToUse := int64(float64(positiveSessionLifetimeMs) * pctToUse)
+		DebugLogger.Printf("Session expiration in %d ms and session re-authentication on or after %d ms", positiveSessionLifetimeMs, sessionLifetimeMsToUse)
+		b.clientSessionReauthenticationTimeMs = authenticationEndMs + sessionLifetimeMsToUse
+	} else {
+		b.clientSessionReauthenticationTimeMs = 0
+	}
+}
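+
+// For example, if the broker reports SessionLifetimeMs = 60000, re-authentication
+// is scheduled once 85-95% of that window has elapsed, i.e. somewhere between
+// 51000 ms and 57000 ms after the authentication completed.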
+
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+	b.updateRequestLatencyAndInFlightMetrics(requestLatency)
+	b.responseRate.Mark(1)
+
+	if b.brokerResponseRate != nil {
+		b.brokerResponseRate.Mark(1)
+	}
+
+	responseSize := int64(bytes)
+	b.incomingByteRate.Mark(responseSize)
+	if b.brokerIncomingByteRate != nil {
+		b.brokerIncomingByteRate.Mark(responseSize)
+	}
+
+	b.responseSize.Update(responseSize)
+	if b.brokerResponseSize != nil {
+		b.brokerResponseSize.Update(responseSize)
+	}
+}
+
+func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
+	requestLatencyInMs := int64(requestLatency / time.Millisecond)
+	b.requestLatency.Update(requestLatencyInMs)
+
+	if b.brokerRequestLatency != nil {
+		b.brokerRequestLatency.Update(requestLatencyInMs)
+	}
+
+	b.addRequestInFlightMetrics(-1)
+}
+
+func (b *Broker) addRequestInFlightMetrics(i int64) {
+	b.requestsInFlight.Inc(i)
+	if b.brokerRequestsInFlight != nil {
+		b.brokerRequestsInFlight.Inc(i)
+	}
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+	b.requestRate.Mark(1)
+	if b.brokerRequestRate != nil {
+		b.brokerRequestRate.Mark(1)
+	}
+
+	requestSize := int64(bytes)
+	b.outgoingByteRate.Mark(requestSize)
+	if b.brokerOutgoingByteRate != nil {
+		b.brokerOutgoingByteRate.Mark(requestSize)
+	}
+
+	b.requestSize.Update(requestSize)
+	if b.brokerRequestSize != nil {
+		b.brokerRequestSize.Update(requestSize)
+	}
+}
+
+func (b *Broker) updateProtocolMetrics(rb protocolBody) {
+	protocolRequestsRate := b.protocolRequestsRate[rb.key()]
+	if protocolRequestsRate == nil {
+		protocolRequestsRate = metrics.GetOrRegisterMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()), b.metricRegistry)
+		b.protocolRequestsRate[rb.key()] = protocolRequestsRate
+	}
+	protocolRequestsRate.Mark(1)
+
+	if b.brokerProtocolRequestsRate != nil {
+		brokerProtocolRequestsRate := b.brokerProtocolRequestsRate[rb.key()]
+		if brokerProtocolRequestsRate == nil {
+			brokerProtocolRequestsRate = b.registerMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()))
+			b.brokerProtocolRequestsRate[rb.key()] = brokerProtocolRequestsRate
+		}
+		brokerProtocolRequestsRate.Mark(1)
+	}
+}
+
+type throttleSupport interface {
+	throttleTime() time.Duration
+}
+
+func (b *Broker) handleThrottledResponse(resp protocolBody) {
+	throttledResponse, ok := resp.(throttleSupport)
+	if !ok {
+		return
+	}
+	throttleTime := throttledResponse.throttleTime()
+	if throttleTime == time.Duration(0) {
+		return
+	}
+	DebugLogger.Printf(
+		"broker/%d %T throttled %v\n", b.ID(), resp, throttleTime)
+	b.setThrottle(throttleTime)
+	b.updateThrottleMetric(throttleTime)
+}
+
+func (b *Broker) setThrottle(throttleTime time.Duration) {
+	b.throttleTimerLock.Lock()
+	defer b.throttleTimerLock.Unlock()
+	if b.throttleTimer != nil {
+		// if there is an existing timer stop/clear it
+		if !b.throttleTimer.Stop() {
+			<-b.throttleTimer.C
+		}
+	}
+	b.throttleTimer = time.NewTimer(throttleTime)
+}
+
+func (b *Broker) waitIfThrottled() {
+	b.throttleTimerLock.Lock()
+	defer b.throttleTimerLock.Unlock()
+	if b.throttleTimer != nil {
+		DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID())
+		<-b.throttleTimer.C
+		b.throttleTimer = nil
+	}
+}
+
+func (b *Broker) updateThrottleMetric(throttleTime time.Duration) {
+	if b.brokerThrottleTime != nil {
+		throttleTimeInMs := int64(throttleTime / time.Millisecond)
+		b.brokerThrottleTime.Update(throttleTimeInMs)
+	}
+}
+
+func (b *Broker) registerMetrics() {
+	b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
+	b.brokerRequestRate = b.registerMeter("request-rate")
+	b.brokerFetchRate = b.registerMeter("consumer-fetch-rate")
+	b.brokerRequestSize = b.registerHistogram("request-size")
+	b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
+	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
+	b.brokerResponseRate = b.registerMeter("response-rate")
+	b.brokerResponseSize = b.registerHistogram("response-size")
+	b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
+	b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms")
+	b.brokerProtocolRequestsRate = map[int16]metrics.Meter{}
+}
+
+func (b *Broker) registerMeter(name string) metrics.Meter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	return metrics.GetOrRegisterMeter(nameForBroker, b.metricRegistry)
+}
+
+func (b *Broker) registerHistogram(name string) metrics.Histogram {
+	nameForBroker := getMetricNameForBroker(name, b)
+	return getOrRegisterHistogram(nameForBroker, b.metricRegistry)
+}
+
+func (b *Broker) registerCounter(name string) metrics.Counter {
+	nameForBroker := getMetricNameForBroker(name, b)
+	return metrics.GetOrRegisterCounter(nameForBroker, b.metricRegistry)
+}
+
+func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		cfg = &tls.Config{
+			MinVersion: tls.VersionTLS12,
+		}
+	}
+	if cfg.ServerName != "" {
+		return cfg
+	}
+
+	c := cfg.Clone()
+	sn, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err))
+	}
+	c.ServerName = sn
+	return c
+}
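+
+// For example, with addr "broker-1.example.com:9093" (hostname illustrative)
+// and a config that has no explicit ServerName, the returned clone carries
+// ServerName "broker-1.example.com" so certificate verification can succeed.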
diff --git a/vendor/github.com/IBM/sarama/client.go b/vendor/github.com/IBM/sarama/client.go
new file mode 100644
index 0000000..0dc29e2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/client.go
@@ -0,0 +1,1291 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"math"
+	"math/rand"
+	"net"
+	"slices"
+	"sort"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"golang.org/x/net/proxy"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users, however Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default one client per producer/consumer.
+type Client interface {
+	// Config returns the Config struct of the client. This struct should not be
+	// altered after it has been created.
+	Config() *Config
+
+	// Controller returns the cluster controller broker. It will return a
+	// locally cached value if it's available. You can call RefreshController
+	// to update the cached value. Requires Kafka 0.10 or higher.
+	Controller() (*Broker, error)
+
+	// RefreshController retrieves the cluster controller from fresh metadata
+	// and stores it in the local cache. Requires Kafka 0.10 or higher.
+	RefreshController() (*Broker, error)
+
+	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
+	Brokers() []*Broker
+
+	// Broker returns the active Broker if available for the broker ID.
+	Broker(brokerID int32) (*Broker, error)
+
+	// Topics returns the set of available topics as retrieved from cluster metadata.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	Partitions(topic string) ([]int32, error)
+
+	// WritablePartitions returns the sorted list of all writable partition IDs for
+	// the given topic, where "writable" means "having a valid leader accepting
+	// writes".
+	WritablePartitions(topic string) ([]int32, error)
+
+	// Leader returns the broker object that is the leader of the current
+	// topic/partition, as determined by querying the cluster metadata.
+	Leader(topic string, partitionID int32) (*Broker, error)
+
+	// LeaderAndEpoch returns the leader and its epoch for the current
+	// topic/partition, as determined by querying the cluster metadata.
+	LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error)
+
+	// Replicas returns the set of all replica IDs for the given partition.
+	Replicas(topic string, partitionID int32) ([]int32, error)
+
+	// InSyncReplicas returns the set of all in-sync replica IDs for the given
+	// partition. In-sync replicas are replicas which are fully caught up with
+	// the partition leader.
+	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// OfflineReplicas returns the set of all offline replica IDs for the given
+	// partition. Offline replicas are replicas whose broker is currently offline.
+	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
+
+	// RefreshBrokers takes a list of addresses to be used as seed brokers.
+	// Existing broker connections are closed and the updated list of seed brokers
+	// will be used for the next metadata fetch.
+	RefreshBrokers(addrs []string) error
+
+	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
+	// available metadata for those topics. If no topics are provided, it will refresh
+	// metadata for all topics.
+	RefreshMetadata(topics ...string) error
+
+	// GetOffset queries the cluster to get the most recent available offset at the
+	// given time (in milliseconds) on the topic/partition combination.
+	// Time should be OffsetOldest for the earliest available offset,
+	// OffsetNewest for the offset of the message that will be produced next, or a time.
+	GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+	// Coordinator returns the coordinating broker for a consumer group. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.8.2 and higher.
+	Coordinator(consumerGroup string) (*Broker, error)
+
+	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+	// in local cache. This function only works on Kafka 0.8.2 and higher.
+	RefreshCoordinator(consumerGroup string) error
+
+	// TransactionCoordinator returns the coordinating broker for a transaction id. It will
+	// return a locally cached value if it's available. You can call
+	// RefreshCoordinator to update the cached value. This function only works on
+	// Kafka 0.11.0.0 and higher.
+	TransactionCoordinator(transactionID string) (*Broker, error)
+
+	// RefreshTransactionCoordinator retrieves the coordinator for a transaction id and stores it
+	// in local cache. This function only works on Kafka 0.11.0.0 and higher.
+	RefreshTransactionCoordinator(transactionID string) error
+
+	// InitProducerID retrieves information required for Idempotent Producer
+	InitProducerID() (*InitProducerIDResponse, error)
+
+	// LeastLoadedBroker retrieves the broker that has the fewest responses pending
+	LeastLoadedBroker() *Broker
+
+	// PartitionNotReadable reports whether the given topic/partition is currently not readable
+	PartitionNotReadable(topic string, partition int32) bool
+
+	// Close shuts down all broker connections managed by this client. It is required
+	// to call this function before a client object passes out of scope, as it will
+	// otherwise leak memory. You must close any Producers or Consumers using a client
+	// before you close the client.
+	Close() error
+
+	// Closed returns true if the client has already had Close called on it
+	Closed() bool
+}
+
+const (
+	// OffsetNewest stands for the log head offset, i.e. the offset that will be
+	// assigned to the next message that will be produced to the partition. You
+	// can send this to a client's GetOffset method to get this offset, or when
+	// calling ConsumePartition to start consuming new messages.
+	OffsetNewest int64 = -1
+	// OffsetOldest stands for the oldest offset available on the broker for a
+	// partition. You can send this to a client's GetOffset method to get this
+	// offset, or when calling ConsumePartition to start consuming from the
+	// oldest offset that is still available on the broker.
+	OffsetOldest int64 = -2
+)
+
+type client struct {
+	// updateMetadataMs stores the time at which metadata was last updated.
+	// Note: this is accessed atomically so must be the first word in the struct
+	// as per golang/go#41970
+	updateMetadataMs atomic.Int64
+
+	conf           *Config
+	closer, closed chan none // for shutting down background metadata updater
+
+	// the broker addresses given to us through the constructor are not guaranteed to be returned in
+	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+	// so we store them separately
+	seedBrokers []*Broker
+	deadSeeds   []*Broker
+
+	controllerID            int32                                   // cluster controller broker id
+	brokers                 map[int32]*Broker                       // maps broker ids to brokers
+	metadata                map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+	metadataTopics          map[string]none                         // topics that need to collect metadata
+	coordinators            map[string]int32                        // Maps consumer group names to coordinating broker IDs
+	transactionCoordinators map[string]int32                        // Maps transaction ids to coordinating broker IDs
+
+	// If the number of partitions is large, we can get some churn calling cachedPartitions,
+	// so the result is cached.  It is important to update this value whenever metadata is changed
+	cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+	lock sync.RWMutex // protects access to the maps that hold cluster state.
+
+	metadataRefresh metadataRefresh
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+	DebugLogger.Println("Initializing new client")
+
+	if conf == nil {
+		conf = NewConfig()
+	}
+
+	if err := conf.Validate(); err != nil {
+		return nil, err
+	}
+
+	if len(addrs) < 1 {
+		return nil, ConfigurationError("You must provide at least one broker address")
+	}
+
+	if strings.Contains(addrs[0], ".servicebus.windows.net") {
+		if conf.Version.IsAtLeast(V1_1_0_0) || !conf.Version.IsAtLeast(V0_11_0_0) {
+			Logger.Println("Connecting to Azure Event Hubs, forcing version to V1_0_0_0 for compatibility")
+			conf.Version = V1_0_0_0
+		}
+	}
+	client := &client{
+		conf:                    conf,
+		closer:                  make(chan none),
+		closed:                  make(chan none),
+		brokers:                 make(map[int32]*Broker),
+		metadata:                make(map[string]map[int32]*PartitionMetadata),
+		metadataTopics:          make(map[string]none),
+		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+		coordinators:            make(map[string]int32),
+		transactionCoordinators: make(map[string]int32),
+	}
+	refresh := func(topics []string) error {
+		deadline := time.Time{}
+		if client.conf.Metadata.Timeout > 0 {
+			deadline = time.Now().Add(client.conf.Metadata.Timeout)
+		}
+		return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
+	}
+	if conf.Metadata.SingleFlight {
+		client.metadataRefresh = newSingleFlightRefresher(refresh)
+	} else {
+		client.metadataRefresh = refresh
+	}
+
+	if conf.Net.ResolveCanonicalBootstrapServers {
+		var err error
+		addrs, err = client.resolveCanonicalNames(addrs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	client.randomizeSeedBrokers(addrs)
+
+	if conf.Metadata.Full {
+		// do an initial fetch of all cluster metadata by specifying an empty list of topics
+		err := client.RefreshMetadata()
+		switch {
+		case err == nil:
+			// initial metadata fetched successfully, nothing more to do
+		case errors.Is(err, ErrLeaderNotAvailable), errors.Is(err, ErrReplicaNotAvailable), errors.Is(err, ErrTopicAuthorizationFailed), errors.Is(err, ErrClusterAuthorizationFailed):
+			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
+			Logger.Println(err)
+		default:
+			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+			_ = client.Close()
+			return nil, err
+		}
+	}
+	go withRecover(client.backgroundMetadataUpdater)
+
+	DebugLogger.Println("Successfully initialized new client")
+
+	return client, nil
+}
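+
+// A minimal usage sketch (broker address and topic are illustrative only):
+//
+//	client, err := NewClient([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer client.Close()
+//
+//	partitions, err := client.Partitions("my-topic")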
+
+func (client *client) Config() *Config {
+	return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	brokers := make([]*Broker, 0, len(client.brokers))
+	for _, broker := range client.brokers {
+		brokers = append(brokers, broker)
+	}
+	return brokers
+}
+
+func (client *client) Broker(brokerID int32) (*Broker, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	broker, ok := client.brokers[brokerID]
+	if !ok {
+		return nil, ErrBrokerNotFound
+	}
+	_ = broker.Open(client.conf)
+	return broker, nil
+}
+
+func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
+	// FIXME: this InitProducerID seems to only be called from client_test.go (TestInitProducerIDConnectionRefused) and has been superseded by transaction_manager.go?
+	brokerErrors := make([]error, 0)
+	for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() {
+		request := &InitProducerIDRequest{}
+
+		if client.conf.Version.IsAtLeast(V2_7_0_0) {
+			// Version 4 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 4
+		} else if client.conf.Version.IsAtLeast(V2_5_0_0) {
+			// Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an INVALID_PRODUCER_EPOCH error
+			request.Version = 3
+		} else if client.conf.Version.IsAtLeast(V2_4_0_0) {
+			// Version 2 is the first flexible version.
+			request.Version = 2
+		} else if client.conf.Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+
+		response, err := broker.InitProducerID(request)
+		if err == nil {
+			return response, nil
+		} else {
+			// some error, remove that broker and try again
+			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
+			_ = broker.Close()
+			brokerErrors = append(brokerErrors, err)
+			client.deregisterBroker(broker)
+		}
+	}
+
+	return nil, Wrap(ErrOutOfBrokers, brokerErrors...)
+}
+
+func (client *client) Close() error {
+	if client.Closed() {
+		// Chances are this is being called from a defer() and the error will go unobserved
+		// so we go ahead and log the event in this case.
+		Logger.Printf("Close() called on already closed client")
+		return ErrClosedClient
+	}
+
+	// shutdown and wait for the background thread before we take the lock, to avoid races
+	close(client.closer)
+	<-client.closed
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	DebugLogger.Println("Closing Client")
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	client.brokers = nil
+	client.metadata = nil
+	client.metadataTopics = nil
+
+	return nil
+}
+
+func (client *client) Closed() bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadata))
+	for topic := range client.metadata {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) MetadataTopics() ([]string, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	ret := make([]string, 0, len(client.metadataTopics))
+	for topic := range client.metadataTopics {
+		ret = append(ret, topic)
+	}
+
+	return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+	return client.getPartitions(topic, allPartitions)
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+	return client.getPartitions(topic, writablePartitions)
+}
+
+func (client *client) getPartitions(topic string, pt partitionType) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	partitions := client.cachedPartitions(topic, pt)
+
+	// len==0 catches when it's nil (no such topic) and the odd case when every single
+	// partition is undergoing leader election simultaneously. Callers have to be able to handle
+	// this function returning an empty slice (which is a valid return value) but catching it
+	// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+	// a metadata refresh as a nicety so callers can just try again and don't have to manually
+	// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+	if len(partitions) == 0 {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		partitions = client.cachedPartitions(topic, pt)
+	}
+
+	if partitions == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+	return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+		return metadata.Replicas
+	})
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+	return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+		return metadata.Isr
+	})
+}
+
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+	return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+		return metadata.OfflineReplicas
+	})
+}
+
+func (client *client) getReplicas(topic string, partitionID int32, extractor func(metadata *PartitionMetadata) []int32) ([]int32, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	metadata := client.cachedMetadata(topic, partitionID)
+
+	if metadata == nil {
+		err := client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, err
+		}
+		metadata = client.cachedMetadata(topic, partitionID)
+	}
+
+	if metadata == nil {
+		return nil, ErrUnknownTopicOrPartition
+	}
+
+	replicas := extractor(metadata)
+	if errors.Is(metadata.Err, ErrReplicaNotAvailable) {
+		return dupInt32Slice(replicas), metadata.Err
+	}
+	return dupInt32Slice(replicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+	leader, _, err := client.LeaderAndEpoch(topic, partitionID)
+	return leader, err
+}
+
+func (client *client) LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) {
+	if client.Closed() {
+		return nil, -1, ErrClosedClient
+	}
+
+	leader, epoch, err := client.cachedLeader(topic, partitionID)
+	if leader == nil {
+		err = client.RefreshMetadata(topic)
+		if err != nil {
+			return nil, -1, err
+		}
+		leader, epoch, err = client.cachedLeader(topic, partitionID)
+	}
+
+	return leader, epoch, err
+}
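+
+// Illustrative sketch (not part of the upstream source): resolving the current
+// leader broker for a topic/partition; a stale cache triggers one refresh.
+// The topic name is an assumption.
+//
+//	leader, err := client.Leader("my-topic", 0)
+//	if err == nil {
+//		Logger.Println("leader is at", leader.Addr())
+//	}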
+
+func (client *client) RefreshBrokers(addrs []string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	for _, broker := range client.brokers {
+		safeAsyncClose(broker)
+	}
+	client.brokers = make(map[int32]*Broker)
+
+	for _, broker := range client.seedBrokers {
+		safeAsyncClose(broker)
+	}
+
+	for _, broker := range client.deadSeeds {
+		safeAsyncClose(broker)
+	}
+
+	client.seedBrokers = nil
+	client.deadSeeds = nil
+
+	client.randomizeSeedBrokers(addrs)
+
+	return nil
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than
+	// returning a proper error. Handle that case here by returning an error instead
+	// of sending it off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310
+	if slices.Contains(topics, "") {
+		return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+	}
+	return client.metadataRefresh(topics)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, timestamp int64) (int64, error) {
+	if client.Closed() {
+		return -1, ErrClosedClient
+	}
+
+	offset, err := client.getOffset(topic, partitionID, timestamp)
+	if err != nil {
+		if err := client.RefreshMetadata(topic); err != nil {
+			return -1, err
+		}
+		return client.getOffset(topic, partitionID, timestamp)
+	}
+
+	return offset, err
+}
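+
+// Illustrative sketch (not part of the upstream source): querying the oldest
+// and newest available offsets for a partition via GetOffset. The topic name
+// is an assumption.
+//
+//	oldest, _ := client.GetOffset("my-topic", 0, OffsetOldest)
+//	newest, _ := client.GetOffset("my-topic", 0, OffsetNewest)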
+
+func (client *client) Controller() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+		return nil, ErrUnsupportedVersion
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		if err := client.refreshMetadata(); err != nil {
+			return nil, err
+		}
+		controller = client.cachedController()
+	}
+
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+// deregisterController closes and removes the cached controller broker, if any.
+func (client *client) deregisterController() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	if controller, ok := client.brokers[client.controllerID]; ok {
+		_ = controller.Close()
+		delete(client.brokers, client.controllerID)
+	}
+}
+
+// RefreshController retrieves the cluster controller from fresh metadata
+// and stores it in the local cache. Requires Kafka 0.10 or higher.
+func (client *client) RefreshController() (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	client.deregisterController()
+
+	if err := client.refreshMetadata(); err != nil {
+		return nil, err
+	}
+
+	controller := client.cachedController()
+	if controller == nil {
+		return nil, ErrControllerNotAvailable
+	}
+
+	_ = controller.Open(client.conf)
+	return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedCoordinator(consumerGroup)
+
+	if coordinator == nil {
+		if err := client.RefreshCoordinator(consumerGroup); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedCoordinator(consumerGroup)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.findCoordinator(consumerGroup, CoordinatorGroup, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.coordinators[consumerGroup] = response.Coordinator.ID()
+	return nil
+}
+
+func (client *client) TransactionCoordinator(transactionID string) (*Broker, error) {
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	coordinator := client.cachedTransactionCoordinator(transactionID)
+
+	if coordinator == nil {
+		if err := client.RefreshTransactionCoordinator(transactionID); err != nil {
+			return nil, err
+		}
+		coordinator = client.cachedTransactionCoordinator(transactionID)
+	}
+
+	if coordinator == nil {
+		return nil, ErrConsumerCoordinatorNotAvailable
+	}
+
+	_ = coordinator.Open(client.conf)
+	return coordinator, nil
+}
+
+func (client *client) RefreshTransactionCoordinator(transactionID string) error {
+	if client.Closed() {
+		return ErrClosedClient
+	}
+
+	response, err := client.findCoordinator(transactionID, CoordinatorTransaction, client.conf.Metadata.Retry.Max)
+	if err != nil {
+		return err
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+	client.registerBroker(response.Coordinator)
+	client.transactionCoordinators[transactionID] = response.Coordinator.ID()
+	return nil
+}
+
+// private broker management helpers
+
+func (client *client) randomizeSeedBrokers(addrs []string) {
+	random := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for _, index := range random.Perm(len(addrs)) {
+		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+	}
+}
+
+func (client *client) updateBroker(brokers []*Broker) {
+	currentBroker := make(map[int32]*Broker, len(brokers))
+
+	for _, broker := range brokers {
+		currentBroker[broker.ID()] = broker
+		if client.brokers[broker.ID()] == nil { // add new broker
+			client.brokers[broker.ID()] = broker
+			DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+			safeAsyncClose(client.brokers[broker.ID()])
+			client.brokers[broker.ID()] = broker
+			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+
+	for id, broker := range client.brokers {
+		if _, exist := currentBroker[id]; !exist { // remove old broker
+			safeAsyncClose(broker)
+			delete(client.brokers, id)
+			Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
+		}
+	}
+}
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map. If a broker with the same ID is already registered under a different
+// address, the stale entry is replaced. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+	if client.brokers == nil {
+		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+		return
+	}
+
+	if client.brokers[broker.ID()] == nil {
+		client.brokers[broker.ID()] = broker
+		DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+		safeAsyncClose(client.brokers[broker.ID()])
+		client.brokers[broker.ID()] = broker
+		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+	}
+}
+
+// deregisterBroker removes a broker from the broker list; if it's not in the
+// broker list but is the current first seed broker, it is moved to the dead seeds.
+func (client *client) deregisterBroker(broker *Broker) {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	_, ok := client.brokers[broker.ID()]
+	if ok {
+		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+		delete(client.brokers, broker.ID())
+		return
+	}
+	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+		client.deadSeeds = append(client.deadSeeds, broker)
+		client.seedBrokers = client.seedBrokers[1:]
+	}
+}
+
+func (client *client) resurrectDeadBrokers() {
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+	client.deadSeeds = nil
+}
+
+// LeastLoadedBroker returns the broker with the fewest pending requests.
+// It first chooses from the cached broker list; if that list is empty, it falls back to the seed brokers.
+func (client *client) LeastLoadedBroker() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	var leastLoadedBroker *Broker
+	pendingRequests := math.MaxInt
+	for _, broker := range client.brokers {
+		if pendingRequests > broker.ResponseSize() {
+			pendingRequests = broker.ResponseSize()
+			leastLoadedBroker = broker
+		}
+	}
+	if leastLoadedBroker != nil {
+		_ = leastLoadedBroker.Open(client.conf)
+		return leastLoadedBroker
+	}
+
+	if len(client.seedBrokers) > 0 {
+		_ = client.seedBrokers[0].Open(client.conf)
+		return client.seedBrokers[0]
+	}
+
+	return leastLoadedBroker
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+	allPartitions partitionType = iota
+	writablePartitions
+	// If you add any more types, update the partition cache in update()
+
+	// Ensure this is the last partition type value
+	maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		return partitions[partitionID]
+	}
+
+	return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions, exists := client.cachedPartitionsResults[topic]
+
+	if !exists {
+		return nil
+	}
+	return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+	partitions := client.metadata[topic]
+
+	if partitions == nil {
+		return nil
+	}
+
+	ret := make([]int32, 0, len(partitions))
+	for _, partition := range partitions {
+		if partitionSet == writablePartitions && errors.Is(partition.Err, ErrLeaderNotAvailable) {
+			continue
+		}
+		ret = append(ret, partition.ID)
+	}
+
+	sort.Sort(int32Slice(ret))
+	return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, int32, error) {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	partitions := client.metadata[topic]
+	if partitions != nil {
+		metadata, ok := partitions[partitionID]
+		if ok {
+			if errors.Is(metadata.Err, ErrLeaderNotAvailable) {
+				return nil, -1, ErrLeaderNotAvailable
+			}
+			b := client.brokers[metadata.Leader]
+			if b == nil {
+				return nil, -1, ErrLeaderNotAvailable
+			}
+			_ = b.Open(client.conf)
+			return b, metadata.LeaderEpoch, nil
+		}
+	}
+
+	return nil, -1, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, timestamp int64) (int64, error) {
+	broker, err := client.Leader(topic, partitionID)
+	if err != nil {
+		return -1, err
+	}
+
+	request := NewOffsetRequest(client.conf.Version)
+	request.AddBlock(topic, partitionID, timestamp, 1)
+
+	response, err := broker.GetAvailableOffsets(request)
+	if err != nil {
+		_ = broker.Close()
+		return -1, err
+	}
+
+	block := response.GetBlock(topic, partitionID)
+	if block == nil {
+		_ = broker.Close()
+		return -1, ErrIncompleteResponse
+	}
+	if !errors.Is(block.Err, ErrNoError) {
+		return -1, block.Err
+	}
+	if len(block.Offsets) != 1 {
+		return -1, ErrOffsetOutOfRange
+	}
+
+	return block.Offsets[0], nil
+}
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+	defer close(client.closed)
+
+	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := client.refreshMetadata(); err != nil {
+				Logger.Println("Client background metadata update:", err)
+			}
+		case <-client.closer:
+			return
+		}
+	}
+}
+
+func (client *client) refreshMetadata() error {
+	var topics []string
+
+	if !client.conf.Metadata.Full {
+		if specificTopics, err := client.MetadataTopics(); err != nil {
+			return err
+		} else if len(specificTopics) == 0 {
+			return ErrNoTopicsToUpdateMetadata
+		} else {
+			topics = specificTopics
+		}
+	}
+
+	if err := client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+	pastDeadline := func(backoff time.Duration) bool {
+		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+			// we are past the deadline
+			return true
+		}
+		return false
+	}
+	retry := func(err error) error {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			if pastDeadline(backoff) {
+				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+				return err
+			}
+			if backoff > 0 {
+				time.Sleep(backoff)
+			}
+
+			// if another goroutine refreshed the metadata while we were backing off,
+			// skip this retry and surface the result to the caller
+			t := client.updateMetadataMs.Load()
+			if time.Since(time.UnixMilli(t)) < backoff {
+				return err
+			}
+			attemptsRemaining--
+			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+
+			return client.tryRefreshMetadata(topics, attemptsRemaining, deadline)
+		}
+		return err
+	}
+
+	broker := client.LeastLoadedBroker()
+	brokerErrors := make([]error, 0)
+	for ; broker != nil && !pastDeadline(0); broker = client.LeastLoadedBroker() {
+		allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation
+		if len(topics) > 0 {
+			DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+		} else {
+			allowAutoTopicCreation = false
+			DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+		}
+
+		req := NewMetadataRequest(client.conf.Version, topics)
+		req.AllowAutoTopicCreation = allowAutoTopicCreation
+		client.updateMetadataMs.Store(time.Now().UnixMilli())
+
+		response, err := broker.GetMetadata(req)
+		var kerror KError
+		var packetEncodingError PacketEncodingError
+		if err == nil {
+			// When talking to the startup phase of a broker, it is possible to receive an
+			// empty metadata set. We should remove that broker and try the next one
+			// (https://issues.apache.org/jira/browse/KAFKA-7924).
+			if len(response.Brokers) == 0 {
+				Logger.Printf("client/metadata receiving empty brokers from the metadata response when requesting the broker #%d at %s", broker.ID(), broker.addr)
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+			allKnownMetaData := len(topics) == 0
+			// valid response, use it
+			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+			if shouldRetry {
+				Logger.Println("client/metadata found some partitions to be leaderless")
+				return retry(err) // note: err can be nil
+			}
+			return err
+		} else if errors.As(err, &packetEncodingError) {
+			// didn't even send, return the error
+			return err
+		} else if errors.As(err, &kerror) {
+			// a SASL auth error _should_ be non-retryable for all brokers, so return it immediately
+			if errors.Is(err, ErrSASLAuthenticationFailed) {
+				Logger.Println("client/metadata failed SASL authentication")
+				return err
+			}
+
+			if errors.Is(err, ErrTopicAuthorizationFailed) {
+				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+				return err
+			}
+			// else remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		} else {
+			// some other error, remove that broker and try again
+			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+			brokerErrors = append(brokerErrors, err)
+			_ = broker.Close()
+			client.deregisterBroker(broker)
+		}
+	}
+
+	err := Wrap(ErrOutOfBrokers, brokerErrors...)
+	if broker != nil {
+		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+		return retry(err)
+	}
+
+	Logger.Println("client/metadata no available broker to send metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(err)
+}
+
+// if there was no fatal error, reports whether the metadata should be retried due to ErrLeaderNotAvailable
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+	if client.Closed() {
+		return
+	}
+
+	client.lock.Lock()
+	defer client.lock.Unlock()
+
+	// For all the brokers we received:
+	// - if it is a new ID, save it
+	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
+	// - if a broker is no longer present in the response, remove the old entry
+	// - otherwise ignore it, since replacing our existing one would just bounce the connection
+	client.updateBroker(data.Brokers)
+
+	client.controllerID = data.ControllerID
+
+	if allKnownMetaData {
+		client.metadata = make(map[string]map[int32]*PartitionMetadata)
+		client.metadataTopics = make(map[string]none)
+		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+	}
+	for _, topic := range data.Topics {
+		// topics must be added to `metadataTopics` first, to guarantee that all
+		// requested topics are recorded and remain trackable for the periodic
+		// metadata refresh.
+		if _, exists := client.metadataTopics[topic.Name]; !exists {
+			client.metadataTopics[topic.Name] = none{}
+		}
+		delete(client.metadata, topic.Name)
+		delete(client.cachedPartitionsResults, topic.Name)
+
+		switch topic.Err {
+		case ErrNoError:
+			// no-op
+		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+			err = topic.Err
+			continue
+		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if errors.Is(partition.Err, ErrLeaderNotAvailable) {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedTransactionCoordinator(transactionID string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.transactionCoordinators[transactionID]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) cachedController() *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	return client.brokers[client.controllerID]
+}
+
+func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
+	if client.conf.Metadata.Retry.BackoffFunc != nil {
+		maxRetries := client.conf.Metadata.Retry.Max
+		retries := maxRetries - attemptsRemaining
+		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
+	}
+	return client.conf.Metadata.Retry.Backoff
+}
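+
+// Illustrative sketch (not part of the upstream source): a custom exponential
+// backoff plugged in via Metadata.Retry.BackoffFunc, which computeBackoff
+// prefers over the static Backoff value.
+//
+//	cfg := NewConfig()
+//	cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//		return time.Duration(100*(1<<retries)) * time.Millisecond // 100ms, 200ms, 400ms, ...
+//	}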
+
+func (client *client) findCoordinator(coordinatorKey string, coordinatorType CoordinatorType, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+	retry := func(err error) (*FindCoordinatorResponse, error) {
+		if attemptsRemaining > 0 {
+			backoff := client.computeBackoff(attemptsRemaining)
+			attemptsRemaining--
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(backoff)
+			return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining)
+		}
+		return nil, err
+	}
+
+	brokerErrors := make([]error, 0)
+	for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() {
+		DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr())
+
+		request := new(FindCoordinatorRequest)
+		request.CoordinatorKey = coordinatorKey
+		request.CoordinatorType = coordinatorType
+
+		// Version 1 adds KeyType.
+		if client.conf.Version.IsAtLeast(V0_11_0_0) {
+			request.Version = 1
+		}
+		// Version 2 is the same as version 1.
+		if client.conf.Version.IsAtLeast(V2_0_0_0) {
+			request.Version = 2
+		}
+
+		response, err := broker.FindCoordinator(request)
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			var packetEncodingError PacketEncodingError
+			if errors.As(err, &packetEncodingError) {
+				return nil, err
+			}
+			_ = broker.Close()
+			brokerErrors = append(brokerErrors, err)
+			client.deregisterBroker(broker)
+			continue
+		}
+
+		if errors.Is(response.Err, ErrNoError) {
+			DebugLogger.Printf("client/coordinator coordinator for %s is #%d (%s)\n", coordinatorKey, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+		} else if errors.Is(response.Err, ErrConsumerCoordinatorNotAvailable) {
+			Logger.Printf("client/coordinator coordinator for %s is not available\n", coordinatorKey)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+			if coordinatorType == CoordinatorTransaction {
+				if _, err := client.Leader("__transaction_state", 0); err != nil {
+					Logger.Printf("client/coordinator the __transaction_state topic is not initialized completely yet. Waiting 2 seconds...\n")
+					time.Sleep(2 * time.Second)
+				}
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		} else if errors.Is(response.Err, ErrGroupAuthorizationFailed) {
+			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", coordinatorKey)
+			return retry(ErrGroupAuthorizationFailed)
+		} else {
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(Wrap(ErrOutOfBrokers, brokerErrors...))
+}
+
+func (client *client) resolveCanonicalNames(addrs []string) ([]string, error) {
+	ctx := context.Background()
+
+	dialer := client.Config().getDialer()
+	resolver := net.Resolver{
+		Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+			// the dial func should only be called once, so type-switching inside it is acceptable
+			switch d := dialer.(type) {
+			case proxy.ContextDialer:
+				return d.DialContext(ctx, network, address)
+			default:
+				// we have no choice but to ignore the context
+				return d.Dial(network, address)
+			}
+		},
+	}
+
+	canonicalAddrs := make(map[string]struct{}, len(addrs)) // dedupe as we go
+	for _, addr := range addrs {
+		host, port, err := net.SplitHostPort(addr)
+		if err != nil {
+			return nil, err // message includes addr
+		}
+
+		ips, err := resolver.LookupHost(ctx, host)
+		if err != nil {
+			return nil, err // message includes host
+		}
+		for _, ip := range ips {
+			ptrs, err := resolver.LookupAddr(ctx, ip)
+			if err != nil {
+				return nil, err // message includes ip
+			}
+
+			// unlike the Java client, we do not further check that PTRs resolve
+			ptr := strings.TrimSuffix(ptrs[0], ".") // trailing dot breaks GSSAPI
+			canonicalAddrs[net.JoinHostPort(ptr, port)] = struct{}{}
+		}
+	}
+
+	addrs = make([]string, 0, len(canonicalAddrs))
+	for addr := range canonicalAddrs {
+		addrs = append(addrs, addr)
+	}
+	return addrs, nil
+}
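+
+// Illustrative sketch (not part of the upstream source): the reverse-lookup
+// above only runs when the corresponding Net option is enabled on the config.
+//
+//	cfg := NewConfig()
+//	cfg.Net.ResolveCanonicalBootstrapServers = true // useful with GSSAPI aliases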
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (yet all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+	Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
+func (ncc *nopCloserClient) Close() error {
+	return nil
+}
+
+func (client *client) PartitionNotReadable(topic string, partition int32) bool {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+
+	pm := client.metadata[topic][partition]
+	if pm == nil {
+		return true
+	}
+	return pm.Leader == -1
+}
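+
+// Illustrative sketch (not part of the upstream source): callers can use
+// PartitionNotReadable to skip partitions whose leader is currently unknown.
+//
+//	for _, p := range partitions {
+//		if client.PartitionNotReadable("my-topic", p) {
+//			continue // no metadata yet, or leader == -1
+//		}
+//	}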
diff --git a/vendor/github.com/IBM/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go
new file mode 100644
index 0000000..b752cb8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/compress.go
@@ -0,0 +1,191 @@
+package sarama
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+	"github.com/klauspost/compress/gzip"
+	"github.com/pierrec/lz4/v4"
+)
+
+var (
+	lz4WriterPool = sync.Pool{
+		New: func() interface{} {
+			lz := lz4.NewWriter(nil)
+			if err := lz.Apply(lz4.BlockSizeOption(lz4.Block64Kb)); err != nil {
+				panic(err)
+			}
+			return lz
+		},
+	}
+
+	gzipWriterPool = sync.Pool{
+		New: func() interface{} {
+			return gzip.NewWriter(nil)
+		},
+	}
+	gzipWriterPoolForCompressionLevel1 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 1)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel2 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 2)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel3 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 3)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel4 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 4)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel5 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 5)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel6 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 6)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel7 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 7)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel8 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 8)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+	gzipWriterPoolForCompressionLevel9 = sync.Pool{
+		New: func() interface{} {
+			gz, err := gzip.NewWriterLevel(nil, 9)
+			if err != nil {
+				panic(err)
+			}
+			return gz
+		},
+	}
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		return gzipCompress(level, data)
+	case CompressionSnappy:
+		return snappy.Encode(data), nil
+	case CompressionLZ4:
+		return lz4Compress(data)
+	case CompressionZSTD:
+		return zstdCompress(ZstdEncoderParams{level}, nil, data)
+	default:
+		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+	}
+}
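+
+// Illustrative sketch (not part of the upstream source): compressing a record
+// batch payload with the helper above, using gzip at the default level.
+//
+//	compressed, err := compress(CompressionGZIP, CompressionLevelDefault, payload)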
+
+func gzipCompress(level int, data []byte) ([]byte, error) {
+	var (
+		buf    bytes.Buffer
+		writer *gzip.Writer
+		pool   *sync.Pool
+	)
+
+	switch level {
+	case CompressionLevelDefault:
+		pool = &gzipWriterPool
+	case 1:
+		pool = &gzipWriterPoolForCompressionLevel1
+	case 2:
+		pool = &gzipWriterPoolForCompressionLevel2
+	case 3:
+		pool = &gzipWriterPoolForCompressionLevel3
+	case 4:
+		pool = &gzipWriterPoolForCompressionLevel4
+	case 5:
+		pool = &gzipWriterPoolForCompressionLevel5
+	case 6:
+		pool = &gzipWriterPoolForCompressionLevel6
+	case 7:
+		pool = &gzipWriterPoolForCompressionLevel7
+	case 8:
+		pool = &gzipWriterPoolForCompressionLevel8
+	case 9:
+		pool = &gzipWriterPoolForCompressionLevel9
+	default:
+		var err error
+		writer, err = gzip.NewWriterLevel(&buf, level)
+		if err != nil {
+			return nil, err
+		}
+	}
+	if pool != nil {
+		writer = pool.Get().(*gzip.Writer)
+		writer.Reset(&buf)
+		defer pool.Put(writer)
+	}
+	if _, err := writer.Write(data); err != nil {
+		return nil, err
+	}
+	if err := writer.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func lz4Compress(data []byte) ([]byte, error) {
+	writer := lz4WriterPool.Get().(*lz4.Writer)
+	defer lz4WriterPool.Put(writer)
+
+	var buf bytes.Buffer
+	writer.Reset(&buf)
+	if _, err := writer.Write(data); err != nil {
+		return nil, err
+	}
+	if err := writer.Close(); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go
new file mode 100644
index 0000000..5bac2b5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/config.go
@@ -0,0 +1,917 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"fmt"
+	"io"
+	"net"
+	"regexp"
+	"time"
+
+	"github.com/klauspost/compress/gzip"
+	"github.com/rcrowley/go-metrics"
+	"golang.org/x/net/proxy"
+)
+
+const defaultClientID = "sarama"
+
+// validClientID specifies the permitted characters for a client.id when
+// connecting to Kafka versions before 1.0.0 (KIP-190)
+var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+	Admin struct {
+		Retry struct {
+			// The total number of times to retry sending (retriable) admin requests (default 5).
+			// Similar to the `retries` setting of the JVM AdminClientConfig.
+			Max int
+			// Backoff time between retries of a failed request (default 100ms)
+			Backoff time.Duration
+		}
+		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+		Timeout time.Duration
+	}
+
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		// Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see:
+		// https://kafka.apache.org/protocol#protocol_network
+		// https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		// ResolveCanonicalBootstrapServers turns each bootstrap broker address
+		// into a set of IPs, then does a reverse lookup on each one to get its
+		// canonical hostname. This list of hostnames then replaces the
+		// original address list. Similar to the `client.dns.lookup` option in
+		// the JVM client, this is especially useful with GSSAPI, where it
+		// allows providing an alias record instead of individual broker
+		// hostnames. Defaults to false.
+		ResolveCanonicalBootstrapServers bool
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker. Multiple SASL authentication
+		// mechanisms are supported; see the Mechanism field and related settings below.
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// SASLMechanism is the name of the enabled SASL mechanism.
+			// Possible values: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, GSSAPI (defaults to PLAIN).
+			Mechanism SASLMechanism
+			// Version is the SASL Protocol Version to use
+			// Kafka > 1.x should use V1, except on Azure EventHub, which uses V0
+			Version int16
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// AuthIdentity is an (optional) authorization identity (authzid) to
+			// use for SASL/PLAIN authentication (if different from User) when
+			// an authenticated user is permitted to act as the presented
+			// alternative user. See RFC4616 for details.
+			AuthIdentity string
+			// User is the authentication identity (authcid) to present for
+			// SASL/PLAIN or SASL/SCRAM authentication
+			User string
+			// Password for SASL/PLAIN authentication
+			Password string
+			// authz id used for SASL/SCRAM authentication
+			SCRAMAuthzID string
+			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+			// client used to perform the SCRAM exchange with the server.
+			SCRAMClientGeneratorFunc func() SCRAMClient
+			// TokenProvider is a user-defined callback for generating
+			// access tokens for SASL/OAUTHBEARER auth. See the
+			// AccessTokenProvider interface docs for proper implementation
+			// guidelines.
+			TokenProvider AccessTokenProvider
+
+			GSSAPI GSSAPIConfig
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
+		// If zero or positive, keep-alives are enabled.
+		// If negative, keep-alives are disabled.
+		KeepAlive time.Duration
+
+		// LocalAddr is the local address to use when dialing an
+		// address. The address must be of a compatible type for the
+		// network being dialed.
+		// If nil, a local address is automatically chosen.
+		LocalAddr net.Addr
+
+		Proxy struct {
+			// Whether or not to use proxy when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The proxy dialer to use when enabled (defaults to nil).
+			Dialer proxy.Dialer
+		}
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
+
+		// How long to wait for a successful metadata response.
+		// Disabled by default which means a metadata request against an unreachable
+		// cluster (all brokers are unreachable or unresponsive) can take up to
+		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+		// to fail.
+		Timeout time.Duration
+
+		// Whether to allow auto-creation of topics during metadata refresh. If set to true,
+		// the broker may auto-create requested topics that do not already exist, provided
+		// it is configured to do so (`auto.create.topics.enable` is true). Defaults to true.
+		AllowAutoTopicCreation bool
+
+		// SingleFlight controls whether to send a single metadata refresh request at a given time
+		// or whether to allow anyone to refresh the metadata concurrently.
+		// If this is set to true and the client needs to refresh the metadata from different goroutines,
+		// the requests will be batched together so that a single refresh is sent at a time.
+		// See https://github.com/IBM/sarama/issues/3224 for more details.
+		// SingleFlight defaults to true.
+		SingleFlight bool
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1000000). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+		// RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// The level of compression to use on messages. The meaning depends
+		// on the actual compression type used and defaults to default compression
+		// level for the codec.
+		CompressionLevel int
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+		// If enabled, the producer will ensure that exactly one copy of each message is
+		// written.
+		Idempotent bool
+		// Transaction is the namespace for transactional producer configuration.
+		Transaction struct {
+			// Used in transactions to identify an instance of a producer through restarts
+			ID string
+			// Amount of time a transaction can remain unresolved (neither committed nor aborted)
+			// default is 1 min
+			Timeout time.Duration
+
+			Retry struct {
+				// The total number of times to retry sending a message (default 50).
+				// Similar to the `message.send.max.retries` setting of the JVM producer.
+				Max int
+				// How long to wait for the cluster to settle between retries
+				// (default 10ms). Similar to the `retry.backoff.ms` setting of the
+				// JVM producer.
+				Backoff time.Duration
+				// Called to compute backoff time dynamically. Useful for implementing
+				// more sophisticated backoff strategies. This takes precedence over
+				// `Backoff` if set.
+				BackoffFunc func(retries, maxRetries int) time.Duration
+			}
+		}
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries, maxRetries int) time.Duration
+			// The maximum length of the bridging buffer between `input` and `retries` channels
+			// in AsyncProducer#retryHandler.
+			// The limit is to prevent this buffer from overflowing or causing OOM.
+			// Defaults to 0 for unlimited.
+			// Any value between 1 and 4096 is pushed to 4096.
+			// A zero or negative value indicates unlimited.
+			MaxBufferLength int
+			// The maximum total byte size of messages in the bridging buffer between `input`
+			// and `retries` channels in AsyncProducer#retryHandler.
+			// This limit prevents the buffer from consuming excessive memory.
+			// Defaults to 0 for unlimited.
+			// Any value between 1 byte and 32 MB is pushed to 32 MB.
+			// A zero or negative value indicates unlimited.
+			MaxBufferBytes int64
+		}
+
+		// Interceptors to be called when the producer dispatcher reads the
+		// message for the first time. Interceptors allow intercepting and
+		// possibly mutating the message before it is published to the Kafka
+		// cluster. The *ProducerMessage modified by the first interceptor's
+		// OnSend() is passed to the second interceptor's OnSend(), and so on
+		// along the interceptor chain.
+		Interceptors []ProducerInterceptor
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	Consumer struct {
+		// Group is the namespace for configuring consumer group.
+		Group struct {
+			Session struct {
+				// The timeout used to detect consumer failures when using Kafka's group management facility.
+				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
+				// If no heartbeats are received by the broker before the expiration of this session timeout,
+				// then the broker will remove this consumer from the group and initiate a rebalance.
+				// Note that the value must be in the allowable range as configured in the broker configuration
+				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+				Timeout time.Duration
+			}
+			Heartbeat struct {
+				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
+				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+				// to facilitate rebalancing when new consumers join or leave the group.
+				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+				// higher than 1/3 of that value.
+				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+				Interval time.Duration
+			}
+			Rebalance struct {
+				// Strategy for allocating topic partitions to members.
+				// Deprecated: Strategy exists for historical compatibility
+				// and should not be used. Please use GroupStrategies.
+				Strategy BalanceStrategy
+
+				// GroupStrategies is the priority-ordered list of client-side consumer group
+				// balancing strategies that will be offered to the coordinator. The first
+				// strategy that all group members support will be chosen by the leader.
+				// default: [ NewBalanceStrategyRange() ]
+				GroupStrategies []BalanceStrategy
+
+				// The maximum allowed time for each worker to join the group once a rebalance has begun.
+				// This is basically a limit on the amount of time needed for all tasks to flush any pending
+				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+				// the group, which will cause offset commit failures (default 60s).
+				Timeout time.Duration
+
+				Retry struct {
+					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+					// the load to assign partitions to each consumer. If the set of consumers changes while
+					// this assignment is taking place the rebalance will fail and retry. This setting controls
+					// the maximum number of attempts before giving up (default 4).
+					Max int
+					// Backoff time between retries during rebalance (default 2s)
+					Backoff time.Duration
+				}
+			}
+			Member struct {
+				// Custom metadata to include when joining the group. The user data for all joined members
+				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
+				// coordinator for the group.
+				UserData []byte
+			}
+
+			// InstanceId supports static membership in consumer groups (KIP-345)
+			InstanceId string
+
+			// If true, consumer offsets will be automatically reset to configured Initial value
+			// if the fetched consumer offset is out of range of available offsets. Out of range
+			// can happen if the data has been deleted from the server, or during situations of
+			// under-replication where a replica does not have all the data yet. It can be
+			// dangerous to reset the offset automatically, particularly in the latter case. Defaults
+			// to true to maintain existing behavior.
+			ResetInvalidOffsets bool
+		}
+
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+			// Called to compute backoff time dynamically. Useful for implementing
+			// more sophisticated backoff strategies. This takes precedence over
+			// `Backoff` if set.
+			BackoffFunc func(retries int) time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 1MB). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyways. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.max.wait.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message takes to
+		// process for the user. If writing to the Messages channel takes longer
+		// than this, that partition will stop fetching more messages until it
+		// can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		// If a message is not written to the Messages channel between two ticks
+		// of the expiryTicker then a timeout is detected.
+		// Using a ticker instead of a timer to detect timeouts should typically
+		// result in many fewer calls to Timer functions which may result in a
+		// significant performance improvement if many messages are being sent
+		// and timeouts are infrequent.
+		// The disadvantage of using a ticker instead of a timer is that
+		// timeouts will be less accurate. That is, the effective timeout could
+		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+		// between two messages being sent may not be recognized as a timeout.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			// Deprecated: CommitInterval exists for historical compatibility
+			// and should not be used. Please use Consumer.Offsets.AutoCommit
+			CommitInterval time.Duration
+
+			// AutoCommit specifies configuration for commit messages automatically.
+			AutoCommit struct {
+				// Whether or not to auto-commit updated offsets back to the broker.
+				// (default enabled).
+				Enable bool
+
+				// How frequently to commit updated offsets. Ineffective unless
+				// auto-commit is enabled (default 1s)
+				Interval time.Duration
+			}
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used).  Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+			// (default is 0: disabled).
+			Retention time.Duration
+
+			Retry struct {
+				// The total number of times to retry failing commit
+				// requests during OffsetManager shutdown (default 3).
+				Max int
+			}
+		}
+
+		// IsolationLevel supports two modes:
+		//	- use `ReadUncommitted` (default) to consume and return all messages in the message channel
+		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
+		IsolationLevel IsolationLevel
+
+		// Interceptors to be called just before the record is sent to the
+		// messages channel. Interceptors allow intercepting and possibly
+		// mutating the message before it is returned to the client. The
+		// *ConsumerMessage modified by the first interceptor's OnConsume() is
+		// passed to the second interceptor's OnConsume(), and so on along the
+		// interceptor chain.
+		Interceptors []ConsumerInterceptor
+	}
+
+	// A user-provided string sent with every request to the brokers for logging,
+	// debugging, and auditing purposes. Defaults to "sarama", but you should
+	// probably set it to something specific to your application.
+	ClientID string
+	// A rack identifier for this client. This can be any string value which
+	// indicates where this client is physically located.
+	// It corresponds with the broker config 'broker.rack'
+	RackID string
+	// The number of events to buffer in internal and external channels. This
+	// permits the producer and consumer to continue processing some messages
+	// in the background while user code is working, greatly improving throughput.
+	// Defaults to 256.
+	ChannelBufferSize int
+	// ApiVersionsRequest determines whether Sarama should send an
+	// ApiVersionsRequest message to each broker as part of its initial
+	// connection. This defaults to `true` to match the official Java client
+	// and most third-party ones.
+	ApiVersionsRequest bool
+	// The version of Kafka that Sarama will assume it is running against.
+	// Defaults to the oldest supported stable version. Since Kafka provides
+	// backwards-compatibility, setting it to a version older than you have
+	// will not break anything, although it may prevent you from using the
+	// latest features. Setting it to a version greater than you are actually
+	// running may lead to random breakage.
+	Version KafkaVersion
+	// The registry to define metrics into.
+	// Defaults to a local registry.
+	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+	// prior to starting Sarama.
+	// See Examples on how to use the metrics registry
+	MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+	c := &Config{}
+
+	c.Admin.Retry.Max = 5
+	c.Admin.Retry.Backoff = 100 * time.Millisecond
+	c.Admin.Timeout = 3 * time.Second
+
+	c.Net.MaxOpenRequests = 5
+	c.Net.DialTimeout = 30 * time.Second
+	c.Net.ReadTimeout = 30 * time.Second
+	c.Net.WriteTimeout = 30 * time.Second
+	c.Net.SASL.Handshake = true
+	c.Net.SASL.Version = SASLHandshakeV1
+
+	c.Metadata.Retry.Max = 3
+	c.Metadata.Retry.Backoff = 250 * time.Millisecond
+	c.Metadata.RefreshFrequency = 10 * time.Minute
+	c.Metadata.Full = true
+	c.Metadata.AllowAutoTopicCreation = true
+	c.Metadata.SingleFlight = true
+
+	c.Producer.MaxMessageBytes = 1024 * 1024
+	c.Producer.RequiredAcks = WaitForLocal
+	c.Producer.Timeout = 10 * time.Second
+	c.Producer.Partitioner = NewHashPartitioner
+	c.Producer.Retry.Max = 3
+	c.Producer.Retry.Backoff = 100 * time.Millisecond
+	c.Producer.Return.Errors = true
+	c.Producer.CompressionLevel = CompressionLevelDefault
+
+	c.Producer.Transaction.Timeout = 1 * time.Minute
+	c.Producer.Transaction.Retry.Max = 50
+	c.Producer.Transaction.Retry.Backoff = 100 * time.Millisecond
+
+	c.Consumer.Fetch.Min = 1
+	c.Consumer.Fetch.Default = 1024 * 1024
+	c.Consumer.Retry.Backoff = 2 * time.Second
+	c.Consumer.MaxWaitTime = 500 * time.Millisecond
+	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+	c.Consumer.Return.Errors = false
+	c.Consumer.Offsets.AutoCommit.Enable = true
+	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
+	c.Consumer.Offsets.Initial = OffsetNewest
+	c.Consumer.Offsets.Retry.Max = 3
+
+	c.Consumer.Group.Session.Timeout = 10 * time.Second
+	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+	c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()}
+	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+	c.Consumer.Group.Rebalance.Retry.Max = 4
+	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+	c.Consumer.Group.ResetInvalidOffsets = true
+
+	c.ClientID = defaultClientID
+	c.ChannelBufferSize = 256
+	c.ApiVersionsRequest = true
+	c.Version = DefaultVersion
+	c.MetricRegistry = metrics.NewRegistry()
+
+	return c
+}
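+
+// A minimal usage sketch (illustrative, not part of the library): start from
+// the defaults above, override a few fields, and validate before use. The
+// ClientID value below is a placeholder.
+//
+//	cfg := sarama.NewConfig()
+//	cfg.ClientID = "my-app" // something application-specific
+//	cfg.Consumer.Return.Errors = true
+//	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest
+//	if err := cfg.Validate(); err != nil {
+//		log.Fatal(err)
+//	}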
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+//
+//nolint:gocyclo // This function's cyclomatic complexity has gone beyond 100
+func (c *Config) Validate() error {
+	// some configuration values warrant a warning but should not fail validation outright; do those first
+	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+	}
+	if !c.Net.SASL.Enable {
+		if c.Net.SASL.User != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+		}
+		if c.Net.SASL.Password != "" {
+			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+		}
+	}
+	if c.Producer.RequiredAcks > 1 {
+		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with Kafka >= 0.8.2.0.")
+	}
+	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+	}
+	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+	}
+	if c.Producer.Timeout%time.Millisecond != 0 {
+		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+	}
+	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+	}
+	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+	}
+	if c.ClientID == defaultClientID {
+		Logger.Println("ClientID is the default of 'sarama'; you should consider setting it to something application-specific.")
+	}
+
+	// validate Net values
+	switch {
+	case c.Net.MaxOpenRequests <= 0:
+		return ConfigurationError("Net.MaxOpenRequests must be > 0")
+	case c.Net.DialTimeout <= 0:
+		return ConfigurationError("Net.DialTimeout must be > 0")
+	case c.Net.ReadTimeout <= 0:
+		return ConfigurationError("Net.ReadTimeout must be > 0")
+	case c.Net.WriteTimeout <= 0:
+		return ConfigurationError("Net.WriteTimeout must be > 0")
+	case c.Net.SASL.Enable:
+		if c.Net.SASL.Mechanism == "" {
+			c.Net.SASL.Mechanism = SASLTypePlaintext
+		}
+		if c.Net.SASL.Version == SASLHandshakeV0 && c.ApiVersionsRequest {
+			return ConfigurationError("ApiVersionsRequest must be disabled when SASL v0 is enabled")
+		}
+		switch c.Net.SASL.Mechanism {
+		case SASLTypePlaintext:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+		case SASLTypeOAuth:
+			if c.Net.SASL.TokenProvider == nil {
+				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
+			}
+		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+			if c.Net.SASL.User == "" {
+				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.Password == "" {
+				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+			}
+			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+			}
+		case SASLTypeGSSAPI:
+			if c.Net.SASL.GSSAPI.ServiceName == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+			}
+
+			switch c.Net.SASL.GSSAPI.AuthType {
+			case KRB5_USER_AUTH:
+				if c.Net.SASL.GSSAPI.Password == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+				}
+			case KRB5_KEYTAB_AUTH:
+				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+						" and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+				}
+			case KRB5_CCACHE_AUTH:
+				if c.Net.SASL.GSSAPI.CCachePath == "" {
+					return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" +
+						" and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH")
+				}
+			default:
+				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH")
+			}
+
+			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Username == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+			}
+			if c.Net.SASL.GSSAPI.Realm == "" {
+				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+			}
+		default:
+			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
+				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
+			return ConfigurationError(msg)
+		}
+	}
+
+	// validate the Admin values
+	switch {
+	case c.Admin.Timeout <= 0:
+		return ConfigurationError("Admin.Timeout must be > 0")
+	}
+
+	// validate the Metadata values
+	switch {
+	case c.Metadata.Retry.Max < 0:
+		return ConfigurationError("Metadata.Retry.Max must be >= 0")
+	case c.Metadata.Retry.Backoff < 0:
+		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+	case c.Metadata.RefreshFrequency < 0:
+		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+	}
+
+	// validate the Producer values
+	switch {
+	case c.Producer.MaxMessageBytes <= 0:
+		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+	case c.Producer.RequiredAcks < -1:
+		return ConfigurationError("Producer.RequiredAcks must be >= -1")
+	case c.Producer.Timeout <= 0:
+		return ConfigurationError("Producer.Timeout must be > 0")
+	case c.Producer.Partitioner == nil:
+		return ConfigurationError("Producer.Partitioner must not be nil")
+	case c.Producer.Flush.Bytes < 0:
+		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+	case c.Producer.Flush.Messages < 0:
+		return ConfigurationError("Producer.Flush.Messages must be >= 0")
+	case c.Producer.Flush.Frequency < 0:
+		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+	case c.Producer.Flush.MaxMessages < 0:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+	case c.Producer.Retry.Max < 0:
+		return ConfigurationError("Producer.Retry.Max must be >= 0")
+	case c.Producer.Retry.Backoff < 0:
+		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+	}
+
+	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+	}
+
+	if c.Producer.Compression == CompressionGZIP {
+		if c.Producer.CompressionLevel != CompressionLevelDefault {
+			if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil {
+				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+			}
+		}
+	}
+
+	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
+		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
+	}
+
+	if c.Producer.Idempotent {
+		if !c.Version.IsAtLeast(V0_11_0_0) {
+			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
+		}
+		if c.Producer.Retry.Max == 0 {
+			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
+		}
+		if c.Producer.RequiredAcks != WaitForAll {
+			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
+		}
+		if c.Net.MaxOpenRequests > 1 {
+			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
+		}
+	}
+
+	if c.Producer.Transaction.ID != "" && !c.Producer.Idempotent {
+		return ConfigurationError("Transactional producer requires Idempotent to be true")
+	}
+
+	// validate the Consumer values
+	switch {
+	case c.Consumer.Fetch.Min <= 0:
+		return ConfigurationError("Consumer.Fetch.Min must be > 0")
+	case c.Consumer.Fetch.Default <= 0:
+		return ConfigurationError("Consumer.Fetch.Default must be > 0")
+	case c.Consumer.Fetch.Max < 0:
+		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+	case c.Consumer.MaxProcessingTime <= 0:
+		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+	case c.Consumer.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
+		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
+	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+	case c.Consumer.Offsets.Retry.Max < 0:
+		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
+		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
+	}
+
+	if c.Consumer.Offsets.CommitInterval != 0 {
+		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
+			" and should not be used. Please use Consumer.Offsets.AutoCommit; the current value will be ignored")
+	}
+	if c.Consumer.Group.Rebalance.Strategy != nil {
+		Logger.Println("Deprecation warning: Consumer.Group.Rebalance.Strategy exists for historical compatibility" +
+			" and should not be used. Please use Consumer.Group.Rebalance.GroupStrategies")
+	}
+
+	// validate IsolationLevel
+	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
+		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
+	}
+
+	// validate the Consumer Group values
+	switch {
+	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
+	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
+	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
+		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
+	case c.Consumer.Group.Rebalance.Strategy == nil && len(c.Consumer.Group.Rebalance.GroupStrategies) == 0:
+		return ConfigurationError("Consumer.Group.Rebalance.GroupStrategies or Consumer.Group.Rebalance.Strategy must not be empty")
+	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
+		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
+	case c.Consumer.Group.Rebalance.Retry.Max < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
+	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
+		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
+	}
+
+	for _, strategy := range c.Consumer.Group.Rebalance.GroupStrategies {
+		if strategy == nil {
+			return ConfigurationError("elements in Consumer.Group.Rebalance.GroupStrategies must not be nil")
+		}
+	}
+
+	if c.Consumer.Group.InstanceId != "" {
+		if !c.Version.IsAtLeast(V2_3_0_0) {
+			return ConfigurationError("Consumer.Group.InstanceId requires Version >= 2.3")
+		}
+		if err := validateGroupInstanceId(c.Consumer.Group.InstanceId); err != nil {
+			return err
+		}
+	}
+
+	// validate misc shared values
+	switch {
+	case c.ChannelBufferSize < 0:
+		return ConfigurationError("ChannelBufferSize must be >= 0")
+	}
+
+	// only validate clientID locally for Kafka versions before KIP-190 was implemented
+	if !c.Version.IsAtLeast(V1_0_0_0) && !validClientID.MatchString(c.ClientID) {
+		return ConfigurationError(fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", c.ClientID))
+	}
+
+	return nil
+}
+
+func (c *Config) getDialer() proxy.Dialer {
+	if c.Net.Proxy.Enable {
+		Logger.Println("using proxy")
+		return c.Net.Proxy.Dialer
+	} else {
+		return &net.Dialer{
+			Timeout:   c.Net.DialTimeout,
+			KeepAlive: c.Net.KeepAlive,
+			LocalAddr: c.Net.LocalAddr,
+		}
+	}
+}
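+
+// For illustration, a caller could route all broker connections through a
+// SOCKS5 proxy using golang.org/x/net/proxy (the proxy address below is a
+// placeholder):
+//
+//	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cfg.Net.Proxy.Enable = true
+//	cfg.Net.Proxy.Dialer = dialer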
+
+const MAX_GROUP_INSTANCE_ID_LENGTH = 249
+
+var GROUP_INSTANCE_ID_REGEXP = regexp.MustCompile(`^[0-9a-zA-Z\._\-]+$`)
+
+func validateGroupInstanceId(id string) error {
+	if id == "" {
+		return ConfigurationError("Group instance id must be a non-empty string")
+	}
+	if id == "." || id == ".." {
+		return ConfigurationError(`Group instance id cannot be "." or ".."`)
+	}
+	if len(id) > MAX_GROUP_INSTANCE_ID_LENGTH {
+		return ConfigurationError(fmt.Sprintf(`Group instance id cannot be longer than %v characters: %s`, MAX_GROUP_INSTANCE_ID_LENGTH, id))
+	}
+	if !GROUP_INSTANCE_ID_REGEXP.MatchString(id) {
+		return ConfigurationError(fmt.Sprintf(`Group instance id %s is illegal, it contains a character other than alphanumerics, '.', '_' and '-'`, id))
+	}
+	return nil
+}
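+
+// For example (illustrative inputs), these calls behave as follows:
+//
+//	validateGroupInstanceId("consumer-1.zone_a") // nil: only allowed characters
+//	validateGroupInstanceId("..")                // error: reserved name
+//	validateGroupInstanceId("bad id!")           // error: illegal characters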
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/config_resource_type.go
rename to vendor/github.com/IBM/sarama/config_resource_type.go
diff --git a/vendor/github.com/IBM/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go
new file mode 100644
index 0000000..4c96af4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer.go
@@ -0,0 +1,1136 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+	Headers        []*RecordHeader // only set if kafka is version 0.11+
+	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
+	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
+
+	Key, Value []byte
+	Topic      string
+	Partition  int32
+	Offset     int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+	Topic     string
+	Partition int32
+	Err       error
+}
+
+func (ce ConsumerError) Error() string {
+	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+func (ce ConsumerError) Unwrap() error {
+	return ce.Err
+}
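+
+// Because ConsumerError implements Unwrap, callers can recover the failing
+// topic/partition from a wrapped error with errors.As (an illustrative
+// sketch, assuming a *ConsumerError somewhere in the error chain):
+//
+//	var cerr *sarama.ConsumerError
+//	if errors.As(err, &cerr) {
+//		log.Printf("consuming %s/%d failed: %v", cerr.Topic, cerr.Partition, cerr.Err)
+//	}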
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+type Consumer interface {
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+
+	// Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	Pause(topicPartitions map[string][]int32)
+
+	// Resume resumes specified partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	Resume(topicPartitions map[string][]int32)
+
+	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	PauseAll()
+
+	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	ResumeAll()
+}
+
+// max time to wait for more partition subscriptions
+const partitionConsumersBatchTimeout = 100 * time.Millisecond
+
+type consumer struct {
+	conf            *Config
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+	client          Client
+	metricRegistry  metrics.Registry
+	lock            sync.Mutex
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newConsumer(client)
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+	cli := &nopCloserClient{client}
+	return newConsumer(cli)
+}
+
+func newConsumer(client Client) (Consumer, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	c := &consumer{
+		client:          client,
+		conf:            client.Config(),
+		children:        make(map[string]map[int32]*partitionConsumer),
+		brokerConsumers: make(map[*Broker]*brokerConsumer),
+		metricRegistry:  newCleanupRegistry(client.Config().MetricRegistry),
+	}
+
+	return c, nil
+}
+
+func (c *consumer) Close() error {
+	c.metricRegistry.UnregisterAll()
+	return c.client.Close()
+}
+
+func (c *consumer) Topics() ([]string, error) {
+	return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+	return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+	child := &partitionConsumer{
+		consumer:             c,
+		conf:                 c.conf,
+		topic:                topic,
+		partition:            partition,
+		messages:             make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+		errors:               make(chan *ConsumerError, c.conf.ChannelBufferSize),
+		feeder:               make(chan *FetchResponse, 1),
+		leaderEpoch:          invalidLeaderEpoch,
+		preferredReadReplica: invalidPreferredReplicaID,
+		trigger:              make(chan none, 1),
+		dying:                make(chan none),
+		fetchSize:            c.conf.Consumer.Fetch.Default,
+	}
+
+	if err := child.chooseStartingOffset(offset); err != nil {
+		return nil, err
+	}
+
+	leader, epoch, err := c.client.LeaderAndEpoch(child.topic, child.partition)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := c.addChild(child); err != nil {
+		return nil, err
+	}
+
+	go withRecover(child.dispatcher)
+	go withRecover(child.responseFeeder)
+
+	child.leaderEpoch = epoch
+	child.broker = c.refBrokerConsumer(leader)
+	child.broker.input <- child
+
+	return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	hwms := make(map[string]map[int32]int64)
+	for topic, p := range c.children {
+		hwm := make(map[int32]int64, len(p))
+		for partition, pc := range p {
+			hwm[partition] = pc.HighWaterMarkOffset()
+		}
+		hwms[topic] = hwm
+	}
+
+	return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	topicChildren := c.children[child.topic]
+	if topicChildren == nil {
+		topicChildren = make(map[int32]*partitionConsumer)
+		c.children[child.topic] = topicChildren
+	}
+
+	if topicChildren[child.partition] != nil {
+		return ConfigurationError("That topic/partition is already being consumed")
+	}
+
+	topicChildren[child.partition] = child
+	return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	bc := c.brokerConsumers[broker]
+	if bc == nil {
+		bc = c.newBrokerConsumer(broker)
+		c.brokerConsumers[broker] = bc
+	}
+
+	bc.refs++
+
+	return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	brokerWorker.refs--
+
+	if brokerWorker.refs == 0 {
+		close(brokerWorker.input)
+		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+			delete(c.brokerConsumers, brokerWorker.broker)
+		}
+	}
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// Pause implements Consumer.
+func (c *consumer) Pause(topicPartitions map[string][]int32) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for topic, partitions := range topicPartitions {
+		for _, partition := range partitions {
+			if topicConsumers, ok := c.children[topic]; ok {
+				if partitionConsumer, ok := topicConsumers[partition]; ok {
+					partitionConsumer.Pause()
+				}
+			}
+		}
+	}
+}
+
+// Resume implements Consumer.
+func (c *consumer) Resume(topicPartitions map[string][]int32) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for topic, partitions := range topicPartitions {
+		for _, partition := range partitions {
+			if topicConsumers, ok := c.children[topic]; ok {
+				if partitionConsumer, ok := topicConsumers[partition]; ok {
+					partitionConsumer.Resume()
+				}
+			}
+		}
+	}
+}
+
+// PauseAll implements Consumer.
+func (c *consumer) PauseAll() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for _, partitions := range c.children {
+		for _, partitionConsumer := range partitions {
+			partitionConsumer.Pause()
+		}
+	}
+}
+
+// ResumeAll implements Consumer.
+func (c *consumer) ResumeAll() {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	for _, partitions := range c.children {
+		for _, partitionConsumer := range partitions {
+			partitionConsumer.Resume()
+		}
+	}
+}
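+
+// For illustration, a caller might suspend and later resume fetching from
+// specific partitions (the topic name and partition IDs are placeholders):
+//
+//	consumer.Pause(map[string][]int32{"my-topic": {0, 1}})
+//	// ... later ...
+//	consumer.Resume(map[string][]int32{"my-topic": {0, 1}})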
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Pause suspends fetching from this partition. Future calls to the broker will not return
+	// any records from this partition until it has been resumed using Resume().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	Pause()
+
+	// Resume resumes this partition, which has been paused with Pause().
+	// New calls to the broker will return records from this partition if there are any to be fetched.
+	// If the partition was not previously paused, this method is a no-op.
+	Resume()
+
+	// IsPaused indicates whether this partition consumer is paused
+	IsPaused() bool
+}
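+
+// A minimal consuming loop following the for/range pattern described above
+// (an illustrative sketch; the broker address and topic are placeholders):
+//
+//	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer consumer.Close()
+//
+//	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer pc.Close()
+//
+//	for msg := range pc.Messages() {
+//		fmt.Printf("offset %d: %s\n", msg.Offset, msg.Value)
+//	}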
+
+type partitionConsumer struct {
+	highWaterMarkOffset atomic.Int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+	consumer *consumer
+	conf     *Config
+	broker   *brokerConsumer
+	messages chan *ConsumerMessage
+	errors   chan *ConsumerError
+	feeder   chan *FetchResponse
+
+	leaderEpoch          int32
+	preferredReadReplica int32
+
+	trigger, dying chan none
+	closeOnce      sync.Once
+	topic          string
+	partition      int32
+	responseResult error
+	fetchSize      int32
+	offset         int64
+	retries        atomic.Int32
+
+	paused atomic.Bool // whether fetching from this partition is currently suspended
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+	cErr := &ConsumerError{
+		Topic:     child.topic,
+		Partition: child.partition,
+		Err:       err,
+	}
+
+	if child.conf.Consumer.Return.Errors {
+		child.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (child *partitionConsumer) computeBackoff() time.Duration {
+	if child.conf.Consumer.Retry.BackoffFunc != nil {
+		retries := child.retries.Add(1)
+		return child.conf.Consumer.Retry.BackoffFunc(int(retries))
+	}
+	return child.conf.Consumer.Retry.Backoff
+}
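+
+// For illustration, a caller could plug in a capped exponential backoff via
+// the config (the cap of eight doublings is an arbitrary choice):
+//
+//	cfg.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
+//		if retries > 8 {
+//			retries = 8 // cap to avoid overflow and excessive waits
+//		}
+//		return time.Duration(1<<uint(retries)) * 100 * time.Millisecond
+//	}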
+
+func (child *partitionConsumer) dispatcher() {
+	for range child.trigger {
+		select {
+		case <-child.dying:
+			close(child.trigger)
+		case <-time.After(child.computeBackoff()):
+			if child.broker != nil {
+				child.consumer.unrefBrokerConsumer(child.broker)
+				child.broker = nil
+			}
+
+			if err := child.dispatch(); err != nil {
+				child.sendError(err)
+				child.trigger <- none{}
+			}
+		}
+	}
+
+	if child.broker != nil {
+		child.consumer.unrefBrokerConsumer(child.broker)
+	}
+	child.consumer.removeChild(child)
+	close(child.feeder)
+}
+
+func (child *partitionConsumer) preferredBroker() (*Broker, int32, error) {
+	if child.preferredReadReplica >= 0 {
+		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
+		if err == nil {
+			return broker, child.leaderEpoch, nil
+		}
+		Logger.Printf(
+			"consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader",
+			child.topic, child.partition, child.preferredReadReplica)
+
+		// if we couldn't find it, discard the replica preference and trigger a
+		// metadata refresh whilst falling back to consuming from the leader again
+		child.preferredReadReplica = invalidPreferredReplicaID
+		_ = child.consumer.client.RefreshMetadata(child.topic)
+	}
+
+	// if preferred replica cannot be found fallback to leader
+	return child.consumer.client.LeaderAndEpoch(child.topic, child.partition)
+}
+
+func (child *partitionConsumer) dispatch() error {
+	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+		return err
+	}
+
+	broker, epoch, err := child.preferredBroker()
+	if err != nil {
+		return err
+	}
+
+	child.leaderEpoch = epoch
+	child.broker = child.consumer.refBrokerConsumer(broker)
+	child.broker.input <- child
+
+	return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+	if err != nil {
+		return err
+	}
+
+	child.highWaterMarkOffset.Store(newestOffset)
+
+	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case offset == OffsetNewest:
+		child.offset = newestOffset
+	case offset == OffsetOldest:
+		child.offset = oldestOffset
+	case offset >= oldestOffset && offset <= newestOffset:
+		child.offset = offset
+	default:
+		return ErrOffsetOutOfRange
+	}
+
+	return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+	return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+	return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
+	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
+	// also just close itself)
+	child.closeOnce.Do(func() {
+		close(child.dying)
+	})
+}
+
+func (child *partitionConsumer) Close() error {
+	child.AsyncClose()
+
+	var consumerErrors ConsumerErrors
+	for err := range child.errors {
+		consumerErrors = append(consumerErrors, err)
+	}
+
+	if len(consumerErrors) > 0 {
+		return consumerErrors
+	}
+	return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+	return child.highWaterMarkOffset.Load()
+}
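+
+// For example (illustrative), the approximate number of messages this
+// consumer is behind on the partition can be derived per message:
+//
+//	for msg := range pc.Messages() {
+//		lag := pc.HighWaterMarkOffset() - msg.Offset - 1
+//		_ = lag // approximate messages remaining on this partition
+//	}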
+
+func (child *partitionConsumer) responseFeeder() {
+	var msgs []*ConsumerMessage
+	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+	firstAttempt := true
+
+feederLoop:
+	for response := range child.feeder {
+		msgs, child.responseResult = child.parseResponse(response)
+
+		if child.responseResult == nil {
+			child.retries.Store(0)
+		}
+
+		for i, msg := range msgs {
+			child.interceptors(msg)
+		messageSelect:
+			select {
+			case <-child.dying:
+				child.broker.acks.Done()
+				continue feederLoop
+			case child.messages <- msg:
+				firstAttempt = true
+			case <-expiryTicker.C:
+				if !firstAttempt {
+					child.responseResult = errTimedOut
+					child.broker.acks.Done()
+				remainingLoop:
+					for _, msg = range msgs[i:] {
+						child.interceptors(msg)
+						select {
+						case child.messages <- msg:
+						case <-child.dying:
+							break remainingLoop
+						}
+					}
+					child.broker.input <- child
+					continue feederLoop
+				} else {
+					// current message has not been sent, return to select
+					// statement
+					firstAttempt = false
+					goto messageSelect
+				}
+			}
+		}
+
+		child.broker.acks.Done()
+	}
+
+	expiryTicker.Stop()
+	close(child.messages)
+	close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+	var messages []*ConsumerMessage
+	for _, msgBlock := range msgSet.Messages {
+		for _, msg := range msgBlock.Messages() {
+			offset := msg.Offset
+			timestamp := msg.Msg.Timestamp
+			if msg.Msg.Version >= 1 {
+				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+				offset += baseOffset
+				if msg.Msg.LogAppendTime {
+					timestamp = msgBlock.Msg.Timestamp
+				}
+			}
+			if offset < child.offset {
+				continue
+			}
+			messages = append(messages, &ConsumerMessage{
+				Topic:          child.topic,
+				Partition:      child.partition,
+				Key:            msg.Msg.Key,
+				Value:          msg.Msg.Value,
+				Offset:         offset,
+				Timestamp:      timestamp,
+				BlockTimestamp: msgBlock.Msg.Timestamp,
+			})
+			child.offset = offset + 1
+		}
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+	messages := make([]*ConsumerMessage, 0, len(batch.Records))
+
+	for _, rec := range batch.Records {
+		offset := batch.FirstOffset + rec.OffsetDelta
+		if offset < child.offset {
+			continue
+		}
+		timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
+		if batch.LogAppendTime {
+			timestamp = batch.MaxTimestamp
+		}
+		messages = append(messages, &ConsumerMessage{
+			Topic:     child.topic,
+			Partition: child.partition,
+			Key:       rec.Key,
+			Value:     rec.Value,
+			Offset:    offset,
+			Timestamp: timestamp,
+			Headers:   rec.Headers,
+		})
+		child.offset = offset + 1
+	}
+	if len(messages) == 0 {
+		child.offset++
+	}
+	return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+	var consumerBatchSizeMetric metrics.Histogram
+	if child.consumer != nil && child.consumer.metricRegistry != nil {
+		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", child.consumer.metricRegistry)
+	}
+
+	// If the request was throttled and the response is empty, we log and return without error
+	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+		Logger.Printf(
+			"consumer/broker/%d FetchResponse throttled %v\n",
+			child.broker.broker.ID(), response.ThrottleTime)
+		return nil, nil
+	}
+
+	block := response.GetBlock(child.topic, child.partition)
+	if block == nil {
+		return nil, ErrIncompleteResponse
+	}
+
+	if !errors.Is(block.Err, ErrNoError) {
+		return nil, block.Err
+	}
+
+	nRecs, err := block.numRecords()
+	if err != nil {
+		return nil, err
+	}
+
+	if consumerBatchSizeMetric != nil {
+		consumerBatchSizeMetric.Update(int64(nRecs))
+	}
+
+	if block.PreferredReadReplica != invalidPreferredReplicaID {
+		child.preferredReadReplica = block.PreferredReadReplica
+	}
+
+	if nRecs == 0 {
+		partialTrailingMessage, err := block.isPartial()
+		if err != nil {
+			return nil, err
+		}
+		// We got no messages. If we got a partial trailing message then we need to ask for more data.
+		// Otherwise we just poll again and wait for one to be produced...
+		if partialTrailingMessage {
+			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+				// we can't ask for more data, we've hit the configured limit
+				child.sendError(ErrMessageTooLarge)
+				child.offset++ // skip this one so we can keep processing future messages
+			} else {
+				child.fetchSize *= 2
+				// check int32 overflow
+				if child.fetchSize < 0 {
+					child.fetchSize = math.MaxInt32
+				}
+				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+					child.fetchSize = child.conf.Consumer.Fetch.Max
+				}
+			}
+		} else if block.recordsNextOffset != nil && *block.recordsNextOffset <= block.HighWaterMarkOffset {
+			// check the last record's next offset to avoid getting stuck if the high watermark was not reached
+			Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, next offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.recordsNextOffset)
+			child.offset = *block.recordsNextOffset
+		}
+
+		return nil, nil
+	}
+
+	// we got messages, reset our fetch size in case it was increased for a previous request
+	child.fetchSize = child.conf.Consumer.Fetch.Default
+	child.highWaterMarkOffset.Store(block.HighWaterMarkOffset)
+
+	// abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted
+	// - a producer ID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+	// - a producer ID is removed when the partitionConsumer iterates over an abort control record, meaning the aborted transaction for this producer is over
+	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+	abortedTransactions := block.getAbortedTransactions()
+
+	var messages []*ConsumerMessage
+	for _, records := range block.RecordsSet {
+		switch records.recordsType {
+		case legacyRecords:
+			messageSetMessages, err := child.parseMessages(records.MsgSet)
+			if err != nil {
+				return nil, err
+			}
+
+			messages = append(messages, messageSetMessages...)
+		case defaultRecords:
+			// Consume remaining abortedTransaction up to last offset of current batch
+			for _, txn := range abortedTransactions {
+				if txn.FirstOffset > records.RecordBatch.LastOffset() {
+					break
+				}
+				abortedProducerIDs[txn.ProducerID] = struct{}{}
+				// Pop the aborted transaction so that we never add it again
+				abortedTransactions = abortedTransactions[1:]
+			}
+
+			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+			if err != nil {
+				return nil, err
+			}
+
+			// Parse and commit offset but do not expose messages that are:
+			// - control records
+			// - part of an aborted transaction when set to `ReadCommitted`
+
+			// control record
+			isControl, err := records.isControl()
+			if err != nil {
+				// The safe bet is to ignore control messages on parse error when
+				// ReadUncommitted, and to fail on them when ReadCommitted, where
+				// correctness depends on seeing every abort marker.
+				if child.conf.Consumer.IsolationLevel == ReadCommitted {
+					return nil, err
+				}
+				continue
+			}
+			if isControl {
+				controlRecord, err := records.getControlRecord()
+				if err != nil {
+					return nil, err
+				}
+
+				if controlRecord.Type == ControlRecordAbort {
+					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+				}
+				continue
+			}
+
+			// filter aborted transactions
+			if child.conf.Consumer.IsolationLevel == ReadCommitted {
+				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+				if records.RecordBatch.IsTransactional && isAborted {
+					continue
+				}
+			}
+
+			messages = append(messages, recordBatchMessages...)
+		default:
+			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+		}
+	}
+
+	return messages, nil
+}
+
+func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
+	for _, interceptor := range child.conf.Consumer.Interceptors {
+		msg.safelyApplyInterceptor(interceptor)
+	}
+}
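+
+// An interceptor is any type implementing ConsumerInterceptor's OnConsume.
+// An illustrative sketch that tags each message with a header (the header
+// key and value are placeholders):
+//
+//	type tagInterceptor struct{}
+//
+//	func (tagInterceptor) OnConsume(msg *sarama.ConsumerMessage) {
+//		msg.Headers = append(msg.Headers, &sarama.RecordHeader{
+//			Key:   []byte("seen-by"),
+//			Value: []byte("my-app"),
+//		})
+//	}
+//
+//	cfg.Consumer.Interceptors = []sarama.ConsumerInterceptor{tagInterceptor{}}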
+
+// Pause implements PartitionConsumer.
+func (child *partitionConsumer) Pause() {
+	child.paused.Store(true)
+}
+
+// Resume implements PartitionConsumer.
+func (child *partitionConsumer) Resume() {
+	child.paused.Store(false)
+}
+
+// IsPaused implements PartitionConsumer.
+func (child *partitionConsumer) IsPaused() bool {
+	return child.paused.Load()
+}
+
+type brokerConsumer struct {
+	consumer         *consumer
+	broker           *Broker
+	input            chan *partitionConsumer
+	newSubscriptions chan []*partitionConsumer
+	subscriptions    map[*partitionConsumer]none
+	acks             sync.WaitGroup
+	refs             int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+	bc := &brokerConsumer{
+		consumer:         c,
+		broker:           broker,
+		input:            make(chan *partitionConsumer),
+		newSubscriptions: make(chan []*partitionConsumer),
+		subscriptions:    make(map[*partitionConsumer]none),
+		refs:             0,
+	}
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available.
+func (bc *brokerConsumer) subscriptionManager() {
+	defer close(bc.newSubscriptions)
+
+	for {
+		var partitionConsumers []*partitionConsumer
+
+		// Check for any partition consumer asking to subscribe; if there aren't
+		// any, trigger the network request (to fetch Kafka messages) by sending "nil" to the
+		// newSubscriptions channel
+		select {
+		case pc, ok := <-bc.input:
+			if !ok {
+				return
+			}
+			partitionConsumers = append(partitionConsumers, pc)
+		case bc.newSubscriptions <- nil:
+			continue
+		}
+
+		// drain input of any further incoming subscriptions
+		timer := time.NewTimer(partitionConsumersBatchTimeout)
+		for batchComplete := false; !batchComplete; {
+			select {
+			case pc := <-bc.input:
+				partitionConsumers = append(partitionConsumers, pc)
+			case <-timer.C:
+				batchComplete = true
+			}
+		}
+		timer.Stop()
+
+		Logger.Printf(
+			"consumer/broker/%d accumulated %d new subscriptions\n",
+			bc.broker.ID(), len(partitionConsumers))
+
+		bc.newSubscriptions <- partitionConsumers
+	}
+}
+
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
+// this is the main loop that fetches Kafka messages
+func (bc *brokerConsumer) subscriptionConsumer() {
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Take a small nap to avoid burning the CPU.
+			time.Sleep(partitionConsumersBatchTimeout)
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		// if there is no response, no fetch was made,
+		// so there is no response to handle
+		if response == nil {
+			time.Sleep(partitionConsumersBatchTimeout)
+			continue
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			if _, ok := response.Blocks[child.topic]; !ok {
+				bc.acks.Done()
+				continue
+			}
+
+			if _, ok := response.Blocks[child.topic][child.partition]; !ok {
+				bc.acks.Done()
+				continue
+			}
+
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			// no-op
+		}
+	}
+}
+
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+func (bc *brokerConsumer) handleResponses() {
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		if result == nil {
+			if preferredBroker, _, err := child.preferredBroker(); err == nil {
+				if bc.broker.ID() != preferredBroker.ID() {
+					// not an error but needs redispatching to consume from preferred replica
+					Logger.Printf(
+						"consumer/broker/%d abandoned in favor of preferred replica broker/%d\n",
+						bc.broker.ID(), preferredBroker.ID())
+					child.trigger <- none{}
+					delete(bc.subscriptions, child)
+				}
+			}
+			continue
+		}
+
+		// Discard any replica preference.
+		child.preferredReadReplica = invalidPreferredReplicaID
+
+		if errors.Is(result, errTimedOut) {
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		} else if errors.Is(result, ErrOffsetOutOfRange) {
+			// there's no point in retrying this; it will just fail the same way again, so
+			// shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		} else if errors.Is(result, ErrUnknownTopicOrPartition) ||
+			errors.Is(result, ErrNotLeaderForPartition) ||
+			errors.Is(result, ErrLeaderNotAvailable) ||
+			errors.Is(result, ErrReplicaNotAvailable) ||
+			errors.Is(result, ErrFencedLeaderEpoch) ||
+			errors.Is(result, ErrUnknownLeaderEpoch) {
+			// not an error, but does need redispatching
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		} else {
+			// unknown error; tell the user and try redispatching
+			child.sendError(result)
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+				bc.broker.ID(), child.topic, child.partition, result)
+			child.trigger <- none{}
+			delete(bc.subscriptions, child)
+		}
+	}
+}
+
+func (bc *brokerConsumer) abort(err error) {
+	bc.consumer.abandonBrokerConsumer(bc)
+	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+	for child := range bc.subscriptions {
+		child.sendError(err)
+		child.trigger <- none{}
+	}
+
+	for newSubscriptions := range bc.newSubscriptions {
+		if len(newSubscriptions) == 0 {
+			// Take a small nap to avoid burning the CPU.
+			time.Sleep(partitionConsumersBatchTimeout)
+			continue
+		}
+		for _, child := range newSubscriptions {
+			child.sendError(err)
+			child.trigger <- none{}
+		}
+	}
+}
+
+// fetchNewMessages returns nil if no fetch is made, which can occur when
+// all partitions are paused
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+	request := &FetchRequest{
+		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
+		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+	}
+	// Version 1 is the same as version 0.
+	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
+		request.Version = 1
+	}
+	// Starting in Version 2, the requestor must be able to handle Kafka Log
+	// Message format version 1.
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+		request.Version = 2
+	}
+	// Version 3 adds MaxBytes.  Starting in version 3, the partition ordering in
+	// the request is now relevant.  Partitions will be processed in the order
+	// they appear in the request.
+	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+		request.Version = 3
+		request.MaxBytes = MaxResponseSize
+	}
+	// Version 4 adds IsolationLevel.  Starting in version 4, the requestor must be
+	// able to handle Kafka log message format version 2.
+	// Version 5 adds LogStartOffset to indicate the earliest available offset of
+	// partition data that can be consumed.
+	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+		request.Version = 5
+		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
+	}
+	// Version 6 is the same as version 5.
+	if bc.consumer.conf.Version.IsAtLeast(V1_0_0_0) {
+		request.Version = 6
+	}
+	// Version 7 adds incremental fetch request support.
+	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+		request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID that we're
+		// just going to ignore anyway.
+		request.SessionID = 0
+		request.SessionEpoch = -1
+	}
+	// Version 8 is the same as version 7.
+	if bc.consumer.conf.Version.IsAtLeast(V2_0_0_0) {
+		request.Version = 8
+	}
+	// Version 9 adds CurrentLeaderEpoch, as described in KIP-320.
+	// Version 10 indicates that we can use the ZStd compression algorithm, as
+	// described in KIP-110.
+	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+		request.Version = 10
+	}
+	// Version 11 adds RackID for KIP-392 fetch from closest replica
+	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+		request.Version = 11
+		request.RackID = bc.consumer.conf.RackID
+	}
+
+	for child := range bc.subscriptions {
+		if !child.IsPaused() {
+			request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize, child.leaderEpoch)
+		}
+	}
+
+	// avoid fetching when there are no blocks
+	if len(request.blocks) == 0 {
+		return nil, nil
+	}
+
+	return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go
new file mode 100644
index 0000000..f47eac7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_group.go
@@ -0,0 +1,1184 @@
+package sarama
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+	// Consume joins a cluster of consumers for a given list of topics and
+	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+	//
+	// The life-cycle of a session is represented by the following steps:
+	//
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	//    and are assigned their "fair share" of partitions, aka 'claims'.
+	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
+	//    of the claims and allow any necessary preparation or alteration of state.
+	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+	//    in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
+	//    from concurrent reads/writes.
+	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+	//    parent context is canceled or when a server-side rebalance cycle is initiated.
+	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+	//    to allow the user to perform any final tasks before a rebalance.
+	// 6. Finally, marked offsets are committed one last time before claims are released.
+	//
+	// Please note that once a rebalance is triggered, sessions must be completed within
+	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+	// commit failures.
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session will need to be
+	// recreated to get the new claims.
+	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
+	// By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan error
+
+	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+	// this function before the object passes out of scope, as it will otherwise leak memory.
+	Close() error
+
+	// Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	Pause(partitions map[string][]int32)
+
+	// Resume resumes specified partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	Resume(partitions map[string][]int32)
+
+	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
+	// records from these partitions until they have been resumed using Resume()/ResumeAll().
+	// Note that this method does not affect partition subscription.
+	// In particular, it does not cause a group rebalance when automatic assignment is used.
+	PauseAll()
+
+	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
+	// New calls to the broker will return records from these partitions if there are any to be fetched.
+	ResumeAll()
+}
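+
+// A minimal consume loop, as a sketch (exampleHandler is a hypothetical
+// ConsumerGroupHandler implementation; addresses and topic names are
+// illustrative). Consume is re-invoked after every rebalance until the
+// context is cancelled:
+//
+//	config := sarama.NewConfig()
+//	config.Version = sarama.V2_4_0_0 // must be >= V0_10_2_0 for consumer groups
+//	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", config)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer group.Close()
+//
+//	ctx := context.Background()
+//	for ctx.Err() == nil {
+//		if err := group.Consume(ctx, []string{"example-topic"}, exampleHandler{}); err != nil {
+//			log.Println(err)
+//			break
+//		}
+//	}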
+
+type consumerGroup struct {
+	client Client
+
+	config          *Config
+	consumer        Consumer
+	groupID         string
+	groupInstanceId *string
+	memberID        string
+	errors          chan error
+
+	lock       sync.Mutex
+	errorsLock sync.RWMutex
+	closed     chan none
+	closeOnce  sync.Once
+
+	userData []byte
+
+	metricRegistry metrics.Registry
+}
+
+// NewConsumerGroup creates a new consumer group for the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := newConsumerGroup(groupID, client)
+	if err != nil {
+		_ = client.Close()
+	}
+	return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+	if client == nil {
+		return nil, ConfigurationError("client must not be nil")
+	}
+	// For clients passed in by the caller, ensure we don't
+	// call Close() on them.
+	cli := &nopCloserClient{client}
+	return newConsumerGroup(groupID, cli)
+}
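+
+// A sketch of re-using an existing client (addresses and config are
+// illustrative; the caller keeps ownership of the client):
+//
+//	client, err := sarama.NewClient([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer client.Close() // the caller, not the group, closes the client
+//
+//	group, err := sarama.NewConsumerGroupFromClient("example-group", client)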
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+	config := client.Config()
+	if !config.Version.IsAtLeast(V0_10_2_0) {
+		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+	}
+
+	consumer, err := newConsumer(client)
+	if err != nil {
+		return nil, err
+	}
+
+	cg := &consumerGroup{
+		client:         client,
+		consumer:       consumer,
+		config:         config,
+		groupID:        groupID,
+		errors:         make(chan error, config.ChannelBufferSize),
+		closed:         make(chan none),
+		userData:       config.Consumer.Group.Member.UserData,
+		metricRegistry: newCleanupRegistry(config.MetricRegistry),
+	}
+	if config.Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) {
+		cg.groupInstanceId = &config.Consumer.Group.InstanceId
+	}
+	return cg, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+	c.closeOnce.Do(func() {
+		close(c.closed)
+
+		// leave group
+		if e := c.leave(); e != nil {
+			err = e
+		}
+
+		go func() {
+			c.errorsLock.Lock()
+			defer c.errorsLock.Unlock()
+			close(c.errors)
+		}()
+
+		// drain errors
+		for e := range c.errors {
+			err = e
+		}
+
+		if e := c.client.Close(); e != nil {
+			err = e
+		}
+
+		c.metricRegistry.UnregisterAll()
+	})
+	return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+	// Ensure group is not closed
+	select {
+	case <-c.closed:
+		return ErrClosedConsumerGroup
+	default:
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	// Quick exit when no topics are provided
+	if len(topics) == 0 {
+		return fmt.Errorf("no topics provided")
+	}
+
+	// Refresh metadata for requested topics
+	if err := c.client.RefreshMetadata(topics...); err != nil {
+		return err
+	}
+
+	// Init session
+	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+	if errors.Is(err, ErrClosedClient) {
+		return ErrClosedConsumerGroup
+	} else if err != nil {
+		return err
+	}
+
+	// Wait for session exit signal or Close() call
+	select {
+	case <-c.closed:
+	case <-sess.ctx.Done():
+	}
+
+	// Gracefully release session claims
+	return sess.release(true)
+}
+
+// Pause implements ConsumerGroup.
+func (c *consumerGroup) Pause(partitions map[string][]int32) {
+	c.consumer.Pause(partitions)
+}
+
+// Resume implements ConsumerGroup.
+func (c *consumerGroup) Resume(partitions map[string][]int32) {
+	c.consumer.Resume(partitions)
+}
+
+// PauseAll implements ConsumerGroup.
+func (c *consumerGroup) PauseAll() {
+	c.consumer.PauseAll()
+}
+
+// ResumeAll implements ConsumerGroup.
+func (c *consumerGroup) ResumeAll() {
+	c.consumer.ResumeAll()
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case <-c.closed:
+		return nil, ErrClosedConsumerGroup
+	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+	}
+
+	if refreshCoordinator {
+		err := c.client.RefreshCoordinator(c.groupID)
+		if err != nil {
+			if retries <= 0 {
+				return nil, err
+			}
+			return c.retryNewSession(ctx, topics, handler, retries-1, true)
+		}
+	}
+
+	return c.newSession(ctx, topics, handler, retries-1)
+}
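+
+// The retry budget and backoff used above come from the rebalance retry
+// settings; for example (values are illustrative):
+//
+//	config.Consumer.Group.Rebalance.Retry.Max = 4
+//	config.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second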
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		if retries <= 0 {
+			return nil, err
+		}
+
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	}
+
+	var (
+		metricRegistry          = c.metricRegistry
+		consumerGroupJoinTotal  metrics.Counter
+		consumerGroupJoinFailed metrics.Counter
+		consumerGroupSyncTotal  metrics.Counter
+		consumerGroupSyncFailed metrics.Counter
+	)
+
+	if metricRegistry != nil {
+		consumerGroupJoinTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-total-%s", c.groupID), metricRegistry)
+		consumerGroupJoinFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-failed-%s", c.groupID), metricRegistry)
+		consumerGroupSyncTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-total-%s", c.groupID), metricRegistry)
+		consumerGroupSyncFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-failed-%s", c.groupID), metricRegistry)
+	}
+
+	// Join consumer group
+	join, err := c.joinGroupRequest(coordinator, topics)
+	if consumerGroupJoinTotal != nil {
+		consumerGroupJoinTotal.Inc(1)
+	}
+	if err != nil {
+		_ = coordinator.Close()
+		if consumerGroupJoinFailed != nil {
+			consumerGroupJoinFailed.Inc(1)
+		}
+		return nil, err
+	}
+	if !errors.Is(join.Err, ErrNoError) {
+		if consumerGroupJoinFailed != nil {
+			consumerGroupJoinFailed.Inc(1)
+		}
+	}
+	switch join.Err {
+	case ErrNoError:
+		c.memberID = join.MemberId
+	case ErrUnknownMemberId, ErrIllegalGeneration:
+		// reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress:
+		// retry after backoff
+		if retries <= 0 {
+			return nil, join.Err
+		}
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrMemberIdRequired:
+		// from JoinGroupRequest v4 onwards (due to KIP-394) if the client starts
+		// with an empty member id, it needs to get the assigned id from the
+		// response and send another join request with that id to actually join the
+		// group
+		c.memberID = join.MemberId
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrFencedInstancedId:
+		if c.groupInstanceId != nil {
+			Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId)
+		}
+		return nil, join.Err
+	default:
+		return nil, join.Err
+	}
+
+	var strategy BalanceStrategy
+	var ok bool
+	if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy == nil {
+		strategy, ok = c.findStrategy(join.GroupProtocol, c.config.Consumer.Group.Rebalance.GroupStrategies)
+		if !ok {
+			// this case shouldn't happen in practice, since the leader will choose the protocol
+			// that all the members support
+			return nil, fmt.Errorf("unable to find selected strategy: %s", join.GroupProtocol)
+		}
+	}
+
+	// Prepare distribution plan if we joined as the leader
+	var plan BalanceStrategyPlan
+	var members map[string]ConsumerGroupMemberMetadata
+	var allSubscribedTopicPartitions map[string][]int32
+	var allSubscribedTopics []string
+	if join.LeaderId == join.MemberId {
+		members, err = join.GetMembers()
+		if err != nil {
+			return nil, err
+		}
+
+		allSubscribedTopicPartitions, allSubscribedTopics, plan, err = c.balance(strategy, members)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Sync consumer group
+	syncGroupResponse, err := c.syncGroupRequest(coordinator, members, plan, join.GenerationId, strategy)
+	if consumerGroupSyncTotal != nil {
+		consumerGroupSyncTotal.Inc(1)
+	}
+	if err != nil {
+		_ = coordinator.Close()
+		if consumerGroupSyncFailed != nil {
+			consumerGroupSyncFailed.Inc(1)
+		}
+		return nil, err
+	}
+	if !errors.Is(syncGroupResponse.Err, ErrNoError) {
+		if consumerGroupSyncFailed != nil {
+			consumerGroupSyncFailed.Inc(1)
+		}
+	}
+
+	switch syncGroupResponse.Err {
+	case ErrNoError:
+	case ErrUnknownMemberId, ErrIllegalGeneration:
+		// reset member ID and retry immediately
+		c.memberID = ""
+		return c.newSession(ctx, topics, handler, retries)
+	case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress:
+		// retry after backoff
+		if retries <= 0 {
+			return nil, syncGroupResponse.Err
+		}
+		return c.retryNewSession(ctx, topics, handler, retries, true)
+	case ErrFencedInstancedId:
+		if c.groupInstanceId != nil {
+			Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId)
+		}
+		return nil, syncGroupResponse.Err
+	default:
+		return nil, syncGroupResponse.Err
+	}
+
+	// Retrieve and sort claims
+	var claims map[string][]int32
+	if len(syncGroupResponse.MemberAssignment) > 0 {
+		members, err := syncGroupResponse.GetMemberAssignment()
+		if err != nil {
+			return nil, err
+		}
+		claims = members.Topics
+
+		// in the case of stateful balance strategies, hold on to the returned
+		// assignment metadata; otherwise, reset the statically defined consumer
+		// group metadata
+		if members.UserData != nil {
+			c.userData = members.UserData
+		} else {
+			c.userData = c.config.Consumer.Group.Member.UserData
+		}
+
+		for _, partitions := range claims {
+			sort.Sort(int32Slice(partitions))
+		}
+	}
+
+	session, err := newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
+	if err != nil {
+		return nil, err
+	}
+
+	// only the leader needs to check whether there are newly-added partitions in order to trigger a rebalance
+	if join.LeaderId == join.MemberId {
+		go c.loopCheckPartitionNumbers(allSubscribedTopicPartitions, allSubscribedTopics, session)
+	}
+
+	return session, err
+}
+
+func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
+	req := &JoinGroupRequest{
+		GroupId:        c.groupID,
+		MemberId:       c.memberID,
+		SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
+		ProtocolType:   "consumer",
+	}
+	if c.config.Version.IsAtLeast(V0_10_1_0) {
+		req.Version = 1
+		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
+	}
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 2
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 3
+	}
+	// from JoinGroupRequest v4 onwards (due to KIP-394) the client will actually
+	// send two JoinGroupRequests, once with the empty member id, and then again
+	// with the assigned id from the first response. This is handled via the
+	// ErrMemberIdRequired case.
+	if c.config.Version.IsAtLeast(V2_2_0_0) {
+		req.Version = 4
+	}
+	if c.config.Version.IsAtLeast(V2_3_0_0) {
+		req.Version = 5
+		req.GroupInstanceId = c.groupInstanceId
+		if c.config.Version.IsAtLeast(V2_4_0_0) {
+			req.Version = 6
+		}
+	}
+
+	meta := &ConsumerGroupMemberMetadata{
+		Topics:   topics,
+		UserData: c.userData,
+	}
+	var strategy BalanceStrategy
+	if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy != nil {
+		if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+			return nil, err
+		}
+	} else {
+		for _, strategy = range c.config.Consumer.Group.Rebalance.GroupStrategies {
+			if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return coordinator.JoinGroup(req)
+}
+
+// findStrategy returns the BalanceStrategy with the specified protocol name
+// from the slice provided.
+func (c *consumerGroup) findStrategy(name string, groupStrategies []BalanceStrategy) (BalanceStrategy, bool) {
+	for _, strategy := range groupStrategies {
+		if strategy.Name() == name {
+			return strategy, true
+		}
+	}
+	return nil, false
+}
+
+func (c *consumerGroup) syncGroupRequest(
+	coordinator *Broker,
+	members map[string]ConsumerGroupMemberMetadata,
+	plan BalanceStrategyPlan,
+	generationID int32,
+	strategy BalanceStrategy,
+) (*SyncGroupResponse, error) {
+	req := &SyncGroupRequest{
+		GroupId:      c.groupID,
+		MemberId:     c.memberID,
+		GenerationId: generationID,
+	}
+
+	// Versions 1 and 2 are the same as version 0.
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 1
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 2
+	}
+	// Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts.
+	if c.config.Version.IsAtLeast(V2_3_0_0) {
+		req.Version = 3
+		req.GroupInstanceId = c.groupInstanceId
+		if c.config.Version.IsAtLeast(V2_4_0_0) {
+			req.Version = 4
+		}
+	}
+
+	for memberID, topics := range plan {
+		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+		userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
+		if err != nil {
+			return nil, err
+		}
+		assignment.UserData = userDataBytes
+		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+			return nil, err
+		}
+		delete(members, memberID)
+	}
+	// add empty assignments for any remaining members
+	for memberID := range members {
+		if err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{}); err != nil {
+			return nil, err
+		}
+	}
+
+	return coordinator.SyncGroup(req)
+}
+
+func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
+	req := &HeartbeatRequest{
+		GroupId:      c.groupID,
+		MemberId:     memberID,
+		GenerationId: generationID,
+	}
+
+	// Version 1 and version 2 are the same as version 0.
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 1
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 2
+	}
+	// Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts.
+	if c.config.Version.IsAtLeast(V2_3_0_0) {
+		req.Version = 3
+		req.GroupInstanceId = c.groupInstanceId
+		// Version 4 is the first flexible version
+		if c.config.Version.IsAtLeast(V2_4_0_0) {
+			req.Version = 4
+		}
+	}
+
+	return coordinator.Heartbeat(req)
+}
+
+func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (map[string][]int32, []string, BalanceStrategyPlan, error) {
+	topicPartitions := make(map[string][]int32)
+	for _, meta := range members {
+		for _, topic := range meta.Topics {
+			topicPartitions[topic] = nil
+		}
+	}
+
+	allSubscribedTopics := make([]string, 0, len(topicPartitions))
+	for topic := range topicPartitions {
+		allSubscribedTopics = append(allSubscribedTopics, topic)
+	}
+
+	// refresh metadata for all the subscribed topics in the consumer group
+	// to avoid using stale metadata when assigning partitions
+	err := c.client.RefreshMetadata(allSubscribedTopics...)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	for topic := range topicPartitions {
+		partitions, err := c.client.Partitions(topic)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		topicPartitions[topic] = partitions
+	}
+
+	plan, err := strategy.Plan(members, topicPartitions)
+	return topicPartitions, allSubscribedTopics, plan, err
+}
+
+// Leaves the consumer group; called by Close.
+func (c *consumerGroup) leave() error {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.memberID == "" {
+		return nil
+	}
+
+	coordinator, err := c.client.Coordinator(c.groupID)
+	if err != nil {
+		return err
+	}
+
+	// as per KIP-345, if groupInstanceId is set (i.e. static membership is in action), do not leave the group when the consumer is closed; just clear the memberID
+	if c.groupInstanceId != nil {
+		c.memberID = ""
+		return nil
+	}
+	req := &LeaveGroupRequest{
+		GroupId:  c.groupID,
+		MemberId: c.memberID,
+	}
+	if c.config.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 1
+	}
+	if c.config.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 2
+	}
+	if c.config.Version.IsAtLeast(V2_4_0_0) {
+		req.Version = 4
+		req.Members = append(req.Members, MemberIdentity{
+			MemberId: c.memberID,
+		})
+	}
+
+	resp, err := coordinator.LeaveGroup(req)
+	if err != nil {
+		_ = coordinator.Close()
+		return err
+	}
+
+	// clear the memberID
+	c.memberID = ""
+
+	switch resp.Err {
+	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+		return nil
+	default:
+		return resp.Err
+	}
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+	var consumerError *ConsumerError
+	if ok := errors.As(err, &consumerError); !ok && topic != "" && partition > -1 {
+		err = &ConsumerError{
+			Topic:     topic,
+			Partition: partition,
+			Err:       err,
+		}
+	}
+
+	if !c.config.Consumer.Return.Errors {
+		Logger.Println(err)
+		return
+	}
+
+	c.errorsLock.RLock()
+	defer c.errorsLock.RUnlock()
+	select {
+	case <-c.closed:
+		// consumer is closed
+		return
+	default:
+	}
+
+	select {
+	case c.errors <- err:
+	default:
+		// no error listener
+	}
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(allSubscribedTopicPartitions map[string][]int32, topics []string, session *consumerGroupSession) {
+	if c.config.Metadata.RefreshFrequency == time.Duration(0) {
+		return
+	}
+
+	defer session.cancel()
+
+	oldTopicToPartitionNum := make(map[string]int, len(allSubscribedTopicPartitions))
+	for topic, partitions := range allSubscribedTopicPartitions {
+		oldTopicToPartitionNum[topic] = len(partitions)
+	}
+
+	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
+	defer pause.Stop()
+	for {
+		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+			return
+		} else {
+			for topic, num := range oldTopicToPartitionNum {
+				if newTopicToPartitionNum[topic] != num {
+					Logger.Printf(
+						"consumergroup/%s loop check partition number goroutine found the partition count in topics %s changed from %d to %d\n",
+						c.groupID, topics, num, newTopicToPartitionNum[topic])
+					return // trigger the end of the session on exit
+				}
+			}
+		}
+		select {
+		case <-pause.C:
+		case <-session.ctx.Done():
+			Logger.Printf(
+				"consumergroup/%s loop check partition number goroutine exiting, topics %s\n",
+				c.groupID, topics)
+			// if the session was closed elsewhere, exit here as well
+			return
+		case <-c.closed:
+			return
+		}
+	}
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+	topicToPartitionNum := make(map[string]int, len(topics))
+	for _, topic := range topics {
+		if partitions, err := c.client.Partitions(topic); err != nil {
+			Logger.Printf(
+				"consumergroup/%s failed to get the partition number for topic %s: '%v'\n",
+				c.groupID, topic, err)
+			return nil, err
+		} else {
+			topicToPartitionNum[topic] = len(partitions)
+		}
+	}
+	return topicToPartitionNum, nil
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+	// Claims returns information about the claimed partitions by topic.
+	Claims() map[string][]int32
+
+	// MemberID returns the cluster member ID.
+	MemberID() string
+
+	// GenerationID returns the current generation ID.
+	GenerationID() int32
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+	// Commit the offset to the backend
+	//
+	// Note: calling Commit performs a blocking synchronous operation.
+	Commit()
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset: the difference is that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+	// MarkMessage marks a message as consumed.
+	MarkMessage(msg *ConsumerMessage, metadata string)
+
+	// Context returns the session context.
+	Context() context.Context
+}
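+
+// A sketch of the off-by-one convention described above (sess and msg are
+// illustrative): mark the offset of the next message to read, which is what
+// MarkMessage does internally:
+//
+//	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
+//	sess.MarkMessage(msg, "") // equivalent shorthand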
+
+type consumerGroupSession struct {
+	parent       *consumerGroup
+	memberID     string
+	generationID int32
+	handler      ConsumerGroupHandler
+
+	claims  map[string][]int32
+	offsets *offsetManager
+	ctx     context.Context
+	cancel  func()
+
+	waitGroup       sync.WaitGroup
+	releaseOnce     sync.Once
+	hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+	// init context
+	ctx, cancel := context.WithCancel(ctx)
+
+	// init offset manager
+	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client, cancel)
+	if err != nil {
+		return nil, err
+	}
+
+	// init session
+	sess := &consumerGroupSession{
+		parent:       parent,
+		memberID:     memberID,
+		generationID: generationID,
+		handler:      handler,
+		offsets:      offsets,
+		claims:       claims,
+		ctx:          ctx,
+		cancel:       cancel,
+		hbDying:      make(chan none),
+		hbDead:       make(chan none),
+	}
+
+	// start heartbeat loop
+	go sess.heartbeatLoop()
+
+	// create a POM for each claim
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			pom, err := offsets.ManagePartition(topic, partition)
+			if err != nil {
+				_ = sess.release(false)
+				return nil, err
+			}
+
+			// handle POM errors
+			go func(topic string, partition int32) {
+				for err := range pom.Errors() {
+					sess.parent.handleError(err, topic, partition)
+				}
+			}(topic, partition)
+		}
+	}
+
+	// perform setup
+	if err := handler.Setup(sess); err != nil {
+		_ = sess.release(true)
+		return nil, err
+	}
+
+	// start consuming each topic partition in its own goroutine
+	for topic, partitions := range claims {
+		for _, partition := range partitions {
+			sess.waitGroup.Add(1) // increment wait group before spawning goroutine
+			go func(topic string, partition int32) {
+				defer sess.waitGroup.Done()
+				// cancel the group session as soon as any of the consume calls return
+				defer sess.cancel()
+
+				// if partition not currently readable, wait for it to become readable
+				if sess.parent.client.PartitionNotReadable(topic, partition) {
+					timer := time.NewTimer(5 * time.Second)
+					defer timer.Stop()
+
+					for sess.parent.client.PartitionNotReadable(topic, partition) {
+						select {
+						case <-ctx.Done():
+							return
+						case <-parent.closed:
+							return
+						case <-timer.C:
+							timer.Reset(5 * time.Second)
+						}
+					}
+				}
+
+				// consume a single topic/partition, blocking
+				sess.consume(topic, partition)
+			}(topic, partition)
+		}
+	}
+	return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string           { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.MarkOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) Commit() {
+	s.offsets.Commit()
+}
+
+func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		pom.ResetOffset(offset, metadata)
+	}
+}
+
+func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
+	s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
+}
+
+func (s *consumerGroupSession) Context() context.Context {
+	return s.ctx
+}
+
+func (s *consumerGroupSession) consume(topic string, partition int32) {
+	// quick exit if rebalance is due
+	select {
+	case <-s.ctx.Done():
+		return
+	case <-s.parent.closed:
+		return
+	default:
+	}
+
+	// get next offset
+	offset := s.parent.config.Consumer.Offsets.Initial
+	if pom := s.offsets.findPOM(topic, partition); pom != nil {
+		offset, _ = pom.NextOffset()
+	}
+
+	// create new claim
+	claim, err := newConsumerGroupClaim(s, topic, partition, offset)
+	if err != nil {
+		s.parent.handleError(err, topic, partition)
+		return
+	}
+
+	// handle errors
+	go func() {
+		for err := range claim.Errors() {
+			s.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	// trigger close when session is done
+	go func() {
+		select {
+		case <-s.ctx.Done():
+		case <-s.parent.closed:
+		}
+		claim.AsyncClose()
+	}()
+
+	// start processing
+	if err := s.handler.ConsumeClaim(s, claim); err != nil {
+		s.parent.handleError(err, topic, partition)
+	}
+
+	// ensure consumer is closed & drained
+	claim.AsyncClose()
+	for _, err := range claim.waitClosed() {
+		s.parent.handleError(err, topic, partition)
+	}
+}
+
+func (s *consumerGroupSession) release(withCleanup bool) (err error) {
+	// signal release, stop heartbeat
+	s.cancel()
+
+	// wait for consumers to exit
+	s.waitGroup.Wait()
+
+	// perform release
+	s.releaseOnce.Do(func() {
+		if withCleanup {
+			if e := s.handler.Cleanup(s); e != nil {
+				s.parent.handleError(e, "", -1)
+				err = e
+			}
+		}
+
+		if e := s.offsets.Close(); e != nil {
+			err = e
+		}
+
+		close(s.hbDying)
+		<-s.hbDead
+	})
+
+	Logger.Printf(
+		"consumergroup/session/%s/%d released\n",
+		s.MemberID(), s.GenerationID())
+
+	return
+}
+
+func (s *consumerGroupSession) heartbeatLoop() {
+	defer close(s.hbDead)
+	defer s.cancel() // trigger the end of the session on exit
+	defer func() {
+		Logger.Printf(
+			"consumergroup/session/%s/%d heartbeat loop stopped\n",
+			s.MemberID(), s.GenerationID())
+	}()
+
+	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
+	defer pause.Stop()
+
+	retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
+	defer retryBackoff.Stop()
+
+	retries := s.parent.config.Metadata.Retry.Max
+	for {
+		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
+		if err != nil {
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+			retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
+			select {
+			case <-s.hbDying:
+				return
+			case <-retryBackoff.C:
+				retries--
+			}
+			continue
+		}
+
+		resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
+		if err != nil {
+			_ = coordinator.Close()
+
+			if retries <= 0 {
+				s.parent.handleError(err, "", -1)
+				return
+			}
+
+			retries--
+			continue
+		}
+
+		switch resp.Err {
+		case ErrNoError:
+			retries = s.parent.config.Metadata.Retry.Max
+		case ErrRebalanceInProgress:
+			retries = s.parent.config.Metadata.Retry.Max
+			s.cancel()
+		case ErrUnknownMemberId, ErrIllegalGeneration:
+			return
+		case ErrFencedInstancedId:
+			if s.parent.groupInstanceId != nil {
+				Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *s.parent.groupInstanceId)
+			}
+			s.parent.handleError(resp.Err, "", -1)
+			return
+		default:
+			s.parent.handleError(resp.Err, "", -1)
+			return
+		}
+
+		select {
+		case <-pause.C:
+		case <-s.hbDying:
+			return
+		}
+	}
+}
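+
+// The heartbeat cadence and the retry budget used by the loop above are
+// configurable; for example (values are illustrative):
+//
+//	config.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+//	config.Metadata.Retry.Max = 3
+//	config.Metadata.Retry.Backoff = 250 * time.Millisecond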
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
+// The interface also provides hooks for your consumer group session life-cycle, allowing
+// you to trigger logic before or after the consume loop(s).
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
+// ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+	// Setup is run at the beginning of a new session, before ConsumeClaim.
+	Setup(ConsumerGroupSession) error
+
+	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+	// but before the offsets are committed for the very last time.
+	Cleanup(ConsumerGroupSession) error
+
+	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+	// Once the Messages() channel is closed, the Handler must finish its processing
+	// loop and exit.
+	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
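+
+// A minimal handler sketch (illustrative only; exampleHandler is a
+// hypothetical type, and real handlers must keep any shared state safe for
+// concurrent use because ConsumeClaim runs in one goroutine per claim):
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
+//
+//	func (exampleHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+//		for msg := range claim.Messages() {
+//			// process msg here, then mark it as consumed
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}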
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+	// Topic returns the consumed topic name.
+	Topic() string
+
+	// Partition returns the consumed partition.
+	Partition() int32
+
+	// InitialOffset returns the initial offset that was used as a starting point for this claim.
+	InitialOffset() int64
+
+	// HighWaterMarkOffset returns the high watermark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+	HighWaterMarkOffset() int64
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker. The messages channel will be closed when a new rebalance cycle
+	// is due. You must finish processing and mark offsets within
+	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+	// re-assigned to another group member.
+	Messages() <-chan *ConsumerMessage
+}
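+
+// A sketch of estimating consumer lag inside ConsumeClaim using
+// HighWaterMarkOffset (claim and msg are illustrative):
+//
+//	lag := claim.HighWaterMarkOffset() - msg.Offset - 1 // messages still unread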
+
+type consumerGroupClaim struct {
+	topic     string
+	partition int32
+	offset    int64
+	PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+
+	if errors.Is(err, ErrOffsetOutOfRange) && sess.parent.config.Consumer.Group.ResetInvalidOffsets {
+		offset = sess.parent.config.Consumer.Offsets.Initial
+		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	go func() {
+		for err := range pcm.Errors() {
+			sess.parent.handleError(err, topic, partition)
+		}
+	}()
+
+	return &consumerGroupClaim{
+		topic:             topic,
+		partition:         partition,
+		offset:            offset,
+		PartitionConsumer: pcm,
+	}, nil
+}
+
+func (c *consumerGroupClaim) Topic() string        { return c.topic }
+func (c *consumerGroupClaim) Partition() int32     { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// Drains messages and errors, and ensures the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+	go func() {
+		for range c.Messages() {
+		}
+	}()
+
+	for err := range c.Errors() {
+		errs = append(errs, err)
+	}
+	return
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go
new file mode 100644
index 0000000..2d38960
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_group_members.go
@@ -0,0 +1,183 @@
+package sarama
+
+import "errors"
+
+// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json
+type ConsumerGroupMemberMetadata struct {
+	Version         int16
+	Topics          []string
+	UserData        []byte
+	OwnedPartitions []*OwnedPartition
+	GenerationID    int32
+	RackID          *string
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putStringArray(m.Topics); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	if m.Version >= 1 {
+		if err := pe.putArrayLength(len(m.OwnedPartitions)); err != nil {
+			return err
+		}
+		for _, op := range m.OwnedPartitions {
+			if err := op.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	if m.Version >= 2 {
+		pe.putInt32(m.GenerationID)
+	}
+
+	if m.Version >= 3 {
+		if err := pe.putNullableString(m.RackID); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	if m.Topics, err = pd.getStringArray(); err != nil {
+		return
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+	if m.Version >= 1 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			// permit missing data here in case of misbehaving 3rd party
+			// clients who incorrectly marked the member metadata as V1 in
+			// their JoinGroup request
+			if errors.Is(err, ErrInsufficientData) {
+				return nil
+			}
+			return err
+		}
+		if n > 0 {
+			m.OwnedPartitions = make([]*OwnedPartition, n)
+			for i := 0; i < n; i++ {
+				m.OwnedPartitions[i] = &OwnedPartition{}
+				if err := m.OwnedPartitions[i].decode(pd); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	if m.Version >= 2 {
+		if m.GenerationID, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if m.Version >= 3 {
+		if m.RackID, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type OwnedPartition struct {
+	Topic      string
+	Partitions []int32
+}
+
+func (m *OwnedPartition) encode(pe packetEncoder) error {
+	if err := pe.putString(m.Topic); err != nil {
+		return err
+	}
+	if err := pe.putInt32Array(m.Partitions); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (m *OwnedPartition) decode(pd packetDecoder) (err error) {
+	if m.Topic, err = pd.getString(); err != nil {
+		return err
+	}
+	if m.Partitions, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json
+type ConsumerGroupMemberAssignment struct {
+	Version  int16
+	Topics   map[string][]int32
+	UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+	pe.putInt16(m.Version)
+
+	if err := pe.putArrayLength(len(m.Topics)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range m.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putBytes(m.UserData); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+	if m.Version, err = pd.getInt16(); err != nil {
+		return
+	}
+
+	var topicLen int
+	if topicLen, err = pd.getArrayLength(); err != nil {
+		return
+	}
+
+	m.Topics = make(map[string][]int32, topicLen)
+	for i := 0; i < topicLen; i++ {
+		var topic string
+		if topic, err = pd.getString(); err != nil {
+			return
+		}
+		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+			return
+		}
+	}
+
+	if m.UserData, err = pd.getBytes(); err != nil {
+		return
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..85ffb03
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_metadata_request.go
@@ -0,0 +1,55 @@
+package sarama
+
+// ConsumerMetadataRequest is used to look up the coordinator for a consumer group; it is encoded on the wire as a FindCoordinatorRequest
+type ConsumerMetadataRequest struct {
+	Version       int16
+	ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+	tmp := new(FindCoordinatorRequest)
+	tmp.CoordinatorKey = r.ConsumerGroup
+	tmp.CoordinatorType = CoordinatorGroup
+	tmp.Version = r.Version
+	return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorRequest)
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+	r.ConsumerGroup = tmp.CoordinatorKey
+	return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ConsumerMetadataRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *ConsumerMetadataRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..7fb080b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_metadata_response.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+	"net"
+	"strconv"
+)
+
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
+type ConsumerMetadataResponse struct {
+	Version         int16
+	Err             KError
+	Coordinator     *Broker
+	CoordinatorID   int32  // deprecated: use Coordinator.ID()
+	CoordinatorHost string // deprecated: use Coordinator.Addr()
+	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	tmp := new(FindCoordinatorResponse)
+
+	if err := tmp.decode(pd, version); err != nil {
+		return err
+	}
+
+	r.Err = tmp.Err
+
+	r.Coordinator = tmp.Coordinator
+	if tmp.Coordinator == nil {
+		return nil
+	}
+
+	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+	// backwards compatibility
+	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+	if err != nil {
+		return err
+	}
+	port, err := strconv.ParseInt(portstr, 10, 32)
+	if err != nil {
+		return err
+	}
+	r.CoordinatorID = r.Coordinator.ID()
+	r.CoordinatorHost = host
+	r.CoordinatorPort = int32(port)
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+	if r.Coordinator == nil {
+		r.Coordinator = new(Broker)
+		r.Coordinator.id = r.CoordinatorID
+		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+	}
+
+	tmp := &FindCoordinatorResponse{
+		Version:     r.Version,
+		Err:         r.Err,
+		Coordinator: r.Coordinator,
+	}
+
+	if err := tmp.encode(pe); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *ConsumerMetadataResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/control_record.go
rename to vendor/github.com/IBM/sarama/control_record.go
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/crc32_field.go
rename to vendor/github.com/IBM/sarama/crc32_field.go
diff --git a/vendor/github.com/IBM/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go
new file mode 100644
index 0000000..75e2dec
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_partitions_request.go
@@ -0,0 +1,141 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+	Version         int16
+	TopicPartitions map[string]*TopicPartition
+	Timeout         time.Duration
+	ValidateOnly    bool
+}
+
+func (c *CreatePartitionsRequest) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partition := range c.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partition.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	pe.putBool(c.ValidateOnly)
+
+	return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	c.TopicPartitions = make(map[string]*TopicPartition, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitions[topic] = new(TopicPartition)
+		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if c.ValidateOnly, err = pd.getBool(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+	return apiKeyCreatePartitions
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *CreatePartitionsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *CreatePartitionsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_0_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+type TopicPartition struct {
+	Count      int32
+	Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+	pe.putInt32(t.Count)
+
+	if len(t.Assignment) == 0 {
+		pe.putInt32(-1)
+		return nil
+	}
+
+	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+		return err
+	}
+
+	for _, assign := range t.Assignment {
+		if err := pe.putInt32Array(assign); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+	if t.Count, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	n, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if n <= 0 {
+		return nil
+	}
+	t.Assignment = make([][]int32, n)
+
+	for i := 0; i < int(n); i++ {
+		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
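+
+// A sketch of growing a topic to six partitions with broker-assigned
+// replicas (values are illustrative; such requests are normally issued
+// through the admin client rather than built by hand):
+//
+//	req := &sarama.CreatePartitionsRequest{
+//		Version: 1,
+//		TopicPartitions: map[string]*sarama.TopicPartition{
+//			"example-topic": {Count: 6}, // no manual Assignment
+//		},
+//		Timeout:      30 * time.Second,
+//		ValidateOnly: false,
+//	}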
diff --git a/vendor/github.com/IBM/sarama/create_partitions_response.go b/vendor/github.com/IBM/sarama/create_partitions_response.go
new file mode 100644
index 0000000..0785c35
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_partitions_response.go
@@ -0,0 +1,130 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type CreatePartitionsResponse struct {
+	Version              int16
+	ThrottleTime         time.Duration
+	TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(c.ThrottleTime)
+	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+		return err
+	}
+
+	for topic, partitionError := range c.TopicPartitionErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := partitionError.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if c.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+	return apiKeyCreatePartitions
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *CreatePartitionsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *CreatePartitionsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_0_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *CreatePartitionsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type TopicPartitionError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicPartitionError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
+func (t *TopicPartitionError) Unwrap() error {
+	return t.Err
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+	pe.putKError(t.Err)
+
+	if err := pe.putNullableString(t.ErrMsg); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if t.ErrMsg, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go
new file mode 100644
index 0000000..5684e27
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_topics_request.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+	"time"
+)
+
+type CreateTopicsRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// TopicDetails contains the topics to create.
+	TopicDetails map[string]*TopicDetail
+	// Timeout contains how long to wait before timing out the request.
+	Timeout time.Duration
+	// ValidateOnly, if true, checks that the topics can be created as
+	// specified without actually creating anything.
+	ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) setVersion(v int16) {
+	c.Version = v
+}
+
+func NewCreateTopicsRequest(
+	version KafkaVersion,
+	topicDetails map[string]*TopicDetail,
+	timeout time.Duration,
+	validateOnly bool,
+) *CreateTopicsRequest {
+	r := &CreateTopicsRequest{
+		TopicDetails: topicDetails,
+		Timeout:      timeout,
+		ValidateOnly: validateOnly,
+	}
+	switch {
+	case version.IsAtLeast(V2_4_0_0):
+		// Version 5 is the first flexible version
+		// Version 4 makes partitions/replicationFactor optional even when assignments are not present (KIP-464)
+		r.Version = 5
+	case version.IsAtLeast(V2_0_0_0):
+		// Version 3 is the same as version 2 (brokers response before throttling)
+		r.Version = 3
+	case version.IsAtLeast(V0_11_0_0):
+		// Version 2 is the same as version 1 (response has ThrottleTime)
+		r.Version = 2
+	case version.IsAtLeast(V0_10_2_0):
+		// Version 1 adds validateOnly.
+		r.Version = 1
+	}
+	return r
+}
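+
+// A sketch of building a request for a three-partition, doubly-replicated
+// topic with a custom retention setting (values are illustrative); the
+// constructor derives the protocol version from the given KafkaVersion:
+//
+//	retention := "86400000" // one day, in milliseconds
+//	req := sarama.NewCreateTopicsRequest(
+//		sarama.V2_4_0_0,
+//		map[string]*sarama.TopicDetail{
+//			"example-topic": {
+//				NumPartitions:     3,
+//				ReplicationFactor: 2,
+//				ConfigEntries:     map[string]*string{"retention.ms": &retention},
+//			},
+//		},
+//		30*time.Second,
+//		false, // validateOnly
+//	)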
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+		return err
+	}
+	for topic, detail := range c.TopicDetails {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := detail.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+	if c.Version >= 1 {
+		pe.putBool(c.ValidateOnly)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicDetails = make(map[string]*TopicDetail, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicDetails[topic] = new(TopicDetail)
+		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	c.Timeout = time.Duration(timeout) * time.Millisecond
+
+	if version >= 1 {
+		c.ValidateOnly, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		c.Version = version
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+	return apiKeyCreateTopics
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsRequest) headerVersion() int16 {
+	if c.Version >= 5 {
+		return 2
+	}
+	return 1
+}
+
+func (c *CreateTopicsRequest) isFlexible() bool {
+	return c.isFlexibleVersion(c.Version)
+}
+
+func (c *CreateTopicsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (c *CreateTopicsRequest) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 5
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_2_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+type TopicDetail struct {
+	// NumPartitions contains the number of partitions to create in the topic, or
+	// -1 if we are either specifying a manual partition assignment or using the
+	// default partitions.
+	NumPartitions int32
+	// ReplicationFactor contains the number of replicas to create for each
+	// partition in the topic, or -1 if we are either specifying a manual
+	// partition assignment or using the default replication factor.
+	ReplicationFactor int16
+	// ReplicaAssignment contains the manual partition assignment, or the empty
+	// array if we are using automatic assignment.
+	ReplicaAssignment map[int32][]int32
+	// ConfigEntries contains the custom topic configurations to set.
+	ConfigEntries map[string]*string
+}
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+	pe.putInt32(t.NumPartitions)
+	pe.putInt16(t.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+		return err
+	}
+	for partition, assignment := range t.ReplicaAssignment {
+		pe.putInt32(partition)
+		if err := pe.putInt32Array(assignment); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+		return err
+	}
+	for configKey, configValue := range t.ConfigEntries {
+		if err := pe.putString(configKey); err != nil {
+			return err
+		}
+		if err := pe.putNullableString(configValue); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+	if t.NumPartitions, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ReplicaAssignment = make(map[int32][]int32, n)
+		for i := 0; i < n; i++ {
+			replica, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	n, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.ConfigEntries = make(map[string]*string, n)
+		for i := 0; i < n; i++ {
+			configKey, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
diff --git a/vendor/github.com/IBM/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go
new file mode 100644
index 0000000..f8be692
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_topics_response.go
@@ -0,0 +1,320 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type CreateTopicsResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration for which the request was throttled due
+	// to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTime time.Duration
+	// TopicErrors contains a map of any errors for the topics we tried to create.
+	TopicErrors map[string]*TopicError
+	// TopicResults contains a map of the results for the topics we tried to create.
+	TopicResults map[string]*CreatableTopicResult
+}
+
+func (c *CreateTopicsResponse) setVersion(v int16) {
+	c.Version = v
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+	if c.Version >= 2 {
+		pe.putDurationMs(c.ThrottleTime)
+	}
+
+	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+		return err
+	}
+	for topic, topicError := range c.TopicErrors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := topicError.encode(pe, c.Version); err != nil {
+			return err
+		}
+		if c.Version >= 5 {
+			result, ok := c.TopicResults[topic]
+			if !ok {
+				return fmt.Errorf("expected TopicResult for topic %s for V5 protocol", topic)
+			}
+			if err := result.encode(pe, c.Version); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	c.Version = version
+
+	if version >= 2 {
+		if c.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	c.TopicErrors = make(map[string]*TopicError, n)
+	if version >= 5 {
+		c.TopicResults = make(map[string]*CreatableTopicResult, n)
+	}
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		c.TopicErrors[topic] = new(TopicError)
+		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+			return err
+		}
+		if version >= 5 {
+			c.TopicResults[topic] = &CreatableTopicResult{}
+			if err := c.TopicResults[topic].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+	return apiKeyCreateTopics
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+	return c.Version
+}
+
+func (c *CreateTopicsResponse) headerVersion() int16 {
+	if c.Version >= 5 {
+		return 1
+	}
+	return 0
+}
+
+func (c *CreateTopicsResponse) isFlexible() bool {
+	return c.isFlexibleVersion(c.Version)
+}
+
+func (c *CreateTopicsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (c *CreateTopicsResponse) isValidVersion() bool {
+	return c.Version >= 0 && c.Version <= 5
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+	switch c.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_2_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+func (r *CreateTopicsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type TopicError struct {
+	Err    KError
+	ErrMsg *string
+}
+
+func (t *TopicError) Error() string {
+	text := t.Err.Error()
+	if t.ErrMsg != nil {
+		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+	}
+	return text
+}
+
+func (t *TopicError) Unwrap() error {
+	return t.Err
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+	pe.putKError(t.Err)
+
+	if version >= 1 {
+		if err := pe.putNullableString(t.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		if t.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// CreatableTopicResult contains the per-topic result data returned in v5+
+// CreateTopics responses.
+type CreatableTopicResult struct {
+	// TopicConfigErrorCode contains an optional topic config error, set when
+	// configs are not returned in the response.
+	TopicConfigErrorCode KError
+	// NumPartitions contains the number of partitions of the topic.
+	NumPartitions int32
+	// ReplicationFactor contains the replication factor of the topic.
+	ReplicationFactor int16
+	// Configs contains the configuration of the topic.
+	Configs map[string]*CreatableTopicConfigs
+}
+
+func (r *CreatableTopicResult) encode(pe packetEncoder, version int16) error {
+	pe.putInt32(r.NumPartitions)
+	pe.putInt16(r.ReplicationFactor)
+
+	if err := pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+	for name, config := range r.Configs {
+		if err := pe.putString(name); err != nil {
+			return err
+		}
+		if err := config.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	if r.TopicConfigErrorCode == ErrNoError {
+		pe.putEmptyTaggedFieldArray()
+		return nil
+	}
+
+	// TODO: refactor to helper for tagged fields
+	pe.putUVarint(1) // number of tagged fields
+
+	pe.putUVarint(0) // tag
+
+	pe.putUVarint(2) // value length
+
+	pe.putKError(r.TopicConfigErrorCode) // tag value
+
+	return nil
+}
+
+func (r *CreatableTopicResult) decode(pd packetDecoder, version int16) (err error) {
+	r.NumPartitions, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	r.ReplicationFactor, err = pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	r.Configs = make(map[string]*CreatableTopicConfigs, n)
+	for i := 0; i < n; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Configs[name] = &CreatableTopicConfigs{}
+		if err := r.Configs[name].decode(pd, version); err != nil {
+			return err
+		}
+	}
+	err = pd.getTaggedFieldArray(taggedFieldDecoders{
+		0: func(pd packetDecoder) error {
+			r.TopicConfigErrorCode, err = pd.getKError()
+			if err != nil {
+				return err
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// CreatableTopicConfigs contains a Configuration of the topic.
+type CreatableTopicConfigs struct {
+	// Value contains the configuration value.
+	Value *string
+	// ReadOnly contains true if the configuration is read-only.
+	ReadOnly bool
+	// ConfigSource contains the configuration source.
+	ConfigSource ConfigSource
+	// IsSensitive contains true if this configuration is sensitive.
+	IsSensitive bool
+}
+
+func (c *CreatableTopicConfigs) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putNullableString(c.Value); err != nil {
+		return err
+	}
+	pe.putBool(c.ReadOnly)
+	pe.putInt8(int8(c.ConfigSource))
+	pe.putBool(c.IsSensitive)
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *CreatableTopicConfigs) decode(pd packetDecoder, version int16) (err error) {
+	c.Value, err = pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	c.ReadOnly, err = pd.getBool()
+	if err != nil {
+		return err
+	}
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.ConfigSource = ConfigSource(source)
+	c.IsSensitive, err = pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+		return err
+	}
+	return nil
+}
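Because TopicError implements both Error and Unwrap, the standard errors helpers match the embedded KError directly. A small sketch, assuming the ErrTopicAlreadyExists sentinel from sarama's error definitions:

package main

import (
	"errors"
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	msg := "topic 'orders' already exists"
	var err error = &sarama.TopicError{
		Err:    sarama.ErrTopicAlreadyExists,
		ErrMsg: &msg,
	}

	// Unwrap returns the KError, so errors.Is sees through the wrapper.
	if errors.Is(err, sarama.ErrTopicAlreadyExists) {
		fmt.Println("create can be treated as idempotent:", err)
	}
}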
diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go
new file mode 100644
index 0000000..0a09983
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/decompress.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+	"bytes"
+	"fmt"
+	"sync"
+
+	snappy "github.com/eapache/go-xerial-snappy"
+	"github.com/klauspost/compress/gzip"
+	"github.com/pierrec/lz4/v4"
+)
+
+var (
+	lz4ReaderPool = sync.Pool{
+		New: func() interface{} {
+			return lz4.NewReader(nil)
+		},
+	}
+
+	gzipReaderPool sync.Pool
+
+	bufferPool = sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+
+	bytesPool = sync.Pool{
+		New: func() interface{} {
+			res := make([]byte, 0, 4096)
+			return &res
+		},
+	}
+)
+
+func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
+	switch cc {
+	case CompressionNone:
+		return data, nil
+	case CompressionGZIP:
+		var err error
+		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
+		if !ok {
+			reader, err = gzip.NewReader(bytes.NewReader(data))
+		} else {
+			err = reader.Reset(bytes.NewReader(data))
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		buffer := bufferPool.Get().(*bytes.Buffer)
+		_, err = buffer.ReadFrom(reader)
+		// copy the buffer to a new slice with the correct length
+		// reuse gzipReader and buffer
+		gzipReaderPool.Put(reader)
+		res := make([]byte, buffer.Len())
+		copy(res, buffer.Bytes())
+		buffer.Reset()
+		bufferPool.Put(buffer)
+
+		return res, err
+	case CompressionSnappy:
+		return snappy.Decode(data)
+	case CompressionLZ4:
+		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
+		if !ok {
+			reader = lz4.NewReader(bytes.NewReader(data))
+		} else {
+			reader.Reset(bytes.NewReader(data))
+		}
+		buffer := bufferPool.Get().(*bytes.Buffer)
+		_, err := buffer.ReadFrom(reader)
+		// copy the buffer to a new slice with the correct length
+		// reuse lz4Reader and buffer
+		lz4ReaderPool.Put(reader)
+		res := make([]byte, buffer.Len())
+		copy(res, buffer.Bytes())
+		buffer.Reset()
+		bufferPool.Put(buffer)
+
+		return res, err
+	case CompressionZSTD:
+		buffer := *bytesPool.Get().(*[]byte)
+		var err error
+		buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data)
+		// copy the buffer to a new slice with the correct length and reuse buffer
+		res := make([]byte, len(buffer))
+		copy(res, buffer)
+		buffer = buffer[:0]
+		bytesPool.Put(&buffer)
+
+		return res, err
+	default:
+		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
+	}
+}
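Every branch of decompress follows the same pool-and-copy discipline: a pooled buffer is filled, its contents are copied into a freshly allocated slice, and only then is the buffer reset and returned to the pool, so no caller ever aliases pooled memory. A standalone sketch of that pattern:

package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufferPool mirrors the pool used by decompress: buffers are reused across
// calls, so results must be copied out before the buffer is recycled.
var bufferPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func drainToOwnedSlice(fill func(*bytes.Buffer) error) ([]byte, error) {
	buf := bufferPool.Get().(*bytes.Buffer)
	err := fill(buf)
	res := make([]byte, buf.Len())
	copy(res, buf.Bytes()) // snapshot before the buffer goes back to the pool
	buf.Reset()
	bufferPool.Put(buf)
	return res, err
}

func main() {
	out, _ := drainToOwnedSlice(func(b *bytes.Buffer) error {
		_, err := b.WriteString("decompressed payload")
		return err
	})
	fmt.Printf("%s\n", out)
}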
diff --git a/vendor/github.com/IBM/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go
new file mode 100644
index 0000000..e6c1db0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_groups_request.go
@@ -0,0 +1,71 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+	Version int16
+	Groups  []string
+}
+
+func (r *DeleteGroupsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(r.Groups); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Groups, err = pd.getStringArray()
+	if err != nil {
+		return err
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+	return apiKeyDeleteGroups
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteGroupsRequest) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 2
+	}
+	return 1
+}
+
+func (r *DeleteGroupsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DeleteGroupsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DeleteGroupsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_1_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
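AddGroup simply accumulates group IDs on the request. A sketch of preparing a request for two groups (actually sending it, e.g. via a broker's DeleteGroups call, is assumed to happen elsewhere):

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	req := &sarama.DeleteGroupsRequest{}
	req.AddGroup("orders-consumer")
	req.AddGroup("payments-consumer")
	fmt.Println(req.Groups) // [orders-consumer payments-consumer]
}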
diff --git a/vendor/github.com/IBM/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go
new file mode 100644
index 0000000..ca683cd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_groups_response.go
@@ -0,0 +1,111 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteGroupsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+
+	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+		return err
+	}
+	for groupID, errorCode := range r.GroupErrorCodes {
+		if err := pe.putString(groupID); err != nil {
+			return err
+		}
+		pe.putKError(errorCode)
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		_, err = pd.getEmptyTaggedFieldArray()
+		return err
+	}
+
+	r.GroupErrorCodes = make(map[string]KError, n)
+	for i := 0; i < n; i++ {
+		groupID, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.GroupErrorCodes[groupID], err = pd.getKError()
+		if err != nil {
+			return err
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+	return apiKeyDeleteGroups
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteGroupsResponse) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 1
+	}
+	return 0
+}
+
+func (r *DeleteGroupsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DeleteGroupsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DeleteGroupsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V1_1_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *DeleteGroupsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go
new file mode 100644
index 0000000..8fd10ea
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_offsets_request.go
@@ -0,0 +1,101 @@
+package sarama
+
+type DeleteOffsetsRequest struct {
+	Version    int16
+	Group      string
+	partitions map[string][]int32
+}
+
+func (r *DeleteOffsetsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteOffsetsRequest) encode(pe packetEncoder) (err error) {
+	err = pe.putString(r.Group)
+	if err != nil {
+		return err
+	}
+
+	if r.partitions == nil {
+		pe.putInt32(0)
+	} else {
+		if err = pe.putArrayLength(len(r.partitions)); err != nil {
+			return err
+		}
+	}
+	for topic, partitions := range r.partitions {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putInt32Array(partitions)
+		if err != nil {
+			return err
+		}
+	}
+	return
+}
+
+func (r *DeleteOffsetsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Group, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	var partitionCount int
+
+	partitionCount, err = pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+		return nil
+	}
+
+	r.partitions = make(map[string][]int32, partitionCount)
+	for i := 0; i < partitionCount; i++ {
+		var topic string
+		topic, err = pd.getString()
+		if err != nil {
+			return err
+		}
+
+		var partitions []int32
+		partitions, err = pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+
+		r.partitions[topic] = partitions
+	}
+
+	return nil
+}
+
+func (r *DeleteOffsetsRequest) key() int16 {
+	return apiKeyOffsetDelete
+}
+
+func (r *DeleteOffsetsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteOffsetsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DeleteOffsetsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *DeleteOffsetsRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
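Since the partitions map is unexported, AddPartition is the only way to populate a DeleteOffsetsRequest; repeated calls append to the same topic's slice. A short sketch:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	req := &sarama.DeleteOffsetsRequest{Group: "orders-consumer"}
	req.AddPartition("orders", 0)
	req.AddPartition("orders", 1) // appends to the existing "orders" entry
	req.AddPartition("refunds", 0)
	fmt.Println(req.Group)
}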
diff --git a/vendor/github.com/IBM/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go
new file mode 100644
index 0000000..64180b1
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_offsets_response.go
@@ -0,0 +1,121 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteOffsetsResponse struct {
+	Version int16
+	// The top-level error code, or 0 if there was no error.
+	ErrorCode    KError
+	ThrottleTime time.Duration
+	// The responses for each partition of the topics.
+	Errors map[string]map[int32]KError
+}
+
+func (r *DeleteOffsetsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DeleteOffsetsResponse) AddError(topic string, partition int32, errorCode KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = errorCode
+}
+
+func (r *DeleteOffsetsResponse) encode(pe packetEncoder) error {
+	pe.putKError(r.ErrorCode)
+	pe.putDurationMs(r.ThrottleTime)
+
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, errorCode := range partitions {
+			pe.putInt32(partition)
+			pe.putKError(errorCode)
+		}
+	}
+	return nil
+}
+
+func (r *DeleteOffsetsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[name][id], err = pd.getKError()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *DeleteOffsetsResponse) key() int16 {
+	return apiKeyOffsetDelete
+}
+
+func (r *DeleteOffsetsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DeleteOffsetsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *DeleteOffsetsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *DeleteOffsetsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go
new file mode 100644
index 0000000..05538dd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_records_request.go
@@ -0,0 +1,145 @@
+package sarama
+
+import (
+	"slices"
+	"sort"
+	"time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) offset(int64)
+
+type DeleteRecordsRequest struct {
+	Version int16
+	Topics  map[string]*DeleteRecordsRequestTopic
+	Timeout time.Duration
+}
+
+func (d *DeleteRecordsRequest) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsRequestTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+
+	return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+	return apiKeyDeleteRecords
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteRecordsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (d *DeleteRecordsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+type DeleteRecordsRequestTopic struct {
+	PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.PartitionOffsets))
+	for partition := range t.PartitionOffsets {
+		keys = append(keys, partition)
+	}
+	slices.Sort(keys)
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		pe.putInt64(t.PartitionOffsets[partition])
+	}
+	return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.PartitionOffsets = make(map[int32]int64, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			offset, err := pd.getInt64()
+			if err != nil {
+				return err
+			}
+			t.PartitionOffsets[partition] = offset
+		}
+	}
+
+	return nil
+}
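Both encode paths above sort their map keys before writing, so the wire bytes are deterministic even though Go map iteration order is not. A sketch of constructing the request with the exported types from this file:

package main

import (
	"fmt"
	"time"

	"github.com/IBM/sarama"
)

func main() {
	req := &sarama.DeleteRecordsRequest{
		Topics: map[string]*sarama.DeleteRecordsRequestTopic{
			// Delete everything below offset 1000 on partition 0 and
			// below 2500 on partition 1.
			"orders": {PartitionOffsets: map[int32]int64{0: 1000, 1: 2500}},
		},
		Timeout: 10 * time.Second,
	}
	fmt.Println(len(req.Topics))
}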
diff --git a/vendor/github.com/IBM/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go
new file mode 100644
index 0000000..4a41811
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_records_response.go
@@ -0,0 +1,177 @@
+package sarama
+
+import (
+	"slices"
+	"sort"
+	"time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+//  name(string) [partition]
+// where partition is:
+//  id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(d.ThrottleTime)
+
+	if err := pe.putArrayLength(len(d.Topics)); err != nil {
+		return err
+	}
+	keys := make([]string, 0, len(d.Topics))
+	for topic := range d.Topics {
+		keys = append(keys, topic)
+	}
+	sort.Strings(keys)
+	for _, topic := range keys {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := d.Topics[topic].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) (err error) {
+	d.Version = version
+
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+		for i := 0; i < n; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponseTopic)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			d.Topics[topic] = details
+		}
+	}
+
+	return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+	return apiKeyDeleteRecords
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteRecordsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (d *DeleteRecordsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *DeleteRecordsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type DeleteRecordsResponseTopic struct {
+	Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+		return err
+	}
+	keys := make([]int32, 0, len(t.Partitions))
+	for partition := range t.Partitions {
+		keys = append(keys, partition)
+	}
+	slices.Sort(keys)
+	for _, partition := range keys {
+		pe.putInt32(partition)
+		if err := t.Partitions[partition].encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+		for i := 0; i < n; i++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			details := new(DeleteRecordsResponsePartition)
+			if err = details.decode(pd, version); err != nil {
+				return err
+			}
+			t.Partitions[partition] = details
+		}
+	}
+
+	return nil
+}
+
+type DeleteRecordsResponsePartition struct {
+	LowWatermark int64
+	Err          KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+	pe.putInt64(t.LowWatermark)
+	pe.putKError(t.Err)
+	return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+	lowWatermark, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	t.LowWatermark = lowWatermark
+
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go
new file mode 100644
index 0000000..cb9e924
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_topics_request.go
@@ -0,0 +1,98 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+	Version int16
+	Topics  []string
+	Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) setVersion(v int16) {
+	d.Version = v
+}
+
+func NewDeleteTopicsRequest(version KafkaVersion, topics []string, timeout time.Duration) *DeleteTopicsRequest {
+	d := &DeleteTopicsRequest{
+		Topics:  topics,
+		Timeout: timeout,
+	}
+	if version.IsAtLeast(V2_4_0_0) {
+		d.Version = 4
+	} else if version.IsAtLeast(V2_1_0_0) {
+		d.Version = 3
+	} else if version.IsAtLeast(V2_0_0_0) {
+		d.Version = 2
+	} else if version.IsAtLeast(V0_11_0_0) {
+		d.Version = 1
+	}
+	return d
+}
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(d.Topics); err != nil {
+		return err
+	}
+	pe.putInt32(int32(d.Timeout / time.Millisecond))
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+	if d.Topics, err = pd.getStringArray(); err != nil {
+		return err
+	}
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	d.Timeout = time.Duration(timeout) * time.Millisecond
+	d.Version = version
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+	return apiKeyDeleteTopics
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsRequest) headerVersion() int16 {
+	if d.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (d *DeleteTopicsRequest) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DeleteTopicsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (d *DeleteTopicsRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 4
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_1_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_2_0_0
+	}
+}
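NewDeleteTopicsRequest pins the protocol version from the broker's KafkaVersion rather than leaving it at zero, which is what keeps the flexible v4 encoding off older clusters. A usage sketch:

package main

import (
	"fmt"
	"time"

	"github.com/IBM/sarama"
)

func main() {
	// A 2.4 cluster negotiates v4 (flexible); a 1.1 cluster would get v1.
	req := sarama.NewDeleteTopicsRequest(sarama.V2_4_0_0, []string{"orders"}, 15*time.Second)
	fmt.Println(req.Version) // 4
}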
diff --git a/vendor/github.com/IBM/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go
new file mode 100644
index 0000000..4b53f0b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_topics_response.go
@@ -0,0 +1,118 @@
+package sarama
+
+import (
+	"time"
+)
+
+type DeleteTopicsResponse struct {
+	Version         int16
+	ThrottleTime    time.Duration
+	TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+	if d.Version >= 1 {
+		pe.putDurationMs(d.ThrottleTime)
+	}
+
+	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+		return err
+	}
+	for topic, errorCode := range d.TopicErrorCodes {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		pe.putKError(errorCode)
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+	// record the version unconditionally so v0 responses round-trip correctly
+	d.Version = version
+
+	if version >= 1 {
+		if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	d.TopicErrorCodes = make(map[string]KError, n)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		d.TopicErrorCodes[topic], err = pd.getKError()
+		if err != nil {
+			return err
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+	return apiKeyDeleteTopics
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DeleteTopicsResponse) headerVersion() int16 {
+	if d.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (d *DeleteTopicsResponse) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DeleteTopicsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (d *DeleteTopicsResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 4
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_1_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_1_0
+	default:
+		return V2_2_0_0
+	}
+}
+
+func (r *DeleteTopicsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go
new file mode 100644
index 0000000..120ed33
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go
@@ -0,0 +1,194 @@
+package sarama
+
+// DescribeClientQuotas Request (Version: 0) => [components] strict
+//   components => entity_type match_type match
+//     entity_type => STRING
+//     match_type => INT8
+//     match => NULLABLE_STRING
+//   strict => BOOLEAN
+// DescribeClientQuotas Request (Version: 1) => [components] strict _tagged_fields
+//   components => entity_type match_type match _tagged_fields
+//     entity_type => COMPACT_STRING
+//     match_type => INT8
+//     match => COMPACT_NULLABLE_STRING
+//   strict => BOOLEAN
+
+// DescribeClientQuotasRequest contains a filter to be applied to matching
+// client quotas.
+// Components: the components to filter on
+// Strict: whether the filter only includes specified components
+type DescribeClientQuotasRequest struct {
+	Version    int16
+	Components []QuotaFilterComponent
+	Strict     bool
+}
+
+func NewDescribeClientQuotasRequest(version KafkaVersion, components []QuotaFilterComponent, strict bool) *DescribeClientQuotasRequest {
+	d := &DescribeClientQuotasRequest{
+		Components: components,
+		Strict:     strict,
+	}
+	if version.IsAtLeast(V2_8_0_0) {
+		d.Version = 1
+	}
+	return d
+}
+
+func (d *DescribeClientQuotasRequest) setVersion(v int16) {
+	d.Version = v
+}
+
+// QuotaFilterComponent describes a component for applying a client quota filter.
+// EntityType: the entity type the filter component applies to ("user", "client-id", "ip")
+// MatchType: the match type of the filter component (any, exact, default)
+// Match: the name that's matched exactly (used when MatchType is QuotaMatchExact)
+type QuotaFilterComponent struct {
+	EntityType QuotaEntityType
+	MatchType  QuotaMatchType
+	Match      string
+}
+
+func (d *DescribeClientQuotasRequest) encode(pe packetEncoder) error {
+	// Components
+	if err := pe.putArrayLength(len(d.Components)); err != nil {
+		return err
+	}
+	for _, c := range d.Components {
+		if err := c.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// Strict
+	pe.putBool(d.Strict)
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (d *DescribeClientQuotasRequest) decode(pd packetDecoder, version int16) error {
+	// Components
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		d.Components = make([]QuotaFilterComponent, componentCount)
+		for i := range d.Components {
+			c := QuotaFilterComponent{}
+			if err = c.decode(pd, version); err != nil {
+				return err
+			}
+			d.Components[i] = c
+		}
+	} else {
+		d.Components = []QuotaFilterComponent{}
+	}
+
+	// Strict
+	strict, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	d.Strict = strict
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *QuotaFilterComponent) encode(pe packetEncoder) error {
+	// EntityType
+	if err := pe.putString(string(d.EntityType)); err != nil {
+		return err
+	}
+
+	// MatchType
+	pe.putInt8(int8(d.MatchType))
+
+	// Match
+	if d.MatchType == QuotaMatchAny {
+		if err := pe.putNullableString(nil); err != nil {
+			return err
+		}
+	} else if d.MatchType == QuotaMatchDefault {
+		if err := pe.putNullableString(nil); err != nil {
+			return err
+		}
+	} else {
+		if err := pe.putString(d.Match); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (d *QuotaFilterComponent) decode(pd packetDecoder, version int16) error {
+	// EntityType
+	entityType, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	d.EntityType = QuotaEntityType(entityType)
+
+	// MatchType
+	matchType, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	d.MatchType = QuotaMatchType(matchType)
+
+	// Match
+	match, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	if match != nil {
+		d.Match = *match
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DescribeClientQuotasRequest) key() int16 {
+	return apiKeyDescribeClientQuotas
+}
+
+func (d *DescribeClientQuotasRequest) version() int16 {
+	return d.Version
+}
+
+func (d *DescribeClientQuotasRequest) headerVersion() int16 {
+	if d.Version >= 1 {
+		return 2
+	}
+
+	return 1
+}
+
+func (d *DescribeClientQuotasRequest) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeClientQuotasRequest) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DescribeClientQuotasRequest) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_8_0_0
+	case 0:
+		return V2_6_0_0
+	default:
+		return V2_8_0_0
+	}
+}
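A sketch of building a strict, exact-match filter for a single user, assuming the QuotaEntityUser and QuotaMatchExact constants from sarama's quota type definitions:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Strict=true asks only for quota entities whose components are fully
	// specified by the filter, i.e. exactly user "alice".
	components := []sarama.QuotaFilterComponent{{
		EntityType: sarama.QuotaEntityUser,
		MatchType:  sarama.QuotaMatchExact,
		Match:      "alice",
	}}
	req := sarama.NewDescribeClientQuotasRequest(sarama.V2_8_0_0, components, true)
	fmt.Println(req.Version) // 1
}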
diff --git a/vendor/github.com/IBM/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go
new file mode 100644
index 0000000..4cf9e5b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go
@@ -0,0 +1,285 @@
+package sarama
+
+import (
+	"time"
+)
+
+// DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries]
+//   throttle_time_ms => INT32
+//   error_code => INT16
+//   error_message => NULLABLE_STRING
+//   entries => [entity] [values]
+//     entity => entity_type entity_name
+//       entity_type => STRING
+//       entity_name => NULLABLE_STRING
+//     values => key value
+//       key => STRING
+//       value => FLOAT64
+// DescribeClientQuotas Response (Version: 1) => throttle_time_ms error_code error_message [entries] _tagged_fields
+//   throttle_time_ms => INT32
+//   error_code => INT16
+//   error_message => COMPACT_NULLABLE_STRING
+//   entries => [entity] [values] _tagged_fields
+//     entity => entity_type entity_name _tagged_fields
+//       entity_type => COMPACT_STRING
+//       entity_name => COMPACT_NULLABLE_STRING
+//     values => key value _tagged_fields
+//       key => COMPACT_STRING
+//       value => FLOAT64
+
+type DescribeClientQuotasResponse struct {
+	Version      int16
+	ThrottleTime time.Duration               // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	ErrorCode    KError                      // The error code, or `0` if the quota description succeeded.
+	ErrorMsg     *string                     // The error message, or `null` if the quota description succeeded.
+	Entries      []DescribeClientQuotasEntry // A result entry.
+}
+
+func (d *DescribeClientQuotasResponse) setVersion(v int16) {
+	d.Version = v
+}
+
+type DescribeClientQuotasEntry struct {
+	Entity []QuotaEntityComponent // The quota entity description.
+	Values map[string]float64     // The quota values for the entity.
+}
+
+type QuotaEntityComponent struct {
+	EntityType QuotaEntityType
+	MatchType  QuotaMatchType
+	Name       string
+}
+
+func (d *DescribeClientQuotasResponse) encode(pe packetEncoder) error {
+	// ThrottleTime
+	pe.putDurationMs(d.ThrottleTime)
+
+	// ErrorCode
+	pe.putKError(d.ErrorCode)
+
+	// ErrorMsg
+	if err := pe.putNullableString(d.ErrorMsg); err != nil {
+		return err
+	}
+
+	// Entries
+	if err := pe.putArrayLength(len(d.Entries)); err != nil {
+		return err
+	}
+	for _, e := range d.Entries {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DescribeClientQuotasResponse) decode(pd packetDecoder, version int16) (err error) {
+	if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	// ErrorCode
+	d.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	// ErrorMsg
+	errMsg, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	d.ErrorMsg = errMsg
+
+	// Entries
+	entryCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if entryCount > 0 {
+		d.Entries = make([]DescribeClientQuotasEntry, entryCount)
+		for i := range d.Entries {
+			e := DescribeClientQuotasEntry{}
+			if err = e.decode(pd, version); err != nil {
+				return err
+			}
+			d.Entries[i] = e
+		}
+	} else {
+		d.Entries = []DescribeClientQuotasEntry{}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DescribeClientQuotasEntry) encode(pe packetEncoder) error {
+	// Entity
+	if err := pe.putArrayLength(len(d.Entity)); err != nil {
+		return err
+	}
+	for _, e := range d.Entity {
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	// Values
+	if err := pe.putArrayLength(len(d.Values)); err != nil {
+		return err
+	}
+	for key, value := range d.Values {
+		// key
+		if err := pe.putString(key); err != nil {
+			return err
+		}
+		// value
+		pe.putFloat64(value)
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (d *DescribeClientQuotasEntry) decode(pd packetDecoder, version int16) error {
+	// Entity
+	componentCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if componentCount > 0 {
+		d.Entity = make([]QuotaEntityComponent, componentCount)
+		for i := 0; i < componentCount; i++ {
+			component := QuotaEntityComponent{}
+			if err := component.decode(pd, version); err != nil {
+				return err
+			}
+			d.Entity[i] = component
+		}
+	} else {
+		d.Entity = []QuotaEntityComponent{}
+	}
+
+	// Values
+	valueCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if valueCount > 0 {
+		d.Values = make(map[string]float64, valueCount)
+		for i := 0; i < valueCount; i++ {
+			// key
+			key, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			// value
+			value, err := pd.getFloat64()
+			if err != nil {
+				return err
+			}
+			d.Values[key] = value
+			_, err = pd.getEmptyTaggedFieldArray()
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		d.Values = map[string]float64{}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (c *QuotaEntityComponent) encode(pe packetEncoder) error {
+	// entity_type
+	if err := pe.putString(string(c.EntityType)); err != nil {
+		return err
+	}
+	// entity_name
+	if c.MatchType == QuotaMatchDefault {
+		if err := pe.putNullableString(nil); err != nil {
+			return err
+		}
+	} else {
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (c *QuotaEntityComponent) decode(pd packetDecoder, version int16) error {
+	// entity_type
+	entityType, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.EntityType = QuotaEntityType(entityType)
+
+	// entity_name
+	entityName, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	if entityName == nil {
+		c.MatchType = QuotaMatchDefault
+	} else {
+		c.MatchType = QuotaMatchExact
+		c.Name = *entityName
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (d *DescribeClientQuotasResponse) key() int16 {
+	return apiKeyDescribeClientQuotas
+}
+
+func (d *DescribeClientQuotasResponse) version() int16 {
+	return d.Version
+}
+
+func (d *DescribeClientQuotasResponse) headerVersion() int16 {
+	if d.Version >= 1 {
+		return 1
+	}
+
+	return 0
+}
+
+func (d *DescribeClientQuotasResponse) isValidVersion() bool {
+	return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeClientQuotasResponse) isFlexible() bool {
+	return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DescribeClientQuotasResponse) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion {
+	switch d.Version {
+	case 1:
+		return V2_8_0_0
+	case 0:
+		return V2_6_0_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+func (r *DescribeClientQuotasResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go
new file mode 100644
index 0000000..d42e271
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_configs_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type DescribeConfigsRequest struct {
+	Version         int16
+	Resources       []*ConfigResource
+	IncludeSynonyms bool
+}
+
+func (r *DescribeConfigsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+type ConfigResource struct {
+	Type        ConfigResourceType
+	Name        string
+	ConfigNames []string
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		pe.putInt8(int8(c.Type))
+		if err := pe.putString(c.Name); err != nil {
+			return err
+		}
+
+		if len(c.ConfigNames) == 0 {
+			pe.putInt32(-1)
+			continue
+		}
+		if err := pe.putStringArray(c.ConfigNames); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putBool(r.IncludeSynonyms)
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ConfigResource, n)
+
+	for i := 0; i < n; i++ {
+		r.Resources[i] = &ConfigResource{}
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Type = ConfigResourceType(t)
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		r.Resources[i].Name = name
+
+		confLength, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if confLength == -1 {
+			continue
+		}
+
+		cfnames := make([]string, confLength)
+		for i := 0; i < confLength; i++ {
+			s, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			cfnames[i] = s
+		}
+		r.Resources[i].ConfigNames = cfnames
+	}
+	r.Version = version
+	if r.Version >= 1 {
+		b, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeSynonyms = b
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+	return apiKeyDescribeConfigs
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *DescribeConfigsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V1_1_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
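Leaving ConfigNames empty is meaningful here: encode writes -1 (a null array), which asks the broker for every config on the resource rather than none of them. A sketch, assuming the TopicResource constant from sarama's ConfigResourceType definitions:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	req := &sarama.DescribeConfigsRequest{
		Version: 1,
		Resources: []*sarama.ConfigResource{{
			Type: sarama.TopicResource,
			Name: "orders",
			// ConfigNames left nil: the broker returns all topic configs.
		}},
		IncludeSynonyms: true, // only encoded for v1+
	}
	fmt.Println(len(req.Resources))
}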
diff --git a/vendor/github.com/IBM/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go
new file mode 100644
index 0000000..c702f02
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_configs_response.go
@@ -0,0 +1,352 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+type ConfigSource int8
+
+func (s ConfigSource) String() string {
+	switch s {
+	case SourceUnknown:
+		return "Unknown"
+	case SourceTopic:
+		return "Topic"
+	case SourceDynamicBroker:
+		return "DynamicBroker"
+	case SourceDynamicDefaultBroker:
+		return "DynamicDefaultBroker"
+	case SourceStaticBroker:
+		return "StaticBroker"
+	case SourceDefault:
+		return "Default"
+	}
+	return fmt.Sprintf("Source Invalid: %d", int(s))
+}
+
+const (
+	SourceUnknown ConfigSource = iota
+	SourceTopic
+	SourceDynamicBroker
+	SourceDynamicDefaultBroker
+	SourceStaticBroker
+	SourceDefault
+)
+
+type DescribeConfigError struct {
+	Err    KError
+	ErrMsg string
+}
+
+func (c *DescribeConfigError) Error() string {
+	text := c.Err.Error()
+	if c.ErrMsg != "" {
+		text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
+	}
+	return text
+}
+
+type DescribeConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*ResourceResponse
+}
+
+func (r *DescribeConfigsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type ResourceResponse struct {
+	ErrorCode int16
+	ErrorMsg  string
+	Type      ConfigResourceType
+	Name      string
+	Configs   []*ConfigEntry
+}
+
+type ConfigEntry struct {
+	Name      string
+	Value     string
+	ReadOnly  bool
+	Default   bool
+	Source    ConfigSource
+	Sensitive bool
+	Synonyms  []*ConfigSynonym
+}
+
+type ConfigSynonym struct {
+	ConfigName  string
+	ConfigValue string
+	Source      ConfigSource
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+	pe.putDurationMs(r.ThrottleTime)
+	if err = pe.putArrayLength(len(r.Resources)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Resources {
+		if err = c.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Resources = make([]*ResourceResponse, n)
+	for i := 0; i < n; i++ {
+		rr := &ResourceResponse{}
+		if err := rr.decode(pd, version); err != nil {
+			return err
+		}
+		r.Resources[i] = rr
+	}
+
+	return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+	return apiKeyDescribeConfigs
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeConfigsResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *DescribeConfigsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V1_1_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *DescribeConfigsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt16(r.ErrorCode)
+
+	if err = pe.putString(r.ErrorMsg); err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(r.Type))
+
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putArrayLength(len(r.Configs)); err != nil {
+		return err
+	}
+
+	for _, c := range r.Configs {
+		if err = c.encode(pe, version); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+	ec, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.ErrorCode = ec
+
+	em, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.ErrorMsg = em
+
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	r.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Configs = make([]*ConfigEntry, n)
+	for i := 0; i < n; i++ {
+		c := &ConfigEntry{}
+		if err := c.decode(pd, version); err != nil {
+			return err
+		}
+		r.Configs[i] = c
+	}
+	return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+	if err = pe.putString(r.Name); err != nil {
+		return err
+	}
+
+	if err = pe.putString(r.Value); err != nil {
+		return err
+	}
+
+	pe.putBool(r.ReadOnly)
+
+	if version <= 0 {
+		pe.putBool(r.Default)
+		pe.putBool(r.Sensitive)
+	} else {
+		pe.putInt8(int8(r.Source))
+		pe.putBool(r.Sensitive)
+
+		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+			return err
+		}
+		for _, c := range r.Synonyms {
+			if err = c.encode(pe, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+	if version == 0 {
+		r.Source = SourceUnknown
+	}
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Name = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Value = value
+
+	read, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.ReadOnly = read
+
+	if version == 0 {
+		defaultB, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.Default = defaultB
+		if defaultB {
+			r.Source = SourceDefault
+		}
+	} else {
+		source, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Source = ConfigSource(source)
+		r.Default = r.Source == SourceDefault
+	}
+
+	sensitive, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.Sensitive = sensitive
+
+	if version > 0 {
+		n, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Synonyms = make([]*ConfigSynonym, n)
+
+		for i := 0; i < n; i++ {
+			s := &ConfigSynonym{}
+			if err := s.decode(pd, version); err != nil {
+				return err
+			}
+			r.Synonyms[i] = s
+		}
+	}
+	return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+	err = pe.putString(c.ConfigName)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putString(c.ConfigValue)
+	if err != nil {
+		return err
+	}
+
+	pe.putInt8(int8(c.Source))
+
+	return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigName = name
+
+	value, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	c.ConfigValue = value
+
+	source, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	c.Source = ConfigSource(source)
+	return nil
+}
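ConfigSource values print through the String method above, and anything outside the known constants reports itself as invalid, which is convenient when logging responses from newer brokers:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	for _, s := range []sarama.ConfigSource{
		sarama.SourceTopic,
		sarama.SourceStaticBroker,
		sarama.ConfigSource(42), // unknown value
	} {
		fmt.Println(s) // Topic, StaticBroker, Source Invalid: 42
	}
}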
diff --git a/vendor/github.com/IBM/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go
new file mode 100644
index 0000000..b2fb07e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_groups_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+	Version                     int16
+	Groups                      []string
+	IncludeAuthorizedOperations bool
+}
+
+func (r *DescribeGroupsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+	if err := pe.putStringArray(r.Groups); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		pe.putBool(r.IncludeAuthorizedOperations)
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.Groups, err = pd.getStringArray()
+	if err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.IncludeAuthorizedOperations, err = pd.getBool(); err != nil {
+			return err
+		}
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+	return apiKeyDescribeGroups
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeGroupsRequest) headerVersion() int16 {
+	if r.Version >= 5 {
+		return 2
+	}
+	return 1
+}
+
+func (r *DescribeGroupsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *DescribeGroupsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeGroupsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+	r.Groups = append(r.Groups, group)
+}
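IncludeAuthorizedOperations is only written for v3 and above, matching the KIP-430 change that added authorized-operations bitfields to DescribeGroups in Kafka 2.3. A sketch:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// Version 3 is the first that carries the flag; requiredVersion maps it
	// to V2_3_0_0 in the switch above.
	req := &sarama.DescribeGroupsRequest{Version: 3, IncludeAuthorizedOperations: true}
	req.AddGroup("orders-consumer")
	fmt.Println(req.Groups)
}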
diff --git a/vendor/github.com/IBM/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go
new file mode 100644
index 0000000..dcc274d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_groups_response.go
@@ -0,0 +1,306 @@
+package sarama
+
+import "time"
+
+type DescribeGroupsResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTimeMs contains the duration in milliseconds for which the
+	// request was throttled due to a quota violation, or zero if the request
+	// did not violate any quota.
+	ThrottleTimeMs int32
+	// Groups contains each described group.
+	Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+
+	for _, block := range r.Groups {
+		if err := block.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if numGroups, err := pd.getArrayLength(); err != nil {
+		return err
+	} else if numGroups > 0 {
+		r.Groups = make([]*GroupDescription, numGroups)
+		for i := 0; i < numGroups; i++ {
+			block := &GroupDescription{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.Groups[i] = block
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+	return apiKeyDescribeGroups
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeGroupsResponse) headerVersion() int16 {
+	if r.Version >= 5 {
+		return 1
+	}
+	return 0
+}
+
+func (r *DescribeGroupsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *DescribeGroupsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeGroupsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 5
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_4_0_0
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *DescribeGroupsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// GroupDescription contains the description of a single group.
+type GroupDescription struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Err contains the describe error as the KError type.
+	Err KError
+	// ErrorCode contains the describe error, or 0 if there was no error.
+	ErrorCode int16
+	// GroupId contains the group ID string.
+	GroupId string
+	// State contains the group state string, or the empty string.
+	State string
+	// ProtocolType contains the group protocol type, or the empty string.
+	ProtocolType string
+	// Protocol contains the group protocol data, or the empty string.
+	Protocol string
+	// Members contains the group members.
+	Members map[string]*GroupMemberDescription
+	// AuthorizedOperations contains a 32-bit bitfield to represent authorized
+	// operations for this group.
+	AuthorizedOperations int32
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder, version int16) (err error) {
+	gd.Version = version
+	pe.putInt16(gd.ErrorCode)
+
+	if err := pe.putString(gd.GroupId); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.State); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.ProtocolType); err != nil {
+		return err
+	}
+	if err := pe.putString(gd.Protocol); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(gd.Members)); err != nil {
+		return err
+	}
+
+	for _, block := range gd.Members {
+		if err := block.encode(pe, gd.Version); err != nil {
+			return err
+		}
+	}
+
+	if gd.Version >= 3 {
+		pe.putInt32(gd.AuthorizedOperations)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder, version int16) (err error) {
+	gd.Version = version
+	if gd.ErrorCode, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	gd.Err = KError(gd.ErrorCode)
+
+	if gd.GroupId, err = pd.getString(); err != nil {
+		return err
+	}
+	if gd.State, err = pd.getString(); err != nil {
+		return err
+	}
+	if gd.ProtocolType, err = pd.getString(); err != nil {
+		return err
+	}
+	if gd.Protocol, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if numMembers, err := pd.getArrayLength(); err != nil {
+		return err
+	} else if numMembers > 0 {
+		gd.Members = make(map[string]*GroupMemberDescription, numMembers)
+		for i := 0; i < numMembers; i++ {
+			block := &GroupMemberDescription{}
+			if err := block.decode(pd, gd.Version); err != nil {
+				return err
+			}
+			gd.Members[block.MemberId] = block
+		}
+	}
+
+	if gd.Version >= 3 {
+		if gd.AuthorizedOperations, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+// GroupMemberDescription contains the description of a single group member.
+type GroupMemberDescription struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// MemberId contains the member ID assigned by the group coordinator.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance
+	// provided by the end user.
+	GroupInstanceId *string
+	// ClientId contains the client ID used in the member's latest join group
+	// request.
+	ClientId string
+	// ClientHost contains the client host.
+	ClientHost string
+	// MemberMetadata contains the metadata corresponding to the current group
+	// protocol in use.
+	MemberMetadata []byte
+	// MemberAssignment contains the current assignment provided by the group
+	// leader.
+	MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder, version int16) (err error) {
+	gmd.Version = version
+	if err := pe.putString(gmd.MemberId); err != nil {
+		return err
+	}
+	if gmd.Version >= 4 {
+		if err := pe.putNullableString(gmd.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+	if err := pe.putString(gmd.ClientId); err != nil {
+		return err
+	}
+	if err := pe.putString(gmd.ClientHost); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+		return err
+	}
+	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder, version int16) (err error) {
+	gmd.Version = version
+	if gmd.MemberId, err = pd.getString(); err != nil {
+		return err
+	}
+	if gmd.Version >= 4 {
+		if gmd.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+	if gmd.ClientId, err = pd.getString(); err != nil {
+		return err
+	}
+	if gmd.ClientHost, err = pd.getString(); err != nil {
+		return err
+	}
+	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+		return err
+	}
+	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	if len(gmd.MemberAssignment) == 0 {
+		return nil, nil
+	}
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(gmd.MemberAssignment, assignment, nil)
+	return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+	if len(gmd.MemberMetadata) == 0 {
+		return nil, nil
+	}
+	metadata := new(ConsumerGroupMemberMetadata)
+	err := decode(gmd.MemberMetadata, metadata, nil)
+	return metadata, err
+}
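+
+// Example (illustrative sketch): walking a decoded *DescribeGroupsResponse
+// named resp and decoding each member's current assignment.
+//
+//	for _, group := range resp.Groups {
+//		for memberID, member := range group.Members {
+//			assignment, err := member.GetMemberAssignment()
+//			if err != nil || assignment == nil {
+//				continue
+//			}
+//			fmt.Println(memberID, assignment.Topics)
+//		}
+//	}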
diff --git a/vendor/github.com/IBM/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..32354f6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go
@@ -0,0 +1,124 @@
+package sarama
+
+// DescribeLogDirsRequest is a describe request to get partitions' log size
+type DescribeLogDirsRequest struct {
+	// Versions 0 and 1 are identical.
+	// The version number was bumped to indicate that, on quota violation, brokers send out responses before throttling.
+	Version int16
+
+	// If this is an empty array, all topics will be queried
+	DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+func (r *DescribeLogDirsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+	Topic        string
+	PartitionIDs []int32
+}
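+
+// Example (illustrative sketch): an empty DescribeTopics queries every topic,
+// while listing topics restricts the response to those partitions.
+//
+//	allDirs := &DescribeLogDirsRequest{Version: 1}
+//	oneTopic := &DescribeLogDirsRequest{
+//		Version: 1,
+//		DescribeTopics: []DescribeLogDirsRequestTopic{
+//			{Topic: "events", PartitionIDs: []int32{0, 1, 2}},
+//		},
+//	}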
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+	length := len(r.DescribeTopics)
+	if length == 0 {
+		// In order to query all topics we must send null
+		length = -1
+	}
+	if err := pe.putArrayLength(length); err != nil {
+		return err
+	}
+
+	for _, d := range r.DescribeTopics {
+		if err := pe.putString(d.Topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n == -1 {
+		n = 0
+	}
+
+	topics := make([]DescribeLogDirsRequestTopic, n)
+	for i := 0; i < n; i++ {
+		topics[i] = DescribeLogDirsRequestTopic{}
+
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		topics[i].Topic = topic
+
+		pIDs, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		topics[i].PartitionIDs = pIDs
+		_, err = pd.getEmptyTaggedFieldArray()
+		if err != nil {
+			return err
+		}
+	}
+	r.DescribeTopics = topics
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+	return apiKeyDescribeLogDirs
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsRequest) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 2
+	}
+	return 1
+}
+
+func (r *DescribeLogDirsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *DescribeLogDirsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeLogDirsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V3_3_0_0
+	case 3:
+		return V3_2_0_0
+	case 2:
+		return V2_6_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V1_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..ebae414
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go
@@ -0,0 +1,306 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+	ThrottleTime time.Duration
+
+	// Versions 0 and 1 are identical.
+	// The version number was bumped to indicate that, on quota violation, brokers send out responses before throttling.
+	Version int16
+
+	LogDirs []DescribeLogDirsResponseDirMetadata
+
+	ErrorCode KError
+}
+
+func (r *DescribeLogDirsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+
+	if r.Version >= 3 {
+		pe.putKError(r.ErrorCode)
+	}
+
+	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+		return err
+	}
+
+	for _, dir := range r.LogDirs {
+		if err := dir.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	if version >= 3 {
+		r.ErrorCode, err = pd.getKError()
+		if err != nil {
+			return err
+		}
+	}
+
+	// Decode array of DescribeLogDirsResponseDirMetadata
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+	for i := 0; i < n; i++ {
+		dir := DescribeLogDirsResponseDirMetadata{}
+		if err := dir.decode(pd, version); err != nil {
+			return err
+		}
+		r.LogDirs[i] = dir
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+	return apiKeyDescribeLogDirs
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeLogDirsResponse) headerVersion() int16 {
+	if r.Version >= 2 {
+		return 1
+	}
+	return 0
+}
+
+func (r *DescribeLogDirsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *DescribeLogDirsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeLogDirsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V3_3_0_0
+	case 3:
+		return V3_2_0_0
+	case 2:
+		return V2_6_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V1_0_0_0
+	}
+}
+
+func (r *DescribeLogDirsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+	ErrorCode KError
+
+	// The absolute log directory path
+	Path   string
+	Topics []DescribeLogDirsResponseTopic
+
+	TotalBytes  int64
+	UsableBytes int64
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder, version int16) error {
+	pe.putKError(r.ErrorCode)
+
+	err := pe.putString(r.Path)
+	if err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Topics)); err != nil {
+		return err
+	}
+	for _, topic := range r.Topics {
+		if err := topic.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	if version >= 4 {
+		pe.putInt64(r.TotalBytes)
+		pe.putInt64(r.UsableBytes)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) (err error) {
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	path, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Path = path
+
+	// Decode array of DescribeLogDirsResponseTopic
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]DescribeLogDirsResponseTopic, n)
+	for i := 0; i < n; i++ {
+		t := DescribeLogDirsResponseTopic{}
+
+		if err := t.decode(pd, version); err != nil {
+			return err
+		}
+
+		r.Topics[i] = t
+	}
+
+	if version >= 4 {
+		totalBytes, err := pd.getInt64()
+		if err != nil {
+			return err
+		}
+		r.TotalBytes = totalBytes
+		usableBytes, err := pd.getInt64()
+		if err != nil {
+			return err
+		}
+		r.UsableBytes = usableBytes
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
+type DescribeLogDirsResponseTopic struct {
+	Topic      string
+	Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder, version int16) error {
+	if err := pe.putString(r.Topic); err != nil {
+		return err
+	}
+	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+		return err
+	}
+	for _, partition := range r.Partitions {
+		if err := partition.encode(pe, version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	r.Topic = t
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+	for i := 0; i < n; i++ {
+		p := DescribeLogDirsResponsePartition{}
+		if err := p.decode(pd, version); err != nil {
+			return err
+		}
+		r.Partitions[i] = p
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+	PartitionID int32
+
+	// The size of the log segments of the partition in bytes.
+	Size int64
+
+	// The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+	// current replica's LEO (if it is the future log for the partition)
+	OffsetLag int64
+
+	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+	// the replica in the future.
+	IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder, version int16) error {
+	isFlexible := version >= 2
+	pe.putInt32(r.PartitionID)
+	pe.putInt64(r.Size)
+	pe.putInt64(r.OffsetLag)
+	pe.putBool(r.IsTemporary)
+	if isFlexible {
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+	pID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	r.PartitionID = pID
+
+	size, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.Size = size
+
+	lag, err := pd.getInt64()
+	if err != nil {
+		return err
+	}
+	r.OffsetLag = lag
+
+	isTemp, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+	r.IsTemporary = isTemp
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
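+
+// Example (illustrative sketch): aggregating the on-disk size per log
+// directory from a decoded *DescribeLogDirsResponse named resp.
+//
+//	for _, dir := range resp.LogDirs {
+//		var total int64
+//		for _, topic := range dir.Topics {
+//			for _, p := range topic.Partitions {
+//				total += p.Size
+//			}
+//		}
+//		fmt.Printf("%s: %d bytes\n", dir.Path, total)
+//	}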
diff --git a/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go
new file mode 100644
index 0000000..4de3dcf
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go
@@ -0,0 +1,86 @@
+package sarama
+
+// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
+type DescribeUserScramCredentialsRequest struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	// If this is an empty array, all users will be queried
+	DescribeUsers []DescribeUserScramCredentialsRequestUser
+}
+
+func (r *DescribeUserScramCredentialsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
+type DescribeUserScramCredentialsRequestUser struct {
+	Name string
+}
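+
+// Example (illustrative sketch): leaving DescribeUsers empty asks the broker
+// to describe every user, while naming users narrows the query.
+//
+//	allUsers := &DescribeUserScramCredentialsRequest{}
+//	oneUser := &DescribeUserScramCredentialsRequest{
+//		DescribeUsers: []DescribeUserScramCredentialsRequestUser{
+//			{Name: "alice"},
+//		},
+//	}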
+
+func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(r.DescribeUsers)); err != nil {
+		return err
+	}
+	for _, d := range r.DescribeUsers {
+		if err := pe.putString(d.Name); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == -1 {
+		n = 0
+	}
+
+	r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
+	for i := 0; i < n; i++ {
+		r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
+		if r.DescribeUsers[i].Name, err = pd.getString(); err != nil {
+			return err
+		}
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeUserScramCredentialsRequest) key() int16 {
+	return apiKeyDescribeUserScramCredentials
+}
+
+func (r *DescribeUserScramCredentialsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DescribeUserScramCredentialsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeUserScramCredentialsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go
new file mode 100644
index 0000000..6852e0c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go
@@ -0,0 +1,186 @@
+package sarama
+
+import "time"
+
+type ScramMechanismType int8
+
+const (
+	SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
+	SCRAM_MECHANISM_SHA_256                           // 1
+	SCRAM_MECHANISM_SHA_512                           // 2
+)
+
+func (s ScramMechanismType) String() string {
+	switch s {
+	case SCRAM_MECHANISM_SHA_256:
+		return SASLTypeSCRAMSHA256
+	case SCRAM_MECHANISM_SHA_512:
+		return SASLTypeSCRAMSHA512
+	default:
+		return "Unknown"
+	}
+}
+
+type DescribeUserScramCredentialsResponse struct {
+	// Only version 0 is currently supported
+	Version int16
+
+	ThrottleTime time.Duration
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	Results []*DescribeUserScramCredentialsResult
+}
+
+func (r *DescribeUserScramCredentialsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type DescribeUserScramCredentialsResult struct {
+	User string
+
+	ErrorCode    KError
+	ErrorMessage *string
+
+	CredentialInfos []*UserScramCredentialsResponseInfo
+}
+
+type UserScramCredentialsResponseInfo struct {
+	Mechanism  ScramMechanismType
+	Iterations int32
+}
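+
+// Example (illustrative sketch): reading the SCRAM mechanisms and iteration
+// counts per user from a decoded *DescribeUserScramCredentialsResponse named
+// resp.
+//
+//	for _, result := range resp.Results {
+//		if result.ErrorCode != ErrNoError {
+//			continue
+//		}
+//		for _, info := range result.CredentialInfos {
+//			fmt.Println(result.User, info.Mechanism, info.Iterations)
+//		}
+//	}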
+
+func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(r.ThrottleTime)
+
+	pe.putKError(r.ErrorCode)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Results)); err != nil {
+		return err
+	}
+	for _, u := range r.Results {
+		if err := pe.putString(u.User); err != nil {
+			return err
+		}
+		pe.putInt16(int16(u.ErrorCode))
+		if err := pe.putNullableString(u.ErrorMessage); err != nil {
+			return err
+		}
+
+		if err := pe.putArrayLength(len(u.CredentialInfos)); err != nil {
+			return err
+		}
+		for _, c := range u.CredentialInfos {
+			pe.putInt8(int8(c.Mechanism))
+			pe.putInt32(c.Iterations)
+			pe.putEmptyTaggedFieldArray()
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	numUsers, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numUsers > 0 {
+		r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
+		for i := 0; i < numUsers; i++ {
+			r.Results[i] = &DescribeUserScramCredentialsResult{}
+			if r.Results[i].User, err = pd.getString(); err != nil {
+				return err
+			}
+
+			r.Results[i].ErrorCode, err = pd.getKError()
+			if err != nil {
+				return err
+			}
+			if r.Results[i].ErrorMessage, err = pd.getNullableString(); err != nil {
+				return err
+			}
+
+			numCredentialInfos, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
+			for j := 0; j < numCredentialInfos; j++ {
+				r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
+				scramMechanism, err := pd.getInt8()
+				if err != nil {
+					return err
+				}
+				r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
+				if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
+					return err
+				}
+				if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+					return err
+				}
+			}
+
+			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *DescribeUserScramCredentialsResponse) key() int16 {
+	return apiKeyDescribeUserScramCredentials
+}
+
+func (r *DescribeUserScramCredentialsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
+	return 2
+}
+
+func (r *DescribeUserScramCredentialsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *DescribeUserScramCredentialsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeUserScramCredentialsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+	return V2_7_0_0
+}
+
+func (r *DescribeUserScramCredentialsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml
new file mode 100644
index 0000000..e2acb38
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+  - go:
+      version: '1.17.6'
+
+commands:
+  test:
+    run: make test
+    desc: 'run unit tests'
diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml
new file mode 100644
index 0000000..3d23396
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/docker-compose.yml
@@ -0,0 +1,175 @@
+x-zookeeper-base: &zookeeper-base
+  image: 'docker.io/library/zookeeper:3.7.2'
+  init: true
+  restart: always
+  profiles:
+    - zookeeper
+  environment: &zookeeper-base-env
+    ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888'
+    ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888'
+    ZOO_INIT_LIMIT: '10'
+    ZOO_SYNC_LIMIT: '5'
+    ZOO_MAX_CLIENT_CNXNS: '0'
+    ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok'
+
+x-kafka-base: &kafka-base
+  image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.9.1}'
+  init: true
+  build:
+    context: .
+    dockerfile: Dockerfile.kafka
+    args:
+      KAFKA_VERSION: ${KAFKA_VERSION:-3.9.1}
+      SCALA_VERSION: ${SCALA_VERSION:-2.13}
+  depends_on:
+    - toxiproxy
+  restart: always
+  environment: &kafka-base-env
+    KAFKA_VERSION: ${KAFKA_VERSION:-3.9.1}
+    KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2'
+    KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2'
+    KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2'
+    KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000'
+    KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000'
+    KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+    KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+    KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false'
+    KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+    KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions"
+    KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+    KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT,CONTROLLER:PLAINTEXT'
+    # ZooKeeper-specific
+    KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+    # KRaft-specific
+    KAFKA_CFG_PROCESS_ROLES: 'broker,controller'
+    KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: '1@kafka-1:9093,2@kafka-2:9093,3@kafka-3:9093,4@kafka-4:9093,5@kafka-5:9093'
+    KAFKA_CFG_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+    KAFKA_CFG_CLUSTER_ID: 'cDZEekk4T3hTNGVlNzB3LUtUbkxaQQo='
+
+services:
+  zookeeper-1:
+    <<: *zookeeper-base
+    container_name: 'zookeeper-1'
+    environment:
+      <<: *zookeeper-base-env
+      ZOO_MY_ID: '1'
+
+  zookeeper-2:
+    <<: *zookeeper-base
+    container_name: 'zookeeper-2'
+    environment:
+      <<: *zookeeper-base-env
+      ZOO_MY_ID: '2'
+
+  zookeeper-3:
+    <<: *zookeeper-base
+    container_name: 'zookeeper-3'
+    environment:
+      <<: *zookeeper-base-env
+      ZOO_MY_ID: '3'
+
+  kafka-1:
+    <<: *kafka-base
+    container_name: 'kafka-1'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-1:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '1'
+      KAFKA_CFG_NODE_ID: '1'
+      KAFKA_CFG_BROKER_RACK: '1'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
+
+  kafka-2:
+    <<: *kafka-base
+    container_name: 'kafka-2'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-2:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '2'
+      KAFKA_CFG_NODE_ID: '2'
+      KAFKA_CFG_BROKER_RACK: '2'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
+
+  kafka-3:
+    <<: *kafka-base
+    container_name: 'kafka-3'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-3:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '3'
+      KAFKA_CFG_NODE_ID: '3'
+      KAFKA_CFG_BROKER_RACK: '3'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
+
+  kafka-4:
+    <<: *kafka-base
+    container_name: 'kafka-4'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-4:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '4'
+      KAFKA_CFG_NODE_ID: '4'
+      KAFKA_CFG_BROKER_RACK: '4'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
+
+  kafka-5:
+    <<: *kafka-base
+    container_name: 'kafka-5'
+    healthcheck:
+      test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-5:9091']
+      interval: 15s
+      timeout: 15s
+      retries: 10
+      start_period: 360s
+    environment:
+      <<: *kafka-base-env
+      KAFKA_CFG_BROKER_ID: '5'
+      KAFKA_CFG_NODE_ID: '5'
+      KAFKA_CFG_BROKER_RACK: '5'
+      KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095,CONTROLLER://:9093'
+      KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
+
+  toxiproxy:
+    container_name: 'toxiproxy'
+    image: 'ghcr.io/shopify/toxiproxy:2.12.0'
+    init: true
+    healthcheck:
+      test: ['CMD', '/toxiproxy-cli', 'l']
+      interval: 15s
+      timeout: 15s
+      retries: 3
+      start_period: 30s
+    ports:
+      # The tests themselves actually start the proxies on these ports
+      - '29091:29091'
+      - '29092:29092'
+      - '29093:29093'
+      - '29094:29094'
+      - '29095:29095'
+
+      # This is the toxiproxy API port
+      - '8474:8474'
diff --git a/vendor/github.com/IBM/sarama/elect_leaders_request.go b/vendor/github.com/IBM/sarama/elect_leaders_request.go
new file mode 100644
index 0000000..01b9dd5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/elect_leaders_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type ElectLeadersRequest struct {
+	Version         int16
+	Type            ElectionType
+	TopicPartitions map[string][]int32
+	TimeoutMs       int32
+}
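+
+// Example (illustrative sketch): requesting a preferred-leader election for
+// two partitions of one hypothetical topic, with a 5s broker-side timeout.
+//
+//	req := &ElectLeadersRequest{
+//		Version:         2,
+//		Type:            PreferredElection,
+//		TopicPartitions: map[string][]int32{"events": {0, 1}},
+//		TimeoutMs:       5000,
+//	}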
+
+func (r *ElectLeadersRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ElectLeadersRequest) encode(pe packetEncoder) error {
+	if r.Version > 0 {
+		pe.putInt8(int8(r.Type))
+	}
+
+	if err := pe.putArrayLength(len(r.TopicPartitions)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.TopicPartitions {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putInt32(r.TimeoutMs)
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ElectLeadersRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version > 0 {
+		t, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Type = ElectionType(t)
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.TopicPartitions = make(map[string][]int32)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			partitions := make([]int32, partitionCount)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				partitions[j] = partition
+			}
+			r.TopicPartitions[topic] = partitions
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	r.TimeoutMs, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ElectLeadersRequest) key() int16 {
+	return apiKeyElectLeaders
+}
+
+func (r *ElectLeadersRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ElectLeadersRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *ElectLeadersRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ElectLeadersRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ElectLeadersRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *ElectLeadersRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/elect_leaders_response.go b/vendor/github.com/IBM/sarama/elect_leaders_response.go
new file mode 100644
index 0000000..90a3fab
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/elect_leaders_response.go
@@ -0,0 +1,161 @@
+package sarama
+
+import "time"
+
+type PartitionResult struct {
+	ErrorCode    KError
+	ErrorMessage *string
+}
+
+func (b *PartitionResult) encode(pe packetEncoder, version int16) error {
+	pe.putKError(b.ErrorCode)
+	if err := pe.putNullableString(b.ErrorMessage); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (b *PartitionResult) decode(pd packetDecoder, version int16) (err error) {
+	b.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+	b.ErrorMessage, err = pd.getNullableString()
+	if err != nil {
+		return err
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type ElectLeadersResponse struct {
+	Version                int16
+	ThrottleTimeMs         int32
+	ErrorCode              KError
+	ReplicaElectionResults map[string]map[int32]*PartitionResult
+}
+
+func (r *ElectLeadersResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ElectLeadersResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+
+	if r.Version > 0 {
+		pe.putKError(r.ErrorCode)
+	}
+
+	if err := pe.putArrayLength(len(r.ReplicaElectionResults)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.ReplicaElectionResults {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, result := range partitions {
+			pe.putInt32(partition)
+			if err := result.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ElectLeadersResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version > 0 {
+		r.ErrorCode, err = pd.getKError()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.ReplicaElectionResults = make(map[string]map[int32]*PartitionResult, numTopics)
+	for i := 0; i < numTopics; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numPartitions, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.ReplicaElectionResults[topic] = make(map[int32]*PartitionResult, numPartitions)
+		for j := 0; j < numPartitions; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			result := new(PartitionResult)
+			if err := result.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.ReplicaElectionResults[topic][partition] = result
+		}
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ElectLeadersResponse) key() int16 {
+	return apiKeyElectLeaders
+}
+
+func (r *ElectLeadersResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ElectLeadersResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *ElectLeadersResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ElectLeadersResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ElectLeadersResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (r *ElectLeadersResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *ElectLeadersResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/election_type.go b/vendor/github.com/IBM/sarama/election_type.go
new file mode 100644
index 0000000..01f3b65
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/election_type.go
@@ -0,0 +1,10 @@
+package sarama
+
+type ElectionType int8
+
+const (
+	// PreferredElection constant type
+	PreferredElection ElectionType = 0
+	// UncleanElection constant type
+	UncleanElection ElectionType = 1
+)
diff --git a/vendor/github.com/IBM/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go
new file mode 100644
index 0000000..ef60224
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/encoder_decoder.go
@@ -0,0 +1,134 @@
+package sarama
+
+import (
+	"fmt"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// encoder is the interface that wraps the basic encode method.
+// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+	encode(pe packetEncoder) error
+}
+
+type encoderWithHeader interface {
+	encoder
+	headerVersion() int16
+}
+
+// encode takes an encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+	if e == nil {
+		return nil, nil
+	}
+
+	var prepEnc prepEncoder
+	var realEnc realEncoder
+
+	err := e.encode(prepareFlexibleEncoder(&prepEnc, e))
+	if err != nil {
+		return nil, err
+	}
+
+	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+	}
+
+	realEnc.raw = make([]byte, prepEnc.length)
+	realEnc.registry = metricRegistry
+	err = e.encode(prepareFlexibleEncoder(&realEnc, e))
+	if err != nil {
+		return nil, err
+	}
+
+	return realEnc.raw, nil
+}
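+
+// Note: encode is deliberately two-pass. The first pass over a prepEncoder
+// only accumulates lengths, so the exact wire size is known before any bytes
+// are written; the second pass over a realEncoder then fills a buffer
+// allocated to exactly that size. A compressed sketch of the pattern:
+//
+//	var prep prepEncoder
+//	_ = e.encode(&prep)               // pass 1: measure
+//	buf := make([]byte, prep.length)  // allocate once, exactly
+//	real := realEncoder{raw: buf}
+//	_ = e.encode(&real)               // pass 2: write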
+
+// decoder is the interface that wraps the basic decode method.
+// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+	decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+	decode(pd packetDecoder, version int16) error
+}
+
+type flexibleVersion interface {
+	isFlexibleVersion(version int16) bool
+	isFlexible() bool
+}
+
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder, metricRegistry metrics.Registry) error {
+	if buf == nil {
+		return nil
+	}
+	helper := realDecoder{
+		raw:      buf,
+		registry: metricRegistry,
+	}
+	err := in.decode(&helper)
+	if err != nil {
+		return err
+	}
+
+	if helper.off != len(buf) {
+		return PacketDecodingError{fmt.Sprintf("invalid length: buf=%d decoded=%d %#v", len(buf), helper.off, in)}
+	}
+
+	return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16, metricRegistry metrics.Registry) error {
+	if buf == nil {
+		return nil
+	}
+
+	helper := prepareFlexibleDecoder(&realDecoder{
+		raw:      buf,
+		registry: metricRegistry,
+	}, in, version)
+	err := in.decode(helper, version)
+	if err != nil {
+		return err
+	}
+
+	if remaining := helper.remaining(); remaining != 0 {
+		return PacketDecodingError{
+			Info: fmt.Sprintf("invalid length len=%d remaining=%d", len(buf), remaining),
+		}
+	}
+
+	return nil
+}
+
+func prepareFlexibleDecoder(pd *realDecoder, in versionedDecoder, version int16) packetDecoder {
+	if flexibleDecoder, ok := in.(flexibleVersion); ok && flexibleDecoder.isFlexibleVersion(version) {
+		return &realFlexibleDecoder{pd}
+	}
+	return pd
+}
+
+func prepareFlexibleEncoder(pe packetEncoder, req encoder) packetEncoder {
+	if flexibleEncoder, ok := req.(flexibleVersion); ok && flexibleEncoder.isFlexible() {
+		switch e := pe.(type) {
+		case *prepEncoder:
+			return &prepFlexibleEncoder{e}
+		case *realEncoder:
+			return &realFlexibleEncoder{e}
+		default:
+			return pe
+		}
+	}
+	return pe
+}
+
+func downgradeFlexibleDecoder(pd packetDecoder) packetDecoder {
+	if f, ok := pd.(*realFlexibleDecoder); ok {
+		return f.realDecoder
+	}
+	return pd
+}
diff --git a/vendor/github.com/IBM/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go
new file mode 100644
index 0000000..1f0bf8c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/end_txn_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+type EndTxnRequest struct {
+	Version           int16
+	TransactionalID   string
+	ProducerID        int64
+	ProducerEpoch     int16
+	TransactionResult bool
+}
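+
+// Example (illustrative sketch): TransactionResult selects the outcome being
+// finalized, with true committing the transaction and false aborting it;
+// producerID and producerEpoch are assumed to come from InitProducerId.
+//
+//	req := &EndTxnRequest{
+//		Version:           1,
+//		TransactionalID:   "payments-producer-1",
+//		ProducerID:        producerID,
+//		ProducerEpoch:     producerEpoch,
+//		TransactionResult: true, // commit; false would abort
+//	}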
+
+func (a *EndTxnRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(a.TransactionalID); err != nil {
+		return err
+	}
+
+	pe.putInt64(a.ProducerID)
+
+	pe.putInt16(a.ProducerEpoch)
+
+	pe.putBool(a.TransactionResult)
+
+	return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+	if a.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if a.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if a.TransactionResult, err = pd.getBool(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+	return apiKeyEndTxn
+}
+
+func (a *EndTxnRequest) version() int16 {
+	return a.Version
+}
+
+func (r *EndTxnRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *EndTxnRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go
new file mode 100644
index 0000000..259f18b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/end_txn_response.go
@@ -0,0 +1,65 @@
+package sarama
+
+import (
+	"time"
+)
+
+type EndTxnResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+}
+
+func (e *EndTxnResponse) setVersion(v int16) {
+	e.Version = v
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(e.ThrottleTime)
+	pe.putKError(e.Err)
+	return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+	if e.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	e.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+	return apiKeyEndTxn
+}
+
+func (e *EndTxnResponse) version() int16 {
+	return e.Version
+}
+
+func (r *EndTxnResponse) headerVersion() int16 {
+	return 0
+}
+
+func (e *EndTxnResponse) isValidVersion() bool {
+	return e.Version >= 0 && e.Version <= 2
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+	switch e.Version {
+	case 2:
+		return V2_7_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *EndTxnResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh
new file mode 100644
index 0000000..9815d90
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/entrypoint.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -eu
+set -o pipefail
+
+KAFKA_VERSION="${KAFKA_VERSION:-3.9.1}"
+KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}"
+
+if [ ! -d "${KAFKA_HOME}" ]; then
+    echo "Error: KAFKA_VERSION ${KAFKA_VERSION} not available in this image at ${KAFKA_HOME}"
+    exit 1
+fi
+
+cd "${KAFKA_HOME}" || exit 1
+
+# discard all empty/commented lines from default config and copy to /tmp
+sed -e '/^#/d' -e '/^$/d' config/server.properties >/tmp/server.properties
+
+echo "########################################################################" >>/tmp/server.properties
+
+# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka
+for var in "${!KAFKA_CFG_@}"; do
+    key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')"
+    sed -e '/^'$key'/d' -i"" /tmp/server.properties
+    value="${!var}"
+    echo "$key=$value" >>/tmp/server.properties
+done
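+# (the loop above turns e.g. KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR=2 into
+# offsets.topic.replication.factor=2 in /tmp/server.properties)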
+
+# use KRaft if KAFKA_VERSION is 4.0.0 or newer: the version-sort pipeline below
+# prints the smaller of KAFKA_VERSION and 4.0.0, so matching 4.0.0 means
+# KAFKA_VERSION >= 4.0.0
+if printf "%s\n4.0.0" "$KAFKA_VERSION" | sort -V | head -n1 | grep -q "^4.0.0$"; then
+  # remove zookeeper-only options from server.properties and setup storage
+  sed -e '/zookeeper.connect/d' \
+      -i /tmp/server.properties
+  bin/kafka-storage.sh format -t "$KAFKA_CFG_CLUSTER_ID" -c /tmp/server.properties --ignore-formatted
+else
+  # remove KRaft-only options from server.properties
+  sed -e '/process.roles/d' \
+      -e '/controller.quorum.voters/d' \
+      -e '/controller.listener.names/d' \
+      -e '/cluster.id/d' \
+      -i /tmp/server.properties
+fi
+
+sort /tmp/server.properties
+
+exec bin/kafka-server-start.sh /tmp/server.properties
diff --git a/vendor/github.com/IBM/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go
new file mode 100644
index 0000000..55af653
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/errors.go
@@ -0,0 +1,450 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to")
+
+// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
+var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server didn't give a correct controller ID. This can happen if the
+// Kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Metadata.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// ErrUnknownScramMechanism is returned when a user tries to AlterUserScramCredentials with an unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
+// ErrReassignPartitions is returned when altering partition assignments for a topic fails
+var ErrReassignPartitions = errors.New("failed to reassign partitions for topic")
+
+// ErrDeleteRecords is the type of error returned when deleting the required records fails
+var ErrDeleteRecords = errors.New("kafka server: failed to delete records")
+
+// ErrCreateACLs is the type of error returned when ACL creation failed
+var ErrCreateACLs = errors.New("kafka server: failed to create one or more ACL rules")
+
+// ErrAddPartitionsToTxn is returned when AddPartitionsToTxn failed multiple times
+var ErrAddPartitionsToTxn = errors.New("transaction manager: failed to send partitions to transaction")
+
+// ErrTxnOffsetCommit is returned when TxnOffsetCommit failed multiple times
+var ErrTxnOffsetCommit = errors.New("transaction manager: failed to send offsets to transaction")
+
+// ErrTransactionNotReady is returned when the transaction status is invalid for the current action.
+var ErrTransactionNotReady = errors.New("transaction manager: transaction is not ready")
+
+// ErrNonTransactedProducer is returned when calling BeginTxn, CommitTxn or AbortTxn on a non-transactional producer.
+var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer")
+
+// ErrTransitionNotAllowed is returned when a txnmgr state transition is not valid.
+var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted")
+
+// ErrCannotTransitionNilError is returned when a transition is attempted with a nil error.
+var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transition with a nil error")
+
+// ErrTxnUnableToParseResponse is returned when the response is nil
+var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response")
+
+// ErrUnknownMessage is returned when the protocol message key is not recognized
+var ErrUnknownMessage = errors.New("kafka: unknown protocol message key")
+
+// MultiErrorFormat specifies the formatter applied to format multierrors.
+//
+// Deprecated: Please use [errors.Join] instead.
+func MultiErrorFormat(es []error) string {
+	if len(es) == 1 {
+		return es[0].Error()
+	}
+
+	points := make([]string, len(es))
+	for i, err := range es {
+		points[i] = fmt.Sprintf("* %s", err)
+	}
+
+	return fmt.Sprintf(
+		"%d errors occurred:\n\t%s\n",
+		len(es), strings.Join(points, "\n\t"))
+}
+
+type sentinelError struct {
+	sentinel error
+	wrapped  error
+}
+
+func (err sentinelError) Error() string {
+	if err.wrapped != nil {
+		return fmt.Sprintf("%s: %v", err.sentinel, err.wrapped)
+	} else {
+		return fmt.Sprintf("%s", err.sentinel)
+	}
+}
+
+func (err sentinelError) Is(target error) bool {
+	return errors.Is(err.sentinel, target) || errors.Is(err.wrapped, target)
+}
+
+func (err sentinelError) Unwrap() error {
+	return err.wrapped
+}
+
+func Wrap(sentinel error, wrapped ...error) sentinelError {
+	return sentinelError{sentinel: sentinel, wrapped: errors.Join(wrapped...)}
+}
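+
+// Example (illustrative sketch): wrapping a low-level cause under a sarama
+// sentinel so callers can match either error with errors.Is.
+//
+//	err := Wrap(ErrOutOfBrokers, io.EOF)
+//	errors.Is(err, ErrOutOfBrokers) // true, via the sentinel
+//	errors.Is(err, io.EOF)          // true, via the wrapped cause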
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+	Info string
+}
+
+func (err PacketEncodingError) Error() string {
+	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+	Info string
+}
+
+func (err PacketDecodingError) Error() string {
+	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+	return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+	ErrUnknown                            KError = -1 // Errors.UNKNOWN_SERVER_ERROR
+	ErrNoError                            KError = 0  // Errors.NONE
+	ErrOffsetOutOfRange                   KError = 1  // Errors.OFFSET_OUT_OF_RANGE
+	ErrInvalidMessage                     KError = 2  // Errors.CORRUPT_MESSAGE
+	ErrUnknownTopicOrPartition            KError = 3  // Errors.UNKNOWN_TOPIC_OR_PARTITION
+	ErrInvalidMessageSize                 KError = 4  // Errors.INVALID_FETCH_SIZE
+	ErrLeaderNotAvailable                 KError = 5  // Errors.LEADER_NOT_AVAILABLE
+	ErrNotLeaderForPartition              KError = 6  // Errors.NOT_LEADER_OR_FOLLOWER
+	ErrRequestTimedOut                    KError = 7  // Errors.REQUEST_TIMED_OUT
+	ErrBrokerNotAvailable                 KError = 8  // Errors.BROKER_NOT_AVAILABLE
+	ErrReplicaNotAvailable                KError = 9  // Errors.REPLICA_NOT_AVAILABLE
+	ErrMessageSizeTooLarge                KError = 10 // Errors.MESSAGE_TOO_LARGE
+	ErrStaleControllerEpochCode           KError = 11 // Errors.STALE_CONTROLLER_EPOCH
+	ErrOffsetMetadataTooLarge             KError = 12 // Errors.OFFSET_METADATA_TOO_LARGE
+	ErrNetworkException                   KError = 13 // Errors.NETWORK_EXCEPTION
+	ErrOffsetsLoadInProgress              KError = 14 // Errors.COORDINATOR_LOAD_IN_PROGRESS
+	ErrConsumerCoordinatorNotAvailable    KError = 15 // Errors.COORDINATOR_NOT_AVAILABLE
+	ErrNotCoordinatorForConsumer          KError = 16 // Errors.NOT_COORDINATOR
+	ErrInvalidTopic                       KError = 17 // Errors.INVALID_TOPIC_EXCEPTION
+	ErrMessageSetSizeTooLarge             KError = 18 // Errors.RECORD_LIST_TOO_LARGE
+	ErrNotEnoughReplicas                  KError = 19 // Errors.NOT_ENOUGH_REPLICAS
+	ErrNotEnoughReplicasAfterAppend       KError = 20 // Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND
+	ErrInvalidRequiredAcks                KError = 21 // Errors.INVALID_REQUIRED_ACKS
+	ErrIllegalGeneration                  KError = 22 // Errors.ILLEGAL_GENERATION
+	ErrInconsistentGroupProtocol          KError = 23 // Errors.INCONSISTENT_GROUP_PROTOCOL
+	ErrInvalidGroupId                     KError = 24 // Errors.INVALID_GROUP_ID
+	ErrUnknownMemberId                    KError = 25 // Errors.UNKNOWN_MEMBER_ID
+	ErrInvalidSessionTimeout              KError = 26 // Errors.INVALID_SESSION_TIMEOUT
+	ErrRebalanceInProgress                KError = 27 // Errors.REBALANCE_IN_PROGRESS
+	ErrInvalidCommitOffsetSize            KError = 28 // Errors.INVALID_COMMIT_OFFSET_SIZE
+	ErrTopicAuthorizationFailed           KError = 29 // Errors.TOPIC_AUTHORIZATION_FAILED
+	ErrGroupAuthorizationFailed           KError = 30 // Errors.GROUP_AUTHORIZATION_FAILED
+	ErrClusterAuthorizationFailed         KError = 31 // Errors.CLUSTER_AUTHORIZATION_FAILED
+	ErrInvalidTimestamp                   KError = 32 // Errors.INVALID_TIMESTAMP
+	ErrUnsupportedSASLMechanism           KError = 33 // Errors.UNSUPPORTED_SASL_MECHANISM
+	ErrIllegalSASLState                   KError = 34 // Errors.ILLEGAL_SASL_STATE
+	ErrUnsupportedVersion                 KError = 35 // Errors.UNSUPPORTED_VERSION
+	ErrTopicAlreadyExists                 KError = 36 // Errors.TOPIC_ALREADY_EXISTS
+	ErrInvalidPartitions                  KError = 37 // Errors.INVALID_PARTITIONS
+	ErrInvalidReplicationFactor           KError = 38 // Errors.INVALID_REPLICATION_FACTOR
+	ErrInvalidReplicaAssignment           KError = 39 // Errors.INVALID_REPLICA_ASSIGNMENT
+	ErrInvalidConfig                      KError = 40 // Errors.INVALID_CONFIG
+	ErrNotController                      KError = 41 // Errors.NOT_CONTROLLER
+	ErrInvalidRequest                     KError = 42 // Errors.INVALID_REQUEST
+	ErrUnsupportedForMessageFormat        KError = 43 // Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT
+	ErrPolicyViolation                    KError = 44 // Errors.POLICY_VIOLATION
+	ErrOutOfOrderSequenceNumber           KError = 45 // Errors.OUT_OF_ORDER_SEQUENCE_NUMBER
+	ErrDuplicateSequenceNumber            KError = 46 // Errors.DUPLICATE_SEQUENCE_NUMBER
+	ErrInvalidProducerEpoch               KError = 47 // Errors.INVALID_PRODUCER_EPOCH
+	ErrInvalidTxnState                    KError = 48 // Errors.INVALID_TXN_STATE
+	ErrInvalidProducerIDMapping           KError = 49 // Errors.INVALID_PRODUCER_ID_MAPPING
+	ErrInvalidTransactionTimeout          KError = 50 // Errors.INVALID_TRANSACTION_TIMEOUT
+	ErrConcurrentTransactions             KError = 51 // Errors.CONCURRENT_TRANSACTIONS
+	ErrTransactionCoordinatorFenced       KError = 52 // Errors.TRANSACTION_COORDINATOR_FENCED
+	ErrTransactionalIDAuthorizationFailed KError = 53 // Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED
+	ErrSecurityDisabled                   KError = 54 // Errors.SECURITY_DISABLED
+	ErrOperationNotAttempted              KError = 55 // Errors.OPERATION_NOT_ATTEMPTED
+	ErrKafkaStorageError                  KError = 56 // Errors.KAFKA_STORAGE_ERROR
+	ErrLogDirNotFound                     KError = 57 // Errors.LOG_DIR_NOT_FOUND
+	ErrSASLAuthenticationFailed           KError = 58 // Errors.SASL_AUTHENTICATION_FAILED
+	ErrUnknownProducerID                  KError = 59 // Errors.UNKNOWN_PRODUCER_ID
+	ErrReassignmentInProgress             KError = 60 // Errors.REASSIGNMENT_IN_PROGRESS
+	ErrDelegationTokenAuthDisabled        KError = 61 // Errors.DELEGATION_TOKEN_AUTH_DISABLED
+	ErrDelegationTokenNotFound            KError = 62 // Errors.DELEGATION_TOKEN_NOT_FOUND
+	ErrDelegationTokenOwnerMismatch       KError = 63 // Errors.DELEGATION_TOKEN_OWNER_MISMATCH
+	ErrDelegationTokenRequestNotAllowed   KError = 64 // Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED
+	ErrDelegationTokenAuthorizationFailed KError = 65 // Errors.DELEGATION_TOKEN_AUTHORIZATION_FAILED
+	ErrDelegationTokenExpired             KError = 66 // Errors.DELEGATION_TOKEN_EXPIRED
+	ErrInvalidPrincipalType               KError = 67 // Errors.INVALID_PRINCIPAL_TYPE
+	ErrNonEmptyGroup                      KError = 68 // Errors.NON_EMPTY_GROUP
+	ErrGroupIDNotFound                    KError = 69 // Errors.GROUP_ID_NOT_FOUND
+	ErrFetchSessionIDNotFound             KError = 70 // Errors.FETCH_SESSION_ID_NOT_FOUND
+	ErrInvalidFetchSessionEpoch           KError = 71 // Errors.INVALID_FETCH_SESSION_EPOCH
+	ErrListenerNotFound                   KError = 72 // Errors.LISTENER_NOT_FOUND
+	ErrTopicDeletionDisabled              KError = 73 // Errors.TOPIC_DELETION_DISABLED
+	ErrFencedLeaderEpoch                  KError = 74 // Errors.FENCED_LEADER_EPOCH
+	ErrUnknownLeaderEpoch                 KError = 75 // Errors.UNKNOWN_LEADER_EPOCH
+	ErrUnsupportedCompressionType         KError = 76 // Errors.UNSUPPORTED_COMPRESSION_TYPE
+	ErrStaleBrokerEpoch                   KError = 77 // Errors.STALE_BROKER_EPOCH
+	ErrOffsetNotAvailable                 KError = 78 // Errors.OFFSET_NOT_AVAILABLE
+	ErrMemberIdRequired                   KError = 79 // Errors.MEMBER_ID_REQUIRED
+	ErrPreferredLeaderNotAvailable        KError = 80 // Errors.PREFERRED_LEADER_NOT_AVAILABLE
+	ErrGroupMaxSizeReached                KError = 81 // Errors.GROUP_MAX_SIZE_REACHED
+	ErrFencedInstancedId                  KError = 82 // Errors.FENCED_INSTANCE_ID
+	ErrEligibleLeadersNotAvailable        KError = 83 // Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE
+	ErrElectionNotNeeded                  KError = 84 // Errors.ELECTION_NOT_NEEDED
+	ErrNoReassignmentInProgress           KError = 85 // Errors.NO_REASSIGNMENT_IN_PROGRESS
+	ErrGroupSubscribedToTopic             KError = 86 // Errors.GROUP_SUBSCRIBED_TO_TOPIC
+	ErrInvalidRecord                      KError = 87 // Errors.INVALID_RECORD
+	ErrUnstableOffsetCommit               KError = 88 // Errors.UNSTABLE_OFFSET_COMMIT
+	ErrThrottlingQuotaExceeded            KError = 89 // Errors.THROTTLING_QUOTA_EXCEEDED
+	ErrProducerFenced                     KError = 90 // Errors.PRODUCER_FENCED
+)
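+
+// Illustrative sketch (not part of this file): because KError implements the
+// error interface, broker error codes can be matched with errors.Is, e.g. to
+// detect stale metadata:
+//
+//	if errors.Is(err, ErrNotLeaderForPartition) {
+//		// refresh metadata and retry
+//	}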
+
+func (err KError) Error() string {
+	// Error messages stolen/adapted from
+	// https://kafka.apache.org/protocol#protocol_error_codes
+	switch err {
+	case ErrNoError:
+		return "kafka server: Not an error, why are you printing me?"
+	case ErrUnknown:
+		return "kafka server: Unexpected (unknown?) server error"
+	case ErrOffsetOutOfRange:
+		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition"
+	case ErrInvalidMessage:
+		return "kafka server: Message contents does not match its CRC"
+	case ErrUnknownTopicOrPartition:
+		return "kafka server: Request was for a topic or partition that does not exist on this broker"
+	case ErrInvalidMessageSize:
+		return "kafka server: The message has a negative size"
+	case ErrLeaderNotAvailable:
+		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes"
+	case ErrNotLeaderForPartition:
+		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date"
+	case ErrRequestTimedOut:
+		return "kafka server: Request exceeded the user-specified time limit in the request"
+	case ErrBrokerNotAvailable:
+		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+	case ErrReplicaNotAvailable:
+		return "kafka server: Replica information not available, one or more brokers are down"
+	case ErrMessageSizeTooLarge:
+		return "kafka server: Message was too large, server rejected it to avoid allocation error"
+	case ErrStaleControllerEpochCode:
+		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)"
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata"
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received"
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The coordinator is still loading offsets and cannot currently process requests"
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: The coordinator is not available"
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker"
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic"
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included message batch larger than the configured segment size on the server"
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required"
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required"
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)"
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation"
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provider group protocol type is incompatible with the other members"
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty"
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation"
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range"
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group"
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large"
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic"
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group"
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type"
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range"
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism"
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state"
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of API is not supported"
+	case ErrTopicAlreadyExists:
+		return "kafka server: Topic with this name already exists"
+	case ErrInvalidPartitions:
+		return "kafka server: Number of partitions is invalid"
+	case ErrInvalidReplicationFactor:
+		return "kafka server: Replication-factor is invalid"
+	case ErrInvalidReplicaAssignment:
+		return "kafka server: Replica assignment is invalid"
+	case ErrInvalidConfig:
+		return "kafka server: Configuration is invalid"
+	case ErrNotController:
+		return "kafka server: This is not the correct controller for this cluster"
+	case ErrInvalidRequest:
+		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details"
+	case ErrUnsupportedForMessageFormat:
+		return "kafka server: The requested operation is not supported by the message format version"
+	case ErrPolicyViolation:
+		return "kafka server: Request parameters do not satisfy the configured policy"
+	case ErrOutOfOrderSequenceNumber:
+		return "kafka server: The broker received an out of order sequence number"
+	case ErrDuplicateSequenceNumber:
+		return "kafka server: The broker received a duplicate sequence number"
+	case ErrInvalidProducerEpoch:
+		return "kafka server: Producer attempted an operation with an old epoch"
+	case ErrInvalidTxnState:
+		return "kafka server: The producer attempted a transactional operation in an invalid state"
+	case ErrInvalidProducerIDMapping:
+		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id"
+	case ErrInvalidTransactionTimeout:
+		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)"
+	case ErrConcurrentTransactions:
+		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing"
+	case ErrTransactionCoordinatorFenced:
+		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer"
+	case ErrTransactionalIDAuthorizationFailed:
+		return "kafka server: Transactional ID authorization failed"
+	case ErrSecurityDisabled:
+		return "kafka server: Security features are disabled"
+	case ErrOperationNotAttempted:
+		return "kafka server: The broker did not attempt to execute this operation"
+	case ErrKafkaStorageError:
+		return "kafka server: Disk error when trying to access log file on the disk"
+	case ErrLogDirNotFound:
+		return "kafka server: The specified log directory is not found in the broker config"
+	case ErrSASLAuthenticationFailed:
+		return "kafka server: SASL Authentication failed"
+	case ErrUnknownProducerID:
+		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID"
+	case ErrReassignmentInProgress:
+		return "kafka server: A partition reassignment is in progress"
+	case ErrDelegationTokenAuthDisabled:
+		return "kafka server: Delegation Token feature is not enabled"
+	case ErrDelegationTokenNotFound:
+		return "kafka server: Delegation Token is not found on server"
+	case ErrDelegationTokenOwnerMismatch:
+		return "kafka server: Specified Principal is not valid Owner/Renewer"
+	case ErrDelegationTokenRequestNotAllowed:
+		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels"
+	case ErrDelegationTokenAuthorizationFailed:
+		return "kafka server: Delegation Token authorization failed"
+	case ErrDelegationTokenExpired:
+		return "kafka server: Delegation Token is expired"
+	case ErrInvalidPrincipalType:
+		return "kafka server: Supplied principalType is not supported"
+	case ErrNonEmptyGroup:
+		return "kafka server: The group is not empty"
+	case ErrGroupIDNotFound:
+		return "kafka server: The group id does not exist"
+	case ErrFetchSessionIDNotFound:
+		return "kafka server: The fetch session ID was not found"
+	case ErrInvalidFetchSessionEpoch:
+		return "kafka server: The fetch session epoch is invalid"
+	case ErrListenerNotFound:
+		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed"
+	case ErrTopicDeletionDisabled:
+		return "kafka server: Topic deletion is disabled"
+	case ErrFencedLeaderEpoch:
+		return "kafka server: The leader epoch in the request is older than the epoch on the broker"
+	case ErrUnknownLeaderEpoch:
+		return "kafka server: The leader epoch in the request is newer than the epoch on the broker"
+	case ErrUnsupportedCompressionType:
+		return "kafka server: The requesting client does not support the compression type of given partition"
+	case ErrStaleBrokerEpoch:
+		return "kafka server: Broker epoch has changed"
+	case ErrOffsetNotAvailable:
+		return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
+	case ErrMemberIdRequired:
+		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
+	case ErrPreferredLeaderNotAvailable:
+		return "kafka server: The preferred leader was not available"
+	case ErrGroupMaxSizeReached:
+		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members"
+	case ErrFencedInstancedId:
+		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id"
+	case ErrEligibleLeadersNotAvailable:
+		return "kafka server: Eligible topic partition leaders are not available"
+	case ErrElectionNotNeeded:
+		return "kafka server: Leader election not needed for topic partition"
+	case ErrNoReassignmentInProgress:
+		return "kafka server: No partition reassignment is in progress"
+	case ErrGroupSubscribedToTopic:
+		return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it"
+	case ErrInvalidRecord:
+		return "kafka server: This record has failed the validation on broker and hence will be rejected"
+	case ErrUnstableOffsetCommit:
+		return "kafka server: There are unstable offsets that need to be cleared"
+	}
+
+	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
diff --git a/vendor/github.com/IBM/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go
new file mode 100644
index 0000000..6d072dd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/fetch_request.go
@@ -0,0 +1,340 @@
+package sarama
+
+import "fmt"
+
+type fetchRequestBlock struct {
+	Version int16
+	// currentLeaderEpoch contains the current leader epoch of the partition.
+	currentLeaderEpoch int32
+	// fetchOffset contains the message offset.
+	fetchOffset int64
+	// logStartOffset contains the earliest available offset of the follower
+	// replica.  The field is only used when the request is sent by the
+	// follower.
+	logStartOffset int64
+	// maxBytes contains the maximum bytes to fetch from this partition.  See
+	// KIP-74 for cases where this limit may not be honored.
+	maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
+	b.Version = version
+	if b.Version >= 9 {
+		pe.putInt32(b.currentLeaderEpoch)
+	}
+	pe.putInt64(b.fetchOffset)
+	if b.Version >= 5 {
+		pe.putInt64(b.logStartOffset)
+	}
+	pe.putInt32(b.maxBytes)
+	return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Version = version
+	if b.Version >= 9 {
+		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if b.fetchOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if b.Version >= 5 {
+		if b.logStartOffset, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	if b.maxBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that.  The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ReplicaID contains the broker ID of the follower, or -1 if this request
+	// is from a consumer.
+	// ReplicaID int32
+	// MaxWaitTime contains the maximum time in milliseconds to wait for the response.
+	MaxWaitTime int32
+	// MinBytes contains the minimum bytes to accumulate in the response.
+	MinBytes int32
+	// MaxBytes contains the maximum bytes to fetch.  See KIP-74 for cases
+	// where this limit may not be honored.
+	MaxBytes int32
+	// Isolation controls the visibility of transactional records. Using
+	// READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With
+	// READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED
+	// transactional records are visible. To be more concrete, READ_COMMITTED
+	// returns all data from offsets smaller than the current LSO (last stable
+	// offset), and enables the inclusion of the list of aborted transactions
+	// in the result, which allows consumers to discard ABORTED transactional
+	// records.
+	Isolation IsolationLevel
+	// SessionID contains the fetch session ID.
+	SessionID int32
+	// SessionEpoch contains the epoch of the partition leader as known to the
+	// follower replica or a consumer.
+	SessionEpoch int32
+	// blocks contains the topics to fetch.
+	blocks map[string]map[int32]*fetchRequestBlock
+	// forgotten contains the partitions to remove from an incremental fetch request.
+	forgotten map[string][]int32
+	// RackID contains the rack ID of the consumer making this request.
+	RackID string
+}
+
+func (r *FetchRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+type IsolationLevel int8
+
+const (
+	ReadUncommitted IsolationLevel = iota
+	ReadCommitted
+)
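+
+// Illustrative sketch: consumers normally pick the isolation level through the
+// client configuration rather than on the request itself (assuming the usual
+// sarama Config layout):
+//
+//	cfg := NewConfig()
+//	cfg.Consumer.IsolationLevel = ReadCommitted // hide uncommitted transactional records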
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+	metricRegistry := pe.metricRegistry()
+
+	pe.putInt32(-1) // ReplicaID is always -1 for clients
+	pe.putInt32(r.MaxWaitTime)
+	pe.putInt32(r.MinBytes)
+	if r.Version >= 3 {
+		pe.putInt32(r.MaxBytes)
+	}
+	if r.Version >= 4 {
+		pe.putInt8(int8(r.Isolation))
+	}
+	if r.Version >= 7 {
+		pe.putInt32(r.SessionID)
+		pe.putInt32(r.SessionEpoch)
+	}
+	err = pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, blocks := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(blocks))
+		if err != nil {
+			return err
+		}
+		for partition, block := range blocks {
+			pe.putInt32(partition)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+		getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1)
+	}
+	if r.Version >= 7 {
+		err = pe.putArrayLength(len(r.forgotten))
+		if err != nil {
+			return err
+		}
+		for topic, partitions := range r.forgotten {
+			err = pe.putString(topic)
+			if err != nil {
+				return err
+			}
+			err = pe.putArrayLength(len(partitions))
+			if err != nil {
+				return err
+			}
+			for _, partition := range partitions {
+				pe.putInt32(partition)
+			}
+		}
+	}
+	if r.Version >= 11 {
+		err = pe.putString(r.RackID)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if _, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.MinBytes, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.Version >= 3 {
+		if r.MaxBytes, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 4 {
+		isolation, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+		r.Isolation = IsolationLevel(isolation)
+	}
+	if r.Version >= 7 {
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+		r.SessionEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			fetchBlock := &fetchRequestBlock{}
+			if err = fetchBlock.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = fetchBlock
+		}
+	}
+
+	if r.Version >= 7 {
+		forgottenCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.forgotten = make(map[string][]int32)
+		for i := 0; i < forgottenCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			if partitionCount < 0 {
+				return fmt.Errorf("partitionCount %d is invalid", partitionCount)
+			}
+			r.forgotten[topic] = make([]int32, partitionCount)
+
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.forgotten[topic][j] = partition
+			}
+		}
+	}
+
+	if r.Version >= 11 {
+		r.RackID, err = pd.getString()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchRequest) key() int16 {
+	return apiKeyFetch
+}
+
+func (r *FetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *FetchRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *FetchRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 11
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 11:
+		return V2_3_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 6:
+		return V1_0_0_0
+	case 4, 5:
+		return V0_11_0_0
+	case 3:
+		return V0_10_1_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+	}
+
+	if r.Version >= 7 && r.forgotten == nil {
+		r.forgotten = make(map[string][]int32)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+	}
+
+	tmp := new(fetchRequestBlock)
+	tmp.Version = r.Version
+	tmp.maxBytes = maxBytes
+	tmp.fetchOffset = fetchOffset
+	if r.Version >= 9 {
+		tmp.currentLeaderEpoch = leaderEpoch
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
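+
+// Minimal usage sketch with assumed values (topic name, offsets): build a v11
+// fetch request for a single partition.
+//
+//	req := &FetchRequest{Version: 11, MinBytes: 1, MaxBytes: 32 * 1024 * 1024, MaxWaitTime: 500}
+//	req.AddBlock("my-topic", 0, 12345, 1024*1024, 0)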
diff --git a/vendor/github.com/IBM/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go
new file mode 100644
index 0000000..95de90e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/fetch_response.go
@@ -0,0 +1,605 @@
+package sarama
+
+import (
+	"errors"
+	"sort"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+const (
+	invalidLeaderEpoch        = -1
+	invalidPreferredReplicaID = -1
+)
+
+type AbortedTransaction struct {
+	// ProducerID contains the producer id associated with the aborted transaction.
+	ProducerID int64
+	// FirstOffset contains the first offset in the aborted transaction.
+	FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if t.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+	pe.putInt64(t.ProducerID)
+	pe.putInt64(t.FirstOffset)
+
+	return nil
+}
+
+type FetchResponseBlock struct {
+	// Err contains the error code, or 0 if there was no fetch error.
+	Err KError
+	// HighWaterMarkOffset contains the current high water mark.
+	HighWaterMarkOffset int64
+	// LastStableOffset contains the last stable offset (or LSO) of the
+	// partition. This is the last offset such that the state of all
+	// transactional records prior to it has been decided (ABORTED or
+	// COMMITTED).
+	LastStableOffset int64
+	// LogStartOffset contains the current log start offset.
+	LogStartOffset int64
+	// AbortedTransactions contains the aborted transactions.
+	AbortedTransactions []*AbortedTransaction
+	// PreferredReadReplica contains the preferred read replica for the
+	// consumer to use on its next fetch request
+	PreferredReadReplica int32
+	// RecordsSet contains the record data.
+	RecordsSet []*Records
+
+	Partial bool
+	Records *Records // deprecated: use FetchResponseBlock.RecordsSet
+
+	// recordsNextOffset contains the next consecutive offset following this response block.
+	// This field is computed locally and is not part of the server's binary response.
+	recordsNextOffset *int64
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	metricRegistry := pd.metricRegistry()
+	var sizeMetric metrics.Histogram
+	if metricRegistry != nil {
+		sizeMetric = getOrRegisterHistogram("consumer-fetch-response-size", metricRegistry)
+	}
+
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	b.HighWaterMarkOffset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 4 {
+		b.LastStableOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		if version >= 5 {
+			b.LogStartOffset, err = pd.getInt64()
+			if err != nil {
+				return err
+			}
+		}
+
+		numTransact, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		if numTransact >= 0 {
+			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+		}
+
+		for i := 0; i < numTransact; i++ {
+			transact := new(AbortedTransaction)
+			if err = transact.decode(pd); err != nil {
+				return err
+			}
+			b.AbortedTransactions[i] = transact
+		}
+	}
+
+	if version >= 11 {
+		b.PreferredReadReplica, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		b.PreferredReadReplica = invalidPreferredReplicaID
+	}
+
+	recordsSize, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if sizeMetric != nil {
+		sizeMetric.Update(int64(recordsSize))
+	}
+
+	recordsDecoder, err := pd.getSubset(int(recordsSize))
+	if err != nil {
+		return err
+	}
+
+	b.RecordsSet = []*Records{}
+
+	for recordsDecoder.remaining() > 0 {
+		records := &Records{}
+		if err := records.decode(recordsDecoder); err != nil {
+			// If we have decoded at least one record set, this is not an error
+			if errors.Is(err, ErrInsufficientData) {
+				if len(b.RecordsSet) == 0 {
+					b.Partial = true
+				}
+				break
+			}
+			return err
+		}
+
+		b.recordsNextOffset, err = records.nextOffset()
+		if err != nil {
+			return err
+		}
+
+		partial, err := records.isPartial()
+		if err != nil {
+			return err
+		}
+
+		n, err := records.numRecords()
+		if err != nil {
+			return err
+		}
+
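+		// Keep this batch if it actually holds records; also keep a leading
+		// partial batch so callers can detect a truncated response.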
+		if n > 0 || (partial && len(b.RecordsSet) == 0) {
+			b.RecordsSet = append(b.RecordsSet, records)
+
+			if b.Records == nil {
+				b.Records = records
+			}
+		}
+
+		overflow, err := records.isOverflow()
+		if err != nil {
+			return err
+		}
+
+		if partial || overflow {
+			break
+		}
+	}
+
+	return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+	sum := 0
+
+	for _, records := range b.RecordsSet {
+		count, err := records.numRecords()
+		if err != nil {
+			return 0, err
+		}
+
+		sum += count
+	}
+
+	return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+	if b.Partial {
+		return true, nil
+	}
+
+	if len(b.RecordsSet) == 1 {
+		return b.RecordsSet[0].isPartial()
+	}
+
+	return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putKError(b.Err)
+
+	pe.putInt64(b.HighWaterMarkOffset)
+
+	if version >= 4 {
+		pe.putInt64(b.LastStableOffset)
+
+		if version >= 5 {
+			pe.putInt64(b.LogStartOffset)
+		}
+
+		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+			return err
+		}
+		for _, transact := range b.AbortedTransactions {
+			if err = transact.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	if version >= 11 {
+		pe.putInt32(b.PreferredReadReplica)
+	}
+
+	pe.push(&lengthField{})
+	for _, records := range b.RecordsSet {
+		err = records.encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return pe.pop()
+}
+
+func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
+	// No documentation guarantees that the field `fetchResponse.AbortedTransactions`
+	// is ordered, and the Java implementation uses a PriorityQueue based on
+	// `FirstOffset`, so we have to order it ourselves.
+	at := b.AbortedTransactions
+	sort.Slice(
+		at,
+		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
+	)
+	return at
+}
+
+type FetchResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration in milliseconds for which the request
+	// was throttled due to a quota violation, or zero if the request did not
+	// violate any quota.
+	ThrottleTime time.Duration
+	// ErrorCode contains the top level response error code.
+	ErrorCode int16
+	// SessionID contains the fetch session ID, or 0 if this is not part of a fetch session.
+	SessionID int32
+	// Blocks contains the response topics.
+	Blocks map[string]map[int32]*FetchResponseBlock
+
+	LogAppendTime bool
+	Timestamp     time.Time
+}
+
+func (r *FetchResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 7 {
+		r.ErrorCode, err = pd.getInt16()
+		if err != nil {
+			return err
+		}
+		r.SessionID, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(FetchResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 1 {
+		pe.putDurationMs(r.ThrottleTime)
+	}
+
+	if r.Version >= 7 {
+		pe.putInt16(r.ErrorCode)
+		pe.putInt32(r.SessionID)
+	}
+
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+
+		for id, block := range partitions {
+			pe.putInt32(id)
+			err = block.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *FetchResponse) key() int16 {
+	return apiKeyFetch
+}
+
+func (r *FetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *FetchResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *FetchResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 11
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 11:
+		return V2_3_0_0
+	case 9, 10:
+		return V2_1_0_0
+	case 8:
+		return V2_0_0_0
+	case 7:
+		return V1_1_0_0
+	case 6:
+		return V1_0_0_0
+	case 4, 5:
+		return V0_11_0_0
+	case 3:
+		return V0_10_1_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *FetchResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+	frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+	}
+	partitions, ok := r.Blocks[topic]
+	if !ok {
+		partitions = make(map[int32]*FetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	frb, ok := partitions[partition]
+	if !ok {
+		frb = new(FetchResponseBlock)
+		partitions[partition] = frb
+	}
+
+	return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+	var kb []byte
+	var vb []byte
+	if key != nil {
+		kb, _ = key.Encode()
+	}
+	if value != nil {
+		vb, _ = value.Encode()
+	}
+
+	return kb, vb
+}
+
+func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if r.LogAppendTime {
+		timestamp = r.Timestamp
+	}
+	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
+	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+	if len(frb.RecordsSet) == 0 {
+		records := newLegacyRecords(&MessageSet{})
+		frb.RecordsSet = []*Records{&records}
+	}
+	set := frb.RecordsSet[0].MsgSet
+	set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+}
+
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but
+// instead of appending one record to an existing batch, it appends a new
+// batch containing one record to the fetchResponse. Since transactions are
+// handled at the batch level (the whole batch is either committed or
+// aborted), use this helper to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+	kb, vb := encodeKV(key, value)
+
+	records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: isTransactional,
+	}
+	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+	records.RecordBatch = batch
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+	frb := r.getOrCreateBlock(topic, partition)
+
+	// batch
+	batch := &RecordBatch{
+		Version:         2,
+		LogAppendTime:   r.LogAppendTime,
+		FirstTimestamp:  timestamp,
+		MaxTimestamp:    r.Timestamp,
+		FirstOffset:     offset,
+		LastOffsetDelta: 0,
+		ProducerID:      producerID,
+		IsTransactional: true,
+		Control:         true,
+	}
+
+	// records
+	records := newDefaultRecords(nil)
+	records.RecordBatch = batch
+
+	// record
+	crAbort := ControlRecord{
+		Version: 0,
+		Type:    recordType,
+	}
+	crKey := &realEncoder{raw: make([]byte, 4)}
+	crValue := &realEncoder{raw: make([]byte, 6)}
+	crAbort.encode(crKey, crValue)
+	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+	batch.addRecord(rec)
+
+	frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
+}
+
+func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
+	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
+}
+
+func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
+	// the control record key and value are defined in AddControlRecordWithTimestamp
+	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+	frb := r.getOrCreateBlock(topic, partition)
+	if len(frb.RecordsSet) == 0 {
+		records := newDefaultRecords(&RecordBatch{Version: 2})
+		frb.RecordsSet = []*Records{&records}
+	}
+	batch := frb.RecordsSet[0].RecordBatch
+	batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+	frb := r.getOrCreateBlock(topic, partition)
+	frb.LastStableOffset = offset
+}
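+
+// The Add*/Set* helpers above are mainly useful for hand-building fetch
+// responses, e.g. in a mock broker. An illustrative sketch with assumed
+// topic/partition values:
+//
+//	resp := &FetchResponse{Version: 11}
+//	resp.AddMessage("my-topic", 0, nil, StringEncoder("hello"), 42)
+//	resp.AddError("my-topic", 1, ErrNotLeaderForPartition)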
diff --git a/vendor/github.com/IBM/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go
new file mode 100644
index 0000000..04ffc6f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/find_coordinator_request.go
@@ -0,0 +1,75 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+	CoordinatorGroup CoordinatorType = iota
+	CoordinatorTransaction
+)
+
+type FindCoordinatorRequest struct {
+	Version         int16
+	CoordinatorKey  string
+	CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) setVersion(v int16) {
+	f.Version = v
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(f.CoordinatorKey); err != nil {
+		return err
+	}
+
+	if f.Version >= 1 {
+		pe.putInt8(int8(f.CoordinatorType))
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+	if f.CoordinatorKey, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		f.Version = version
+		coordinatorType, err := pd.getInt8()
+		if err != nil {
+			return err
+		}
+
+		f.CoordinatorType = CoordinatorType(coordinatorType)
+	}
+
+	return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+	return f.Version
+}
+
+func (r *FindCoordinatorRequest) headerVersion() int16 {
+	return 1
+}
+
+func (f *FindCoordinatorRequest) isValidVersion() bool {
+	return f.Version >= 0 && f.Version <= 2
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
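+
+// Illustrative sketch (assumed group id): finding the coordinator for a
+// consumer group uses CoordinatorGroup; transactional producers would instead
+// pass their transactional id with CoordinatorTransaction.
+//
+//	req := &FindCoordinatorRequest{
+//		Version:         1,
+//		CoordinatorKey:  "my-group",
+//		CoordinatorType: CoordinatorGroup,
+//	}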
diff --git a/vendor/github.com/IBM/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go
new file mode 100644
index 0000000..1fb8a28
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/find_coordinator_response.go
@@ -0,0 +1,107 @@
+package sarama
+
+import (
+	"time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Err          KError
+	ErrMsg       *string
+	Coordinator  *Broker
+}
+
+func (f *FindCoordinatorResponse) setVersion(v int16) {
+	f.Version = v
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		f.Version = version
+
+		if f.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	f.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if version >= 1 {
+		if f.ErrMsg, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	coordinator := new(Broker)
+	// The version is hardcoded to 0, because version 1 of Broker.decode
+	// includes the rack field, which is not present in the FindCoordinatorResponse.
+	if err := coordinator.decode(pd, 0); err != nil {
+		return err
+	}
+	if coordinator.addr == ":0" {
+		return nil
+	}
+	f.Coordinator = coordinator
+
+	return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+	if f.Version >= 1 {
+		pe.putDurationMs(f.ThrottleTime)
+	}
+
+	pe.putKError(f.Err)
+
+	if f.Version >= 1 {
+		if err := pe.putNullableString(f.ErrMsg); err != nil {
+			return err
+		}
+	}
+
+	coordinator := f.Coordinator
+	if coordinator == nil {
+		coordinator = NoNode
+	}
+	if err := coordinator.encode(pe, 0); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+	return apiKeyFindCoordinator
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+	return f.Version
+}
+
+func (r *FindCoordinatorResponse) headerVersion() int16 {
+	return 0
+}
+
+func (f *FindCoordinatorResponse) isValidVersion() bool {
+	return f.Version >= 0 && f.Version <= 2
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+	switch f.Version {
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	default:
+		return V0_8_2_0
+	}
+}
+
+func (r *FindCoordinatorResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go
new file mode 100644
index 0000000..93ea921
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go
@@ -0,0 +1,321 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/jcmturner/gofork/encoding/asn1"
+	"github.com/jcmturner/gokrb5/v8/asn1tools"
+	"github.com/jcmturner/gokrb5/v8/gssapi"
+	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
+	"github.com/jcmturner/gokrb5/v8/messages"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+	TOK_ID_KRB_AP_REQ   = 256
+	GSS_API_GENERIC_TAG = 0x60
+	KRB5_USER_AUTH      = 1
+	KRB5_KEYTAB_AUTH    = 2
+	KRB5_CCACHE_AUTH    = 3
+	GSS_API_INITIAL     = 1
+	GSS_API_VERIFY      = 2
+	GSS_API_FINISH      = 3
+)
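+
+// The GSS_API_* step constants above track the client side of the handshake:
+// the client first sends an AP_REQ wrapped in a GSS-API header
+// (GSS_API_INITIAL), then verifies the broker's wrap token and answers it
+// (GSS_API_VERIFY), after which the context is established (GSS_API_FINISH).
+// See initSecContext below for the state transitions.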
+
+type GSSAPIConfig struct {
+	AuthType           int
+	KeyTabPath         string
+	CCachePath         string
+	KerberosConfigPath string
+	ServiceName        string
+	Username           string
+	Password           string
+	Realm              string
+	DisablePAFXFAST    bool
+	BuildSpn           BuildSpnFunc
+}
+
+type GSSAPIKerberosAuth struct {
+	Config                *GSSAPIConfig
+	ticket                messages.Ticket
+	encKey                types.EncryptionKey
+	NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
+	step                  int
+}
+
+type KerberosClient interface {
+	Login() error
+	GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
+	Domain() string
+	CName() types.PrincipalName
+	Destroy()
+}
+
+type BuildSpnFunc func(serviceName, host string) string
+
+// writePackage prepends the payload length as a 4-byte big-endian header and
+// sends the framed package ([length][payload]) to Kafka.
+func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
+	length := uint64(len(payload))
+	size := length + 4 // 4 byte length header + payload
+	if size > math.MaxInt32 {
+		return 0, errors.New("payload too large, will overflow int32")
+	}
+	finalPackage := make([]byte, size)
+	copy(finalPackage[4:], payload)
+	binary.BigEndian.PutUint32(finalPackage, uint32(length))
+	bytes, err := broker.conn.Write(finalPackage)
+	if err != nil {
+		return bytes, err
+	}
+	return bytes, nil
+}
+
+// readPackage reads payload length (4 bytes) and then reads the payload into []byte
+func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
+	bytesRead := 0
+	lengthInBytes := make([]byte, 4)
+	bytes, err := io.ReadFull(broker.conn, lengthInBytes)
+	if err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += bytes
+	payloadLength := binary.BigEndian.Uint32(lengthInBytes)
+	payloadBytes := make([]byte, payloadLength)
+	bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload
+	if err != nil {
+		return payloadBytes, bytesRead, err
+	}
+	bytesRead += bytes
+	return payloadBytes, bytesRead, nil
+}
+
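+// newAuthenticatorChecksum builds the 24-byte authenticator checksum from
+// RFC 4121 section 4.1.1: a 4-byte little-endian length of the channel
+// binding field (always 16), 16 zero bytes of channel bindings, and a 4-byte
+// flags word with the integrity and confidentiality context flags set.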
+func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
+	a := make([]byte, 24)
+	flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
+	binary.LittleEndian.PutUint32(a[:4], 16)
+	for _, i := range flags {
+		f := binary.LittleEndian.Uint32(a[20:24])
+		f |= uint32(i)
+		binary.LittleEndian.PutUint32(a[20:24], f)
+	}
+	return a
+}
+
+// Construct Kerberos AP_REQ package, conforming to RFC-4120
+// https://tools.ietf.org/html/rfc4120#page-84
+func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
+	domain string,
+	cname types.PrincipalName,
+	ticket messages.Ticket,
+	sessionKey types.EncryptionKey,
+) ([]byte, error) {
+	auth, err := types.NewAuthenticator(domain, cname)
+	if err != nil {
+		return nil, err
+	}
+	auth.Cksum = types.Checksum{
+		CksumType: chksumtype.GSSAPI,
+		Checksum:  krbAuth.newAuthenticatorChecksum(),
+	}
+	APReq, err := messages.NewAPReq(
+		ticket,
+		sessionKey,
+		auth,
+	)
+	if err != nil {
+		return nil, err
+	}
+	aprBytes := make([]byte, 2)
+	binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
+	tb, err := APReq.Marshal()
+	if err != nil {
+		return nil, err
+	}
+	aprBytes = append(aprBytes, tb...)
+	return aprBytes, nil
+}
+
+// Prepend the GSS-API header to the payload, conforming to RFC-2743
+// Section 3.1, Mechanism-Independent Token Format
+//
+// https://tools.ietf.org/html/rfc2743#page-81
+//
+// GSSAPIHeader + <specific mechanism payload>
+func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
+	oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
+	if err != nil {
+		return nil, err
+	}
+	tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
+	GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
+	GSSHeader = append(GSSHeader, oidBytes...)
+	GSSPackage := append(GSSHeader, payload...)
+	return GSSPackage, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) initSecContext(
+	client KerberosClient,
+	bytes []byte,
+) ([]byte, error) {
+	switch krbAuth.step {
+	case GSS_API_INITIAL:
+		aprBytes, err := krbAuth.createKrb5Token(
+			client.Domain(),
+			client.CName(),
+			krbAuth.ticket,
+			krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_VERIFY
+		return krbAuth.appendGSSAPIHeader(aprBytes)
+	case GSS_API_VERIFY:
+		wrapTokenReq := gssapi.WrapToken{}
+		if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
+			return nil, err
+		}
+		// Validate response.
+		isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
+		if !isValid {
+			return nil, err
+		}
+
+		wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
+		if err != nil {
+			return nil, err
+		}
+		krbAuth.step = GSS_API_FINISH
+		return wrapTokenResponse.Marshal()
+	}
+	return nil, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) spn(broker *Broker) string {
+	host, _, _ := net.SplitHostPort(broker.addr)
+	var spn string
+	if krbAuth.Config.BuildSpn != nil {
+		spn = krbAuth.Config.BuildSpn(broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+	} else {
+		spn = fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+	}
+	return spn
+}
+
+// Login will use the given KerberosClient to login and get a ticket for the given spn.
+func (krbAuth *GSSAPIKerberosAuth) Login(
+	client KerberosClient,
+	spn string,
+) (*messages.Ticket, error) {
+	if err := client.Login(); err != nil {
+		Logger.Printf("Kerberos client login error: %s", err)
+		return nil, err
+	}
+
+	ticket, encKey, err := client.GetServiceTicket(spn)
+	if err != nil {
+		Logger.Printf("Kerberos service ticket error for %s: %s", spn, err)
+		return nil, err
+	}
+	krbAuth.ticket = ticket
+	krbAuth.encKey = encKey
+	krbAuth.step = GSS_API_INITIAL
+
+	return &ticket, nil
+}
+
+// Authorize performs the kerberos auth handshake for authorization
+func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
+	client, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+	if err != nil {
+		Logger.Printf("Kerberos client initialization error: %s", err)
+		return err
+	}
+	defer client.Destroy()
+
+	ticket, err := krbAuth.Login(client, krbAuth.spn(broker))
+	if err != nil {
+		return err
+	}
+
+	principal := strings.Join(ticket.SName.NameString, "/") + "@" + ticket.Realm
+	var receivedBytes []byte
+
+	for {
+		packBytes, err := krbAuth.initSecContext(client, receivedBytes)
+		if err != nil {
+			Logger.Printf("Kerberos init error as %s: %s", principal, err)
+			return err
+		}
+
+		requestTime := time.Now()
+		bytesWritten, err := krbAuth.writePackage(broker, packBytes)
+		if err != nil {
+			Logger.Printf("Kerberos write error as %s: %s", principal, err)
+			return err
+		}
+		broker.updateOutgoingCommunicationMetrics(bytesWritten)
+
+		switch krbAuth.step {
+		case GSS_API_VERIFY:
+			var bytesRead int
+			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
+			requestLatency := time.Since(requestTime)
+			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+			if err != nil {
+				Logger.Printf("Kerberos read error as %s: %s", principal, err)
+				return err
+			}
+		case GSS_API_FINISH:
+			return nil
+		}
+	}
+}
+
+// AuthorizeV2 performs the SASL v2 GSSAPI authentication with the Kafka broker.
+func (krbAuth *GSSAPIKerberosAuth) AuthorizeV2(
+	broker *Broker,
+	authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error),
+) error {
+	client, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+	if err != nil {
+		Logger.Printf("Kerberos client initialization error: %s", err)
+		return err
+	}
+	defer client.Destroy()
+
+	ticket, err := krbAuth.Login(client, krbAuth.spn(broker))
+	if err != nil {
+		return err
+	}
+
+	principal := strings.Join(ticket.SName.NameString, "/") + "@" + ticket.Realm
+	var receivedBytes []byte
+
+	for {
+		token, err := krbAuth.initSecContext(client, receivedBytes)
+		if err != nil {
+			Logger.Printf("SASL Kerberos init error as %s: %s", principal, err)
+			return err
+		}
+
+		authResponse, err := authSendReceiver(token)
+		if err != nil {
+			Logger.Printf("SASL Kerberos authenticate error as %s: %s", principal, err)
+			return err
+		}
+
+		receivedBytes = authResponse.SaslAuthBytes
+
+		if krbAuth.step == GSS_API_FINISH {
+			return nil
+		}
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go
new file mode 100644
index 0000000..21b9971
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/heartbeat_request.go
@@ -0,0 +1,99 @@
+package sarama
+
+type HeartbeatRequest struct {
+	Version         int16
+	GroupId         string
+	GenerationId    int32
+	MemberId        string
+	GroupInstanceId *string
+}
+
+func (r *HeartbeatRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.Version >= 3 {
+		if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *HeartbeatRequest) key() int16 {
+	return apiKeyHeartbeat
+}
+
+func (r *HeartbeatRequest) version() int16 {
+	return r.Version
+}
+
+func (r *HeartbeatRequest) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (r *HeartbeatRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *HeartbeatRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *HeartbeatRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go
new file mode 100644
index 0000000..d753957
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/heartbeat_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+import "time"
+
+type HeartbeatResponse struct {
+	Version      int16
+	ThrottleTime int32
+	Err          KError
+}
+
+func (r *HeartbeatResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+	var err error
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *HeartbeatResponse) key() int16 {
+	return apiKeyHeartbeat
+}
+
+func (r *HeartbeatResponse) version() int16 {
+	return r.Version
+}
+
+func (r *HeartbeatResponse) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (r *HeartbeatResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *HeartbeatResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *HeartbeatResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *HeartbeatResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go
new file mode 100644
index 0000000..e048684
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go
@@ -0,0 +1,205 @@
+package sarama
+
+type IncrementalAlterConfigsOperation int8
+
+const (
+	IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
+	IncrementalAlterConfigsOperationDelete
+	IncrementalAlterConfigsOperationAppend
+	IncrementalAlterConfigsOperationSubtract
+)
+
+// IncrementalAlterConfigsRequest is an incremental alter config request type
+type IncrementalAlterConfigsRequest struct {
+	Version      int16
+	Resources    []*IncrementalAlterConfigsResource
+	ValidateOnly bool
+}
+
+func (a *IncrementalAlterConfigsRequest) setVersion(v int16) {
+	a.Version = v
+}
+
+type IncrementalAlterConfigsResource struct {
+	Type          ConfigResourceType
+	Name          string
+	ConfigEntries map[string]IncrementalAlterConfigsEntry
+}
+
+type IncrementalAlterConfigsEntry struct {
+	Operation IncrementalAlterConfigsOperation
+	Value     *string
+}
+
+func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, r := range a.Resources {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putBool(a.ValidateOnly)
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+	resourceCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
+	for i := range a.Resources {
+		r := &IncrementalAlterConfigsResource{}
+		err = r.decode(pd, version)
+		if err != nil {
+			return err
+		}
+		a.Resources[i] = r
+	}
+
+	validateOnly, err := pd.getBool()
+	if err != nil {
+		return err
+	}
+
+	a.ValidateOnly = validateOnly
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Type))
+
+	if err := pe.putString(a.Name); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+		return err
+	}
+
+	for name, e := range a.ConfigEntries {
+		if err := pe.putString(name); err != nil {
+			return err
+		}
+
+		if err := e.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Type = ConfigResourceType(t)
+
+	name, err := pd.getString()
+	if err != nil {
+		return err
+	}
+	a.Name = name
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if n > 0 {
+		a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
+		for i := 0; i < n; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			var v IncrementalAlterConfigsEntry
+
+			if err := v.decode(pd, version); err != nil {
+				return err
+			}
+
+			a.ConfigEntries[name] = v
+		}
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
+	pe.putInt8(int8(a.Operation))
+
+	if err := pe.putNullableString(a.Value); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
+	t, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	a.Operation = IncrementalAlterConfigsOperation(t)
+
+	s, err := pd.getNullableString()
+	if err != nil {
+		return err
+	}
+
+	a.Value = s
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsRequest) key() int16 {
+	return apiKeyIncrementalAlterConfigs
+}
+
+func (a *IncrementalAlterConfigsRequest) version() int16 {
+	return a.Version
+}
+
+func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
+	if a.Version >= 1 {
+		return 2
+	}
+	return 1
+}
+
+func (a *IncrementalAlterConfigsRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *IncrementalAlterConfigsRequest) isFlexible() bool {
+	return a.isFlexibleVersion(a.Version)
+}
+
+func (a *IncrementalAlterConfigsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_4_0_0
+	default:
+		return V2_3_0_0
+	}
+}
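
As a usage sketch, here is an incremental request that sets a single topic config while leaving all other entries untouched, which is the point of this API compared with the older, destructive AlterConfigs call. Topic name and value are placeholders:

	retention := "604800000" // 7 days, in milliseconds
	req := &sarama.IncrementalAlterConfigsRequest{
		Version: 0,
		Resources: []*sarama.IncrementalAlterConfigsResource{{
			Type: sarama.TopicResource,
			Name: "example-topic",
			ConfigEntries: map[string]sarama.IncrementalAlterConfigsEntry{
				"retention.ms": {
					Operation: sarama.IncrementalAlterConfigsOperationSet,
					Value:     &retention,
				},
			},
		}},
	}

Setting ValidateOnly to true asks the broker to validate the change without applying it.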
diff --git a/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go
new file mode 100644
index 0000000..9333785
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+// IncrementalAlterConfigsResponse is a response type for incremental alter config
+type IncrementalAlterConfigsResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Resources    []*AlterConfigsResourceResponse
+}
+
+func (a *IncrementalAlterConfigsResponse) setVersion(v int16) {
+	a.Version = v
+}
+
+func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(a.ThrottleTime)
+
+	if err := pe.putArrayLength(len(a.Resources)); err != nil {
+		return err
+	}
+
+	for _, v := range a.Resources {
+		if err := v.encode(pe); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+	if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	responseCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+	for i := range a.Resources {
+		a.Resources[i] = new(AlterConfigsResourceResponse)
+
+		if err := a.Resources[i].decode(pd, version); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (a *IncrementalAlterConfigsResponse) key() int16 {
+	return apiKeyIncrementalAlterConfigs
+}
+
+func (a *IncrementalAlterConfigsResponse) version() int16 {
+	return a.Version
+}
+
+func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
+	if a.Version >= 1 {
+		return 1
+	}
+	return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) isFlexible() bool {
+	return a.isFlexibleVersion(a.Version)
+}
+
+func (a *IncrementalAlterConfigsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 1
+}
+
+func (a *IncrementalAlterConfigsResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 1:
+		return V2_4_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *IncrementalAlterConfigsResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go
new file mode 100644
index 0000000..a5d6d26
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/init_producer_id_request.go
@@ -0,0 +1,99 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+	Version            int16
+	TransactionalID    *string
+	TransactionTimeout time.Duration
+	ProducerID         int64
+	ProducerEpoch      int16
+}
+
+func (i *InitProducerIDRequest) setVersion(v int16) {
+	i.Version = v
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+	if err := pe.putNullableString(i.TransactionalID); err != nil {
+		return err
+	}
+	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+	if i.Version >= 3 {
+		pe.putInt64(i.ProducerID)
+		pe.putInt16(i.ProducerEpoch)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+	i.Version = version
+	if i.TransactionalID, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	timeout, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+	if i.Version >= 3 {
+		if i.ProducerID, err = pd.getInt64(); err != nil {
+			return err
+		}
+
+		if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+	return apiKeyInitProducerId
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+	return i.Version
+}
+
+func (i *InitProducerIDRequest) headerVersion() int16 {
+	if i.Version >= 2 {
+		return 2
+	}
+
+	return 1
+}
+
+func (i *InitProducerIDRequest) isValidVersion() bool {
+	return i.Version >= 0 && i.Version <= 4
+}
+
+func (i *InitProducerIDRequest) isFlexible() bool {
+	return i.isFlexibleVersion(i.Version)
+}
+
+func (i *InitProducerIDRequest) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+	switch i.Version {
+	case 4:
+		return V2_7_0_0
+	case 3:
+		return V2_5_0_0
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_7_0_0
+	}
+}
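
A sketch of the transactional use: the producer passes its configured transactional id and a timeout (encoded on the wire as milliseconds); with Version >= 3 an existing ProducerID/ProducerEpoch can also be echoed back to bump the epoch (KIP-360). Names are placeholders:

	txnID := "example-txn"
	req := &sarama.InitProducerIDRequest{
		Version:            2,
		TransactionalID:    &txnID, // nil for a plain idempotent producer
		TransactionTimeout: 60 * time.Second,
	}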
diff --git a/vendor/github.com/IBM/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go
new file mode 100644
index 0000000..0e84c43
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/init_producer_id_response.go
@@ -0,0 +1,93 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+	ThrottleTime  time.Duration
+	Err           KError
+	Version       int16
+	ProducerID    int64
+	ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) setVersion(v int16) {
+	i.Version = v
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(i.ThrottleTime)
+	pe.putKError(i.Err)
+	pe.putInt64(i.ProducerID)
+	pe.putInt16(i.ProducerEpoch)
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+	i.Version = version
+	if i.ThrottleTime, err = pd.getDurationMs(); err != nil {
+		return err
+	}
+
+	i.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if i.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+	return apiKeyInitProducerId
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+	return i.Version
+}
+
+func (i *InitProducerIDResponse) headerVersion() int16 {
+	if i.Version >= 2 {
+		return 1
+	}
+	return 0
+}
+
+func (i *InitProducerIDResponse) isValidVersion() bool {
+	return i.Version >= 0 && i.Version <= 4
+}
+
+func (i *InitProducerIDResponse) isFlexible() bool {
+	return i.isFlexibleVersion(i.Version)
+}
+
+func (i *InitProducerIDResponse) isFlexibleVersion(version int16) bool {
+	return version >= 2
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+	switch i.Version {
+	case 4:
+		return V2_7_0_0
+	case 3:
+		return V2_5_0_0
+	case 2:
+		return V2_4_0_0
+	case 1:
+		return V2_0_0_0
+	default:
+		return V0_11_0_0
+	}
+}
+
+func (r *InitProducerIDResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go
new file mode 100644
index 0000000..d4dc23c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/interceptors.go
@@ -0,0 +1,43 @@
+package sarama
+
+// ProducerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the producer before they are published to the Kafka cluster.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ProducerInterceptor interface {
+
+	// OnSend is called when the producer message is intercepted. Please avoid
+	// modifying the message until it's safe to do so, as this is _not_ a copy
+	// of the message.
+	OnSend(*ProducerMessage)
+}
+
+// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the consumer before they are sent to the messages channel.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ConsumerInterceptor interface {
+
+	// OnConsume is called when the consumed message is intercepted. Please
+	// avoid modifying the message until it's safe to do so, as this is _not_ a
+	// copy of the message.
+	OnConsume(*ConsumerMessage)
+}
+
+func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling producer interceptor: %v, %v", interceptor, r)
+		}
+	}()
+
+	interceptor.OnSend(msg)
+}
+
+func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
+	defer func() {
+		if r := recover(); r != nil {
+			Logger.Printf("Error when calling consumer interceptor: %v, %v", interceptor, r)
+		}
+	}()
+
+	interceptor.OnConsume(msg)
+}
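
A minimal sketch of a producer interceptor (the type and header names are hypothetical) that stamps each outgoing record with a static header. Because it is invoked through safelyApplyInterceptor above, a panic inside OnSend is logged rather than crashing the producer:

	// headerInterceptor adds a fixed header to every produced message.
	type headerInterceptor struct{ key, value string }

	func (h *headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
		msg.Headers = append(msg.Headers, sarama.RecordHeader{
			Key:   []byte(h.key),
			Value: []byte(h.value),
		})
	}

It is wired in through the producer configuration:

	cfg := sarama.NewConfig()
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{
		&headerInterceptor{key: "origin", value: "service-a"},
	}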
diff --git a/vendor/github.com/IBM/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go
new file mode 100644
index 0000000..e8cbc46
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/join_group_request.go
@@ -0,0 +1,234 @@
+package sarama
+
+type GroupProtocol struct {
+	// Name contains the protocol name.
+	Name string
+	// Metadata contains the protocol metadata.
+	Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+	p.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+	p.Metadata, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(p.Name); err != nil {
+		return err
+	}
+	if err := pe.putBytes(p.Metadata); err != nil {
+		return err
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+type JoinGroupRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// GroupId contains the group identifier.
+	GroupId string
+	// SessionTimeout specifies that the coordinator should consider the consumer
+	// dead if it receives no heartbeat after this timeout in milliseconds.
+	SessionTimeout int32
+	// RebalanceTimeout contains the maximum time in milliseconds that the
+	// coordinator will wait for each member to rejoin when rebalancing the
+	// group.
+	RebalanceTimeout int32
+	// MemberId contains the member id assigned by the group coordinator.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance
+	// provided by end user.
+	GroupInstanceId *string
+	// ProtocolType contains the unique name for the class of protocols
+	// implemented by the group we want to join.
+	ProtocolType string
+	// GroupProtocols contains the list of protocols that the member supports.
+	// deprecated; use OrderedGroupProtocols
+	GroupProtocols map[string][]byte
+	// OrderedGroupProtocols contains an ordered list of protocols that the member
+	// supports.
+	OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	pe.putInt32(r.SessionTimeout)
+	if r.Version >= 1 {
+		pe.putInt32(r.RebalanceTimeout)
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+	if r.Version >= 5 {
+		if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+	if err := pe.putString(r.ProtocolType); err != nil {
+		return err
+	}
+
+	if len(r.GroupProtocols) > 0 {
+		if len(r.OrderedGroupProtocols) > 0 {
+			return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+		}
+
+		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+			return err
+		}
+		for name, metadata := range r.GroupProtocols {
+			if err := pe.putString(name); err != nil {
+				return err
+			}
+			if err := pe.putBytes(metadata); err != nil {
+				return err
+			}
+			pe.putEmptyTaggedFieldArray()
+		}
+	} else {
+		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+			return err
+		}
+		for _, protocol := range r.OrderedGroupProtocols {
+			if err := protocol.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.SessionTimeout, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if version >= 1 {
+		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if version >= 5 {
+		if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return
+		}
+	}
+
+	if r.ProtocolType, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		return nil
+	}
+
+	r.GroupProtocols = make(map[string][]byte)
+	for i := 0; i < n; i++ {
+		protocol := &GroupProtocol{}
+		if err := protocol.decode(pd); err != nil {
+			return err
+		}
+		r.GroupProtocols[protocol.Name] = protocol.Metadata
+		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *JoinGroupRequest) key() int16 {
+	return apiKeyJoinGroup
+}
+
+func (r *JoinGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupRequest) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 2
+	}
+	return 1
+}
+
+func (r *JoinGroupRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 6
+}
+
+func (r *JoinGroupRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *JoinGroupRequest) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_3_0_0
+	case 4:
+		return V2_2_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+		Name:     name,
+		Metadata: metadata,
+	})
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+	bin, err := encode(metadata, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupProtocol(name, bin)
+	return nil
+}
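
A usage sketch: a consumer joining a group advertises the assignment strategies it supports through the AddGroupProtocolMetadata helper, which keeps the protocols ordered by preference (group and topic names are placeholders):

	req := &sarama.JoinGroupRequest{
		Version:          5,
		GroupId:          "example-group",
		SessionTimeout:   30000, // ms
		RebalanceTimeout: 60000, // ms
		MemberId:         "",    // empty on first join; the coordinator assigns one
		ProtocolType:     "consumer",
	}
	err := req.AddGroupProtocolMetadata("range", &sarama.ConsumerGroupMemberMetadata{
		Version: 1,
		Topics:  []string{"example-topic"},
	})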
diff --git a/vendor/github.com/IBM/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go
new file mode 100644
index 0000000..bdabe0a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/join_group_response.go
@@ -0,0 +1,211 @@
+package sarama
+
+import "time"
+
+type JoinGroupResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration for which the request was throttled due
+	// to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTime int32
+	// Err contains the error code, or 0 if there was no error.
+	Err KError
+	// GenerationId contains the generation ID of the group.
+	GenerationId int32
+	// GroupProtocol contains the group protocol selected by the coordinator.
+	GroupProtocol string
+	// LeaderId contains the leader of the group.
+	LeaderId string
+	// MemberId contains the member ID assigned by the group coordinator.
+	MemberId string
+	// Members contains the per-group-member information.
+	Members []GroupMember
+}
+
+func (r *JoinGroupResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type GroupMember struct {
+	// MemberId contains the group member ID.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance
+	// provided by end user.
+	GroupInstanceId *string
+	// Metadata contains the group member metadata.
+	Metadata []byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+	for _, member := range r.Members {
+		meta := new(ConsumerGroupMemberMetadata)
+		if err := decode(member.Metadata, meta, nil); err != nil {
+			return nil, err
+		}
+		members[member.MemberId] = *meta
+	}
+	return members, nil
+}
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	pe.putInt32(r.GenerationId)
+
+	if err := pe.putString(r.GroupProtocol); err != nil {
+		return err
+	}
+	if err := pe.putString(r.LeaderId); err != nil {
+		return err
+	}
+	if err := pe.putString(r.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.Members)); err != nil {
+		return err
+	}
+
+	for _, member := range r.Members {
+		if err := pe.putString(member.MemberId); err != nil {
+			return err
+		}
+		if r.Version >= 5 {
+			if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+				return err
+			}
+		}
+		if err := pe.putBytes(member.Metadata); err != nil {
+			return err
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 2 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return
+		}
+	}
+
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.GenerationId, err = pd.getInt32(); err != nil {
+		return
+	}
+
+	if r.GroupProtocol, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.LeaderId, err = pd.getString(); err != nil {
+		return
+	}
+
+	if r.MemberId, err = pd.getString(); err != nil {
+		return
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if n == 0 {
+		_, err = pd.getEmptyTaggedFieldArray()
+		return err
+	}
+
+	r.Members = make([]GroupMember, n)
+	for i := 0; i < n; i++ {
+		memberId, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		var groupInstanceId *string
+		if r.Version >= 5 {
+			groupInstanceId, err = pd.getNullableString()
+			if err != nil {
+				return err
+			}
+		}
+
+		memberMetadata, err := pd.getBytes()
+		if err != nil {
+			return err
+		}
+
+		r.Members[i] = GroupMember{MemberId: memberId, GroupInstanceId: groupInstanceId, Metadata: memberMetadata}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *JoinGroupResponse) key() int16 {
+	return apiKeyJoinGroup
+}
+
+func (r *JoinGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *JoinGroupResponse) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 1
+	}
+	return 0
+}
+
+func (r *JoinGroupResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 6
+}
+
+func (r *JoinGroupResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *JoinGroupResponse) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_3_0_0
+	case 4:
+		return V2_2_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_10_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *JoinGroupResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
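
On the member elected leader, GetMembers is the usual way to recover each member's subscription from the raw metadata bytes before computing an assignment. A sketch, given a decoded *sarama.JoinGroupResponse named resp:

	members, err := resp.GetMembers()
	if err != nil {
		return err
	}
	for memberID, meta := range members {
		fmt.Printf("%s subscribes to %v\n", memberID, meta.Topics)
	}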
diff --git a/vendor/github.com/IBM/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go
new file mode 100644
index 0000000..2891268
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/kerberos_client.go
@@ -0,0 +1,57 @@
+package sarama
+
+import (
+	krb5client "github.com/jcmturner/gokrb5/v8/client"
+	krb5config "github.com/jcmturner/gokrb5/v8/config"
+	"github.com/jcmturner/gokrb5/v8/credentials"
+	"github.com/jcmturner/gokrb5/v8/keytab"
+	"github.com/jcmturner/gokrb5/v8/types"
+)
+
+type KerberosGoKrb5Client struct {
+	krb5client.Client
+}
+
+func (c *KerberosGoKrb5Client) Domain() string {
+	return c.Credentials.Domain()
+}
+
+func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
+	return c.Credentials.CName()
+}
+
+// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
+// It uses a pure Go Kerberos 5 implementation (RFC 4120 and RFC 4121),
+// relying on the underlying gokrb5 library, a pure Go Kerberos client with
+// some GSS-API capabilities.
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
+	cfg, err := krb5config.Load(config.KerberosConfigPath)
+	if err != nil {
+		return nil, err
+	}
+	return createClient(config, cfg)
+}
+
+func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
+	var client *krb5client.Client
+	switch config.AuthType {
+	case KRB5_KEYTAB_AUTH:
+		kt, err := keytab.Load(config.KeyTabPath)
+		if err != nil {
+			return nil, err
+		}
+		client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+	case KRB5_CCACHE_AUTH:
+		cc, err := credentials.LoadCCache(config.CCachePath)
+		if err != nil {
+			return nil, err
+		}
+		client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+		if err != nil {
+			return nil, err
+		}
+	default:
+		client = krb5client.NewWithPassword(config.Username,
+			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+	}
+	return &KerberosGoKrb5Client{*client}, nil
+}
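
A configuration sketch selecting this Kerberos path with keytab authentication; realm, principal, and file paths are placeholders:

	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI.AuthType = sarama.KRB5_KEYTAB_AUTH
	cfg.Net.SASL.GSSAPI.ServiceName = "kafka"
	cfg.Net.SASL.GSSAPI.Realm = "EXAMPLE.COM"
	cfg.Net.SASL.GSSAPI.Username = "kafka-client"
	cfg.Net.SASL.GSSAPI.KeyTabPath = "/etc/security/kafka-client.keytab"
	cfg.Net.SASL.GSSAPI.KerberosConfigPath = "/etc/krb5.conf"

With KRB5_CCACHE_AUTH the client instead reads an existing credentials cache via CCachePath, and the default branch falls back to username/password authentication.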
diff --git a/vendor/github.com/IBM/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go
new file mode 100644
index 0000000..377bdc5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/leave_group_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type MemberIdentity struct {
+	MemberId        string
+	GroupInstanceId *string
+}
+
+type LeaveGroupRequest struct {
+	Version  int16
+	GroupId  string
+	MemberId string           // Removed in Version 3
+	Members  []MemberIdentity // Added in Version 3
+}
+
+func (r *LeaveGroupRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.GroupId); err != nil {
+		return err
+	}
+	if r.Version < 3 {
+		if err := pe.putString(r.MemberId); err != nil {
+			return err
+		}
+	}
+	if r.Version >= 3 {
+		if err := pe.putArrayLength(len(r.Members)); err != nil {
+			return err
+		}
+		for _, member := range r.Members {
+			if err := pe.putString(member.MemberId); err != nil {
+				return err
+			}
+			if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+				return err
+			}
+			pe.putEmptyTaggedFieldArray()
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.GroupId, err = pd.getString(); err != nil {
+		return
+	}
+	if r.Version < 3 {
+		if r.MemberId, err = pd.getString(); err != nil {
+			return
+		}
+	}
+	if r.Version >= 3 {
+		memberCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Members = make([]MemberIdentity, memberCount)
+		for i := 0; i < memberCount; i++ {
+			memberIdentity := MemberIdentity{}
+			if memberIdentity.MemberId, err = pd.getString(); err != nil {
+				return err
+			}
+			if memberIdentity.GroupInstanceId, err = pd.getNullableString(); err != nil {
+				return err
+			}
+			r.Members[i] = memberIdentity
+			_, err = pd.getEmptyTaggedFieldArray()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+	return apiKeyLeaveGroup
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *LeaveGroupRequest) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (r *LeaveGroupRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *LeaveGroupRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *LeaveGroupRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
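
A sketch of the v3+ shape, where one request can remove several members at once, for example static members identified by their GroupInstanceId (ids are placeholders):

	instance := "consumer-instance-1"
	req := &sarama.LeaveGroupRequest{
		Version: 3,
		GroupId: "example-group",
		Members: []sarama.MemberIdentity{
			{MemberId: "member-1", GroupInstanceId: &instance},
		},
	}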
diff --git a/vendor/github.com/IBM/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go
new file mode 100644
index 0000000..5700992
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/leave_group_response.go
@@ -0,0 +1,129 @@
+package sarama
+
+import "time"
+
+type MemberResponse struct {
+	MemberId        string
+	GroupInstanceId *string
+	Err             KError
+}
+type LeaveGroupResponse struct {
+	Version      int16
+	ThrottleTime int32
+	Err          KError
+	Members      []MemberResponse
+}
+
+func (r *LeaveGroupResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	if r.Version >= 3 {
+		if err := pe.putArrayLength(len(r.Members)); err != nil {
+			return err
+		}
+		for _, member := range r.Members {
+			if err := pe.putString(member.MemberId); err != nil {
+				return err
+			}
+			if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+				return err
+			}
+			pe.putKError(member.Err)
+			pe.putEmptyTaggedFieldArray()
+		}
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.Version >= 3 {
+		membersLen, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.Members = make([]MemberResponse, membersLen)
+		for i := 0; i < len(r.Members); i++ {
+			if r.Members[i].MemberId, err = pd.getString(); err != nil {
+				return err
+			}
+			if r.Members[i].GroupInstanceId, err = pd.getNullableString(); err != nil {
+				return err
+			}
+			if r.Members[i].Err, err = pd.getKError(); err != nil {
+				return err
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+	return apiKeyLeaveGroup
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *LeaveGroupResponse) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (r *LeaveGroupResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *LeaveGroupResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *LeaveGroupResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *LeaveGroupResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/length_field.go
rename to vendor/github.com/IBM/sarama/length_field.go
diff --git a/vendor/github.com/IBM/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go
new file mode 100644
index 0000000..b02e331
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_groups_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type ListGroupsRequest struct {
+	Version      int16
+	StatesFilter []string // version 4 or later
+	TypesFilter  []string // version 5 or later
+}
+
+func (r *ListGroupsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+	if r.Version >= 4 {
+		if err := pe.putArrayLength(len(r.StatesFilter)); err != nil {
+			return err
+		}
+		for _, filter := range r.StatesFilter {
+			err := pe.putString(filter)
+			if err != nil {
+				return err
+			}
+		}
+		if r.Version >= 5 {
+			if err := pe.putArrayLength(len(r.TypesFilter)); err != nil {
+				return err
+			}
+			for _, filter := range r.TypesFilter {
+				err := pe.putString(filter)
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 4 {
+		if r.StatesFilter, err = pd.getStringArray(); err != nil {
+			return err
+		}
+		if r.Version >= 5 {
+			if r.TypesFilter, err = pd.getStringArray(); err != nil {
+				return err
+			}
+		}
+	}
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListGroupsRequest) key() int16 {
+	return apiKeyListGroups
+}
+
+func (r *ListGroupsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ListGroupsRequest) headerVersion() int16 {
+	if r.Version >= 3 {
+		return 2
+	}
+	return 1
+}
+
+func (r *ListGroupsRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *ListGroupsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListGroupsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V3_8_0_0
+	case 4:
+		return V2_6_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_6_0_0
+	}
+}
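
A sketch of a v4 request narrowing the listing to particular group states; the names follow Kafka's group-state vocabulary (for example "Stable" or "Empty"):

	req := &sarama.ListGroupsRequest{
		Version:      4,
		StatesFilter: []string{"Stable", "Empty"},
	}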
diff --git a/vendor/github.com/IBM/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go
new file mode 100644
index 0000000..7e3a55f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_groups_response.go
@@ -0,0 +1,165 @@
+package sarama
+
+type ListGroupsResponse struct {
+	Version      int16
+	ThrottleTime int32
+	Err          KError
+	Groups       map[string]string
+	GroupsData   map[string]GroupData // version 4 or later
+}
+
+func (r *ListGroupsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+type GroupData struct {
+	GroupState string // version 4 or later
+	GroupType  string // version 5 or later
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+
+	pe.putKError(r.Err)
+
+	if err := pe.putArrayLength(len(r.Groups)); err != nil {
+		return err
+	}
+	for groupId, protocolType := range r.Groups {
+		if err := pe.putString(groupId); err != nil {
+			return err
+		}
+		if err := pe.putString(protocolType); err != nil {
+			return err
+		}
+		if r.Version >= 4 {
+			groupData := r.GroupsData[groupId]
+			if err := pe.putString(groupData.GroupState); err != nil {
+				return err
+			}
+		}
+
+		if r.Version >= 5 {
+			groupData := r.GroupsData[groupId]
+			if err := pe.putString(groupData.GroupType); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < n; i++ {
+		if i == 0 {
+			r.Groups = make(map[string]string)
+			if r.Version >= 4 {
+				r.GroupsData = make(map[string]GroupData)
+			}
+		}
+
+		var groupId, protocolType string
+		groupId, err = pd.getString()
+		if err != nil {
+			return err
+		}
+		protocolType, err = pd.getString()
+		if err != nil {
+			return err
+		}
+
+		r.Groups[groupId] = protocolType
+
+		if r.Version >= 4 {
+			var groupData GroupData
+			groupState, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			groupData.GroupState = groupState
+			if r.Version >= 5 {
+				groupType, err := pd.getString()
+				if err != nil {
+					return err
+				}
+				groupData.GroupType = groupType
+			}
+			r.GroupsData[groupId] = groupData
+		}
+
+		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListGroupsResponse) key() int16 {
+	return apiKeyListGroups
+}
+
+func (r *ListGroupsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ListGroupsResponse) headerVersion() int16 {
+	if r.Version >= 3 {
+		return 1
+	}
+	return 0
+}
+
+func (r *ListGroupsResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *ListGroupsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListGroupsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 3
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V3_8_0_0
+	case 4:
+		return V2_6_0_0
+	case 3:
+		return V2_4_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_6_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go
new file mode 100644
index 0000000..25b1b29
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go
@@ -0,0 +1,113 @@
+package sarama
+
+type ListPartitionReassignmentsRequest struct {
+	TimeoutMs int32
+	blocks    map[string][]int32
+	Version   int16
+}
+
+func (r *ListPartitionReassignmentsRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+	pe.putInt32(r.TimeoutMs)
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+
+		if err := pe.putInt32Array(partitions); err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.TimeoutMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount > 0 {
+		r.blocks = make(map[string][]int32)
+		for i := 0; i < topicCount; i++ {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			partitionCount, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+			r.blocks[topic] = make([]int32, partitionCount)
+			for j := 0; j < partitionCount; j++ {
+				partition, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+				r.blocks[topic][j] = partition
+			}
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListPartitionReassignmentsRequest) key() int16 {
+	return apiKeyListPartitionReassignments
+}
+
+func (r *ListPartitionReassignmentsRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
+	return 2
+}
+
+func (r *ListPartitionReassignmentsRequest) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *ListPartitionReassignmentsRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListPartitionReassignmentsRequest) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string][]int32)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = partitionIDs
+	}
+}
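
A usage sketch: listing ongoing reassignments for a few partitions of one topic; adding no blocks at all asks the broker for every ongoing reassignment:

	req := &sarama.ListPartitionReassignmentsRequest{TimeoutMs: 10000}
	req.AddBlock("example-topic", []int32{0, 1, 2})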
diff --git a/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go
new file mode 100644
index 0000000..8ade1e4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go
@@ -0,0 +1,188 @@
+package sarama
+
+import "time"
+
+type PartitionReplicaReassignmentsStatus struct {
+	Replicas         []int32
+	AddingReplicas   []int32
+	RemovingReplicas []int32
+}
+
+func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
+	if err := pe.putInt32Array(b.Replicas); err != nil {
+		return err
+	}
+	if err := pe.putInt32Array(b.AddingReplicas); err != nil {
+		return err
+	}
+	if err := pe.putInt32Array(b.RemovingReplicas); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
+	if b.Replicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	if b.AddingReplicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	if b.RemovingReplicas, err = pd.getInt32Array(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type ListPartitionReassignmentsResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	ErrorCode      KError
+	ErrorMessage   *string
+	TopicStatus    map[string]map[int32]*PartitionReplicaReassignmentsStatus
+}
+
+func (r *ListPartitionReassignmentsResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
+	if r.TopicStatus == nil {
+		r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
+	}
+	partitions := r.TopicStatus[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
+		r.TopicStatus[topic] = partitions
+	}
+
+	partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
+}
+
+func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+	pe.putInt32(r.ThrottleTimeMs)
+	pe.putKError(r.ErrorCode)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+
+	if err := pe.putArrayLength(len(r.TopicStatus)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.TopicStatus {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+
+			if err := block.encode(pe); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	pe.putEmptyTaggedFieldArray()
+
+	return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	r.ErrorCode, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
+	for i := 0; i < numTopics; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		ongoingPartitionReassignments, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
+
+		for j := 0; j < ongoingPartitionReassignments; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := &PartitionReplicaReassignmentsStatus{}
+			if err := block.decode(pd); err != nil {
+				return err
+			}
+			r.TopicStatus[topic][partition] = block
+		}
+
+		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *ListPartitionReassignmentsResponse) key() int16 {
+	return apiKeyListPartitionReassignments
+}
+
+func (r *ListPartitionReassignmentsResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
+	return 1
+}
+
+func (r *ListPartitionReassignmentsResponse) isValidVersion() bool {
+	return r.Version == 0
+}
+
+func (r *ListPartitionReassignmentsResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListPartitionReassignmentsResponse) isFlexibleVersion(version int16) bool {
+	return version >= 0
+}
+
+func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+	return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/message.go b/vendor/github.com/IBM/sarama/message.go
new file mode 100644
index 0000000..c6f35a3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/message.go
@@ -0,0 +1,190 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+const (
+	// CompressionNone no compression
+	CompressionNone CompressionCodec = iota
+	// CompressionGZIP compression using GZIP
+	CompressionGZIP
+	// CompressionSnappy compression using snappy
+	CompressionSnappy
+	// CompressionLZ4 compression using LZ4
+	CompressionLZ4
+	// CompressionZSTD compression using ZSTD
+	CompressionZSTD
+
+	// The lowest 3 bits contain the compression codec used for the message
+	compressionCodecMask int8 = 0x07
+
+	// Bit 3 set for "LogAppend" timestamps
+	timestampTypeMask = 0x08
+
+	// CompressionLevelDefault is the constant to use in CompressionLevel
+	// to select the default compression level for any codec. The value is
+	// picked so that it does not collide with any existing compression level.
+	CompressionLevelDefault = -1000
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+func (cc CompressionCodec) String() string {
+	return []string{
+		"none",
+		"gzip",
+		"snappy",
+		"lz4",
+		"zstd",
+	}[int(cc)]
+}
+
+// UnmarshalText returns a CompressionCodec from its string representation.
+func (cc *CompressionCodec) UnmarshalText(text []byte) error {
+	codecs := map[string]CompressionCodec{
+		"none":   CompressionNone,
+		"gzip":   CompressionGZIP,
+		"snappy": CompressionSnappy,
+		"lz4":    CompressionLZ4,
+		"zstd":   CompressionZSTD,
+	}
+	codec, ok := codecs[string(text)]
+	if !ok {
+		return fmt.Errorf("cannot parse %q as a compression codec", string(text))
+	}
+	*cc = codec
+	return nil
+}
+
+// MarshalText transforms a CompressionCodec into its string representation.
+func (cc CompressionCodec) MarshalText() ([]byte, error) {
+	return []byte(cc.String()), nil
+}
+
+// Message is a kafka message type
+type Message struct {
+	Codec            CompressionCodec // codec used to compress the message contents
+	CompressionLevel int              // compression level
+	LogAppendTime    bool             // the used timestamp is LogAppendTime
+	Key              []byte           // the message key, may be nil
+	Value            []byte           // the message contents
+	Set              *MessageSet      // the message set a message might wrap
+	Version          int8             // v1 requires Kafka 0.10
+	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
+
+	compressedCache []byte
+	compressedSize  int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+	pe.push(newCRC32Field(crcIEEE))
+
+	pe.putInt8(m.Version)
+
+	attributes := int8(m.Codec) & compressionCodecMask
+	if m.LogAppendTime {
+		attributes |= timestampTypeMask
+	}
+	pe.putInt8(attributes)
+
+	if m.Version >= 1 {
+		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+			return err
+		}
+	}
+
+	err := pe.putBytes(m.Key)
+	if err != nil {
+		return err
+	}
+
+	var payload []byte
+
+	if m.compressedCache != nil {
+		payload = m.compressedCache
+		m.compressedCache = nil
+	} else if m.Value != nil {
+		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
+		if err != nil {
+			return err
+		}
+		m.compressedCache = payload
+		// Keep in mind the compressed payload size for metric gathering
+		m.compressedSize = len(payload)
+	}
+
+	if err = pe.putBytes(payload); err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+	crc32Decoder := acquireCrc32Field(crcIEEE)
+	defer releaseCrc32Field(crc32Decoder)
+
+	err = pd.push(crc32Decoder)
+	if err != nil {
+		return err
+	}
+
+	m.Version, err = pd.getInt8()
+	if err != nil {
+		return err
+	}
+
+	if m.Version > 1 {
+		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+	}
+
+	attribute, err := pd.getInt8()
+	if err != nil {
+		return err
+	}
+	m.Codec = CompressionCodec(attribute & compressionCodecMask)
+	m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
+
+	if m.Version == 1 {
+		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+			return err
+		}
+	}
+
+	m.Key, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	m.Value, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	// Required for deep equal assertion during tests but might be useful
+	// for future metrics about the compression ratio in fetch requests
+	m.compressedSize = len(m.Value)
+
+	if m.Value != nil && m.Codec != CompressionNone {
+		m.Value, err = decompress(m.Codec, m.Value)
+		if err != nil {
+			return err
+		}
+
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	}
+
+	return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
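
The MarshalText/UnmarshalText pair makes CompressionCodec usable directly in text-based configuration formats. A round-trip sketch:

	var codec sarama.CompressionCodec
	if err := codec.UnmarshalText([]byte("zstd")); err != nil {
		log.Fatal(err)
	}
	fmt.Println(codec) // prints "zstd" via the String method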
diff --git a/vendor/github.com/IBM/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go
new file mode 100644
index 0000000..5bc2112
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/message_set.go
@@ -0,0 +1,112 @@
+package sarama
+
+import "errors"
+
+type MessageBlock struct {
+	Offset int64
+	Msg    *Message
+}
+
+// Messages is a convenience helper which returns either all the
+// messages wrapped in this block, or the block itself if it wraps a
+// single message.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	lengthDecoder := acquireLengthField()
+	defer releaseLengthField(lengthDecoder)
+
+	if err = pd.push(lengthDecoder); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	OverflowMessage        bool // whether the set on the wire contained an overflow message
+	Messages               []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		magic, err := magicValue(pd)
+		if err != nil {
+			if errors.Is(err, ErrInsufficientData) {
+				ms.PartialTrailingMessage = true
+				return nil
+			}
+			return err
+		}
+
+		if magic > 1 {
+			return nil
+		}
+
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		if err == nil {
+			ms.Messages = append(ms.Messages, msb)
+		} else if errors.Is(err, ErrInsufficientData) {
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set. Clients should handle this case, so we simply ignore it here.
+			if msb.Offset == -1 {
+				// This is an overflow message caused by chunked down conversion
+				ms.OverflowMessage = true
+			} else {
+				ms.PartialTrailingMessage = true
+			}
+			return nil
+		} else {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+	block := new(MessageBlock)
+	block.Msg = msg
+	ms.Messages = append(ms.Messages, block)
+}
diff --git a/vendor/github.com/IBM/sarama/metadata.go b/vendor/github.com/IBM/sarama/metadata.go
new file mode 100644
index 0000000..20620b3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata.go
@@ -0,0 +1,239 @@
+package sarama
+
+import (
+	"sync"
+)
+
+type metadataRefresh func(topics []string) error
+
+// currentRefresh makes sure sarama does not issue metadata requests
+// in parallel. If we need to refresh the metadata for a list of topics,
+// this struct will check if a refresh is already ongoing, and if so, it will
+// accumulate the list of topics to refresh in the next refresh.
+// When the current refresh is over, it will queue a new metadata refresh call
+// with the accumulated list of topics.
+type currentRefresh struct {
+	// This is the function that gets called to refresh the metadata.
+	// It is called with the list of all topics that need to be refreshed
+	// or with nil if all topics need to be refreshed.
+	refresh func(topics []string) error
+
+	mu        sync.Mutex
+	ongoing   bool
+	topicsMap map[string]struct{}
+	topics    []string
+	allTopics bool
+	chans     []chan error
+}
+
+// addTopicsFrom adds topics from the next refresh to the current refresh.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) addTopicsFrom(next *nextRefresh) {
+	if next.allTopics {
+		r.allTopics = true
+		return
+	}
+	if len(next.topics) > 0 {
+		r.addTopics(next.topics)
+	}
+}
+
+// nextRefresh holds the list of topics we will need
+// to refresh in the next refresh.
+// When a refresh is ongoing, calls to RefreshMetadata() are
+// accumulated in this struct, so that we can immediately issue another
+// refresh when the current refresh is over.
+type nextRefresh struct {
+	mu        sync.Mutex
+	topics    []string
+	allTopics bool
+}
+
+// addTopics adds topics to the refresh.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) addTopics(topics []string) {
+	if len(topics) == 0 {
+		r.allTopics = true
+		return
+	}
+	for _, topic := range topics {
+		if _, ok := r.topicsMap[topic]; ok {
+			continue
+		}
+		r.topicsMap[topic] = struct{}{}
+		r.topics = append(r.topics, topic)
+	}
+}
+
+func (r *nextRefresh) addTopics(topics []string) {
+	if len(topics) == 0 {
+		r.allTopics = true
+		// All topics are requested, so we can clear the topics
+		// that were previously accumulated.
+		r.topics = r.topics[:0]
+		return
+	}
+	r.topics = append(r.topics, topics...)
+}
+
+func (r *nextRefresh) clear() {
+	r.topics = r.topics[:0]
+	r.allTopics = false
+}
+
+func (r *currentRefresh) hasTopics(topics []string) bool {
+	if len(topics) == 0 {
+		// This means that the caller wants to know if the refresh is for all topics.
+		// In this case, we return true if the refresh is for all topics, or false if it is not.
+		return r.allTopics
+	}
+	if r.allTopics {
+		return true
+	}
+	for _, topic := range topics {
+		if _, ok := r.topicsMap[topic]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// start starts a new refresh.
+// The refresh is started in a new goroutine, and this function
+// returns a channel on which the caller can wait for the refresh
+// to complete.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) start() chan error {
+	r.ongoing = true
+	ch := r.wait()
+	topics := r.topics
+	if r.allTopics {
+		topics = nil
+	}
+	go func() {
+		err := r.refresh(topics)
+		r.mu.Lock()
+		defer r.mu.Unlock()
+
+		r.ongoing = false
+		for _, ch := range r.chans {
+			ch <- err
+			close(ch)
+		}
+		r.clear()
+	}()
+	return ch
+}
+
+// clear clears the refresh state.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) clear() {
+	r.topics = r.topics[:0]
+	for key := range r.topicsMap {
+		delete(r.topicsMap, key)
+	}
+	r.allTopics = false
+	r.chans = r.chans[:0]
+}
+
+// wait returns the channel on which you can wait for the refresh
+// to complete.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) wait() chan error {
+	if !r.ongoing {
+		panic("waiting for a refresh that is not ongoing")
+	}
+	ch := make(chan error, 1)
+	r.chans = append(r.chans, ch)
+	return ch
+}
+
+// singleFlightMetadataRefresher helps manage metadata refreshes.
+// It makes sure a sarama client never issues more than one metadata refresh
+// in parallel.
+type singleFlightMetadataRefresher struct {
+	current *currentRefresh
+	next    *nextRefresh
+}
+
+func newSingleFlightRefresher(f func(topics []string) error) metadataRefresh {
+	return newMetadataRefresh(f).Refresh
+}
+
+func newMetadataRefresh(f func(topics []string) error) *singleFlightMetadataRefresher {
+	return &singleFlightMetadataRefresher{
+		current: &currentRefresh{
+			topicsMap: make(map[string]struct{}),
+			refresh:   f,
+		},
+		next: &nextRefresh{},
+	}
+}
+
+// Refresh is the function that clients call when they want to refresh
+// the metadata. This function blocks until a refresh is issued, and its
+// result is received, for the list of topics the caller provided.
+// If a refresh was already ongoing for this list of topics, the function
+// waits on that refresh to complete, and returns its result.
+// If a refresh was already ongoing for a different list of topics, the function
+// accumulates the list of topics to refresh in the next refresh, and queues that refresh.
+// If no refresh is ongoing, it will start a new refresh, and return its result.
+func (m *singleFlightMetadataRefresher) Refresh(topics []string) error {
+	for {
+		ch, queued := m.refreshOrQueue(topics)
+		if !queued {
+			return <-ch
+		}
+		<-ch
+	}
+}
+
+// refreshOrQueue returns a channel the caller needs to wait on, and a boolean
+// that indicates whether waiting on the channel will yield the result of that
+// refresh (false), or whether the refresh was "queued" and the caller needs to
+// wait for the channel to return and then call refreshOrQueue again (true).
+// When calling refreshOrQueue, three things can happen:
+//  1. no refresh is ongoing.
+//     In this case, a new refresh is started, and the channel that's returned will
+//     contain the result of that refresh, so it returns "false" as the second return value.
+//  2. a refresh is ongoing, and it contains the topics we need.
+//     In this case, the channel that's returned will contain the result of that refresh,
+//     so it returns "false" as the second return value.
+//  3. a refresh is already ongoing, but doesn't contain the topics we need. In this case,
+//     the caller needs to wait for the refresh to finish, and then call refreshOrQueue again.
+//     The channel that's returned is for the current refresh (not the one the caller is
+//     interested in), so it returns "true" as the second return value. The caller needs to
+//     wait on the channel, disregard the value, and call refreshOrQueue again.
+func (m *singleFlightMetadataRefresher) refreshOrQueue(topics []string) (chan error, bool) {
+	m.current.mu.Lock()
+	defer m.current.mu.Unlock()
+	if !m.current.ongoing {
+		// If no refresh is ongoing, we can start a new one, in which
+		// we add the topics that have been accumulated in the next refresh
+		// and the topics that have been provided by the caller.
+		m.next.mu.Lock()
+		m.current.addTopicsFrom(m.next)
+		m.next.clear()
+		m.next.mu.Unlock()
+		m.current.addTopics(topics)
+		ch := m.current.start()
+		return ch, false
+	}
+	if m.current.hasTopics(topics) {
+		// A refresh is ongoing, and we were lucky: it is refreshing the topics we need already:
+		// we just have to wait for it to finish and return its results.
+		ch := m.current.wait()
+		return ch, false
+	}
+	// There is a refresh ongoing, but it is not refreshing the topics we need.
+	// We need to wait for it to finish, and then start a new refresh.
+	ch := m.current.wait()
+	m.next.mu.Lock()
+	m.next.addTopics(topics)
+	m.next.mu.Unlock()
+	// This is where we wait for that refresh to finish, and the loop will take care
+	// of starting the new one.
+	return ch, true
+}
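+
+// A usage sketch (illustrative, with a stand-in refresh function): concurrent
+// callers coalesce onto a single in-flight metadata request, with at most one
+// follow-up refresh queued behind it.
+//
+//	refresh := newSingleFlightRefresher(func(topics []string) error {
+//		return nil // issue one MetadataRequest here; nil topics means "all"
+//	})
+//	var wg sync.WaitGroup
+//	for i := 0; i < 3; i++ {
+//		wg.Add(1)
+//		go func() {
+//			defer wg.Done()
+//			_ = refresh([]string{"my-topic"})
+//		}()
+//	}
+//	wg.Wait()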
diff --git a/vendor/github.com/IBM/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go
new file mode 100644
index 0000000..f00e5ab
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata_request.go
@@ -0,0 +1,211 @@
+package sarama
+
+import "encoding/base64"
+
+type Uuid [16]byte
+
+func (u Uuid) String() string {
+	return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:])
+}
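+
+// For example, the zero-valued Uuid renders as 22 unpadded base64url
+// characters (a sketch; 16 zero bytes encode to all 'A's):
+//
+//	var u Uuid
+//	fmt.Println(u.String()) // prints "AAAAAAAAAAAAAAAAAAAAAA"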
+
+var NullUUID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+type MetadataRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Topics contains the topics to fetch metadata for.
+	Topics []string
+	// AllowAutoTopicCreation is true if the broker may auto-create topics that we requested which do not already exist, if it is configured to do so.
+	AllowAutoTopicCreation             bool
+	IncludeClusterAuthorizedOperations bool // version 8 and up
+	IncludeTopicAuthorizedOperations   bool // version 8 and up
+}
+
+func (r *MetadataRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest {
+	m := &MetadataRequest{Topics: topics}
+	if version.IsAtLeast(V2_8_0_0) {
+		m.Version = 10
+	} else if version.IsAtLeast(V2_4_0_0) {
+		m.Version = 9
+	} else if version.IsAtLeast(V2_3_0_0) {
+		m.Version = 8
+	} else if version.IsAtLeast(V2_1_0_0) {
+		m.Version = 7
+	} else if version.IsAtLeast(V2_0_0_0) {
+		m.Version = 6
+	} else if version.IsAtLeast(V1_0_0_0) {
+		m.Version = 5
+	} else if version.IsAtLeast(V0_11_0_0) {
+		m.Version = 4
+	} else if version.IsAtLeast(V0_10_1_0) {
+		m.Version = 2
+	} else if version.IsAtLeast(V0_10_0_0) {
+		m.Version = 1
+	}
+	return m
+}
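+
+// For example (a sketch of the version pinning above):
+//
+//	req := NewMetadataRequest(V2_1_0_0, []string{"my-topic"})
+//	// req.Version == 7, the highest metadata version selected for Kafka 2.1
+//	all := NewMetadataRequest(V0_10_0_0, nil)
+//	// all.Version == 1; nil Topics encodes as a null array, meaning "all topics"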
+
+func (r *MetadataRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 10 {
+		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
+	}
+	if r.Version == 0 || len(r.Topics) > 0 {
+		if err := pe.putArrayLength(len(r.Topics)); err != nil {
+			return err
+		}
+		if r.Version <= 9 {
+			for _, topicName := range r.Topics {
+				if err := pe.putString(topicName); err != nil {
+					return err
+				}
+				pe.putEmptyTaggedFieldArray()
+			}
+		} else { // r.Version == 10
+			for _, topicName := range r.Topics {
+				if err := pe.putRawBytes(NullUUID); err != nil {
+					return err
+				}
+				// Avoid implicit memory aliasing in for loop
+				tn := topicName
+				if err := pe.putNullableString(&tn); err != nil {
+					return err
+				}
+				pe.putEmptyTaggedFieldArray()
+			}
+		}
+	} else {
+		if err := pe.putArrayLength(-1); err != nil {
+			return err
+		}
+	}
+
+	if r.Version > 3 {
+		pe.putBool(r.AllowAutoTopicCreation)
+	}
+	if r.Version > 7 {
+		pe.putBool(r.IncludeClusterAuthorizedOperations)
+		pe.putBool(r.IncludeTopicAuthorizedOperations)
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	size, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if size > 0 {
+		r.Topics = make([]string, size)
+	}
+	if version <= 9 {
+		for i := range r.Topics {
+			topic, err := pd.getString()
+			if err != nil {
+				return err
+			}
+			r.Topics[i] = topic
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	} else {
+		for i := range r.Topics {
+			if _, err = pd.getRawBytes(16); err != nil { // skip UUID
+				return err
+			}
+			topic, err := pd.getNullableString()
+			if err != nil {
+				return err
+			}
+			if topic != nil {
+				r.Topics[i] = *topic
+			}
+
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if r.Version >= 4 {
+		if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version > 7 {
+		includeClusterAuthz, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeClusterAuthorizedOperations = includeClusterAuthz
+		includeTopicAuthz, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+		r.IncludeTopicAuthorizedOperations = includeTopicAuthz
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *MetadataRequest) key() int16 {
+	return apiKeyMetadata
+}
+
+func (r *MetadataRequest) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataRequest) headerVersion() int16 {
+	if r.Version >= 9 {
+		return 2
+	}
+	return 1
+}
+
+func (r *MetadataRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 10
+}
+
+func (r *MetadataRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *MetadataRequest) isFlexibleVersion(version int16) bool {
+	return version >= 9
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 10:
+		return V2_8_0_0
+	case 9:
+		return V2_4_0_0
+	case 8:
+		return V2_3_0_0
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 5:
+		return V1_0_0_0
+	case 3, 4:
+		return V0_11_0_0
+	case 2:
+		return V0_10_1_0
+	case 1:
+		return V0_10_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_8_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go
new file mode 100644
index 0000000..196c075
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata_response.go
@@ -0,0 +1,449 @@
+package sarama
+
+import "time"
+
+// PartitionMetadata describes a single partition within a topic.
+type PartitionMetadata struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Err contains the partition error, or 0 if there was no error.
+	Err KError
+	// ID contains the partition index.
+	ID int32
+	// Leader contains the ID of the leader broker.
+	Leader int32
+	// LeaderEpoch contains the leader epoch of this partition.
+	LeaderEpoch int32
+	// Replicas contains the set of all nodes that host this partition.
+	Replicas []int32
+	// Isr contains the set of nodes that are in sync with the leader for this partition.
+	Isr []int32
+	// OfflineReplicas contains the set of offline replicas of this partition.
+	OfflineReplicas []int32
+}
+
+func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
+	p.Version = version
+	p.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if p.ID, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if p.Leader, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if p.Version >= 7 {
+		if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	p.Replicas, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	p.Isr, err = pd.getInt32Array()
+	if err != nil {
+		return err
+	}
+
+	if p.Version >= 5 {
+		p.OfflineReplicas, err = pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
+	p.Version = version
+	pe.putKError(p.Err)
+
+	pe.putInt32(p.ID)
+
+	pe.putInt32(p.Leader)
+
+	if p.Version >= 7 {
+		pe.putInt32(p.LeaderEpoch)
+	}
+
+	err = pe.putInt32Array(p.Replicas)
+	if err != nil {
+		return err
+	}
+
+	err = pe.putInt32Array(p.Isr)
+	if err != nil {
+		return err
+	}
+
+	if p.Version >= 5 {
+		err = pe.putInt32Array(p.OfflineReplicas)
+		if err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+// TopicMetadata describes a single topic in the response.
+type TopicMetadata struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// Err contains the topic error, or 0 if there was no error.
+	Err KError
+	// Name contains the topic name.
+	Name string
+	Uuid Uuid
+	// IsInternal is true if the topic is internal.
+	IsInternal bool
+	// Partitions contains each partition in the topic.
+	Partitions                []*PartitionMetadata
+	TopicAuthorizedOperations int32 // Only valid for Version >= 8
+}
+
+func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
+	t.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	t.Name, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	if t.Version >= 10 {
+		uuid, err := pd.getRawBytes(16)
+		if err != nil {
+			return err
+		}
+		copy(t.Uuid[:], uuid)
+	}
+
+	if t.Version >= 1 {
+		t.IsInternal, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	t.Partitions = make([]*PartitionMetadata, n)
+	for i := 0; i < n; i++ {
+		block := &PartitionMetadata{}
+		if err := block.decode(pd, t.Version); err != nil {
+			return err
+		}
+		t.Partitions[i] = block
+	}
+
+	if t.Version >= 8 {
+		t.TopicAuthorizedOperations, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
+	t.Version = version
+	pe.putKError(t.Err)
+
+	err = pe.putString(t.Name)
+	if err != nil {
+		return err
+	}
+
+	if t.Version >= 10 {
+		err = pe.putRawBytes(t.Uuid[:])
+		if err != nil {
+			return err
+		}
+	}
+
+	if t.Version >= 1 {
+		pe.putBool(t.IsInternal)
+	}
+
+	err = pe.putArrayLength(len(t.Partitions))
+	if err != nil {
+		return err
+	}
+	for _, block := range t.Partitions {
+		if err := block.encode(pe, t.Version); err != nil {
+			return err
+		}
+	}
+
+	if t.Version >= 8 {
+		pe.putInt32(t.TopicAuthorizedOperations)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+type MetadataResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+	ThrottleTimeMs int32
+	// Brokers contains each broker in the response.
+	Brokers []*Broker
+	// ClusterID contains the cluster ID that the responding broker belongs to.
+	ClusterID *string
+	// ControllerID contains the ID of the controller broker.
+	ControllerID int32
+	// Topics contains each topic in the response.
+	Topics                      []*TopicMetadata
+	ClusterAuthorizedOperations int32 // Only valid for Version >= 8
+}
+
+func (r *MetadataResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 3 {
+		if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	brokerArrayLen, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Brokers = make([]*Broker, brokerArrayLen)
+	for i := 0; i < brokerArrayLen; i++ {
+		r.Brokers[i] = new(Broker)
+		err = r.Brokers[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		r.ClusterID, err = pd.getNullableString()
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		if r.ControllerID, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	topicArrayLen, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Topics = make([]*TopicMetadata, topicArrayLen)
+	for i := 0; i < topicArrayLen; i++ {
+		r.Topics[i] = new(TopicMetadata)
+		err = r.Topics[i].decode(pd, version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 8 {
+		r.ClusterAuthorizedOperations, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	err = pe.putArrayLength(len(r.Brokers))
+	if err != nil {
+		return err
+	}
+
+	for _, broker := range r.Brokers {
+		err = broker.encode(pe, r.Version)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 2 {
+		err = pe.putNullableString(r.ClusterID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ControllerID)
+	}
+
+	err = pe.putArrayLength(len(r.Topics))
+	if err != nil {
+		return err
+	}
+	for _, block := range r.Topics {
+		if err := block.encode(pe, r.Version); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 8 {
+		pe.putInt32(r.ClusterAuthorizedOperations)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+	return apiKeyMetadata
+}
+
+func (r *MetadataResponse) version() int16 {
+	return r.Version
+}
+
+func (r *MetadataResponse) headerVersion() int16 {
+	if r.Version < 9 {
+		return 0
+	} else {
+		return 1
+	}
+}
+
+func (r *MetadataResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 10
+}
+
+func (r *MetadataResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *MetadataResponse) isFlexibleVersion(version int16) bool {
+	return version >= 9
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 10:
+		return V2_8_0_0
+	case 9:
+		return V2_4_0_0
+	case 8:
+		return V2_3_0_0
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 5:
+		return V1_0_0_0
+	case 3, 4:
+		return V0_11_0_0
+	case 2:
+		return V0_10_1_0
+	case 1:
+		return V0_10_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_8_0_0
+	}
+}
+
+func (r *MetadataResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+	var tmatch *TopicMetadata
+
+	for _, tm := range r.Topics {
+		if tm.Name == topic {
+			tmatch = tm
+			goto foundTopic
+		}
+	}
+
+	tmatch = new(TopicMetadata)
+	tmatch.Name = topic
+	r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+	tmatch.Err = err
+	return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
+	tmatch := r.AddTopic(topic, ErrNoError)
+	var pmatch *PartitionMetadata
+
+	for _, pm := range tmatch.Partitions {
+		if pm.ID == partition {
+			pmatch = pm
+			goto foundPartition
+		}
+	}
+
+	pmatch = new(PartitionMetadata)
+	pmatch.ID = partition
+	tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+	pmatch.Leader = brokerID
+	pmatch.Replicas = replicas
+	if pmatch.Replicas == nil {
+		pmatch.Replicas = []int32{}
+	}
+	pmatch.Isr = isr
+	if pmatch.Isr == nil {
+		pmatch.Isr = []int32{}
+	}
+	pmatch.OfflineReplicas = offline
+	if pmatch.OfflineReplicas == nil {
+		pmatch.OfflineReplicas = []int32{}
+	}
+	pmatch.Err = err
+}
diff --git a/vendor/github.com/IBM/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go
new file mode 100644
index 0000000..4f512f2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metrics.go
@@ -0,0 +1,120 @@
+package sarama
+
+import (
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+	metricsReservoirSize = 1028
+	metricsAlphaFactor   = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+	return r.GetOrRegister(name, func() metrics.Histogram {
+		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+	}).(metrics.Histogram)
+}
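+
+// Usage sketch, using the go-metrics API this file already imports:
+//
+//	r := metrics.NewRegistry()
+//	h := getOrRegisterHistogram("request-latency-in-ms", r)
+//	h.Update(42)
+//	h.Update(180)
+//	// percentiles are biased towards the most recent five minutes of samples
+//	p99 := h.Percentile(0.99)
+//	_ = p99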
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+	// Use the broker ID, like the Java client, as it does not contain '.' or ':'
+	// characters that can be interpreted as special characters by monitoring tools (e.g. Graphite)
+	return name + "-for-broker-" + strconv.FormatInt(int64(broker.ID()), 10)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+	// cf. KAFKA-1902 and KAFKA-2337
+	return name + "-for-topic-" + strings.ReplaceAll(topic, ".", "_")
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
+
+// cleanupRegistry is an implementation of metrics.Registry that allows
+// unregistering from the parent registry only those metrics
+// that were registered through the cleanupRegistry
+type cleanupRegistry struct {
+	parent  metrics.Registry
+	metrics map[string]struct{}
+	mutex   sync.RWMutex
+}
+
+func newCleanupRegistry(parent metrics.Registry) metrics.Registry {
+	return &cleanupRegistry{
+		parent:  parent,
+		metrics: map[string]struct{}{},
+	}
+}
+
+func (r *cleanupRegistry) Each(fn func(string, interface{})) {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	wrappedFn := func(name string, iface interface{}) {
+		if _, ok := r.metrics[name]; ok {
+			fn(name, iface)
+		}
+	}
+	r.parent.Each(wrappedFn)
+}
+
+func (r *cleanupRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	if _, ok := r.metrics[name]; ok {
+		return r.parent.Get(name)
+	}
+	return nil
+}
+
+func (r *cleanupRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.metrics[name] = struct{}{}
+	return r.parent.GetOrRegister(name, metric)
+}
+
+func (r *cleanupRegistry) Register(name string, metric interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.metrics[name] = struct{}{}
+	return r.parent.Register(name, metric)
+}
+
+func (r *cleanupRegistry) RunHealthchecks() {
+	r.parent.RunHealthchecks()
+}
+
+func (r *cleanupRegistry) GetAll() map[string]map[string]interface{} {
+	return r.parent.GetAll()
+}
+
+func (r *cleanupRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if _, ok := r.metrics[name]; ok {
+		delete(r.metrics, name)
+		r.parent.Unregister(name)
+	}
+}
+
+func (r *cleanupRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name := range r.metrics {
+		delete(r.metrics, name)
+		r.parent.Unregister(name)
+	}
+}
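+
+// An illustrative sketch of the cleanup behavior: a child registry registers
+// metrics into its parent, and UnregisterAll removes only what the child
+// created.
+//
+//	parent := metrics.NewRegistry()
+//	_ = parent.Register("keep-me", metrics.NewCounter())
+//	child := newCleanupRegistry(parent)
+//	metrics.GetOrRegisterMeter("temp-meter", child)
+//	child.UnregisterAll()
+//	// parent still holds "keep-me"; "temp-meter" has been unregistered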
diff --git a/vendor/github.com/IBM/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go
new file mode 100644
index 0000000..d913d44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/mockbroker.go
@@ -0,0 +1,467 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"maps"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshalling, and response marshaling, and makes it the
+// test writer's responsibility to program MockBroker behavior that is correct
+// according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes, the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID      int32
+	port          int32
+	closing       chan none
+	stopper       chan none
+	expectations  chan encoderWithHeader
+	listener      net.Listener
+	t             TestReporter
+	latency       time.Duration
+	handler       requestHandlerFunc
+	notifier      RequestNotifierFunc
+	history       []RequestResponse
+	lock          sync.Mutex
+	gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request  protocolBody
+	Response encoder
+}
+
+// SetLatency makes broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines a mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	fnMap := maps.Clone(handlerMap)
+	b.setHandler(func(req *request) (res encoderWithHeader) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := fnMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
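+
+// A typical test wiring (a sketch; the mock responses are defined in
+// mockresponses.go and `t` is the test's TestReporter):
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()).
+//			SetLeader("my-topic", 0, broker.BrokerID()),
+//	})
+//	// clients pointed at broker.Addr() now receive the programmed metadata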
+
+// SetHandlerFuncByMap defines a mapping of Request types to requestHandlerFuncs. When a
+// request is received by the broker, it looks up the request type in the map
+// and invokes the found requestHandlerFunc instance to generate an appropriate reply.
+func (b *MockBroker) SetHandlerFuncByMap(handlerMap map[string]requestHandlerFunc) {
+	fnMap := maps.Clone(handlerMap)
+	b.setHandler(func(req *request) (res encoderWithHeader) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		return fnMap[reqTypeName](req)
+	})
+}
+
+// SetNotifier sets a function that will be invoked whenever a request has been
+// processed successfully; it will be provided the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+	return b.listener.Addr().String()
+}
+
+// Close terminates the broker blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+	close(b.expectations)
+	if len(b.expectations) > 0 {
+		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+		for e := range b.expectations {
+			_, _ = buf.WriteString(spew.Sdump(e))
+		}
+		b.t.Error(buf.String())
+	}
+	close(b.closing)
+	<-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+	b.lock.Lock()
+	b.handler = handler
+	b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+	defer close(b.stopper)
+	var err error
+	var conn net.Conn
+
+	go func() {
+		<-b.closing
+		err := b.listener.Close()
+		if err != nil {
+			b.t.Error(err)
+		}
+	}()
+
+	wg := &sync.WaitGroup{}
+	i := 0
+	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+		wg.Add(1)
+		go b.handleRequests(conn, i, wg)
+		i++
+	}
+	wg.Wait()
+	if !isConnectionClosedError(err) {
+		Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+	}
+}
+
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+	b.gssApiHandler = handler
+}
+
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	fullBytes := append(lengthBytes, encodedReq...)
+
+	return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
+	defer wg.Done()
+	defer func() {
+		_ = conn.Close()
+	}()
+	s := spew.NewDefaultConfig()
+	s.MaxDepth = 1
+	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+	var err error
+
+	abort := make(chan none)
+	defer close(abort)
+	go func() {
+		select {
+		case <-b.closing:
+			_ = conn.Close()
+		case <-abort:
+		}
+	}()
+
+	var bytesWritten int
+	var bytesRead int
+	for {
+		buffer, err := b.readToBytes(conn)
+		if err != nil {
+			if !isConnectionClosedError(err) {
+				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+				b.serverError(err)
+			}
+			break
+		}
+
+		bytesWritten = 0
+		if !b.isGSSAPI(buffer) {
+			req, br, err := decodeRequest(bytes.NewReader(buffer))
+			bytesRead = br
+			if err != nil {
+				if !isConnectionClosedError(err) {
+					Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+					b.serverError(err)
+				}
+				break
+			}
+
+			if b.latency > 0 {
+				time.Sleep(b.latency)
+			}
+
+			b.lock.Lock()
+			res := b.handler(req)
+			b.history = append(b.history, RequestResponse{req.body, res})
+			b.lock.Unlock()
+
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+				continue
+			}
+			Logger.Printf(
+				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
+				b.brokerID, idx, req.body, res,
+				s.Sprintf("%#v", req.body),
+				s.Sprintf("%#v", res),
+			)
+
+			encodedRes, err := encode(res, nil)
+			if err != nil {
+				b.serverError(fmt.Errorf("failed to encode %T - %w", res, err))
+				break
+			}
+			if len(encodedRes) == 0 {
+				b.lock.Lock()
+				if b.notifier != nil {
+					b.notifier(bytesRead, 0)
+				}
+				b.lock.Unlock()
+				continue
+			}
+
+			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
+			if _, err = conn.Write(resHeader); err != nil {
+				b.serverError(err)
+				break
+			}
+			if _, err = conn.Write(encodedRes); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(resHeader) + len(encodedRes)
+		} else {
+			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+			// History is not recorded for this kind of request as it is only used to test the GSSAPI authentication mechanism.
+			b.lock.Lock()
+			res := b.gssApiHandler(buffer)
+			b.lock.Unlock()
+			if res == nil {
+				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+				continue
+			}
+			if _, err = conn.Write(res); err != nil {
+				b.serverError(err)
+				break
+			}
+			bytesWritten = len(res)
+		}
+
+		b.lock.Lock()
+		if b.notifier != nil {
+			b.notifier(bytesRead, bytesWritten)
+		}
+		b.lock.Unlock()
+	}
+	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+	headerLength := uint32(8)
+
+	if headerVersion >= 1 {
+		headerLength = 9
+	}
+
+	resHeader := make([]byte, headerLength)
+	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+	if headerVersion >= 1 {
+		binary.PutUvarint(resHeader[8:], 0)
+	}
+
+	return resHeader
+}
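+
+// For example (a sketch of the layout): with headerVersion 1 and a 10-byte
+// payload, the header is 9 bytes long.
+//
+//	hdr := b.encodeHeader(1, 42, 10)
+//	// binary.BigEndian.Uint32(hdr[0:4]) == 15 (5 trailing header bytes + 10 payload bytes)
+//	// binary.BigEndian.Uint32(hdr[4:8]) == 42 (correlation ID)
+//	// hdr[8] == 0 (varint for an empty tagged-fields block)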
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
+	select {
+	case res, ok := <-b.expectations:
+		if !ok {
+			return nil
+		}
+		return res
+	case <-time.After(expectationTimeout):
+		return nil
+	}
+}
+
+func isConnectionClosedError(err error) bool {
+	var result bool
+	opError := &net.OpError{}
+	if errors.As(err, &opError) {
+		result = true
+	} else if errors.Is(err, io.EOF) {
+		result = true
+	} else if err.Error() == "use of closed network connection" {
+		result = true
+	}
+
+	return result
+}
+
+func (b *MockBroker) serverError(err error) {
+	b.t.Helper()
+	if isConnectionClosedError(err) {
+		return
+	}
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework. If an error occurs it is simply logged to the TestReporter and the
+// broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	var (
+		listener net.Listener
+		err      error
+	)
+
+	// retry up to 20 times if address already in use (e.g., if replacing broker which hasn't cleanly shutdown)
+	for i := 0; i < 20; i++ {
+		listener, err = net.Listen("tcp", addr)
+		if err != nil {
+			if errors.Is(err, syscall.EADDRINUSE) {
+				Logger.Printf("*** mockbroker/%d waiting for %s (address already in use)", brokerID, addr)
+				time.Sleep(time.Millisecond * 100)
+				continue
+			}
+			t.Fatal(err)
+		}
+		break
+	}
+
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoderWithHeader, 512),
+		listener:     listener,
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoderWithHeader) {
+	b.expectations <- e
+}
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/mockkerberos.go
rename to vendor/github.com/IBM/sarama/mockkerberos.go
diff --git a/vendor/github.com/IBM/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go
new file mode 100644
index 0000000..2c35279
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/mockresponses.go
@@ -0,0 +1,1531 @@
+package sarama
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Helper()
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program the behavior of MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoderWithHeader)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoderWithHeader
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoderWithHeader) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Every time a `MockBroker` calls its `For` method, the
+// next response from the sequence is returned. When the end of the sequence is
+// reached, the last element from the sequence is returned.
+type MockSequence struct {
+	responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+	ms := &MockSequence{}
+	ms.responses = make([]MockResponse, len(responses))
+	for i, res := range responses {
+		switch res := res.(type) {
+		case MockResponse:
+			ms.responses[i] = res
+		case encoderWithHeader:
+			ms.responses[i] = NewMockWrapper(res)
+		default:
+			panic(fmt.Sprintf("Unexpected response type: %T", res))
+		}
+	}
+	return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
+	res = mc.responses[0].For(reqBody)
+	if len(mc.responses) > 1 {
+		mc.responses = mc.responses[1:]
+	}
+	return res
+}
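+
+// A usage sketch (assuming `t` and `broker` from the surrounding test): the
+// first fetch gets an empty response; every later fetch repeats the last entry.
+//
+//	seq := NewMockSequence(
+//		&FetchResponse{Version: 1},
+//		NewMockFetchResponse(t, 1).SetMessage("my-topic", 0, 0, StringEncoder("x")),
+//	)
+//	broker.SetHandlerByMap(map[string]MockResponse{"FetchRequest": seq})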
+
+type MockListGroupsResponse struct {
+	groups map[string]string
+	t      TestReporter
+}
+
+func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
+	return &MockListGroupsResponse{
+		groups: make(map[string]string),
+		t:      t,
+	}
+}
+
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	request := reqBody.(*ListGroupsRequest)
+	response := &ListGroupsResponse{
+		Version: request.Version,
+		Groups:  m.groups,
+	}
+	return response
+}
+
+func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
+	m.groups[groupID] = protocolType
+	return m
+}
+
+type MockDescribeGroupsResponse struct {
+	groups map[string]*GroupDescription
+	t      TestReporter
+}
+
+func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
+	return &MockDescribeGroupsResponse{
+		t:      t,
+		groups: make(map[string]*GroupDescription),
+	}
+}
+
+func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
+	m.groups[groupID] = description
+	return m
+}
+
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	request := reqBody.(*DescribeGroupsRequest)
+
+	response := &DescribeGroupsResponse{Version: request.version()}
+	for _, requestedGroup := range request.Groups {
+		if group, ok := m.groups[requestedGroup]; ok {
+			response.Groups = append(response.Groups, group)
+		} else {
+			// Mimic real kafka - if a group doesn't exist, return
+			// an entry with state "Dead"
+			response.Groups = append(response.Groups, &GroupDescription{
+				GroupId: requestedGroup,
+				State:   "Dead",
+			})
+		}
+	}
+
+	return response
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
+type MockMetadataResponse struct {
+	controllerID int32
+	errors       map[string]KError
+	leaders      map[string]map[int32]int32
+	brokers      map[string]int32
+	t            TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+	return &MockMetadataResponse{
+		errors:  make(map[string]KError),
+		leaders: make(map[string]map[int32]int32),
+		brokers: make(map[string]int32),
+		t:       t,
+	}
+}
+
+func (mmr *MockMetadataResponse) SetError(topic string, kerror KError) *MockMetadataResponse {
+	mmr.errors[topic] = kerror
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+	partitions := mmr.leaders[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int32)
+		mmr.leaders[topic] = partitions
+	}
+	partitions[partition] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+	mmr.brokers[addr] = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
+	mmr.controllerID = brokerID
+	return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	metadataRequest := reqBody.(*MetadataRequest)
+	metadataResponse := &MetadataResponse{
+		Version:      metadataRequest.version(),
+		ControllerID: mmr.controllerID,
+	}
+	for addr, brokerID := range mmr.brokers {
+		metadataResponse.AddBroker(addr, brokerID)
+	}
+
+	// Generate set of replicas
+	var replicas []int32
+	var offlineReplicas []int32
+	for _, brokerID := range mmr.brokers {
+		replicas = append(replicas, brokerID)
+	}
+
+	if len(metadataRequest.Topics) == 0 {
+		for topic, partitions := range mmr.leaders {
+			for partition, brokerID := range partitions {
+				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+			}
+		}
+		for topic, err := range mmr.errors {
+			metadataResponse.AddTopic(topic, err)
+		}
+		return metadataResponse
+	}
+	for _, topic := range metadataRequest.Topics {
+		leaders, ok := mmr.leaders[topic]
+		if !ok {
+			if err, ok := mmr.errors[topic]; ok {
+				metadataResponse.AddTopic(topic, err)
+			} else {
+				metadataResponse.AddTopic(topic, ErrUnknownTopicOrPartition)
+			}
+			continue
+		}
+		for partition, brokerID := range leaders {
+			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+		}
+	}
+	return metadataResponse
+}
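+
+// A usage sketch: two brokers with one leader per partition; topics without a
+// configured leader or error fall back to ErrUnknownTopicOrPartition.
+//
+//	meta := NewMockMetadataResponse(t).
+//		SetController(1).
+//		SetBroker("127.0.0.1:9092", 1).
+//		SetBroker("127.0.0.1:9093", 2).
+//		SetLeader("my-topic", 0, 1).
+//		SetLeader("my-topic", 1, 2)
+//	broker.SetHandlerByMap(map[string]MockResponse{"MetadataRequest": meta})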
+
+// MockOffsetResponse is an `OffsetResponse` builder.
+type MockOffsetResponse struct {
+	offsets map[string]map[int32]map[int64]int64
+	t       TestReporter
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+	return &MockOffsetResponse{
+		offsets: make(map[string]map[int32]map[int64]int64),
+		t:       t,
+	}
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]int64)
+		mor.offsets[topic] = partitions
+	}
+	times := partitions[partition]
+	if times == nil {
+		times = make(map[int64]int64)
+		partitions[partition] = times
+	}
+	times[time] = offset
+	return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	offsetRequest := reqBody.(*OffsetRequest)
+	offsetResponse := &OffsetResponse{Version: offsetRequest.Version}
+	for topic, partitions := range offsetRequest.blocks {
+		for partition, block := range partitions {
+			offset := mor.getOffset(topic, partition, block.timestamp)
+			offsetResponse.AddTopicPartition(topic, partition, offset)
+		}
+	}
+	return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+	partitions := mor.offsets[topic]
+	if partitions == nil {
+		mor.t.Errorf("missing topic: %s", topic)
+	}
+	times := partitions[partition]
+	if times == nil {
+		mor.t.Errorf("missing partition: %d", partition)
+	}
+	offset, ok := times[time]
+	if !ok {
+		mor.t.Errorf("missing time: %d", time)
+	}
+	return offset
+}
+
+// mockMessage is a mocked message used when building a `FetchResponse`
+type mockMessage struct {
+	key Encoder
+	msg Encoder
+}
+
+func newMockMessage(key, msg Encoder) *mockMessage {
+	return &mockMessage{
+		key: key,
+		msg: msg,
+	}
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
+type MockFetchResponse struct {
+	messages       map[string]map[int32]map[int64]*mockMessage
+	messagesLock   *sync.RWMutex
+	highWaterMarks map[string]map[int32]int64
+	t              TestReporter
+	batchSize      int
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+	return &MockFetchResponse{
+		messages:       make(map[string]map[int32]map[int64]*mockMessage),
+		messagesLock:   &sync.RWMutex{},
+		highWaterMarks: make(map[string]map[int32]int64),
+		t:              t,
+		batchSize:      batchSize,
+	}
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+	return mfr.SetMessageWithKey(topic, partition, offset, nil, msg)
+}
+
+func (mfr *MockFetchResponse) SetMessageWithKey(topic string, partition int32, offset int64, key, msg Encoder) *MockFetchResponse {
+	mfr.messagesLock.Lock()
+	defer mfr.messagesLock.Unlock()
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		partitions = make(map[int32]map[int64]*mockMessage)
+		mfr.messages[topic] = partitions
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		messages = make(map[int64]*mockMessage)
+		partitions[partition] = messages
+	}
+	messages[offset] = newMockMessage(key, msg)
+	return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]int64)
+		mfr.highWaterMarks[topic] = partitions
+	}
+	partitions[partition] = offset
+	return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	fetchRequest := reqBody.(*FetchRequest)
+	res := &FetchResponse{
+		Version: fetchRequest.Version,
+	}
+	for topic, partitions := range fetchRequest.blocks {
+		for partition, block := range partitions {
+			initialOffset := block.fetchOffset
+			offset := initialOffset
+			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+			for i := 0; i < mfr.batchSize && offset < maxOffset; {
+				msg := mfr.getMessage(topic, partition, offset)
+				if msg != nil {
+					res.AddMessage(topic, partition, msg.key, msg.msg, offset)
+					i++
+				}
+				offset++
+			}
+			fb := res.GetBlock(topic, partition)
+			if fb == nil {
+				res.AddError(topic, partition, ErrNoError)
+				fb = res.GetBlock(topic, partition)
+			}
+			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+		}
+	}
+	return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) *mockMessage {
+	mfr.messagesLock.RLock()
+	defer mfr.messagesLock.RUnlock()
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return nil
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return nil
+	}
+	return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+	mfr.messagesLock.RLock()
+	defer mfr.messagesLock.RUnlock()
+	partitions := mfr.messages[topic]
+	if partitions == nil {
+		return 0
+	}
+	messages := partitions[partition]
+	if messages == nil {
+		return 0
+	}
+	return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+	partitions := mfr.highWaterMarks[topic]
+	if partitions == nil {
+		return 0
+	}
+	return partitions[partition]
+}
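+
+// A usage sketch: stage three messages on one partition and serve them two per
+// fetch (batchSize == 2), with the high water mark set past the last offset.
+//
+//	fetch := NewMockFetchResponse(t, 2).
+//		SetMessage("my-topic", 0, 0, StringEncoder("a")).
+//		SetMessage("my-topic", 0, 1, StringEncoder("b")).
+//		SetMessage("my-topic", 0, 2, StringEncoder("c")).
+//		SetHighWaterMark("my-topic", 0, 3)
+//	broker.SetHandlerByMap(map[string]MockResponse{"FetchRequest": fetch})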
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+	coordinators map[string]interface{}
+	t            TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+	return &MockConsumerMetadataResponse{
+		coordinators: make(map[string]interface{}),
+		t:            t,
+	}
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = broker
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+	mr.coordinators[group] = kerror
+	return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ConsumerMetadataRequest)
+	group := req.ConsumerGroup
+	res := &ConsumerMetadataResponse{Version: req.version()}
+	v := mr.coordinators[group]
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
+type MockFindCoordinatorResponse struct {
+	groupCoordinators map[string]interface{}
+	transCoordinators map[string]interface{}
+	t                 TestReporter
+}
+
+func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
+	return &MockFindCoordinatorResponse{
+		groupCoordinators: make(map[string]interface{}),
+		transCoordinators: make(map[string]interface{}),
+		t:                 t,
+	}
+}
+
+func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = broker
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = broker
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
+	switch coordinatorType {
+	case CoordinatorGroup:
+		mr.groupCoordinators[group] = kerror
+	case CoordinatorTransaction:
+		mr.transCoordinators[group] = kerror
+	}
+	return mr
+}
+
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*FindCoordinatorRequest)
+	res := &FindCoordinatorResponse{Version: req.version()}
+	var v interface{}
+	switch req.CoordinatorType {
+	case CoordinatorGroup:
+		v = mr.groupCoordinators[req.CoordinatorKey]
+	case CoordinatorTransaction:
+		v = mr.transCoordinators[req.CoordinatorKey]
+	}
+	switch v := v.(type) {
+	case *MockBroker:
+		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+	case KError:
+		res.Err = v
+	}
+	return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+	errors map[string]map[string]map[int32]KError
+	t      TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+	return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[string]map[int32]KError)
+	}
+	topics := mr.errors[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]KError)
+		mr.errors[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		topics[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*OffsetCommitRequest)
+	group := req.ConsumerGroup
+	res := &OffsetCommitResponse{Version: req.version()}
+	for topic, partitions := range req.blocks {
+		for partition := range partitions {
+			res.AddError(topic, partition, mr.getError(group, topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+	topics := mr.errors[group]
+	if topics == nil {
+		return ErrNoError
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+	version int16
+	errors  map[string]map[int32]KError
+	t       TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+	return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+	mr.version = version
+	return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+	if mr.errors == nil {
+		mr.errors = make(map[string]map[int32]KError)
+	}
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		mr.errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+	return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ProduceRequest)
+	res := &ProduceResponse{
+		Version: req.version(),
+	}
+	if mr.version > 0 {
+		res.Version = mr.version
+	}
+	for topic, partitions := range req.records {
+		for partition := range partitions {
+			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+		}
+	}
+	return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+	partitions := mr.errors[topic]
+	if partitions == nil {
+		return ErrNoError
+	}
+	kerror, ok := partitions[partition]
+	if !ok {
+		return ErrNoError
+	}
+	return kerror
+}
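+
+// A usage sketch, again assuming a *MockBroker mb ("my-topic" is a placeholder):
+//
+//	mb.SetHandlerByMap(map[string]MockResponse{
+//		"ProduceRequest": NewMockProduceResponse(t).
+//			SetVersion(2).
+//			SetError("my-topic", 0, ErrNoError),
+//	})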
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+	error   KError
+	t       TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+	return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+	if mr.offsets == nil {
+		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	topics := mr.offsets[group]
+	if topics == nil {
+		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+		mr.offsets[group] = topics
+	}
+	partitions := topics[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		topics[topic] = partitions
+	}
+	partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
+	mr.error = kerror
+	return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*OffsetFetchRequest)
+	group := req.ConsumerGroup
+	res := &OffsetFetchResponse{Version: req.Version}
+
+	for topic, partitions := range mr.offsets[group] {
+		for partition, block := range partitions {
+			res.AddBlock(topic, partition, block)
+		}
+	}
+
+	if res.Version >= 2 {
+		res.Err = mr.error
+	}
+	return res
+}
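+
+// A builder sketch using only the setters above (offset 42 and metadata
+// "meta" are illustrative values):
+//
+//	NewMockOffsetFetchResponse(t).
+//		SetOffset("my-group", "my-topic", 0, 42, "meta", ErrNoError).
+//		SetError(ErrNoError)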
+
+type MockCreateTopicsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
+	return &MockCreateTopicsResponse{t: t}
+}
+
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreateTopicsRequest)
+	res := &CreateTopicsResponse{
+		Version: req.Version,
+	}
+	res.TopicErrors = make(map[string]*TopicError)
+
+	for topic := range req.TopicDetails {
+		if res.Version >= 1 && strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create topic with reserved prefix"
+			res.TopicErrors[topic] = &TopicError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
+		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockDeleteTopicsResponse struct {
+	t     TestReporter
+	error KError
+}
+
+func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
+	return &MockDeleteTopicsResponse{t: t}
+}
+
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteTopicsRequest)
+	res := &DeleteTopicsResponse{Version: req.version()}
+	res.TopicErrorCodes = make(map[string]KError)
+
+	for _, topic := range req.Topics {
+		res.TopicErrorCodes[topic] = mr.error
+	}
+	return res
+}
+
+func (mr *MockDeleteTopicsResponse) SetError(kerror KError) *MockDeleteTopicsResponse {
+	mr.error = kerror
+	return mr
+}
+
+type MockCreatePartitionsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
+	return &MockCreatePartitionsResponse{t: t}
+}
+
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreatePartitionsRequest)
+	res := &CreatePartitionsResponse{Version: req.version()}
+	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
+
+	for topic := range req.TopicPartitions {
+		if strings.HasPrefix(topic, "_") {
+			msg := "insufficient permissions to create partition on topic with reserved prefix"
+			res.TopicPartitionErrors[topic] = &TopicPartitionError{
+				Err:    ErrTopicAuthorizationFailed,
+				ErrMsg: &msg,
+			}
+			continue
+		}
+		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
+	}
+	return res
+}
+
+type MockAlterPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
+	return &MockAlterPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterPartitionReassignmentsRequest)
+	res := &AlterPartitionReassignmentsResponse{Version: req.version()}
+	return res
+}
+
+type MockListPartitionReassignmentsResponse struct {
+	t TestReporter
+}
+
+func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
+	return &MockListPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ListPartitionReassignmentsRequest)
+	res := &ListPartitionReassignmentsResponse{Version: req.version()}
+
+	for topic, partitions := range req.blocks {
+		for _, partition := range partitions {
+			res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
+		}
+	}
+
+	return res
+}
+
+type MockElectLeadersResponse struct {
+	t TestReporter
+}
+
+func NewMockElectLeadersResponse(t TestReporter) *MockElectLeadersResponse {
+	return &MockElectLeadersResponse{t: t}
+}
+
+func (mr *MockElectLeadersResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ElectLeadersRequest)
+	res := &ElectLeadersResponse{Version: req.version(), ReplicaElectionResults: map[string]map[int32]*PartitionResult{}}
+
+	for topic, partitions := range req.TopicPartitions {
+		// Build the per-topic map once so every requested partition is recorded;
+		// assigning a fresh map inside the inner loop would overwrite all but the
+		// last partition for the topic.
+		results := make(map[int32]*PartitionResult, len(partitions))
+		for _, partition := range partitions {
+			results[partition] = &PartitionResult{ErrorCode: ErrNoError}
+		}
+		res.ReplicaElectionResults[topic] = results
+	}
+	return res
+}
+
+type MockDeleteRecordsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
+	return &MockDeleteRecordsResponse{t: t}
+}
+
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteRecordsRequest)
+	res := &DeleteRecordsResponse{Version: req.version()}
+	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
+
+	for topic, deleteRecordRequestTopic := range req.Topics {
+		partitions := make(map[int32]*DeleteRecordsResponsePartition)
+		for partition := range deleteRecordRequestTopic.PartitionOffsets {
+			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
+		}
+		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
+	}
+	return res
+}
+
+type MockDescribeConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
+	return &MockDescribeConfigsResponse{t: t}
+}
+
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	includeSynonyms := req.Version > 0
+	includeSource := req.Version > 0
+
+	for _, r := range req.Resources {
+		var configEntries []*ConfigEntry
+		switch r.Type {
+		case BrokerResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{
+					Name:     "min.insync.replicas",
+					Value:    "2",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case BrokerLoggerResource:
+			configEntries = append(configEntries,
+				&ConfigEntry{
+					Name:     "kafka.controller.KafkaController",
+					Value:    "DEBUG",
+					ReadOnly: false,
+					Default:  false,
+				},
+			)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		case TopicResource:
+			maxMessageBytes := &ConfigEntry{
+				Name:      "max.message.bytes",
+				Value:     "1000000",
+				ReadOnly:  false,
+				Default:   !includeSource,
+				Sensitive: false,
+			}
+			if includeSource {
+				maxMessageBytes.Source = SourceDefault
+			}
+			if includeSynonyms {
+				maxMessageBytes.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "max.message.bytes",
+						ConfigValue: "500000",
+					},
+				}
+			}
+			retentionMs := &ConfigEntry{
+				Name:      "retention.ms",
+				Value:     "5000",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: false,
+			}
+			if includeSynonyms {
+				retentionMs.Synonyms = []*ConfigSynonym{
+					{
+						ConfigName:  "log.retention.ms",
+						ConfigValue: "2500",
+					},
+				}
+			}
+			password := &ConfigEntry{
+				Name:      "password",
+				Value:     "12345",
+				ReadOnly:  false,
+				Default:   false,
+				Sensitive: true,
+			}
+			configEntries = append(
+				configEntries, maxMessageBytes, retentionMs, password)
+			res.Resources = append(res.Resources, &ResourceResponse{
+				Name:    r.Name,
+				Configs: configEntries,
+			})
+		}
+	}
+	return res
+}
+
+type MockDescribeConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
+	return &MockDescribeConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeConfigsRequest)
+	res := &DescribeConfigsResponse{
+		Version: req.Version,
+	}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &ResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
+type MockAlterConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
+	return &MockAlterConfigsResponse{t: t}
+}
+
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:     r.Name,
+			Type:     r.Type,
+			ErrorMsg: "",
+		})
+	}
+	return res
+}
+
+type MockAlterConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
+	return &MockAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*AlterConfigsRequest)
+	res := &AlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
+type MockIncrementalAlterConfigsResponse struct {
+	t TestReporter
+}
+
+func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlterConfigsResponse {
+	return &MockIncrementalAlterConfigsResponse{t: t}
+}
+
+func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*IncrementalAlterConfigsRequest)
+	res := &IncrementalAlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:     r.Name,
+			Type:     r.Type,
+			ErrorMsg: "",
+		})
+	}
+	return res
+}
+
+type MockIncrementalAlterConfigsResponseWithErrorCode struct {
+	t TestReporter
+}
+
+func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIncrementalAlterConfigsResponseWithErrorCode {
+	return &MockIncrementalAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*IncrementalAlterConfigsRequest)
+	res := &IncrementalAlterConfigsResponse{Version: req.version()}
+
+	for _, r := range req.Resources {
+		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+			Name:      r.Name,
+			Type:      r.Type,
+			ErrorCode: 83,
+			ErrorMsg:  "",
+		})
+	}
+	return res
+}
+
+type MockCreateAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
+	return &MockCreateAclsResponse{t: t}
+}
+
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreateAclsRequest)
+	res := &CreateAclsResponse{Version: req.version()}
+
+	for range req.AclCreations {
+		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
+	}
+	return res
+}
+
+type MockCreateAclsResponseError struct {
+	t TestReporter
+}
+
+func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseError {
+	return &MockCreateAclsResponseError{t: t}
+}
+
+func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*CreateAclsRequest)
+	res := &CreateAclsResponse{Version: req.version()}
+
+	for range req.AclCreations {
+		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest})
+	}
+	return res
+}
+
+type MockListAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
+	return &MockListAclsResponse{t: t}
+}
+
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeAclsRequest)
+	res := &DescribeAclsResponse{Version: req.version()}
+	res.Err = ErrNoError
+	acl := &ResourceAcls{}
+	if req.ResourceName != nil {
+		acl.Resource.ResourceName = *req.ResourceName
+	}
+	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
+	acl.Resource.ResourceType = req.ResourceType
+
+	host := "*"
+	if req.Host != nil {
+		host = *req.Host
+	}
+
+	principal := "User:test"
+	if req.Principal != nil {
+		principal = *req.Principal
+	}
+
+	permissionType := req.PermissionType
+	if permissionType == AclPermissionAny {
+		permissionType = AclPermissionAllow
+	}
+
+	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+	res.ResourceAcls = append(res.ResourceAcls, acl)
+	return res
+}
+
+type MockSaslAuthenticateResponse struct {
+	t                 TestReporter
+	kerror            KError
+	saslAuthBytes     []byte
+	sessionLifetimeMs int64
+}
+
+func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
+	return &MockSaslAuthenticateResponse{t: t}
+}
+
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*SaslAuthenticateRequest)
+	res := &SaslAuthenticateResponse{
+		Version:           req.version(),
+		Err:               msar.kerror,
+		SaslAuthBytes:     msar.saslAuthBytes,
+		SessionLifetimeMs: msar.sessionLifetimeMs,
+	}
+	return res
+}
+
+func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
+	msar.kerror = kerror
+	return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
+	msar.saslAuthBytes = saslAuthBytes
+	return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetSessionLifetimeMs(sessionLifetimeMs int64) *MockSaslAuthenticateResponse {
+	msar.sessionLifetimeMs = sessionLifetimeMs
+	return msar
+}
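+
+// A builder sketch with illustrative values:
+//
+//	NewMockSaslAuthenticateResponse(t).
+//		SetAuthBytes([]byte("server-final-message")).
+//		SetSessionLifetimeMs(60000)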
+
+type MockSaslHandshakeResponse struct {
+	enabledMechanisms []string
+	kerror            KError
+	t                 TestReporter
+}
+
+func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
+	return &MockSaslHandshakeResponse{t: t}
+}
+
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*SaslHandshakeRequest)
+	res := &SaslHandshakeResponse{Version: req.version()}
+	res.Err = mshr.kerror
+	res.EnabledMechanisms = mshr.enabledMechanisms
+	return res
+}
+
+func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
+	mshr.kerror = kerror
+	return mshr
+}
+
+func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
+	mshr.enabledMechanisms = enabledMechanisms
+	return mshr
+}
+
+type MockDeleteAclsResponse struct {
+	t TestReporter
+}
+
+func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
+	return &MockDeleteAclsResponse{t: t}
+}
+
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteAclsRequest)
+	res := &DeleteAclsResponse{Version: req.version()}
+
+	for range req.Filters {
+		response := &FilterResponse{Err: ErrNoError}
+		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
+		res.FilterResponses = append(res.FilterResponses, response)
+	}
+	return res
+}
+
+type MockDeleteGroupsResponse struct {
+	deletedGroups []string
+}
+
+func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
+	return &MockDeleteGroupsResponse{}
+}
+
+func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
+	m.deletedGroups = groups
+	return m
+}
+
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteGroupsRequest)
+	resp := &DeleteGroupsResponse{
+		Version:         req.version(),
+		GroupErrorCodes: map[string]KError{},
+	}
+	for _, group := range m.deletedGroups {
+		resp.GroupErrorCodes[group] = ErrNoError
+	}
+	return resp
+}
+
+type MockDeleteOffsetResponse struct {
+	errorCode      KError
+	topic          string
+	partition      int32
+	errorPartition KError
+}
+
+func NewMockDeleteOffsetRequest(t TestReporter) *MockDeleteOffsetResponse {
+	return &MockDeleteOffsetResponse{}
+}
+
+func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic string, partition int32, errorPartition KError) *MockDeleteOffsetResponse {
+	m.errorCode = errorCode
+	m.topic = topic
+	m.partition = partition
+	m.errorPartition = errorPartition
+	return m
+}
+
+func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DeleteOffsetsRequest)
+	resp := &DeleteOffsetsResponse{
+		Version:   req.version(),
+		ErrorCode: m.errorCode,
+		Errors: map[string]map[int32]KError{
+			m.topic: {m.partition: m.errorPartition},
+		},
+	}
+	return resp
+}
+
+type MockJoinGroupResponse struct {
+	t TestReporter
+
+	ThrottleTime  int32
+	Err           KError
+	GenerationId  int32
+	GroupProtocol string
+	LeaderId      string
+	MemberId      string
+	Members       []GroupMember
+}
+
+func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
+	return &MockJoinGroupResponse{
+		t:       t,
+		Members: make([]GroupMember, 0),
+	}
+}
+
+func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*JoinGroupRequest)
+	resp := &JoinGroupResponse{
+		Version:       req.Version,
+		ThrottleTime:  m.ThrottleTime,
+		Err:           m.Err,
+		GenerationId:  m.GenerationId,
+		GroupProtocol: m.GroupProtocol,
+		LeaderId:      m.LeaderId,
+		MemberId:      m.MemberId,
+		Members:       m.Members,
+	}
+	return resp
+}
+
+func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
+	m.ThrottleTime = t
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
+	m.GenerationId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
+	m.GroupProtocol = proto
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
+	m.LeaderId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
+	m.MemberId = id
+	return m
+}
+
+func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
+	bin, err := encode(meta, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member metadata: %v", err))
+	}
+	m.Members = append(m.Members, GroupMember{MemberId: id, Metadata: bin})
+	return m
+}
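+
+// A builder sketch with illustrative member names ("member-1" and the
+// "range" protocol are placeholders):
+//
+//	NewMockJoinGroupResponse(t).
+//		SetGroupProtocol("range").
+//		SetGenerationId(1).
+//		SetLeaderId("member-1").
+//		SetMemberId("member-1").
+//		SetMember("member-1", &ConsumerGroupMemberMetadata{
+//			Topics: []string{"my-topic"},
+//		})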
+
+type MockLeaveGroupResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
+	return &MockLeaveGroupResponse{t: t}
+}
+
+func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*LeaveGroupRequest)
+	resp := &LeaveGroupResponse{
+		Version: req.version(),
+		Err:     m.Err,
+	}
+	return resp
+}
+
+func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockSyncGroupResponse struct {
+	t TestReporter
+
+	Err              KError
+	MemberAssignment []byte
+}
+
+func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
+	return &MockSyncGroupResponse{t: t}
+}
+
+func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*SyncGroupRequest)
+	resp := &SyncGroupResponse{
+		Version:          req.version(),
+		Err:              m.Err,
+		MemberAssignment: m.MemberAssignment,
+	}
+	return resp
+}
+
+func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
+	m.Err = kerr
+	return m
+}
+
+func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
+	bin, err := encode(assignment, nil)
+	if err != nil {
+		panic(fmt.Sprintf("error encoding member assignment: %v", err))
+	}
+	m.MemberAssignment = bin
+	return m
+}
+
+type MockHeartbeatResponse struct {
+	t TestReporter
+
+	Err KError
+}
+
+func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
+	return &MockHeartbeatResponse{t: t}
+}
+
+func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*HeartbeatRequest)
+	resp := &HeartbeatResponse{
+		Version: req.version(),
+		Err:     m.Err, // propagate any error configured via SetError
+	}
+	return resp
+}
+
+func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
+	m.Err = kerr
+	return m
+}
+
+type MockDescribeLogDirsResponse struct {
+	t       TestReporter
+	logDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
+	return &MockDescribeLogDirsResponse{t: t}
+}
+
+func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
+	var topics []DescribeLogDirsResponseTopic
+	for topic, numPartitions := range topicPartitions {
+		var partitions []DescribeLogDirsResponsePartition
+		for i := 0; i < numPartitions; i++ {
+			partitions = append(partitions, DescribeLogDirsResponsePartition{
+				PartitionID: int32(i),
+				IsTemporary: false,
+				OffsetLag:   int64(0),
+				Size:        int64(1234),
+			})
+		}
+		topics = append(topics, DescribeLogDirsResponseTopic{
+			Topic:      topic,
+			Partitions: partitions,
+		})
+	}
+	logDir := DescribeLogDirsResponseDirMetadata{
+		ErrorCode: ErrNoError,
+		Path:      logDirPath,
+		Topics:    topics,
+	}
+	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
+	return m
+}
+
+func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*DescribeLogDirsRequest)
+	resp := &DescribeLogDirsResponse{
+		Version: req.version(),
+		LogDirs: m.logDirs,
+	}
+	return resp
+}
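+
+// A builder sketch (the log dir path and partition count are illustrative):
+//
+//	NewMockDescribeLogDirsResponse(t).
+//		SetLogDirs("/var/lib/kafka/data", map[string]int{"my-topic": 2})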
+
+type MockApiVersionsResponse struct {
+	t       TestReporter
+	apiKeys []ApiVersionsResponseKey
+}
+
+func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse {
+	return &MockApiVersionsResponse{
+		t: t,
+		apiKeys: []ApiVersionsResponseKey{
+			{
+				ApiKey:     0,
+				MinVersion: 5,
+				MaxVersion: 8,
+			},
+			{
+				ApiKey:     1,
+				MinVersion: 7,
+				MaxVersion: 11,
+			},
+		},
+	}
+}
+
+func (m *MockApiVersionsResponse) SetApiKeys(apiKeys []ApiVersionsResponseKey) *MockApiVersionsResponse {
+	m.apiKeys = apiKeys
+	return m
+}
+
+func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*ApiVersionsRequest)
+	res := &ApiVersionsResponse{
+		Version: req.Version,
+		ApiKeys: m.apiKeys,
+	}
+	return res
+}
+
+// MockInitProducerIDResponse is an `InitProducerIDResponse` builder.
+type MockInitProducerIDResponse struct {
+	producerID    int64
+	producerEpoch int16
+	err           KError
+	t             TestReporter
+}
+
+func NewMockInitProducerIDResponse(t TestReporter) *MockInitProducerIDResponse {
+	return &MockInitProducerIDResponse{
+		t: t,
+	}
+}
+
+func (m *MockInitProducerIDResponse) SetProducerID(id int) *MockInitProducerIDResponse {
+	m.producerID = int64(id)
+	return m
+}
+
+func (m *MockInitProducerIDResponse) SetProducerEpoch(epoch int) *MockInitProducerIDResponse {
+	m.producerEpoch = int16(epoch)
+	return m
+}
+
+func (m *MockInitProducerIDResponse) SetError(err KError) *MockInitProducerIDResponse {
+	m.err = err
+	return m
+}
+
+func (m *MockInitProducerIDResponse) For(reqBody versionedDecoder) encoderWithHeader {
+	req := reqBody.(*InitProducerIDRequest)
+	res := &InitProducerIDResponse{
+		Version:       req.Version,
+		Err:           m.err,
+		ProducerID:    m.producerID,
+		ProducerEpoch: m.producerEpoch,
+	}
+	return res
+}
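+
+// A builder sketch using only the setters above (illustrative values):
+//
+//	NewMockInitProducerIDResponse(t).
+//		SetProducerID(1000).
+//		SetProducerEpoch(1).
+//		SetError(ErrNoError)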
diff --git a/vendor/github.com/IBM/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go
new file mode 100644
index 0000000..2c5c693
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_commit_request.go
@@ -0,0 +1,257 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used by request version 1, which requires Kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+	offset               int64
+	timestamp            int64
+	committedLeaderEpoch int32
+	metadata             string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+	pe.putInt64(b.offset)
+	if version == 1 {
+		pe.putInt64(b.timestamp)
+	} else if b.timestamp != 0 {
+		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+	}
+	if version >= 6 {
+		pe.putInt32(b.committedLeaderEpoch)
+	}
+
+	return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	if b.offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if version == 1 {
+		if b.timestamp, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+	if version >= 6 {
+		if b.committedLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	b.metadata, err = pd.getString()
+	return err
+}
+
+type OffsetCommitRequest struct {
+	ConsumerGroup           string
+	ConsumerGroupGeneration int32   // v1 or later
+	ConsumerID              string  // v1 or later
+	GroupInstanceId         *string // v7 or later
+	RetentionTime           int64   // v2 or later
+
+	// Version can be:
+	// - 0 (kafka 0.8.1 and later)
+	// - 1 (kafka 0.8.2 and later)
+	// - 2 (kafka 0.9.0 and later)
+	// - 3 (kafka 0.11.0 and later)
+	// - 4 (kafka 2.0.0 and later)
+	// - 5&6 (kafka 2.1.0 and later)
+	// - 7 (kafka 2.3.0 and later)
+	Version int16
+	blocks  map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+	if r.Version < 0 || r.Version > 7 {
+		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+	}
+
+	if err := pe.putString(r.ConsumerGroup); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		pe.putInt32(r.ConsumerGroupGeneration)
+		if err := pe.putString(r.ConsumerID); err != nil {
+			return err
+		}
+	} else {
+		if r.ConsumerGroupGeneration != 0 {
+			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+		}
+		if r.ConsumerID != "" {
+			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+		}
+	}
+
+	// Version 5 removes RetentionTime, which is now controlled only by a broker configuration.
+	if r.Version >= 2 && r.Version <= 4 {
+		pe.putInt64(r.RetentionTime)
+	} else if r.RetentionTime != 0 {
+		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+	}
+
+	if r.Version >= 7 {
+		if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(r.blocks)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if r.ConsumerGroup, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if r.Version >= 1 {
+		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+			return err
+		}
+		if r.ConsumerID, err = pd.getString(); err != nil {
+			return err
+		}
+	}
+
+	// Version 5 removes RetentionTime, which is now controlled only by a broker configuration.
+	if r.Version >= 2 && r.Version <= 4 {
+		if r.RetentionTime, err = pd.getInt64(); err != nil {
+			return err
+		}
+	}
+
+	if r.Version >= 7 {
+		if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetCommitRequestBlock{}
+			if err := block.decode(pd, r.Version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+	return apiKeyOffsetCommit
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *OffsetCommitRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_3_0_0
+	case 5, 6:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_9_0_0
+	case 0, 1:
+		return V0_8_2_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+	r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata)
+}
+
+func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+	}
+
+	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, leaderEpoch, metadata}
+}
+
+func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
+	partitions := r.blocks[topic]
+	if partitions == nil {
+		return 0, "", errors.New("no such offset")
+	}
+	block := partitions[partitionID]
+	if block == nil {
+		return 0, "", errors.New("no such offset")
+	}
+	return block.offset, block.metadata, nil
+}
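+
+// A round-trip sketch of the exported helpers above (illustrative values):
+//
+//	req := &OffsetCommitRequest{Version: 2, ConsumerGroup: "my-group"}
+//	req.AddBlock("my-topic", 0, 42, 0, "meta")
+//	offset, metadata, err := req.Offset("my-topic", 0) // 42, "meta", nil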
diff --git a/vendor/github.com/IBM/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go
new file mode 100644
index 0000000..2af478c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_commit_response.go
@@ -0,0 +1,131 @@
+package sarama
+
+import "time"
+
+type OffsetCommitResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Errors         map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+	if r.Errors == nil {
+		r.Errors = make(map[string]map[int32]KError)
+	}
+	partitions := r.Errors[topic]
+	if partitions == nil {
+		partitions = make(map[int32]KError)
+		r.Errors[topic] = partitions
+	}
+	partitions[partition] = kerror
+}
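+
+// A minimal sketch (illustrative values):
+//
+//	res := &OffsetCommitResponse{Version: 2}
+//	res.AddError("my-topic", 0, ErrUnknownTopicOrPartition)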
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	if err := pe.putArrayLength(len(r.Errors)); err != nil {
+		return err
+	}
+	for topic, partitions := range r.Errors {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, kerror := range partitions {
+			pe.putInt32(partition)
+			pe.putKError(kerror)
+		}
+	}
+	return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil || numTopics == 0 {
+		return err
+	}
+
+	r.Errors = make(map[string]map[int32]KError, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numErrors, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Errors[name] = make(map[int32]KError, numErrors)
+
+		for j := 0; j < numErrors; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			r.Errors[name][id], err = pd.getKError()
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+	return apiKeyOffsetCommit
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *OffsetCommitResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_3_0_0
+	case 5, 6:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_9_0_0
+	case 0, 1:
+		return V0_8_2_0
+	default:
+		return V2_4_0_0
+	}
+}
+
+func (r *OffsetCommitResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..0cfbe7f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_fetch_request.go
@@ -0,0 +1,206 @@
+package sarama
+
+type OffsetFetchRequest struct {
+	Version       int16
+	ConsumerGroup string
+	RequireStable bool // requires v7+
+	partitions    map[string][]int32
+}
+
+func (r *OffsetFetchRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func NewOffsetFetchRequest(
+	version KafkaVersion,
+	group string,
+	partitions map[string][]int32,
+) *OffsetFetchRequest {
+	request := &OffsetFetchRequest{
+		ConsumerGroup: group,
+		partitions:    partitions,
+	}
+	if version.IsAtLeast(V2_5_0_0) {
+		// Version 7 adds the RequireStable flag.
+		request.Version = 7
+	} else if version.IsAtLeast(V2_4_0_0) {
+		// Version 6 is the first flexible version.
+		request.Version = 6
+	} else if version.IsAtLeast(V2_1_0_0) {
+		// Version 3, 4, and 5 are the same as version 2.
+		request.Version = 5
+	} else if version.IsAtLeast(V2_0_0_0) {
+		request.Version = 4
+	} else if version.IsAtLeast(V0_11_0_0) {
+		request.Version = 3
+	} else if version.IsAtLeast(V0_10_2_0) {
+		// Starting in version 2, the request can contain a null topics array to indicate that offsets
+		// for all topics should be fetched. It also returns a top level error code
+		// for group or coordinator level errors.
+		request.Version = 2
+	} else if version.IsAtLeast(V0_8_2_0) {
+		// In version 0, the request read offsets from ZK.
+		//
+		// Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic.
+		request.Version = 1
+	}
+
+	return request
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+	if r.Version < 0 || r.Version > 7 {
+		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+	}
+
+	err = pe.putString(r.ConsumerGroup)
+	if err != nil {
+		return err
+	}
+
+	if r.partitions == nil && r.Version >= 2 {
+		if err := pe.putArrayLength(-1); err != nil {
+			return err
+		}
+	} else {
+		if err = pe.putArrayLength(len(r.partitions)); err != nil {
+			return err
+		}
+	}
+
+	for topic, partitions := range r.partitions {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putInt32Array(partitions)
+		if err != nil {
+			return err
+		}
+
+		pe.putEmptyTaggedFieldArray()
+	}
+
+	if r.RequireStable && r.Version < 7 {
+		return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
+	}
+
+	if r.Version >= 7 {
+		pe.putBool(r.RequireStable)
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.ConsumerGroup, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	partitionCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+		return nil
+	}
+
+	r.partitions = make(map[string][]int32, partitionCount)
+	for i := 0; i < partitionCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		partitions, err := pd.getInt32Array()
+		if err != nil {
+			return err
+		}
+		_, err = pd.getEmptyTaggedFieldArray()
+		if err != nil {
+			return err
+		}
+
+		r.partitions[topic] = partitions
+	}
+
+	if r.Version >= 7 {
+		r.RequireStable, err = pd.getBool()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+	return apiKeyOffsetFetch
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchRequest) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 2
+	}
+
+	return 1
+}
+
+func (r *OffsetFetchRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetFetchRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *OffsetFetchRequest) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_5_0_0
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_2_0
+	case 0, 1:
+		return V0_8_2_0
+	default:
+		return V2_5_0_0
+	}
+}
+
+func (r *OffsetFetchRequest) ZeroPartitions() {
+	if r.partitions == nil && r.Version >= 2 {
+		r.partitions = make(map[string][]int32)
+	}
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+	if r.partitions == nil {
+		r.partitions = make(map[string][]int32)
+	}
+
+	r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
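+
+// A construction sketch (illustrative values); NewOffsetFetchRequest picks the
+// highest request version that the supplied Kafka version supports:
+//
+//	req := NewOffsetFetchRequest(V2_5_0_0, "my-group",
+//		map[string][]int32{"my-topic": {0, 1}})
+//	req.RequireStable = true // valid because V2_5_0_0 maps to version 7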
diff --git a/vendor/github.com/IBM/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..0c27563
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_fetch_response.go
@@ -0,0 +1,244 @@
+package sarama
+
+import "time"
+
+type OffsetFetchResponseBlock struct {
+	Offset      int64
+	LeaderEpoch int32
+	Metadata    string
+	Err         KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 5 {
+		b.LeaderEpoch, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	} else {
+		b.LeaderEpoch = -1
+	}
+
+	b.Metadata, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putInt64(b.Offset)
+
+	if version >= 5 {
+		pe.putInt32(b.LeaderEpoch)
+	}
+	err = pe.putString(b.Metadata)
+	if err != nil {
+		return err
+	}
+
+	pe.putKError(b.Err)
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+type OffsetFetchResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetFetchResponseBlock
+	Err            KError
+}
+
+func (r *OffsetFetchResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 3 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+	err = pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err := block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+		pe.putEmptyTaggedFieldArray()
+	}
+	if r.Version >= 2 {
+		pe.putKError(r.Err)
+	}
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	if version >= 3 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	if numTopics > 0 {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+		for i := 0; i < numTopics; i++ {
+			name, err := pd.getString()
+			if err != nil {
+				return err
+			}
+
+			numBlocks, err := pd.getArrayLength()
+			if err != nil {
+				return err
+			}
+
+			r.Blocks[name] = nil
+			if numBlocks > 0 {
+				r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+			}
+			for j := 0; j < numBlocks; j++ {
+				id, err := pd.getInt32()
+				if err != nil {
+					return err
+				}
+
+				block := new(OffsetFetchResponseBlock)
+				err = block.decode(pd, version)
+				if err != nil {
+					return err
+				}
+
+				r.Blocks[name][id] = block
+			}
+
+			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+				return err
+			}
+		}
+	}
+
+	if version >= 2 {
+		r.Err, err = pd.getKError()
+		if err != nil {
+			return err
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+	return apiKeyOffsetFetch
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetFetchResponse) headerVersion() int16 {
+	if r.Version >= 6 {
+		return 1
+	}
+
+	return 0
+}
+
+func (r *OffsetFetchResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetFetchResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *OffsetFetchResponse) isFlexibleVersion(version int16) bool {
+	return version >= 6
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_5_0_0
+	case 6:
+		return V2_4_0_0
+	case 5:
+		return V2_1_0_0
+	case 4:
+		return V2_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_2_0
+	case 0, 1:
+		return V0_8_2_0
+	default:
+		return V2_5_0_0
+	}
+}
+
+func (r *OffsetFetchResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+	}
+	partitions := r.Blocks[topic]
+	if partitions == nil {
+		partitions = make(map[int32]*OffsetFetchResponseBlock)
+		r.Blocks[topic] = partitions
+	}
+	partitions[partition] = block
+}
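+
+// A sketch of the block helpers above (illustrative values):
+//
+//	res := &OffsetFetchResponse{Version: 5}
+//	res.AddBlock("my-topic", 0, &OffsetFetchResponseBlock{
+//		Offset:      42,
+//		LeaderEpoch: 3,
+//		Metadata:    "meta",
+//	})
+//	block := res.GetBlock("my-topic", 0) // block.Offset == 42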
diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go
new file mode 100644
index 0000000..0b594c2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_manager.go
@@ -0,0 +1,663 @@
+package sarama
+
+import (
+	"sync"
+	"time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+	// It will return an error if this OffsetManager is already managing the given
+	// topic/partition.
+	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+	// Close stops the OffsetManager from managing offsets. It is required to call
+	// this function before an OffsetManager object passes out of scope, as it
+	// will otherwise leak memory. You must call this after all the
+	// PartitionOffsetManagers are closed.
+	Close() error
+
+	// Commit commits the offsets. This method can be used if AutoCommit.Enable is
+	// set to false.
+	Commit()
+}
+
+type offsetManager struct {
+	client          Client
+	conf            *Config
+	group           string
+	ticker          *time.Ticker
+	sessionCanceler func()
+
+	memberID        string
+	groupInstanceId *string
+	generation      int32
+
+	broker     *Broker
+	brokerLock sync.RWMutex
+
+	poms     map[string]map[int32]*partitionOffsetManager
+	pomsLock sync.RWMutex
+
+	closeOnce sync.Once
+	closing   chan none
+	closed    chan none
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+	return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client, nil)
+}
+
+func newOffsetManagerFromClient(group, memberID string, generation int32, client Client, sessionCanceler func()) (*offsetManager, error) {
+	// Check that we are not dealing with a closed Client before processing any other arguments
+	if client.Closed() {
+		return nil, ErrClosedClient
+	}
+
+	conf := client.Config()
+	om := &offsetManager{
+		client:          client,
+		conf:            conf,
+		group:           group,
+		poms:            make(map[string]map[int32]*partitionOffsetManager),
+		sessionCanceler: sessionCanceler,
+
+		memberID:   memberID,
+		generation: generation,
+
+		closing: make(chan none),
+		closed:  make(chan none),
+	}
+	if conf.Consumer.Group.InstanceId != "" {
+		om.groupInstanceId = &conf.Consumer.Group.InstanceId
+	}
+	if conf.Consumer.Offsets.AutoCommit.Enable {
+		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
+		go withRecover(om.mainLoop)
+	}
+
+	return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+	pom, err := om.newPartitionOffsetManager(topic, partition)
+	if err != nil {
+		return nil, err
+	}
+
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	topicManagers := om.poms[topic]
+	if topicManagers == nil {
+		topicManagers = make(map[int32]*partitionOffsetManager)
+		om.poms[topic] = topicManagers
+	}
+
+	if topicManagers[partition] != nil {
+		return nil, ConfigurationError("That topic/partition is already being managed")
+	}
+
+	topicManagers[partition] = pom
+	return pom, nil
+}
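+
+// A consumer-side sketch, assuming an existing Client and eliding error
+// handling for brevity:
+//
+//	om, _ := NewOffsetManagerFromClient("my-group", client)
+//	pom, _ := om.ManagePartition("my-topic", 0)
+//	offset, metadata := pom.NextOffset()
+//	// ... consume from offset, then mark progress via the pom ...
+//	_ = pom.Close()
+//	_ = om.Close()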
+
+func (om *offsetManager) Close() error {
+	om.closeOnce.Do(func() {
+		// exit the mainLoop
+		close(om.closing)
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			<-om.closed
+		}
+
+		// mark all POMs as closed
+		om.asyncClosePOMs()
+
+		// flush one last time
+		if om.conf.Consumer.Offsets.AutoCommit.Enable {
+			for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+				om.flushToBroker()
+				if om.releasePOMs(false) == 0 {
+					break
+				}
+			}
+		}
+
+		om.releasePOMs(true)
+		om.brokerLock.Lock()
+		om.broker = nil
+		om.brokerLock.Unlock()
+	})
+	return nil
+}
+
+func (om *offsetManager) computeBackoff(retries int) time.Duration {
+	if om.conf.Metadata.Retry.BackoffFunc != nil {
+		return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
+	} else {
+		return om.conf.Metadata.Retry.Backoff
+	}
+}
+
+func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, int32, string, error) {
+	broker, err := om.coordinator()
+	if err != nil {
+		if retries <= 0 {
+			return 0, 0, "", err
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	partitions := map[string][]int32{topic: {partition}}
+	req := NewOffsetFetchRequest(om.conf.Version, om.group, partitions)
+	resp, err := broker.FetchOffset(req)
+	if err != nil {
+		if retries <= 0 {
+			return 0, 0, "", err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	}
+
+	block := resp.GetBlock(topic, partition)
+	if block == nil {
+		return 0, 0, "", ErrIncompleteResponse
+	}
+
+	switch block.Err {
+	case ErrNoError:
+		return block.Offset, block.LeaderEpoch, block.Metadata, nil
+	case ErrNotCoordinatorForConsumer:
+		if retries <= 0 {
+			return 0, 0, "", block.Err
+		}
+		om.releaseCoordinator(broker)
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	case ErrOffsetsLoadInProgress:
+		if retries <= 0 {
+			return 0, 0, "", block.Err
+		}
+		backoff := om.computeBackoff(retries)
+		select {
+		case <-om.closing:
+			return 0, 0, "", block.Err
+		case <-time.After(backoff):
+		}
+		return om.fetchInitialOffset(topic, partition, retries-1)
+	default:
+		return 0, 0, "", block.Err
+	}
+}
+
+func (om *offsetManager) coordinator() (*Broker, error) {
+	om.brokerLock.RLock()
+	broker := om.broker
+	om.brokerLock.RUnlock()
+
+	if broker != nil {
+		return broker, nil
+	}
+
+	om.brokerLock.Lock()
+	defer om.brokerLock.Unlock()
+
+	if broker := om.broker; broker != nil {
+		return broker, nil
+	}
+
+	if err := om.client.RefreshCoordinator(om.group); err != nil {
+		return nil, err
+	}
+
+	broker, err := om.client.Coordinator(om.group)
+	if err != nil {
+		return nil, err
+	}
+
+	om.broker = broker
+	return broker, nil
+}
+
+func (om *offsetManager) releaseCoordinator(b *Broker) {
+	om.brokerLock.Lock()
+	if om.broker == b {
+		om.broker = nil
+	}
+	om.brokerLock.Unlock()
+}
+
+func (om *offsetManager) mainLoop() {
+	defer om.ticker.Stop()
+	defer close(om.closed)
+
+	for {
+		select {
+		case <-om.ticker.C:
+			om.Commit()
+		case <-om.closing:
+			return
+		}
+	}
+}
+
+func (om *offsetManager) Commit() {
+	om.flushToBroker()
+	om.releasePOMs(false)
+}
+
+func (om *offsetManager) flushToBroker() {
+	broker, err := om.coordinator()
+	if err != nil {
+		om.handleError(err)
+		return
+	}
+
+	// Care needs to be taken to unlock this. We don't defer the unlock because that
+	// would hold the lock while waiting for the broker to reply.
+	broker.lock.Lock()
+	req := om.constructRequest()
+	if req == nil {
+		broker.lock.Unlock()
+		return
+	}
+	resp, rp, err := sendOffsetCommit(broker, req)
+	broker.lock.Unlock()
+
+	if err != nil {
+		om.handleError(err)
+		om.releaseCoordinator(broker)
+		_ = broker.Close()
+		return
+	}
+
+	err = handleResponsePromise(req, resp, rp, nil)
+	if err != nil {
+		om.handleError(err)
+		om.releaseCoordinator(broker)
+		_ = broker.Close()
+		return
+	}
+
+	broker.handleThrottledResponse(resp)
+	om.handleResponse(broker, req, resp)
+}
+
+func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) {
+	resp := new(OffsetCommitResponse)
+
+	promise, err := coordinator.send(req, resp)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return resp, promise, nil
+}
+
+func (om *offsetManager) constructRequest() *OffsetCommitRequest {
+	r := &OffsetCommitRequest{
+		Version:                 1,
+		ConsumerGroup:           om.group,
+		ConsumerID:              om.memberID,
+		ConsumerGroupGeneration: om.generation,
+	}
+	// Version 1 adds group membership information and the commit timestamp.
+	//
+	// Version 2 adds retention time and removes the commit timestamp added in version 1.
+	if om.conf.Version.IsAtLeast(V0_9_0_0) {
+		r.Version = 2
+	}
+	// Version 3 and 4 are the same as version 2.
+	if om.conf.Version.IsAtLeast(V0_11_0_0) {
+		r.Version = 3
+	}
+	if om.conf.Version.IsAtLeast(V2_0_0_0) {
+		r.Version = 4
+	}
+	// Version 5 removes the retention time, which is now controlled only by a broker configuration.
+	//
+	// Version 6 adds the leader epoch for fencing.
+	if om.conf.Version.IsAtLeast(V2_1_0_0) {
+		r.Version = 6
+	}
+	// Version 7 adds the groupInstanceId field to indicate member identity across restarts.
+	if om.conf.Version.IsAtLeast(V2_3_0_0) {
+		r.Version = 7
+		r.GroupInstanceId = om.groupInstanceId
+	}
+
+	// commit timestamp was only briefly supported in V1 where we set it to
+	// ReceiveTime (-1) to tell the broker to set it to the time when the commit
+	// request was received
+	var commitTimestamp int64
+	if r.Version == 1 {
+		commitTimestamp = ReceiveTime
+	}
+
+	// Request-controlled retention was only supported in V2-V4 (it became
+	// broker-only after that), so if the user has set the config option,
+	// flow it through as the retention time on the commit request.
+	if r.Version >= 2 && r.Version < 5 {
+		// Map Sarama's default of 0 to Kafka's default of -1
+		r.RetentionTime = -1
+		if om.conf.Consumer.Offsets.Retention > 0 {
+			r.RetentionTime = int64(om.conf.Consumer.Offsets.Retention / time.Millisecond)
+		}
+	}
+
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.lock.Lock()
+			if pom.dirty {
+				r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, commitTimestamp, pom.metadata)
+			}
+			pom.lock.Unlock()
+		}
+	}
+
+	if len(r.blocks) > 0 {
+		return r
+	}
+
+	return nil
+}
+
+func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
+				continue
+			}
+
+			var err KError
+			var ok bool
+
+			if resp.Errors[pom.topic] == nil {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+			if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
+				pom.handleError(ErrIncompleteResponse)
+				continue
+			}
+
+			switch err {
+			case ErrNoError:
+				block := req.blocks[pom.topic][pom.partition]
+				pom.updateCommitted(block.offset, block.metadata)
+			case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+				ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+				// not a critical error, we just need to redispatch
+				om.releaseCoordinator(broker)
+			case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+				// nothing we can do about this, just tell the user and carry on
+				pom.handleError(err)
+			case ErrOffsetsLoadInProgress:
+				// nothing wrong but we didn't commit, we'll get it next time round
+			case ErrFencedInstancedId:
+				pom.handleError(err)
+				// TODO close the whole consumer for instance fenced....
+				om.tryCancelSession()
+			case ErrUnknownTopicOrPartition:
+				// let the user know *and* try redispatching - if topic-auto-create is
+				// enabled, redispatching should trigger a metadata req and create the
+				// topic; if not then re-dispatching won't help, but we've let the user
+				// know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706)
+				fallthrough
+			default:
+				// unknown error; tell the user and try redispatching
+				pom.handleError(err)
+				om.releaseCoordinator(broker)
+			}
+		}
+	}
+}
+
+func (om *offsetManager) handleError(err error) {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.handleError(err)
+		}
+	}
+}
+
+func (om *offsetManager) asyncClosePOMs() {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	for _, topicManagers := range om.poms {
+		for _, pom := range topicManagers {
+			pom.AsyncClose()
+		}
+	}
+}
+
+// Releases/removes closed POMs once they are clean (or when forced)
+func (om *offsetManager) releasePOMs(force bool) (remaining int) {
+	om.pomsLock.Lock()
+	defer om.pomsLock.Unlock()
+
+	for topic, topicManagers := range om.poms {
+		for partition, pom := range topicManagers {
+			pom.lock.Lock()
+			releaseDue := pom.done && (force || !pom.dirty)
+			pom.lock.Unlock()
+
+			if releaseDue {
+				pom.release()
+
+				delete(om.poms[topic], partition)
+				if len(om.poms[topic]) == 0 {
+					delete(om.poms, topic)
+				}
+			}
+		}
+		remaining += len(om.poms[topic])
+	}
+	return
+}
+
+func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
+	om.pomsLock.RLock()
+	defer om.pomsLock.RUnlock()
+
+	if partitions, ok := om.poms[topic]; ok {
+		if pom, ok := partitions[partition]; ok {
+			return pom
+		}
+	}
+	return nil
+}
+
+func (om *offsetManager) tryCancelSession() {
+	if om.sessionCanceler != nil {
+		om.sessionCanceler()
+	}
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+	// NextOffset returns the next offset that should be consumed for the managed
+	// partition, accompanied by metadata which can be used to reconstruct the state
+	// of the partition consumer when it resumes. NextOffset() will return
+	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+	// was committed for this partition yet.
+	NextOffset() (int64, string)
+
+	// MarkOffset marks the provided offset, alongside a metadata string
+	// that represents the state of the partition consumer at that point in time. The
+	// metadata string can be used by another consumer to restore that state, so it
+	// can resume consumption.
+	//
+	// To follow upstream conventions, you are expected to mark the offset of the
+	// next message to read, not the last message read. Thus, when calling `MarkOffset`
+	// you should typically add one to the offset of the last consumed message.
+	//
+	// Note: calling MarkOffset does not necessarily commit the offset to the backend
+	// store immediately for efficiency reasons, and it may never be committed if
+	// your application crashes. This means that you may end up processing the same
+	// message twice, and your processing should ideally be idempotent.
+	MarkOffset(offset int64, metadata string)
+
+	// ResetOffset resets to the provided offset, alongside a metadata string that
+	// represents the state of the partition consumer at that point in time. Reset
+	// acts as a counterpart to MarkOffset, the difference being that it allows
+	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
+	// allows incrementing the offset. See MarkOffset for more details.
+	ResetOffset(offset int64, metadata string)
+
+	// Errors returns a read channel of errors that occur during offset management, if
+	// enabled. By default, errors are logged and not returned over this channel. If
+	// you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+	// return immediately, after which you should wait until the 'errors' channel has
+	// been drained and closed. It is required to call this function, or Close, before
+	// a consumer object passes out of scope, as it will otherwise leak memory. You
+	// must call this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionOffsetManager from managing offsets. It is required to
+	// call this function (or AsyncClose) before a PartitionOffsetManager object
+	// passes out of scope, as it will otherwise leak memory. You must call this
+	// before calling Close on the underlying client.
+	Close() error
+}
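+
+// An illustrative usage sketch (not part of the upstream API surface; the
+// `client`, `group` and `topic` values below are hypothetical placeholders):
+//
+//	om, _ := NewOffsetManagerFromClient(group, client)
+//	pom, _ := om.ManagePartition(topic, 0)
+//	defer pom.Close()
+//
+//	offset, _ := pom.NextOffset() // resume point, or Offsets.Initial if nothing committed
+//	// ... consume the message at `offset` ...
+//	pom.MarkOffset(offset+1, "") // mark the NEXT offset to read, per the convention above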
+
+type partitionOffsetManager struct {
+	parent      *offsetManager
+	topic       string
+	partition   int32
+	leaderEpoch int32
+
+	lock     sync.Mutex
+	offset   int64
+	metadata string
+	dirty    bool
+	done     bool
+
+	releaseOnce sync.Once
+	errors      chan *ConsumerError
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+	offset, leaderEpoch, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
+	if err != nil {
+		return nil, err
+	}
+
+	return &partitionOffsetManager{
+		parent:      om,
+		topic:       topic,
+		partition:   partition,
+		leaderEpoch: leaderEpoch,
+		errors:      make(chan *ConsumerError, om.conf.ChannelBufferSize),
+		offset:      offset,
+		metadata:    metadata,
+	}, nil
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+	return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset > pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if offset <= pom.offset {
+		pom.offset = offset
+		pom.metadata = metadata
+		pom.dirty = true
+	}
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset == offset && pom.metadata == metadata {
+		pom.dirty = false
+	}
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+	pom.lock.Lock()
+	defer pom.lock.Unlock()
+
+	if pom.offset >= 0 {
+		return pom.offset, pom.metadata
+	}
+
+	return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+	pom.lock.Lock()
+	pom.done = true
+	pom.lock.Unlock()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+	pom.AsyncClose()
+
+	var errors ConsumerErrors
+	for err := range pom.errors {
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+	cErr := &ConsumerError{
+		Topic:     pom.topic,
+		Partition: pom.partition,
+		Err:       err,
+	}
+
+	if pom.parent.conf.Consumer.Return.Errors {
+		pom.errors <- cErr
+	} else {
+		Logger.Println(cErr)
+	}
+}
+
+func (pom *partitionOffsetManager) release() {
+	pom.releaseOnce.Do(func() {
+		close(pom.errors)
+	})
+}
diff --git a/vendor/github.com/IBM/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go
new file mode 100644
index 0000000..01fbb33
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_request.go
@@ -0,0 +1,236 @@
+package sarama
+
+type offsetRequestBlock struct {
+	// currentLeaderEpoch contains the current leader epoch (used in version 4+).
+	currentLeaderEpoch int32
+	// timestamp contains the current timestamp.
+	timestamp int64
+	// maxNumOffsets contains the maximum number of offsets to report.
+	maxNumOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+	if version >= 4 {
+		pe.putInt32(b.currentLeaderEpoch)
+	}
+
+	pe.putInt64(b.timestamp)
+
+	if version == 0 {
+		pe.putInt32(b.maxNumOffsets)
+	}
+
+	return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.currentLeaderEpoch = -1
+	if version >= 4 {
+		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if b.timestamp, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if version == 0 {
+		if b.maxNumOffsets, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type OffsetRequest struct {
+	Version        int16
+	IsolationLevel IsolationLevel
+	replicaID      int32
+	isReplicaIDSet bool
+	blocks         map[string]map[int32]*offsetRequestBlock
+}
+
+func NewOffsetRequest(version KafkaVersion) *OffsetRequest {
+	request := &OffsetRequest{}
+	if version.IsAtLeast(V2_2_0_0) {
+		// Version 5 adds a new error code, OFFSET_NOT_AVAILABLE.
+		request.Version = 5
+	} else if version.IsAtLeast(V2_1_0_0) {
+		// Version 4 adds the current leader epoch, which is used for fencing.
+		request.Version = 4
+	} else if version.IsAtLeast(V2_0_0_0) {
+		// Version 3 is the same as version 2.
+		request.Version = 3
+	} else if version.IsAtLeast(V0_11_0_0) {
+		// Version 2 adds the isolation level, which is used for transactional reads.
+		request.Version = 2
+	} else if version.IsAtLeast(V0_10_1_0) {
+		// Version 1 removes MaxNumOffsets.  From this version forward, only a single
+		// offset can be returned.
+		request.Version = 1
+	}
+	return request
+}
+
+func (r *OffsetRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+	if r.isReplicaIDSet {
+		pe.putInt32(r.replicaID)
+	} else {
+		// default replica ID is always -1 for clients
+		pe.putInt32(-1)
+	}
+
+	if r.Version >= 2 {
+		pe.putBool(r.IsolationLevel == ReadCommitted)
+	}
+
+	err := pe.putArrayLength(len(r.blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.Version); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	replicaID, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if replicaID >= 0 {
+		r.SetReplicaID(replicaID)
+	}
+
+	if r.Version >= 2 {
+		tmp, err := pd.getBool()
+		if err != nil {
+			return err
+		}
+
+		r.IsolationLevel = ReadUncommitted
+		if tmp {
+			r.IsolationLevel = ReadCommitted
+		}
+	}
+
+	blockCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if blockCount == 0 {
+		return nil
+	}
+	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	for i := 0; i < blockCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			block := &offsetRequestBlock{}
+			if err := block.decode(pd, version); err != nil {
+				return err
+			}
+			r.blocks[topic][partition] = block
+		}
+	}
+	return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+	return apiKeyListOffsets
+}
+
+func (r *OffsetRequest) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *OffsetRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_2_0_0
+	case 4:
+		return V2_1_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *OffsetRequest) SetReplicaID(id int32) {
+	r.replicaID = id
+	r.isReplicaIDSet = true
+}
+
+func (r *OffsetRequest) ReplicaID() int32 {
+	if r.isReplicaIDSet {
+		return r.replicaID
+	}
+	return -1
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, timestamp int64, maxOffsets int32) {
+	if r.blocks == nil {
+		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+	}
+
+	if r.blocks[topic] == nil {
+		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+	}
+
+	tmp := new(offsetRequestBlock)
+	tmp.currentLeaderEpoch = -1
+	tmp.timestamp = timestamp
+	if r.Version == 0 {
+		tmp.maxNumOffsets = maxOffsets
+	}
+
+	r.blocks[topic][partitionID] = tmp
+}
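+
+// An illustrative sketch of building a ListOffsets request by hand (normally
+// the Client does this for you; `version` and `broker` are hypothetical
+// placeholders for a negotiated KafkaVersion and a connected *Broker):
+//
+//	req := NewOffsetRequest(version)
+//	req.AddBlock("my-topic", 0, OffsetNewest, 1)
+//	resp, err := broker.GetAvailableOffsets(req)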
diff --git a/vendor/github.com/IBM/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go
new file mode 100644
index 0000000..d57c24f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_response.go
@@ -0,0 +1,230 @@
+package sarama
+
+import "time"
+
+type OffsetResponseBlock struct {
+	Err KError
+	// Offsets contains the result offsets (for V0/V1 compatibility)
+	Offsets []int64 // Version 0
+	// Timestamp contains the timestamp associated with the returned offset.
+	Timestamp int64 // Version 1
+	// Offset contains the returned offset.
+	Offset int64 // Version 1
+	// LeaderEpoch contains the current leader epoch of the partition.
+	LeaderEpoch int32
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if version == 0 {
+		b.Offsets, err = pd.getInt64Array()
+		return err
+	}
+
+	if version >= 1 {
+		b.Timestamp, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		b.Offset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+
+		// For backwards compatibility put the offset in the offsets array too
+		b.Offsets = []int64{b.Offset}
+	}
+
+	if version >= 4 {
+		if b.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putKError(b.Err)
+
+	if version == 0 {
+		return pe.putInt64Array(b.Offsets)
+	}
+
+	if version >= 1 {
+		pe.putInt64(b.Timestamp)
+		pe.putInt64(b.Offset)
+	}
+
+	if version >= 4 {
+		pe.putInt32(b.LeaderEpoch)
+	}
+
+	return nil
+}
+
+type OffsetResponse struct {
+	Version        int16
+	ThrottleTimeMs int32
+	Blocks         map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 2 {
+		r.ThrottleTimeMs, err = pd.getInt32()
+		if err != nil {
+			return err
+		}
+	}
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(OffsetResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+	if r.Version >= 2 {
+		pe.putInt32(r.ThrottleTimeMs)
+	}
+
+	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.Blocks {
+		if err = pe.putString(topic); err != nil {
+			return err
+		}
+		if err = pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for partition, block := range partitions {
+			pe.putInt32(partition)
+			if err = block.encode(pe, r.version()); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+	return apiKeyListOffsets
+}
+
+func (r *OffsetResponse) version() int16 {
+	return r.Version
+}
+
+func (r *OffsetResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *OffsetResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 5:
+		return V2_2_0_0
+	case 4:
+		return V2_1_0_0
+	case 3:
+		return V2_0_0_0
+	case 2:
+		return V0_11_0_0
+	case 1:
+		return V0_10_1_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_0_0_0
+	}
+}
+
+func (r *OffsetResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*OffsetResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
diff --git a/vendor/github.com/IBM/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go
new file mode 100644
index 0000000..241c6c6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/packet_decoder.go
@@ -0,0 +1,79 @@
+package sarama
+
+import (
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type taggedFieldDecoderFunc func(pd packetDecoder) error
+type taggedFieldDecoders map[uint64]taggedFieldDecoderFunc
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+	// Primitives
+	getInt8() (int8, error)
+	getInt16() (int16, error)
+	getInt32() (int32, error)
+	getInt64() (int64, error)
+	getVarint() (int64, error)
+	getUVarint() (uint64, error)
+	getFloat64() (float64, error)
+	getArrayLength() (int, error)
+	getBool() (bool, error)
+	getKError() (KError, error)
+	getDurationMs() (time.Duration, error)
+	getEmptyTaggedFieldArray() (int, error)
+	getTaggedFieldArray(taggedFieldDecoders) error
+
+	// Collections
+	getBytes() ([]byte, error)
+	getVarintBytes() ([]byte, error)
+	getRawBytes(length int) ([]byte, error)
+	getString() (string, error)
+	getNullableString() (*string, error)
+	getInt32Array() ([]int32, error)
+	getInt64Array() ([]int64, error)
+	getStringArray() ([]string, error)
+
+	// Subsets
+	remaining() int
+	getSubset(length int) (packetDecoder, error)
+	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+	peekInt8(offset int) (int8, error)              // similar to peek, but just one byte
+
+	// Stacks, see PushDecoder
+	push(in pushDecoder) error
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and check the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+	check(curOffset int, buf []byte) error
+}
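+
+// A minimal hypothetical pushDecoder, as a sketch only (not the library's
+// real field type; assumes encoding/binary is imported): it verifies a
+// big-endian int32 length prefix against the bytes decoded after it.
+//
+//	type lengthCheck struct{ start int }
+//
+//	func (l *lengthCheck) saveOffset(in int)  { l.start = in }
+//	func (l *lengthCheck) reserveLength() int { return 4 }
+//	func (l *lengthCheck) check(curOffset int, buf []byte) error {
+//		want := int32(binary.BigEndian.Uint32(buf[l.start:]))
+//		if got := int32(curOffset - l.start - 4); got != want {
+//			return PacketDecodingError{"length field mismatch"}
+//		}
+//		return nil
+//	}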
+
+// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance varint-encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+	pushDecoder
+	decoder
+}
diff --git a/vendor/github.com/IBM/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go
new file mode 100644
index 0000000..674a550
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/packet_encoder.go
@@ -0,0 +1,75 @@
+package sarama
+
+import (
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+	// Primitives
+	putInt8(in int8)
+	putInt16(in int16)
+	putInt32(in int32)
+	putInt64(in int64)
+	putVarint(in int64)
+	putUVarint(in uint64)
+	putFloat64(in float64)
+	putArrayLength(in int) error
+	putBool(in bool)
+	putKError(in KError)
+	putDurationMs(in time.Duration)
+
+	// Collections
+	putBytes(in []byte) error
+	putVarintBytes(in []byte) error
+	putRawBytes(in []byte) error
+	putString(in string) error
+	putNullableString(in *string) error
+	putStringArray(in []string) error
+	putInt32Array(in []int32) error
+	putInt64Array(in []int64) error
+	putNullableInt32Array(in []int32) error
+	putEmptyTaggedFieldArray()
+
+	// Provide the current offset to record the batch size metric
+	offset() int
+
+	// Stacks, see PushEncoder
+	push(in pushEncoder)
+	pop() error
+
+	// To record metrics when provided
+	metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
+	saveOffset(in int)
+
+	// Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+	reserveLength() int
+
+	// Indicates that all required data is now available to calculate and write the field.
+	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+	// of data to the saved offset, based on the data between the saved offset and curOffset.
+	run(curOffset int, buf []byte) error
+}
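+
+// A minimal hypothetical pushEncoder, as a sketch only (assumes
+// encoding/binary is imported): it back-fills a big-endian int32 length
+// prefix once the payload after it has been written.
+//
+//	type lengthPrefix struct{ start int }
+//
+//	func (l *lengthPrefix) saveOffset(in int)  { l.start = in }
+//	func (l *lengthPrefix) reserveLength() int { return 4 }
+//	func (l *lengthPrefix) run(curOffset int, buf []byte) error {
+//		binary.BigEndian.PutUint32(buf[l.start:], uint32(curOffset-l.start-4))
+//		return nil
+//	}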
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance varint-encoded length
+// fields).
+type dynamicPushEncoder interface {
+	pushEncoder
+
+	// Called during pop() to adjust the length of the field.
+	// It should return the difference in bytes between the last computed length and current length.
+	adjustLength(currOffset int) int
+}
diff --git a/vendor/github.com/IBM/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go
new file mode 100644
index 0000000..50a345a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/partitioner.go
@@ -0,0 +1,248 @@
+package sarama
+
+import (
+	"hash"
+	"hash/crc32"
+	"hash/fnv"
+	"math/rand"
+	"time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+	// Partition takes a message and partition count and chooses a partition
+	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+	// RequiresConsistency indicates to the user of the partitioner whether the
+	// mapping of key->partition is consistent or not. Specifically, if a
+	// partitioner requires consistency then it must be allowed to choose from all
+	// partitions (even ones known to be unavailable), and its choice must be
+	// respected by the caller. The obvious example is the HashPartitioner.
+	RequiresConsistency() bool
+}
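+
+// The smallest possible implementation, as an illustrative sketch (the
+// firstPartitioner type below is hypothetical, not part of this file):
+//
+//	type firstPartitioner struct{}
+//
+//	func (firstPartitioner) Partition(m *ProducerMessage, n int32) (int32, error) {
+//		return 0, nil // always choose partition 0
+//	}
+//	func (firstPartitioner) RequiresConsistency() bool { return true }
+//
+//	// config.Producer.Partitioner = func(topic string) Partitioner { return firstPartitioner{} }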
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+	Partitioner
+
+	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+	// but takes in the message being partitioned so that the partitioner can
+	// make a per-message determination.
+	MessageRequiresConsistency(message *ProducerMessage) bool
+}
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.referenceAbs = true
+	}
+}
+
+// WithHashUnsigned means the partitioner treats the hashed value as unsigned when
+// partitioning.  This is intended to be combined with the crc32 hash algorithm to
+// be compatible with librdkafka's implementation
+func WithHashUnsigned() HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hashUnsigned = true
+	}
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.hasher = hasher()
+	}
+}
+
+// WithCustomFallbackPartitioner lets you specify what Partitioner should be used in case the message key is nil
+func WithCustomFallbackPartitioner(randomHP Partitioner) HashPartitionerOption {
+	return func(hp *hashPartitioner) {
+		hp.random = randomHP
+	}
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+	return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+type randomPartitioner struct {
+	generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+	p := new(randomPartitioner)
+	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+	return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type roundRobinPartitioner struct {
+	partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+	return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if p.partition >= numPartitions {
+		p.partition = 0
+	}
+	ret := p.partition
+	p.partition++
+	return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+	return false
+}
+
+type hashPartitioner struct {
+	random       Partitioner
+	hasher       hash.Hash32
+	referenceAbs bool
+	hashUnsigned bool
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
+// each partition dispatcher gets its own hasher, avoiding the concurrency issues that come with sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = hasher()
+		p.referenceAbs = false
+		p.hashUnsigned = false
+		return p
+	}
+}
+
+// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
+func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
+	return func(topic string) Partitioner {
+		p := new(hashPartitioner)
+		p.random = NewRandomPartitioner(topic)
+		p.hasher = fnv.New32a()
+		p.referenceAbs = false
+		p.hashUnsigned = false
+		for _, option := range options {
+			option(p)
+		}
+		return p
+	}
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = false
+	p.hashUnsigned = false
+	return p
+}
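+
+// For example (an illustrative sketch; cfg is a hypothetical *Config):
+//
+//	cfg := NewConfig()
+//	cfg.Producer.Partitioner = NewHashPartitioner
+//	// messages sharing a key now consistently map to the same partition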
+
+// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
+// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
+// that but it had a mistake and now there are people depending on both behaviors. This will
+// all go away on the next major version bump.
+func NewReferenceHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = fnv.New32a()
+	p.referenceAbs = true
+	p.hashUnsigned = false
+	return p
+}
+
+// NewConsistentCRCHashPartitioner is like NewHashPartitioner except that it uses the *unsigned* crc32 hash
+// of the encoded bytes of the message key modulus the number of partitions.  This is compatible with
+// librdkafka's `consistent_random` partitioner
+func NewConsistentCRCHashPartitioner(topic string) Partitioner {
+	p := new(hashPartitioner)
+	p.random = NewRandomPartitioner(topic)
+	p.hasher = crc32.NewIEEE()
+	p.referenceAbs = false
+	p.hashUnsigned = true
+	return p
+}
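+
+// To interoperate with librdkafka's `consistent_random` partitioning, an
+// illustrative configuration sketch:
+//
+//	cfg := NewConfig()
+//	cfg.Producer.Partitioner = NewConsistentCRCHashPartitioner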
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+	if message.Key == nil {
+		return p.random.Partition(message, numPartitions)
+	}
+	bytes, err := message.Key.Encode()
+	if err != nil {
+		return -1, err
+	}
+	p.hasher.Reset()
+	_, err = p.hasher.Write(bytes)
+	if err != nil {
+		return -1, err
+	}
+	var partition int32
+	// Turns out we were doing our absolute value in a subtly different way from the upstream
+	// implementation, but now we need to maintain backwards compat for people who started using
+	// the old version; if referenceAbs is set we are compatible with the reference Java client
+	// but not with past Sarama versions.
+	if p.referenceAbs {
+		partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
+	} else if p.hashUnsigned {
+		// librdkafka treats the hashed value as unsigned.  If `hashUnsigned` is set we are compatible
+		// with librdkafka's `consistent` partitioning but not past Sarama versions
+		partition = int32(p.hasher.Sum32() % uint32(numPartitions))
+	} else {
+		partition = int32(p.hasher.Sum32()) % numPartitions
+		if partition < 0 {
+			partition = -partition
+		}
+	}
+	return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+	return true
+}
+
+func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
+	return message.Key != nil
+}
diff --git a/vendor/github.com/IBM/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go
new file mode 100644
index 0000000..d30f6e5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/prep_encoder.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
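+// prepEncoder is the sizing pass of encoding: each put* method only adds the
+// field's wire width to length without writing any bytes, so the caller can
+// allocate an exactly-sized buffer before running the real encoding pass.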
+type prepEncoder struct {
+	stack  []pushEncoder
+	length int
+}
+
+type prepFlexibleEncoder struct {
+	*prepEncoder
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+	pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putUVarint(in uint64) {
+	var buf [binary.MaxVarintLen64]byte
+	pe.length += binary.PutUvarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putFloat64(in float64) {
+	pe.length += 8
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+	if in > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+	}
+	pe.length += 4
+	return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+	pe.length++
+}
+
+func (pe *prepEncoder) putKError(in KError) {
+	pe.length += 2
+}
+
+func (pe *prepEncoder) putDurationMs(in time.Duration) {
+	pe.length += 4
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+	pe.length += 4
+	if in == nil {
+		return nil
+	}
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		pe.putVarint(-1)
+		return nil
+	}
+	pe.putVarint(int64(len(in)))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+	if len(in) > math.MaxInt32 {
+		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.length += 2
+		return nil
+	}
+	return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+	pe.length += 2
+	if len(in) > math.MaxInt16 {
+		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+	}
+	pe.length += len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		pe.length += 4
+		return nil
+	}
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	pe.length += 8 * len(in)
+	return nil
+}
+
+func (pe *prepEncoder) putEmptyTaggedFieldArray() {
+}
+
+func (pe *prepEncoder) offset() int {
+	return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+	in.saveOffset(pe.length)
+	pe.length += in.reserveLength()
+	pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+	in := pe.stack[len(pe.stack)-1]
+	pe.stack = pe.stack[:len(pe.stack)-1]
+	if dpe, ok := in.(dynamicPushEncoder); ok {
+		pe.length += dpe.adjustLength(pe.length)
+	}
+
+	return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putArrayLength(in int) error {
+	pe.putUVarint(uint64(in + 1))
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putBytes(in []byte) error {
+	pe.putUVarint(uint64(len(in) + 1))
+	return pe.putRawBytes(in)
+}
+
+func (pe *prepFlexibleEncoder) putString(in string) error {
+	if err := pe.putArrayLength(len(in)); err != nil {
+		return err
+	}
+	return pe.putRawBytes([]byte(in))
+}
+
+func (pe *prepFlexibleEncoder) putNullableString(in *string) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	} else {
+		return pe.putString(*in)
+	}
+}
+
+func (pe *prepFlexibleEncoder) putStringArray(in []string) error {
+	err := pe.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, str := range in {
+		if err := pe.putString(str); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		pe.putUVarint(0)
+		return nil
+	}
+
+	pe.putUVarint(uint64(len(in)) + 1)
+	pe.length += 4 * len(in)
+	return nil
+}
+
+func (pe *prepFlexibleEncoder) putEmptyTaggedFieldArray() {
+	pe.putUVarint(0)
+}
diff --git a/vendor/github.com/IBM/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go
new file mode 100644
index 0000000..de88ba4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_request.go
@@ -0,0 +1,274 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by the `min.insync.replicas` setting in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response, the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
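+
+// A typical durability-oriented setting, as an illustrative sketch (not a
+// recommendation for every workload; cfg is a hypothetical *Config):
+//
+//	cfg := NewConfig()
+//	cfg.Producer.RequiredAcks = WaitForAll // broker waits for min.insync.replicas acks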
+
+type ProduceRequest struct {
+	TransactionalID *string
+	RequiredAcks    RequiredAcks
+	Timeout         int32
+	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+	records         map[string]map[int32]Records
+}
+
+func (r *ProduceRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram,
+) int64 {
+	var topicRecordCount int64
+	for _, messageBlock := range msgSet.Messages {
+		// Is this a fake "message" wrapping real messages?
+		if messageBlock.Msg.Set != nil {
+			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+		} else {
+			// A single uncompressed message
+			topicRecordCount++
+		}
+		// Better be safe than sorry when computing the compression ratio
+		if messageBlock.Msg.compressedSize != 0 {
+			compressionRatio := float64(len(messageBlock.Msg.Value)) /
+				float64(messageBlock.Msg.compressedSize)
+			// Histograms do not support decimal values, so multiply by 100 for better precision
+			intCompressionRatio := int64(100 * compressionRatio)
+			compressionRatioMetric.Update(intCompressionRatio)
+			topicCompressionRatioMetric.Update(intCompressionRatio)
+		}
+	}
+	return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+	topicCompressionRatioMetric metrics.Histogram,
+) int64 {
+	if recordBatch.compressedRecords != nil {
+		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+		compressionRatioMetric.Update(compressionRatio)
+		topicCompressionRatioMetric.Update(compressionRatio)
+	}
+
+	return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	if r.Version >= 3 {
+		if err := pe.putNullableString(r.TransactionalID); err != nil {
+			return err
+		}
+	}
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+	totalRecordCount := int64(0)
+
+	err := pe.putArrayLength(len(r.records))
+	if err != nil {
+		return err
+	}
+
+	for topic, partitions := range r.records {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, records := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = records.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				if r.Version >= 3 {
+					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+				} else {
+					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	r.Version = version
+
+	if version >= 3 {
+		id, err := pd.getNullableString()
+		if err != nil {
+			return err
+		}
+		r.TransactionalID = id
+	}
+	requiredAcks, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.RequiredAcks = RequiredAcks(requiredAcks)
+	if r.Timeout, err = pd.getInt32(); err != nil {
+		return err
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+
+	r.records = make(map[string]map[int32]Records)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.records[topic] = make(map[int32]Records)
+
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			size, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			recordsDecoder, err := pd.getSubset(int(size))
+			if err != nil {
+				return err
+			}
+			var records Records
+			if err := records.decode(recordsDecoder); err != nil {
+				return err
+			}
+			r.records[topic][partition] = records
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+	return apiKeyProduce
+}
+
+func (r *ProduceRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *ProduceRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 4, 5:
+		return V1_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+	if r.records == nil {
+		r.records = make(map[string]map[int32]Records)
+	}
+
+	if r.records[topic] == nil {
+		r.records[topic] = make(map[int32]Records)
+	}
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+	r.ensureRecords(topic, partition)
+	set := r.records[topic][partition].MsgSet
+
+	if set == nil {
+		set = new(MessageSet)
+		r.records[topic][partition] = newLegacyRecords(set)
+	}
+
+	set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+	r.ensureRecords(topic, partition)
+	r.records[topic][partition] = newDefaultRecords(batch)
+}
diff --git a/vendor/github.com/IBM/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go
new file mode 100644
index 0000000..f3c5d5d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_response.go
@@ -0,0 +1,237 @@
+package sarama
+
+import (
+	"fmt"
+	"time"
+)
+
+// Protocol, http://kafka.apache.org/protocol.html
+// v1
+// v2 = v3 = v4
+// v5 = v6 = v7
+// Produce Response (Version: 7) => [responses] throttle_time_ms
+//   responses => topic [partition_responses]
+//     topic => STRING
+//     partition_responses => partition error_code base_offset log_append_time log_start_offset
+//       partition => INT32
+//       error_code => INT16
+//       base_offset => INT64
+//       log_append_time => INT64
+//       log_start_offset => INT64
+//   throttle_time_ms => INT32
+
+// partition_responses in protocol
+type ProduceResponseBlock struct {
+	Err         KError    // v0, error_code
+	Offset      int64     // v0, base_offset
+	Timestamp   time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
+	StartOffset int64     // v5, log_start_offset
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+	b.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	b.Offset, err = pd.getInt64()
+	if err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if millis, err := pd.getInt64(); err != nil {
+			return err
+		} else if millis != -1 {
+			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+		}
+	}
+
+	if version >= 5 {
+		b.StartOffset, err = pd.getInt64()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+	pe.putKError(b.Err)
+	pe.putInt64(b.Offset)
+
+	if version >= 2 {
+		timestamp := int64(-1)
+		if !b.Timestamp.Before(time.Unix(0, 0)) {
+			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+		} else if !b.Timestamp.IsZero() {
+			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+		}
+		pe.putInt64(timestamp)
+	}
+
+	if version >= 5 {
+		pe.putInt64(b.StartOffset)
+	}
+
+	return nil
+}
+
+type ProduceResponse struct {
+	Blocks       map[string]map[int32]*ProduceResponseBlock // v0, responses
+	Version      int16
+	ThrottleTime time.Duration // v1, throttle_time_ms
+}
+
+func (r *ProduceResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+
+	numTopics, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+	for i := 0; i < numTopics; i++ {
+		name, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		numBlocks, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+		for j := 0; j < numBlocks; j++ {
+			id, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+
+			block := new(ProduceResponseBlock)
+			err = block.decode(pd, version)
+			if err != nil {
+				return err
+			}
+			r.Blocks[name][id] = block
+		}
+	}
+
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+	err := pe.putArrayLength(len(r.Blocks))
+	if err != nil {
+		return err
+	}
+	for topic, partitions := range r.Blocks {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		for id, prb := range partitions {
+			pe.putInt32(id)
+			err = prb.encode(pe, r.Version)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if r.Version >= 1 {
+		pe.putDurationMs(r.ThrottleTime)
+	}
+	return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+	return apiKeyProduce
+}
+
+func (r *ProduceResponse) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *ProduceResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 7:
+		return V2_1_0_0
+	case 6:
+		return V2_0_0_0
+	case 4, 5:
+		return V1_0_0_0
+	case 3:
+		return V0_11_0_0
+	case 2:
+		return V0_10_0_0
+	case 1:
+		return V0_9_0_0
+	case 0:
+		return V0_8_2_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *ProduceResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+	if r.Blocks == nil {
+		return nil
+	}
+
+	if r.Blocks[topic] == nil {
+		return nil
+	}
+
+	return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+	if r.Blocks == nil {
+		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+	}
+	byTopic, ok := r.Blocks[topic]
+	if !ok {
+		byTopic = make(map[int32]*ProduceResponseBlock)
+		r.Blocks[topic] = byTopic
+	}
+	block := &ProduceResponseBlock{
+		Err: err,
+	}
+	if r.Version >= 2 {
+		block.Timestamp = time.Now()
+	}
+	byTopic[partition] = block
+}
diff --git a/vendor/github.com/IBM/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go
new file mode 100644
index 0000000..c91403d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_set.go
@@ -0,0 +1,289 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"time"
+)
+
+type partitionSet struct {
+	msgs          []*ProducerMessage
+	recordsToSend Records
+	bufferBytes   int
+}
+
+type produceSet struct {
+	parent        *asyncProducer
+	msgs          map[string]map[int32]*partitionSet
+	producerID    int64
+	producerEpoch int16
+
+	bufferBytes int
+	bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+	pid, epoch := parent.txnmgr.getProducerID()
+	return &produceSet{
+		msgs:          make(map[string]map[int32]*partitionSet),
+		parent:        parent,
+		producerID:    pid,
+		producerEpoch: epoch,
+	}
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+	var err error
+	var key, val []byte
+
+	if msg.Key != nil {
+		if key, err = msg.Key.Encode(); err != nil {
+			return err
+		}
+	}
+
+	if msg.Value != nil {
+		if val, err = msg.Value.Encode(); err != nil {
+			return err
+		}
+	}
+
+	timestamp := msg.Timestamp
+	if timestamp.IsZero() {
+		timestamp = time.Now()
+	}
+	timestamp = timestamp.Truncate(time.Millisecond)
+
+	partitions := ps.msgs[msg.Topic]
+	if partitions == nil {
+		partitions = make(map[int32]*partitionSet)
+		ps.msgs[msg.Topic] = partitions
+	}
+
+	var size int
+
+	set := partitions[msg.Partition]
+	if set == nil {
+		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+			batch := &RecordBatch{
+				FirstTimestamp:   timestamp,
+				Version:          2,
+				Codec:            ps.parent.conf.Producer.Compression,
+				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+				ProducerID:       ps.producerID,
+				ProducerEpoch:    ps.producerEpoch,
+			}
+			if ps.parent.conf.Producer.Idempotent {
+				batch.FirstSequence = msg.sequenceNumber
+			}
+			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+			size = recordBatchOverhead
+		} else {
+			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+		}
+		partitions[msg.Partition] = set
+	}
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
+			return errors.New("assertion failed: message out of sequence added to a batch")
+		}
+	}
+
+	// Past this point we can't return an error, because we've already added the message to the set.
+	set.msgs = append(set.msgs, msg)
+
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		// We are being conservative here to avoid having to prep encode the record
+		size += maximumRecordOverhead
+		rec := &Record{
+			Key:            key,
+			Value:          val,
+			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
+		}
+		size += len(key) + len(val)
+		if len(msg.Headers) > 0 {
+			rec.Headers = make([]*RecordHeader, len(msg.Headers))
+			for i := range msg.Headers {
+				rec.Headers[i] = &msg.Headers[i]
+				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+			}
+		}
+		set.recordsToSend.RecordBatch.addRecord(rec)
+	} else {
+		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+			msgToSend.Timestamp = timestamp
+			msgToSend.Version = 1
+		}
+		set.recordsToSend.MsgSet.addMessage(msgToSend)
+		size = producerMessageOverhead + len(key) + len(val)
+	}
+
+	set.bufferBytes += size
+	ps.bufferBytes += size
+	ps.bufferCount++
+
+	return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+	req := &ProduceRequest{
+		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+		req.Version = 2
+	}
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		req.Version = 3
+		if ps.parent.IsTransactional() {
+			req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID
+		}
+	}
+	if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) {
+		req.Version = 5
+	}
+	if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) {
+		req.Version = 6
+	}
+	if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
+		req.Version = 7
+	}
+
+	for topic, partitionSets := range ps.msgs {
+		for partition, set := range partitionSets {
+			if req.Version >= 3 {
+				// If the API version we're hitting is 3 or greater, we need to calculate
+				// offsets for each record in the batch relative to FirstOffset.
+				// Additionally, we must set LastOffsetDelta to the value of the last offset
+				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
+				// final record of any batch will have an offset of (# of records in batch) - 1.
+				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+				//  under the RecordBatch section for details.)
+				rb := set.recordsToSend.RecordBatch
+				if len(rb.Records) > 0 {
+					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+					var maxTimestampDelta time.Duration
+					for i, record := range rb.Records {
+						record.OffsetDelta = int64(i)
+						maxTimestampDelta = max(maxTimestampDelta, record.TimestampDelta)
+					}
+					// Also set the MaxTimestamp similar to other clients.
+					rb.MaxTimestamp = rb.FirstTimestamp.Add(maxTimestampDelta)
+				}
+
+				// Set the batch as transactional when a transactionalID is set
+				rb.IsTransactional = ps.parent.IsTransactional()
+
+				req.AddBatch(topic, partition, rb)
+				continue
+			}
+			if ps.parent.conf.Producer.Compression == CompressionNone {
+				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
+			} else {
+				// When compression is enabled, the entire set for each partition is compressed
+				// and sent as the payload of a single fake "message" with the appropriate codec
+				// set and no key. When the server sees a message with a compression codec, it
+				// decompresses the payload and treats the result as its message set.
+
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					// If our version is 0.10 or later, assign relative offsets
+					// to the inner messages. This lets the broker avoid
+					// recompressing the message set.
+					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+					// for details on relative offsets.)
+					for i, msg := range set.recordsToSend.MsgSet.Messages {
+						msg.Offset = int64(i)
+					}
+				}
+				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry)
+				if err != nil {
+					Logger.Println(err) // if this happens, it's basically our fault.
+					panic(err)
+				}
+				compMsg := &Message{
+					Codec:            ps.parent.conf.Producer.Compression,
+					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+					Key:              nil,
+					Value:            payload,
+					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
+				}
+				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+					compMsg.Version = 1
+					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
+				}
+				req.AddMessage(topic, partition, compMsg)
+			}
+		}
+	}
+
+	return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
+	for topic, partitionSet := range ps.msgs {
+		for partition, set := range partitionSet {
+			cb(topic, partition, set)
+		}
+	}
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+	if ps.msgs[topic] == nil {
+		return nil
+	}
+	set := ps.msgs[topic][partition]
+	if set == nil {
+		return nil
+	}
+	ps.bufferBytes -= set.bufferBytes
+	ps.bufferCount -= len(set.msgs)
+	delete(ps.msgs[topic], partition)
+	return set.msgs
+}
+
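+// wouldOverflow reports whether adding msg would exceed the maximum
+// size-on-the-wire of the request, the per-partition batch size limit
+// (Producer.MaxMessageBytes), or the Producer.Flush.MaxMessages count.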
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+	version := 1
+	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+		version = 2
+	}
+
+	switch {
+	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+	case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)):
+		return true
+	// Would we overflow the size-limit of a message-batch for this partition?
+	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+		return true
+	// Would we overflow simply in number of messages?
+	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+		return true
+	default:
+		return false
+	}
+}
+
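+// readyToFlush reports whether the buffered messages have hit a flush
+// trigger: always when no flush limits are configured, otherwise when the
+// message-count or byte thresholds are reached. The Producer.Flush.Frequency
+// timer is handled by the caller.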
+func (ps *produceSet) readyToFlush() bool {
+	switch {
+	// If we don't have any messages, nothing else matters
+	case ps.empty():
+		return false
+	// If all three config values are 0, we always flush as-fast-as-possible
+	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+		return true
+	// If we've passed the message trigger-point
+	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+		return true
+	// If we've passed the byte trigger-point
+	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+		return true
+	default:
+		return false
+	}
+}
+
+func (ps *produceSet) empty() bool {
+	return ps.bufferCount == 0
+}
diff --git a/vendor/github.com/IBM/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go
new file mode 100644
index 0000000..4f33af0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/quota_types.go
@@ -0,0 +1,21 @@
+package sarama
+
+type (
+	QuotaEntityType string
+
+	QuotaMatchType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java
+const (
+	QuotaEntityUser     QuotaEntityType = "user"
+	QuotaEntityClientID QuotaEntityType = "client-id"
+	QuotaEntityIP       QuotaEntityType = "ip"
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java
+const (
+	QuotaMatchExact QuotaMatchType = iota
+	QuotaMatchDefault
+	QuotaMatchAny
+)
diff --git a/vendor/github.com/IBM/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go
new file mode 100644
index 0000000..ddeb543
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/real_decoder.go
@@ -0,0 +1,549 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"math"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+var (
+	errInvalidArrayLength     = PacketDecodingError{"invalid array length"}
+	errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+	errInvalidStringLength    = PacketDecodingError{"invalid string length"}
+	errVarintOverflow         = PacketDecodingError{"varint overflow"}
+	errUVarintOverflow        = PacketDecodingError{"uvarint overflow"}
+	errInvalidBool            = PacketDecodingError{"invalid bool"}
+)
+
+type realDecoder struct {
+	raw      []byte
+	off      int
+	stack    []pushDecoder
+	registry metrics.Registry
+}
+
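+// realFlexibleDecoder decodes the flexible ("compact") wire format
+// introduced by KIP-482, in which lengths are encoded as unsigned varints
+// offset by one and structures may be followed by tagged fields.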
+type realFlexibleDecoder struct {
+	*realDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+	if rd.remaining() < 1 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int8(rd.raw[rd.off])
+	rd.off++
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+	if rd.remaining() < 2 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+	rd.off += 2
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+	rd.off += 4
+	return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+	tmp, n := binary.Varint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	if n < 0 {
+		rd.off -= n
+		return -1, errVarintOverflow
+	}
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getUVarint() (uint64, error) {
+	tmp, n := binary.Uvarint(rd.raw[rd.off:])
+	if n == 0 {
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	if n < 0 {
+		rd.off -= n
+		return 0, errUVarintOverflow
+	}
+
+	rd.off += n
+	return tmp, nil
+}
+
+func (rd *realDecoder) getFloat64() (float64, error) {
+	if rd.remaining() < 8 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	tmp := math.Float64frombits(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+	rd.off += 8
+	return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+	if rd.remaining() < 4 {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	}
+	// cast to int32 first so the length keeps its correct signedness, then
+	// cast to int for ease of interop
+	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+	rd.off += 4
+	if tmp > rd.remaining() {
+		rd.off = len(rd.raw)
+		return -1, ErrInsufficientData
+	} else if tmp > int(MaxResponseSize) {
+		return -1, errInvalidArrayLength
+	}
+	return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+	b, err := rd.getInt8()
+	if err != nil || b == 0 {
+		return false, err
+	}
+	if b != 1 {
+		return false, errInvalidBool
+	}
+	return true, nil
+}
+
+func (rd *realDecoder) getKError() (KError, error) {
+	i, err := rd.getInt16()
+	return KError(i), err
+}
+
+func (rd *realDecoder) getDurationMs() (time.Duration, error) {
+	t, err := rd.getInt32()
+	if err != nil {
+		return time.Duration(0), err
+	}
+	return time.Duration(t) * time.Millisecond, nil
+}
+
+func (rd *realDecoder) getTaggedFieldArray(decoders taggedFieldDecoders) error {
+	return PacketDecodingError{"tagged fields used in non-flexible context"}
+}
+
+func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
+	return 0, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+	tmp, err := rd.getInt32()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+	tmp, err := rd.getVarint()
+	if err != nil {
+		return nil, err
+	}
+	if tmp == -1 {
+		return nil, nil
+	}
+
+	return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+	length, err := rd.getInt16()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return "", err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+	n, err := rd.getStringLength()
+	if err != nil || n == -1 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+n])
+	rd.off += n
+	return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	if rd.remaining() < 4*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	ret := make([]int32, n)
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	if rd.remaining() < 8*n {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	ret := make([]int64, n)
+	for i := range ret {
+		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+		rd.off += 8
+	}
+	return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+	return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+	buf, err := rd.getRawBytes(length)
+	if err != nil {
+		return nil, err
+	}
+	return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+	if length < 0 {
+		return nil, errInvalidByteSliceLength
+	} else if length > rd.remaining() {
+		rd.off = len(rd.raw)
+		return nil, ErrInsufficientData
+	}
+
+	start := rd.off
+	rd.off += length
+	return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+	if rd.remaining() < offset+length {
+		return nil, ErrInsufficientData
+	}
+	off := rd.off + offset
+	return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+func (rd *realDecoder) peekInt8(offset int) (int8, error) {
+	const byteLen = 1
+	if rd.remaining() < offset+byteLen {
+		return -1, ErrInsufficientData
+	}
+	return int8(rd.raw[rd.off+offset]), nil
+}
+
+// stacks
+
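+// push registers a pushDecoder (e.g. a CRC or length field): it records the
+// current offset and, for fixed-size fields, reserves their bytes so that
+// pop can verify them once the enclosed data has been decoded.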
+func (rd *realDecoder) push(in pushDecoder) error {
+	in.saveOffset(rd.off)
+
+	var reserve int
+	if dpd, ok := in.(dynamicPushDecoder); ok {
+		if err := dpd.decode(rd); err != nil {
+			return err
+		}
+	} else {
+		reserve = in.reserveLength()
+		if rd.remaining() < reserve {
+			rd.off = len(rd.raw)
+			return ErrInsufficientData
+		}
+	}
+
+	rd.stack = append(rd.stack, in)
+
+	rd.off += reserve
+
+	return nil
+}
+
+func (rd *realDecoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := rd.stack[len(rd.stack)-1]
+	rd.stack = rd.stack[:len(rd.stack)-1]
+
+	return in.check(rd.off, rd.raw)
+}
+
+func (rd *realDecoder) metricRegistry() metrics.Registry {
+	return rd.registry
+}
+
+func (rd *realFlexibleDecoder) getArrayLength() (int, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	if n == 0 {
+		return 0, nil
+	}
+
+	return int(n) - 1, nil
+}
+
+func (rd *realFlexibleDecoder) getEmptyTaggedFieldArray() (int, error) {
+	tagCount, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	// skip over any tagged fields without deserializing them
+	// as we don't currently support doing anything with them
+	for i := uint64(0); i < tagCount; i++ {
+		// fetch and ignore tag identifier
+		_, err := rd.getUVarint()
+		if err != nil {
+			return 0, err
+		}
+		length, err := rd.getUVarint()
+		if err != nil {
+			return 0, err
+		}
+		if _, err := rd.getRawBytes(int(length)); err != nil {
+			return 0, err
+		}
+	}
+
+	return 0, nil
+}
+
+func (rd *realFlexibleDecoder) getTaggedFieldArray(decoders taggedFieldDecoders) error {
+	// if we have no decoders just skip over the tagged fields
+	if decoders == nil {
+		_, err := rd.getEmptyTaggedFieldArray()
+		return err
+	}
+
+	tagCount, err := rd.getUVarint()
+	if err != nil {
+		return err
+	}
+
+	for i := uint64(0); i < tagCount; i++ {
+		// fetch the tag identifier so we can look up its decoder
+		id, err := rd.getUVarint()
+		if err != nil {
+			return err
+		}
+		length, err := rd.getUVarint()
+		if err != nil {
+			return err
+		}
+		bytes, err := rd.getRawBytes(int(length))
+		if err != nil {
+			return err
+		}
+		decoder, ok := decoders[id]
+		if !ok {
+			continue
+		}
+		if err := decoder(&realFlexibleDecoder{&realDecoder{raw: bytes}}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (rd *realFlexibleDecoder) getBytes() ([]byte, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	length := int(n - 1)
+	return rd.getRawBytes(length)
+}
+
+func (rd *realFlexibleDecoder) getStringLength() (int, error) {
+	length, err := rd.getUVarint()
+	if err != nil {
+		return 0, err
+	}
+
+	n := int(length - 1)
+
+	switch {
+	case n < -1:
+		return 0, errInvalidStringLength
+	case n > rd.remaining():
+		rd.off = len(rd.raw)
+		return 0, ErrInsufficientData
+	}
+
+	return n, nil
+}
+
+func (rd *realFlexibleDecoder) getString() (string, error) {
+	length, err := rd.getStringLength()
+	if err != nil || length == -1 {
+		return "", err
+	}
+
+	if length < 0 {
+		return "", errInvalidStringLength
+	}
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return tmpStr, nil
+}
+
+func (rd *realFlexibleDecoder) getNullableString() (*string, error) {
+	length, err := rd.getStringLength()
+	if err != nil {
+		return nil, err
+	}
+
+	if length < 0 {
+		return nil, err
+	}
+
+	tmpStr := string(rd.raw[rd.off : rd.off+length])
+	rd.off += length
+	return &tmpStr, err
+}
+
+func (rd *realFlexibleDecoder) getInt32Array() ([]int32, error) {
+	n, err := rd.getUVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	if n == 0 {
+		return nil, nil
+	}
+
+	arrayLength := int(n) - 1
+
+	ret := make([]int32, arrayLength)
+
+	for i := range ret {
+		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+		rd.off += 4
+	}
+	return ret, nil
+}
+
+func (rd *realFlexibleDecoder) getStringArray() ([]string, error) {
+	n, err := rd.getArrayLength()
+	if err != nil {
+		return nil, err
+	}
+	if n <= 0 {
+		return nil, nil
+	}
+
+	ret := make([]string, n)
+	for i := range ret {
+		str, err := rd.getString()
+		if err != nil {
+			return nil, err
+		}
+
+		ret[i] = str
+	}
+	return ret, nil
+}
diff --git a/vendor/github.com/IBM/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go
new file mode 100644
index 0000000..b9bd178
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/real_encoder.go
@@ -0,0 +1,268 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"errors"
+	"math"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+	raw      []byte
+	off      int
+	stack    []pushEncoder
+	registry metrics.Registry
+}
+
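+// realFlexibleEncoder writes the flexible ("compact") wire format: lengths
+// are encoded as unsigned varints offset by one, with zero marking a null
+// value.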
+type realFlexibleEncoder struct {
+	*realEncoder
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+	re.raw[re.off] = byte(in)
+	re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+	re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+	re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+	re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putUVarint(in uint64) {
+	re.off += binary.PutUvarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putFloat64(in float64) {
+	binary.BigEndian.PutUint64(re.raw[re.off:], math.Float64bits(in))
+	re.off += 8
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+	re.putInt32(int32(in))
+	return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+	if in {
+		re.putInt8(1)
+		return
+	}
+	re.putInt8(0)
+}
+
+func (re *realEncoder) putKError(in KError) {
+	re.putInt16(int16(in))
+}
+
+func (re *realEncoder) putDurationMs(in time.Duration) {
+	re.putInt32(int32(in / time.Millisecond))
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	re.putInt32(int32(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+	if in == nil {
+		re.putVarint(-1)
+		return nil
+	}
+	re.putVarint(int64(len(in)))
+	return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+	re.putInt16(int16(len(in)))
+	copy(re.raw[re.off:], in)
+	re.off += len(in)
+	return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt16(-1)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		re.putInt32(-1)
+		return nil
+	}
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+	for _, val := range in {
+		re.putInt64(val)
+	}
+	return nil
+}
+
+func (re *realEncoder) putEmptyTaggedFieldArray() {
+}
+
+func (re *realEncoder) offset() int {
+	return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+	in.saveOffset(re.off)
+	re.off += in.reserveLength()
+	re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+	// this is go's ugly pop pattern (the inverse of append)
+	in := re.stack[len(re.stack)-1]
+	re.stack = re.stack[:len(re.stack)-1]
+
+	return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+	return re.registry
+}
+
+func (re *realFlexibleEncoder) putArrayLength(in int) error {
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(in + 1))
+	return nil
+}
+
+func (re *realFlexibleEncoder) putBytes(in []byte) error {
+	re.putUVarint(uint64(len(in) + 1))
+	return re.putRawBytes(in)
+}
+
+func (re *realFlexibleEncoder) putString(in string) error {
+	if err := re.putArrayLength(len(in)); err != nil {
+		return err
+	}
+	return re.putRawBytes([]byte(in))
+}
+
+func (re *realFlexibleEncoder) putNullableString(in *string) error {
+	if in == nil {
+		re.putInt8(0)
+		return nil
+	}
+	return re.putString(*in)
+}
+
+func (re *realFlexibleEncoder) putStringArray(in []string) error {
+	err := re.putArrayLength(len(in))
+	if err != nil {
+		return err
+	}
+
+	for _, val := range in {
+		if err := re.putString(val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (re *realFlexibleEncoder) putInt32Array(in []int32) error {
+	if in == nil {
+		return errors.New("expected int32 array to be non null")
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realFlexibleEncoder) putNullableInt32Array(in []int32) error {
+	if in == nil {
+		re.putUVarint(0)
+		return nil
+	}
+	// 0 represents a null array, so +1 has to be added
+	re.putUVarint(uint64(len(in)) + 1)
+	for _, val := range in {
+		re.putInt32(val)
+	}
+	return nil
+}
+
+func (re *realFlexibleEncoder) putEmptyTaggedFieldArray() {
+	re.putUVarint(0)
+}
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/record.go
rename to vendor/github.com/IBM/sarama/record.go
diff --git a/vendor/github.com/IBM/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go
new file mode 100644
index 0000000..c422c5c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/record_batch.go
@@ -0,0 +1,226 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
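+// recordBatchOverhead is the size in bytes of the v2 record batch header
+// fields covered by the batch length (partition leader epoch through the
+// record count), i.e. everything in a batch except the records themselves.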
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+	for _, r := range e {
+		if err := r.encode(pe); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+	records := make([]Record, len(e))
+	for i := range e {
+		if err := records[i].decode(pd); err != nil {
+			return err
+		}
+		e[i] = &records[i]
+	}
+	return nil
+}
+
+type RecordBatch struct {
+	FirstOffset           int64
+	PartitionLeaderEpoch  int32
+	Version               int8
+	Codec                 CompressionCodec
+	CompressionLevel      int
+	Control               bool
+	LogAppendTime         bool
+	LastOffsetDelta       int32
+	FirstTimestamp        time.Time
+	MaxTimestamp          time.Time
+	ProducerID            int64
+	ProducerEpoch         int16
+	FirstSequence         int32
+	Records               []*Record
+	PartialTrailingRecord bool
+	IsTransactional       bool
+
+	compressedRecords []byte
+	recordsLen        int // uncompressed records size
+}
+
+func (b *RecordBatch) LastOffset() int64 {
+	return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+	if b.Version != 2 {
+		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+	}
+	pe.putInt64(b.FirstOffset)
+	pe.push(&lengthField{})
+	pe.putInt32(b.PartitionLeaderEpoch)
+	pe.putInt8(b.Version)
+	pe.push(newCRC32Field(crcCastagnoli))
+	pe.putInt16(b.computeAttributes())
+	pe.putInt32(b.LastOffsetDelta)
+
+	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+		return err
+	}
+
+	pe.putInt64(b.ProducerID)
+	pe.putInt16(b.ProducerEpoch)
+	pe.putInt32(b.FirstSequence)
+
+	if err := pe.putArrayLength(len(b.Records)); err != nil {
+		return err
+	}
+
+	if b.compressedRecords == nil {
+		if err := b.encodeRecords(pe); err != nil {
+			return err
+		}
+	}
+	if err := pe.putRawBytes(b.compressedRecords); err != nil {
+		return err
+	}
+
+	if err := pe.pop(); err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+	if b.FirstOffset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	batchLen, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if b.Version, err = pd.getInt8(); err != nil {
+		return err
+	}
+
+	crc32Decoder := acquireCrc32Field(crcCastagnoli)
+	defer releaseCrc32Field(crc32Decoder)
+
+	if err = pd.push(crc32Decoder); err != nil {
+		return err
+	}
+
+	attributes, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+	b.Control = attributes&controlMask == controlMask
+	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
+
+	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+		return err
+	}
+
+	if b.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	if b.FirstSequence, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	numRecs, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if numRecs >= 0 {
+		b.Records = make([]*Record, numRecs)
+	}
+
+	bufSize := int(batchLen) - recordBatchOverhead
+	recBuffer, err := pd.getRawBytes(bufSize)
+	if err != nil {
+		if errors.Is(err, ErrInsufficientData) {
+			b.PartialTrailingRecord = true
+			b.Records = nil
+			return nil
+		}
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	recBuffer, err = decompress(b.Codec, recBuffer)
+	if err != nil {
+		return err
+	}
+
+	b.recordsLen = len(recBuffer)
+	err = decode(recBuffer, recordsArray(b.Records), nil)
+	if errors.Is(err, ErrInsufficientData) {
+		b.PartialTrailingRecord = true
+		b.Records = nil
+		return nil
+	}
+	return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+	var raw []byte
+	var err error
+	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+		return err
+	}
+	b.recordsLen = len(raw)
+
+	b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+	return err
+}
+
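+// computeAttributes packs the compression codec, control, timestamp-type and
+// transactional flags into the batch attributes bitfield.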
+func (b *RecordBatch) computeAttributes() int16 {
+	attr := int16(b.Codec) & int16(compressionCodecMask)
+	if b.Control {
+		attr |= controlMask
+	}
+	if b.LogAppendTime {
+		attr |= timestampTypeMask
+	}
+	if b.IsTransactional {
+		attr |= isTransactionalMask
+	}
+	return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+	b.Records = append(b.Records, r)
+}
diff --git a/vendor/github.com/IBM/sarama/records.go b/vendor/github.com/IBM/sarama/records.go
new file mode 100644
index 0000000..ec03d71
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/records.go
@@ -0,0 +1,219 @@
+package sarama
+
+import "fmt"
+
+const (
+	unknownRecords = iota
+	legacyRecords
+	defaultRecords
+
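+	// magicOffset is the position of the magic byte in both record formats:
+	// offset(8) + message size(4) + CRC(4) for legacy message sets, and
+	// base offset(8) + batch length(4) + partition leader epoch(4) for v2 batches.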
+	magicOffset = 16
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+	recordsType int
+	MsgSet      *MessageSet
+	RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+	return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+	return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets the type of the Records union depending on which of MsgSet or RecordBatch is non-nil.
+// The first return value indicates whether both fields are nil (in which case the type is left unset).
+// If both fields are non-nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+	if r.MsgSet == nil && r.RecordBatch == nil {
+		return true, nil
+	}
+	if r.MsgSet != nil && r.RecordBatch != nil {
+		return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
+	}
+	r.recordsType = defaultRecords
+	if r.MsgSet != nil {
+		r.recordsType = legacyRecords
+	}
+	return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return nil
+		}
+		return r.MsgSet.encode(pe)
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return nil
+		}
+		return r.RecordBatch.encode(pe)
+	}
+
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+	magic, err := magicValue(pd)
+	if err != nil {
+		return err
+	}
+
+	r.recordsType = defaultRecords
+	if magic < 2 {
+		r.recordsType = legacyRecords
+	}
+
+	return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+	if r.recordsType == unknownRecords {
+		if err := r.setTypeFromMagic(pd); err != nil {
+			return err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		r.MsgSet = &MessageSet{}
+		return r.MsgSet.decode(pd)
+	case defaultRecords:
+		r.RecordBatch = &RecordBatch{}
+		return r.RecordBatch.decode(pd)
+	}
+	return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return 0, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return 0, nil
+		}
+		return len(r.MsgSet.Messages), nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return 0, nil
+		}
+		return len(r.RecordBatch.Records), nil
+	}
+	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.PartialTrailingMessage, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.PartialTrailingRecord, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case legacyRecords:
+		return false, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return false, nil
+		}
+		return r.RecordBatch.Control, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isOverflow() (bool, error) {
+	if r.recordsType == unknownRecords {
+		if empty, err := r.setTypeFromFields(); err != nil || empty {
+			return false, err
+		}
+	}
+
+	switch r.recordsType {
+	case unknownRecords:
+		return false, nil
+	case legacyRecords:
+		if r.MsgSet == nil {
+			return false, nil
+		}
+		return r.MsgSet.OverflowMessage, nil
+	case defaultRecords:
+		return false, nil
+	}
+	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) nextOffset() (*int64, error) {
+	switch r.recordsType {
+	case unknownRecords:
+		return nil, nil
+	case legacyRecords:
+		return nil, nil
+	case defaultRecords:
+		if r.RecordBatch == nil {
+			return nil, nil
+		}
+		nextOffset := r.RecordBatch.LastOffset() + 1
+		return &nextOffset, nil
+	}
+	return nil, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+	return pd.peekInt8(magicOffset)
+}
+
+func (r *Records) getControlRecord() (ControlRecord, error) {
+	if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 {
+		return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
+	}
+
+	firstRecord := r.RecordBatch.Records[0]
+	controlRecord := ControlRecord{}
+	err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
+	if err != nil {
+		return ControlRecord{}, err
+	}
+
+	return controlRecord, nil
+}
diff --git a/vendor/github.com/IBM/sarama/request.go b/vendor/github.com/IBM/sarama/request.go
new file mode 100644
index 0000000..83a1c46
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/request.go
@@ -0,0 +1,239 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+type protocolBody interface {
+	encoder
+	versionedDecoder
+	key() int16
+	version() int16
+	setVersion(int16)
+	headerVersion() int16
+	isValidVersion() bool
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) error {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+
+	if r.body.headerVersion() >= 1 {
+		err := pe.putString(r.clientID)
+		if err != nil {
+			return err
+		}
+	}
+
+	if r.body.headerVersion() >= 2 {
+		// we don't use tag headers at the moment so we just put an array length of 0
+		pe.putUVarint(0)
+	}
+	pe = prepareFlexibleEncoder(pe, r.body)
+
+	err := r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	key, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	version, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.correlationID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	r.clientID, err = pd.getString()
+	if err != nil {
+		return err
+	}
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+
+	if r.body.headerVersion() >= 2 {
+		// tagged field
+		_, err = pd.getUVarint()
+		if err != nil {
+			return err
+		}
+	}
+
+	if decoder, ok := pd.(*realDecoder); ok {
+		pd = prepareFlexibleDecoder(decoder, r.body, version)
+	}
+	return r.body.decode(pd, version)
+}
+
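+// decodeRequest reads one length-prefixed request from r and returns the
+// decoded request along with the total number of bytes consumed.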
+func decodeRequest(r io.Reader) (*request, int, error) {
+	var (
+		bytesRead   int
+		lengthBytes = make([]byte, 4)
+	)
+
+	if n, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, n, err
+	}
+
+	bytesRead += len(lengthBytes)
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if n, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead + n, err
+	}
+
+	bytesRead += len(encodedReq)
+
+	req := &request{}
+	if err := decode(encodedReq, req, nil); err != nil {
+		return nil, bytesRead, err
+	}
+
+	return req, bytesRead, nil
+}
+
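+// allocateBody returns an empty protocolBody for the given API key and
+// version, or nil if the key is not supported.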
+func allocateBody(key, version int16) protocolBody {
+	switch key {
+	case apiKeyProduce:
+		return &ProduceRequest{Version: version}
+	case apiKeyFetch:
+		return &FetchRequest{Version: version}
+	case apiKeyListOffsets:
+		return &OffsetRequest{Version: version}
+	case apiKeyMetadata:
+		return &MetadataRequest{Version: version}
+	// 4: LeaderAndIsrRequest
+	// 5: StopReplicaRequest
+	// 6: UpdateMetadataRequest
+	// 7: ControlledShutdownRequest
+	case apiKeyOffsetCommit:
+		return &OffsetCommitRequest{Version: version}
+	case apiKeyOffsetFetch:
+		return &OffsetFetchRequest{Version: version}
+	case apiKeyFindCoordinator:
+		return &FindCoordinatorRequest{Version: version}
+	case apiKeyJoinGroup:
+		return &JoinGroupRequest{Version: version}
+	case apiKeyHeartbeat:
+		return &HeartbeatRequest{Version: version}
+	case apiKeyLeaveGroup:
+		return &LeaveGroupRequest{Version: version}
+	case apiKeySyncGroup:
+		return &SyncGroupRequest{Version: version}
+	case apiKeyDescribeGroups:
+		return &DescribeGroupsRequest{Version: version}
+	case apiKeyListGroups:
+		return &ListGroupsRequest{Version: version}
+	case apiKeySaslHandshake:
+		return &SaslHandshakeRequest{Version: version}
+	case apiKeyApiVersions:
+		return &ApiVersionsRequest{Version: version}
+	case apiKeyCreateTopics:
+		return &CreateTopicsRequest{Version: version}
+	case apiKeyDeleteTopics:
+		return &DeleteTopicsRequest{Version: version}
+	case apiKeyDeleteRecords:
+		return &DeleteRecordsRequest{Version: version}
+	case apiKeyInitProducerId:
+		return &InitProducerIDRequest{Version: version}
+	// 23: OffsetForLeaderEpochRequest
+	case apiKeyAddPartitionsToTxn:
+		return &AddPartitionsToTxnRequest{Version: version}
+	case apiKeyAddOffsetsToTxn:
+		return &AddOffsetsToTxnRequest{Version: version}
+	case apiKeyEndTxn:
+		return &EndTxnRequest{Version: version}
+	// 27: WriteTxnMarkersRequest
+	case apiKeyTxnOffsetCommit:
+		return &TxnOffsetCommitRequest{Version: version}
+	case apiKeyDescribeAcls:
+		return &DescribeAclsRequest{Version: int(version)}
+	case apiKeyCreateAcls:
+		return &CreateAclsRequest{Version: version}
+	case apiKeyDeleteAcls:
+		return &DeleteAclsRequest{Version: int(version)}
+	case apiKeyDescribeConfigs:
+		return &DescribeConfigsRequest{Version: version}
+	case apiKeyAlterConfigs:
+		return &AlterConfigsRequest{Version: version}
+	// 34: AlterReplicaLogDirsRequest
+	case apiKeyDescribeLogDirs:
+		return &DescribeLogDirsRequest{Version: version}
+	case apiKeySASLAuth:
+		return &SaslAuthenticateRequest{Version: version}
+	case apiKeyCreatePartitions:
+		return &CreatePartitionsRequest{Version: version}
+	// 38: CreateDelegationTokenRequest
+	// 39: RenewDelegationTokenRequest
+	// 40: ExpireDelegationTokenRequest
+	// 41: DescribeDelegationTokenRequest
+	case apiKeyDeleteGroups:
+		return &DeleteGroupsRequest{Version: version}
+	case apiKeyElectLeaders:
+		return &ElectLeadersRequest{Version: version}
+	case apiKeyIncrementalAlterConfigs:
+		return &IncrementalAlterConfigsRequest{Version: version}
+	case apiKeyAlterPartitionReassignments:
+		return &AlterPartitionReassignmentsRequest{Version: version}
+	case apiKeyListPartitionReassignments:
+		return &ListPartitionReassignmentsRequest{Version: version}
+	case apiKeyOffsetDelete:
+		return &DeleteOffsetsRequest{Version: version}
+	case apiKeyDescribeClientQuotas:
+		return &DescribeClientQuotasRequest{Version: version}
+	case apiKeyAlterClientQuotas:
+		return &AlterClientQuotasRequest{Version: version}
+	case apiKeyDescribeUserScramCredentials:
+		return &DescribeUserScramCredentialsRequest{Version: version}
+	case apiKeyAlterUserScramCredentials:
+		return &AlterUserScramCredentialsRequest{Version: version}
+		// 52: VoteRequest
+		// 53: BeginQuorumEpochRequest
+		// 54: EndQuorumEpochRequest
+		// 55: DescribeQuorumRequest
+		// 56: AlterPartitionRequest
+		// 57: UpdateFeaturesRequest
+		// 58: EnvelopeRequest
+		// 59: FetchSnapshotRequest
+		// 60: DescribeClusterRequest
+		// 61: DescribeProducersRequest
+		// 62: BrokerRegistrationRequest
+		// 63: BrokerHeartbeatRequest
+		// 64: UnregisterBrokerRequest
+		// 65: DescribeTransactionsRequest
+		// 66: ListTransactionsRequest
+		// 67: AllocateProducerIdsRequest
+		// 68: ConsumerGroupHeartbeatRequest
+	}
+	return nil
+}
diff --git a/vendor/github.com/IBM/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go
new file mode 100644
index 0000000..88b0b44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/response_header.go
@@ -0,0 +1,33 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
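+// decode reads the 4-byte length and the correlation ID of a response
+// header; for header version 1 and above the header is flexible and ends
+// with a tagged-field block.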
+func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
+	if version >= 1 {
+		if decoder, ok := pd.(*realDecoder); ok {
+			pd = &realFlexibleDecoder{decoder}
+		} else {
+			return PacketDecodingError{"failed to instantiate flexible decoder"}
+		}
+	}
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+	r.correlationID, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
diff --git a/vendor/github.com/IBM/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go
new file mode 100644
index 0000000..4d5f60a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sarama.go
@@ -0,0 +1,139 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
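+
+For example, a minimal SyncProducer sketch (the broker address and topic are placeholders;
+error handling is abbreviated):
+
+	config := NewConfig()
+	config.Producer.Return.Successes = true // the SyncProducer requires this
+	producer, err := NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		panic(err)
+	}
+	defer producer.Close()
+	partition, offset, err := producer.SendMessage(&ProducerMessage{
+		Topic: "example-topic",
+		Value: StringEncoder("hello"),
+	})
+	Logger.Printf("delivered to partition %d at offset %d (err=%v)", partition, offset, err)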
+
+To consume messages, use the Consumer API or the ConsumerGroup API.
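+
+A correspondingly minimal partition-consumer sketch (same placeholder broker and topic):
+
+	consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
+	if err != nil {
+		panic(err)
+	}
+	defer consumer.Close()
+	pc, err := consumer.ConsumePartition("example-topic", 0, OffsetNewest)
+	if err != nil {
+		panic(err)
+	}
+	for msg := range pc.Messages() {
+		Logger.Printf("offset %d: %s", msg.Offset, string(msg.Value))
+	}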
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+	+---------------------------------------------------------+------------+---------------------------------------------------------------+
+	| Name                                                    | Type       | Description                                                   |
+	+---------------------------------------------------------+------------+---------------------------------------------------------------+
+	| incoming-byte-rate                                      | meter      | Bytes/second read off all brokers                             |
+	| incoming-byte-rate-for-broker-<broker-id>               | meter      | Bytes/second read off a given broker                          |
+	| outgoing-byte-rate                                      | meter      | Bytes/second written off all brokers                          |
+	| outgoing-byte-rate-for-broker-<broker-id>               | meter      | Bytes/second written off a given broker                       |
+	| request-rate                                            | meter      | Requests/second sent to all brokers                           |
+	| request-rate-for-broker-<broker-id>                     | meter      | Requests/second sent to a given broker                        |
+	| request-size                                            | histogram  | Distribution of the request size in bytes for all brokers     |
+	| request-size-for-broker-<broker-id>                     | histogram  | Distribution of the request size in bytes for a given broker  |
+	| request-latency-in-ms                                   | histogram  | Distribution of the request latency in ms for all brokers     |
+	| request-latency-in-ms-for-broker-<broker-id>            | histogram  | Distribution of the request latency in ms for a given broker  |
+	| response-rate                                           | meter      | Responses/second received from all brokers                    |
+	| response-rate-for-broker-<broker-id>                    | meter      | Responses/second received from a given broker                 |
+	| response-size                                           | histogram  | Distribution of the response size in bytes for all brokers    |
+	| response-size-for-broker-<broker-id>                    | histogram  | Distribution of the response size in bytes for a given broker |
+	| requests-in-flight                                      | counter    | The current number of in-flight requests awaiting a response  |
+	|                                                         |            | for all brokers                                               |
+	| requests-in-flight-for-broker-<broker-id>               | counter    | The current number of in-flight requests awaiting a response  |
+	|                                                         |            | for a given broker                                            |
+	| protocol-requests-rate-<api-key>                        | meter      | Number of api requests sent to the brokers for all brokers    |
+	|                                                         |            | https://kafka.apache.org/protocol.html#protocol_api_keys      |
+	| protocol-requests-rate-<api-key>-for-broker-<broker-id> | meter      | Number of packets sent to the brokers by api-key for a given  |
+	|                                                         |            | broker                                                        |
+	+---------------------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are included in the "all brokers" metrics.
+
+Producer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
+	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
+	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
+	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
+	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
+	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
+	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
+	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+Consumer related metrics:
+
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| Name                                      | Type       | Description                                                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+	| consumer-batch-size                       | histogram  | Distribution of the number of messages in a batch                                    |
+	| consumer-fetch-rate                       | meter      | Fetch requests/second sent to all brokers                                            |
+	| consumer-fetch-rate-for-broker-<broker>   | meter      | Fetch requests/second sent to a given broker                                         |
+	| consumer-fetch-rate-for-topic-<topic>     | meter      | Fetch requests/second sent for a given topic                                         |
+	| consumer-fetch-response-size              | histogram  | Distribution of the fetch response size in bytes                                     |
+	| consumer-group-join-total-<GroupID>       | counter    | Total count of consumer group join attempts                                          |
+	| consumer-group-join-failed-<GroupID>      | counter    | Total count of consumer group join failures                                          |
+	| consumer-group-sync-total-<GroupID>       | counter    | Total count of consumer group sync attempts                                          |
+	| consumer-group-sync-failed-<GroupID>      | counter    | Total count of consumer group sync failures                                          |
+	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+*/
+package sarama
+
+import (
+	"io"
+	"log"
+)
+
+var (
+	// Logger is the instance of a StdLogger interface that Sarama writes connection
+	// management events to. By default it is set to discard all log messages via io.Discard,
+	// but you can set it to redirect wherever you want.
+	Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags)
+
+	// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+	// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+	PanicHandler func(interface{})
+
+	// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+	// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+	// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+	// to process.
+	MaxRequestSize int32 = 100 * 1024 * 1024
+
+	// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+	// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+	// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+	// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+	// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+	MaxResponseSize int32 = 100 * 1024 * 1024
+)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+type debugLogger struct{}
+
+func (d *debugLogger) Print(v ...interface{}) {
+	Logger.Print(v...)
+}
+func (d *debugLogger) Printf(format string, v ...interface{}) {
+	Logger.Printf(format, v...)
+}
+func (d *debugLogger) Println(v ...interface{}) {
+	Logger.Println(v...)
+}
+
+// DebugLogger is the instance of a StdLogger that Sarama writes more verbose
+// debug information to. By default it redirects all debug output to the
+// default Logger above, but you can optionally set it to another StdLogger
+// instance (for example, to discard debug information).
+var DebugLogger StdLogger = &debugLogger{}
diff --git a/vendor/github.com/IBM/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go
new file mode 100644
index 0000000..f53a991
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go
@@ -0,0 +1,46 @@
+package sarama
+
+type SaslAuthenticateRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version       int16
+	SaslAuthBytes []byte
+}
+
+func (r *SaslAuthenticateRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
+	return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.SaslAuthBytes, err = pd.getBytes()
+	return err
+}
+
+func (r *SaslAuthenticateRequest) key() int16 {
+	return apiKeySASLAuth
+}
+
+func (r *SaslAuthenticateRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SaslAuthenticateRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *SaslAuthenticateRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_2_0_0
+	default:
+		return V1_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go
new file mode 100644
index 0000000..60752e8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go
@@ -0,0 +1,75 @@
+package sarama
+
+type SaslAuthenticateResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version           int16
+	Err               KError
+	ErrorMessage      *string
+	SaslAuthBytes     []byte
+	SessionLifetimeMs int64
+}
+
+func (r *SaslAuthenticateResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
+	pe.putKError(r.Err)
+	if err := pe.putNullableString(r.ErrorMessage); err != nil {
+		return err
+	}
+	if err := pe.putBytes(r.SaslAuthBytes); err != nil {
+		return err
+	}
+	if r.Version > 0 {
+		pe.putInt64(r.SessionLifetimeMs)
+	}
+	return nil
+}
+
+func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	if r.SaslAuthBytes, err = pd.getBytes(); err != nil {
+		return err
+	}
+
+	if version > 0 {
+		r.SessionLifetimeMs, err = pd.getInt64()
+	}
+
+	return err
+}
+
+func (r *SaslAuthenticateResponse) key() int16 {
+	return apiKeySASLAuth
+}
+
+func (r *SaslAuthenticateResponse) version() int16 {
+	return r.Version
+}
+
+func (r *SaslAuthenticateResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *SaslAuthenticateResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V2_2_0_0
+	default:
+		return V1_0_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..cae8536
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_handshake_request.go
@@ -0,0 +1,51 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+	Version   int16
+}
+
+func (r *SaslHandshakeRequest) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return apiKeySaslHandshake
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SaslHandshakeRequest) headerVersion() int16 {
+	return 1
+}
+
+func (r *SaslHandshakeRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	default:
+		return V0_10_0_0
+	}
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..3974589
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_handshake_response.go
@@ -0,0 +1,54 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Version           int16
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putKError(r.Err)
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return apiKeySaslHandshake
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return r.Version
+}
+
+func (r *SaslHandshakeResponse) headerVersion() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V1_0_0_0
+	default:
+		return V0_10_0_0
+	}
+}
diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/IBM/sarama/scram_formatter.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/scram_formatter.go
rename to vendor/github.com/IBM/sarama/scram_formatter.go
diff --git a/vendor/github.com/IBM/sarama/server.properties b/vendor/github.com/IBM/sarama/server.properties
new file mode 100644
index 0000000..21ba1c7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/server.properties
@@ -0,0 +1,138 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
+# See kafka.server.KafkaConfig for additional details and defaults
+#
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. If not configured, the host name will be equal to the value of
+# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
+#   FORMAT:
+#     listeners = listener_name://host_name:port
+#   EXAMPLE:
+#     listeners = PLAINTEXT://your.host.name:9092
+#listeners=PLAINTEXT://:9092
+
+# Listener name, hostname and port the broker will advertise to clients.
+# If not set, it uses the value for "listeners".
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/tmp/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# Increasing this value is recommended for installations with data dirs located in a RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings  #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+#    1. Durability: Unflushed data may be lost if you are not using replication.
+#    2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+#    3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria is met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+#log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma-separated list of host:port pairs, each corresponding to a zk
+# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=18000
+
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go
diff --git a/vendor/github.com/IBM/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go
new file mode 100644
index 0000000..b109cd9
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_group_request.go
@@ -0,0 +1,184 @@
+package sarama
+
+type SyncGroupRequestAssignment struct {
+	// MemberId contains the ID of the member to assign.
+	MemberId string
+	// Assignment contains the member assignment.
+	Assignment []byte
+}
+
+func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) {
+	if err := pe.putString(a.MemberId); err != nil {
+		return err
+	}
+
+	if err := pe.putBytes(a.Assignment); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) {
+	if a.MemberId, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if a.Assignment, err = pd.getBytes(); err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+type SyncGroupRequest struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// GroupId contains the unique group identifier.
+	GroupId string
+	// GenerationId contains the generation of the group.
+	GenerationId int32
+	// MemberId contains the member ID assigned by the group.
+	MemberId string
+	// GroupInstanceId contains the unique identifier of the consumer instance provided by end user.
+	GroupInstanceId *string
+	// GroupAssignments contains each assignment.
+	GroupAssignments []SyncGroupRequestAssignment
+}
+
+func (s *SyncGroupRequest) setVersion(v int16) {
+	s.Version = v
+}
+
+func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) {
+	if err := pe.putString(s.GroupId); err != nil {
+		return err
+	}
+
+	pe.putInt32(s.GenerationId)
+
+	if err := pe.putString(s.MemberId); err != nil {
+		return err
+	}
+
+	if s.Version >= 3 {
+		if err := pe.putNullableString(s.GroupInstanceId); err != nil {
+			return err
+		}
+	}
+
+	if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil {
+		return err
+	}
+	for _, block := range s.GroupAssignments {
+		if err := block.encode(pe, s.Version); err != nil {
+			return err
+		}
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+	s.Version = version
+	if s.GroupId, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if s.GenerationId, err = pd.getInt32(); err != nil {
+		return err
+	}
+
+	if s.MemberId, err = pd.getString(); err != nil {
+		return err
+	}
+
+	if s.Version >= 3 {
+		if s.GroupInstanceId, err = pd.getNullableString(); err != nil {
+			return err
+		}
+	}
+
+	if numAssignments, err := pd.getArrayLength(); err != nil {
+		return err
+	} else if numAssignments > 0 {
+		s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments)
+		for i := 0; i < numAssignments; i++ {
+			var block SyncGroupRequestAssignment
+			if err := block.decode(pd, s.Version); err != nil {
+				return err
+			}
+			s.GroupAssignments[i] = block
+		}
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *SyncGroupRequest) key() int16 {
+	return apiKeySyncGroup
+}
+
+func (r *SyncGroupRequest) version() int16 {
+	return r.Version
+}
+
+func (r *SyncGroupRequest) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 2
+	}
+	return 1
+}
+
+func (r *SyncGroupRequest) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *SyncGroupRequest) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *SyncGroupRequest) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+	r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{
+		MemberId:   memberId,
+		Assignment: memberAssignment,
+	})
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(
+	memberId string,
+	memberAssignment *ConsumerGroupMemberAssignment,
+) error {
+	bin, err := encode(memberAssignment, nil)
+	if err != nil {
+		return err
+	}
+
+	r.AddGroupAssignment(memberId, bin)
+	return nil
+}
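A short sketch of how a group leader might build this request; the group, member, and topic names are illustrative. AddGroupAssignmentMember encodes the ConsumerGroupMemberAssignment into the raw assignment bytes via the package's encode helper.

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	// Illustrative values only; real ones come from the JoinGroup exchange.
	req := &sarama.SyncGroupRequest{
		Version:      3,
		GroupId:      "example-group",
		GenerationId: 1,
		MemberId:     "member-1",
	}
	err := req.AddGroupAssignmentMember("member-1", &sarama.ConsumerGroupMemberAssignment{
		Version: 1,
		Topics:  map[string][]int32{"example-topic": {0, 1, 2}},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("assignments staged: %d", len(req.GroupAssignments))
}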
diff --git a/vendor/github.com/IBM/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go
new file mode 100644
index 0000000..a605ced
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_group_response.go
@@ -0,0 +1,108 @@
+package sarama
+
+import "time"
+
+type SyncGroupResponse struct {
+	// Version defines the protocol version to use for encode and decode
+	Version int16
+	// ThrottleTime contains the duration in milliseconds for which the
+	// request was throttled due to a quota violation, or zero if the request
+	// did not violate any quota.
+	ThrottleTime int32
+	// Err contains the error code, or 0 if there was no error.
+	Err KError
+	// MemberAssignment contains the member assignment.
+	MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) setVersion(v int16) {
+	r.Version = v
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+	assignment := new(ConsumerGroupMemberAssignment)
+	err := decode(r.MemberAssignment, assignment, nil)
+	return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+	if r.Version >= 1 {
+		pe.putInt32(r.ThrottleTime)
+	}
+	pe.putKError(r.Err)
+	if err := pe.putBytes(r.MemberAssignment); err != nil {
+		return err
+	}
+
+	pe.putEmptyTaggedFieldArray()
+	return nil
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+	r.Version = version
+	if r.Version >= 1 {
+		if r.ThrottleTime, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+	r.Err, err = pd.getKError()
+	if err != nil {
+		return err
+	}
+
+	r.MemberAssignment, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	_, err = pd.getEmptyTaggedFieldArray()
+	return err
+}
+
+func (r *SyncGroupResponse) key() int16 {
+	return apiKeySyncGroup
+}
+
+func (r *SyncGroupResponse) version() int16 {
+	return r.Version
+}
+
+func (r *SyncGroupResponse) headerVersion() int16 {
+	if r.Version >= 4 {
+		return 1
+	}
+	return 0
+}
+
+func (r *SyncGroupResponse) isValidVersion() bool {
+	return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *SyncGroupResponse) isFlexible() bool {
+	return r.isFlexibleVersion(r.Version)
+}
+
+func (r *SyncGroupResponse) isFlexibleVersion(version int16) bool {
+	return version >= 4
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 4:
+		return V2_4_0_0
+	case 3:
+		return V2_3_0_0
+	case 2:
+		return V2_0_0_0
+	case 1:
+		return V0_11_0_0
+	case 0:
+		return V0_9_0_0
+	default:
+		return V2_3_0_0
+	}
+}
+
+func (r *SyncGroupResponse) throttleTime() time.Duration {
+	return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go
new file mode 100644
index 0000000..f6876fb
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_producer.go
@@ -0,0 +1,209 @@
+package sarama
+
+import "sync"
+
+var expectationsPool = sync.Pool{
+	New: func() interface{} {
+		return make(chan *ProducerError, 1)
+	},
+}
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer; you must call this function before a producer
+	// object passes out of scope, as it may otherwise leak memory.
+	// You must call this before calling Close on the underlying client.
+	Close() error
+
+	// TxnStatus returns the current producer transaction status.
+	TxnStatus() ProducerTxnStatusFlag
+
+	// IsTransactional returns true when the current producer is transactional.
+	IsTransactional() bool
+
+	// BeginTxn marks the current transaction as ready.
+	BeginTxn() error
+
+	// CommitTxn commits the current transaction.
+	CommitTxn() error
+
+	// AbortTxn aborts the current transaction.
+	AbortTxn() error
+
+	// AddOffsetsToTxn adds the associated offsets to the current transaction.
+	AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
+
+	// AddMessageToTxn adds message offsets to the current transaction.
+	AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
+}
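A minimal send sketch against this interface, assuming a local broker and an example topic; note that both Return flags are set, as verifyProducerConfig below requires.

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Both flags are required for a SyncProducer.
	cfg.Producer.Return.Successes = true
	cfg.Producer.Return.Errors = true

	// The broker address and topic are illustrative.
	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}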
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+	if err := verifyProducerConfig(client.Config()); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+	sp := &syncProducer{producer: p}
+
+	sp.wg.Add(2)
+	go withRecover(sp.handleSuccesses)
+	go withRecover(sp.handleErrors)
+
+	return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+	if !config.Producer.Return.Errors {
+		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+	}
+	if !config.Producer.Return.Successes {
+		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+	}
+	return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+	expectation := expectationsPool.Get().(chan *ProducerError)
+	msg.expectation = expectation
+	sp.producer.Input() <- msg
+	pErr := <-expectation
+	msg.expectation = nil
+	expectationsPool.Put(expectation)
+	if pErr != nil {
+		return -1, -1, pErr.Err
+	}
+
+	return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+	indices := make(chan int, len(msgs))
+	go func() {
+		for i, msg := range msgs {
+			expectation := expectationsPool.Get().(chan *ProducerError)
+			msg.expectation = expectation
+			sp.producer.Input() <- msg
+			indices <- i
+		}
+		close(indices)
+	}()
+
+	var errors ProducerErrors
+	for i := range indices {
+		expectation := msgs[i].expectation
+		pErr := <-expectation
+		msgs[i].expectation = nil
+		expectationsPool.Put(expectation)
+		if pErr != nil {
+			errors = append(errors, pErr)
+		}
+	}
+
+	if len(errors) > 0 {
+		return errors
+	}
+	return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+	defer sp.wg.Done()
+	for msg := range sp.producer.Successes() {
+		expectation := msg.expectation
+		expectation <- nil
+	}
+}
+
+func (sp *syncProducer) handleErrors() {
+	defer sp.wg.Done()
+	for err := range sp.producer.Errors() {
+		expectation := err.Msg.expectation
+		expectation <- err
+	}
+}
+
+func (sp *syncProducer) Close() error {
+	sp.producer.AsyncClose()
+	sp.wg.Wait()
+	return nil
+}
+
+func (sp *syncProducer) IsTransactional() bool {
+	return sp.producer.IsTransactional()
+}
+
+func (sp *syncProducer) BeginTxn() error {
+	return sp.producer.BeginTxn()
+}
+
+func (sp *syncProducer) CommitTxn() error {
+	return sp.producer.CommitTxn()
+}
+
+func (sp *syncProducer) AbortTxn() error {
+	return sp.producer.AbortTxn()
+}
+
+func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
+	return sp.producer.AddOffsetsToTxn(offsets, groupId)
+}
+
+func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
+	return sp.producer.AddMessageToTxn(msg, groupId, metadata)
+}
+
+func (sp *syncProducer) TxnStatus() ProducerTxnStatusFlag {
+	return sp.producer.TxnStatus()
+}
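A transactional usage sketch tying the txn methods together, under assumed settings: the broker address, transactional ID, and topic are illustrative, and idempotence conventionally also requires Net.MaxOpenRequests = 1 and acks from all replicas.

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_5_0_0 // assumed broker version
	cfg.Producer.Return.Successes = true
	cfg.Producer.Return.Errors = true
	cfg.Producer.RequiredAcks = sarama.WaitForAll
	cfg.Producer.Idempotent = true
	cfg.Net.MaxOpenRequests = 1                  // required for idempotence
	cfg.Producer.Transaction.ID = "example-txn"  // illustrative transactional ID

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()

	if err := producer.BeginTxn(); err != nil {
		log.Fatal(err)
	}
	if _, _, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("in a transaction"),
	}); err != nil {
		_ = producer.AbortTxn() // abortable errors require AbortTxn
		log.Fatal(err)
	}
	if err := producer.CommitTxn(); err != nil {
		log.Fatal(err)
	}
}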
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/timestamp.go
rename to vendor/github.com/IBM/sarama/timestamp.go
diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go
new file mode 100644
index 0000000..bf20b75
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/transaction_manager.go
@@ -0,0 +1,930 @@
+package sarama
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+)
+
+// ProducerTxnStatusFlag marks the current transaction status.
+type ProducerTxnStatusFlag int16
+
+const (
+	// ProducerTxnFlagUninitialized when txnmgr is created
+	ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota
+	// ProducerTxnFlagInitializing when txnmgr is initializing
+	ProducerTxnFlagInitializing
+	// ProducerTxnFlagReady when txnmgr is ready to receive a transaction
+	ProducerTxnFlagReady
+	// ProducerTxnFlagInTransaction when a transaction has been started
+	ProducerTxnFlagInTransaction
+	// ProducerTxnFlagEndTransaction when the transaction is about to be committed or aborted
+	ProducerTxnFlagEndTransaction
+	// ProducerTxnFlagInError when an abortable or fatal error has occurred
+	ProducerTxnFlagInError
+	// ProducerTxnFlagCommittingTransaction when committing txn
+	ProducerTxnFlagCommittingTransaction
+	// ProducerTxnFlagAbortingTransaction when aborting txn
+	ProducerTxnFlagAbortingTransaction
+	// ProducerTxnFlagAbortableError when the producer encounters an abortable error.
+	// Must call AbortTxn in this case.
+	ProducerTxnFlagAbortableError
+	// ProducerTxnFlagFatalError when the producer encounters a fatal error.
+	// Must Close and recreate it.
+	ProducerTxnFlagFatalError
+)
+
+func (s ProducerTxnStatusFlag) String() string {
+	status := make([]string, 0)
+	if s&ProducerTxnFlagUninitialized != 0 {
+		status = append(status, "ProducerTxnStateUninitialized")
+	}
+	if s&ProducerTxnFlagInitializing != 0 {
+		status = append(status, "ProducerTxnStateInitializing")
+	}
+	if s&ProducerTxnFlagReady != 0 {
+		status = append(status, "ProducerTxnStateReady")
+	}
+	if s&ProducerTxnFlagInTransaction != 0 {
+		status = append(status, "ProducerTxnStateInTransaction")
+	}
+	if s&ProducerTxnFlagEndTransaction != 0 {
+		status = append(status, "ProducerTxnStateEndTransaction")
+	}
+	if s&ProducerTxnFlagInError != 0 {
+		status = append(status, "ProducerTxnStateInError")
+	}
+	if s&ProducerTxnFlagCommittingTransaction != 0 {
+		status = append(status, "ProducerTxnStateCommittingTransaction")
+	}
+	if s&ProducerTxnFlagAbortingTransaction != 0 {
+		status = append(status, "ProducerTxnStateAbortingTransaction")
+	}
+	if s&ProducerTxnFlagAbortableError != 0 {
+		status = append(status, "ProducerTxnStateAbortableError")
+	}
+	if s&ProducerTxnFlagFatalError != 0 {
+		status = append(status, "ProducerTxnStateFatalError")
+	}
+	return strings.Join(status, "|")
+}
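Because the flags are powers of two, composite statuses are expressed by OR-ing them and String renders each set bit; a small sketch of both conventions as used throughout this file:

package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	// transitionTo combines flags the same way, e.g. InError|AbortableError.
	status := sarama.ProducerTxnFlagInError | sarama.ProducerTxnFlagAbortableError
	fmt.Println(status) // ProducerTxnStateInError|ProducerTxnStateAbortableError

	// Membership tests use a bitwise AND, mirroring the checks in this file.
	if status&sarama.ProducerTxnFlagAbortableError != 0 {
		fmt.Println("abortable: call AbortTxn")
	}
}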
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+	producerID         int64
+	producerEpoch      int16
+	sequenceNumbers    map[string]int32
+	mutex              sync.Mutex
+	transactionalID    string
+	transactionTimeout time.Duration
+	client             Client
+
+	// true when the kafka cluster is at least 2.5.0;
+	// used to recover when the producer has failed.
+	coordinatorSupportsBumpingEpoch bool
+
+	// When the producer needs to bump its epoch.
+	epochBumpRequired bool
+	// Record last seen error.
+	lastError error
+
+	// Ensures that status is never accessed concurrently without synchronization.
+	statusLock sync.RWMutex
+	status     ProducerTxnStatusFlag
+
+	// Ensure that only one goroutine will update partitions in current transaction.
+	partitionInTxnLock            sync.Mutex
+	pendingPartitionsInCurrentTxn topicPartitionSet
+	partitionsInCurrentTxn        topicPartitionSet
+
+	// Offsets to add to transaction.
+	offsetsInCurrentTxn map[string]topicPartitionOffsets
+}
+
+const (
+	noProducerID    = -1
+	noProducerEpoch = -1
+
+	// see publishTxnPartitions comment.
+	addPartitionsRetryBackoff = 20 * time.Millisecond
+)
+
+// txnmgr allowed transitions.
+var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{
+	ProducerTxnFlagUninitialized: {
+		ProducerTxnFlagReady,
+		ProducerTxnFlagInError,
+	},
+	// When we are initializing
+	ProducerTxnFlagInitializing: {
+		ProducerTxnFlagInitializing,
+		ProducerTxnFlagReady,
+		ProducerTxnFlagInError,
+	},
+	// When we have initialized transactional producer
+	ProducerTxnFlagReady: {
+		ProducerTxnFlagInTransaction,
+	},
+	// When beginTxn has been called
+	ProducerTxnFlagInTransaction: {
+		// When calling commit or abort
+		ProducerTxnFlagEndTransaction,
+		// When got an error
+		ProducerTxnFlagInError,
+	},
+	ProducerTxnFlagEndTransaction: {
+		// When epoch bump
+		ProducerTxnFlagInitializing,
+		// When commit is good
+		ProducerTxnFlagReady,
+		// When got an error
+		ProducerTxnFlagInError,
+	},
+	// Need to abort transaction
+	ProducerTxnFlagAbortableError: {
+		// Call AbortTxn
+		ProducerTxnFlagAbortingTransaction,
+		// When got an error
+		ProducerTxnFlagInError,
+	},
+	// Need to close producer
+	ProducerTxnFlagFatalError: {
+		ProducerTxnFlagFatalError,
+	},
+}
+
+type topicPartition struct {
+	topic     string
+	partition int32
+}
+
+// to ensure that we don't do a full scan every time a partition or an offset is added.
+type (
+	topicPartitionSet     map[topicPartition]struct{}
+	topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata
+)
+
+func (s topicPartitionSet) mapToRequest() map[string][]int32 {
+	result := make(map[string][]int32, len(s))
+	for tp := range s {
+		result[tp.topic] = append(result[tp.topic], tp.partition)
+	}
+	return result
+}
+
+func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata {
+	result := make(map[string][]*PartitionOffsetMetadata, len(s))
+	for tp, offset := range s {
+		result[tp.topic] = append(result[tp.topic], offset)
+	}
+	return result
+}
+
+// Returns true if a transition to the target status is allowed.
+func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool {
+	for status, allowedTransitions := range producerTxnTransitions {
+		if status&t.status != 0 {
+			for _, allowedTransition := range allowedTransitions {
+				if allowedTransition&target != 0 {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// Get current transaction status.
+func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag {
+	t.statusLock.RLock()
+	defer t.statusLock.RUnlock()
+
+	return t.status
+}
+
+// Try to transition to the target status, returning an error if the transition is not allowed.
+func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error {
+	t.statusLock.Lock()
+	defer t.statusLock.Unlock()
+
+	if !t.isTransitionValid(target) {
+		return ErrTransitionNotAllowed
+	}
+
+	if target&ProducerTxnFlagInError != 0 {
+		if err == nil {
+			return ErrCannotTransitionNilError
+		}
+		t.lastError = err
+	} else {
+		t.lastError = nil
+	}
+
+	DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target)
+
+	t.status = target
+	return err
+}
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
+	key := fmt.Sprintf("%s-%d", topic, partition)
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	sequence := t.sequenceNumbers[key]
+	t.sequenceNumbers[key] = sequence + 1
+	return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.producerEpoch++
+	for k := range t.sequenceNumbers {
+		t.sequenceNumbers[k] = 0
+	}
+}
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return t.producerID, t.producerEpoch
+}
+
+// Compute the retry backoff based on the number of attempts remaining.
+func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration {
+	if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
+		maxRetries := t.client.Config().Producer.Transaction.Retry.Max
+		retries := maxRetries - attemptsRemaining
+		return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
+	}
+	return t.client.Config().Producer.Transaction.Retry.Backoff
+}
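A configuration sketch for this hook, with illustrative values: the BackoffFunc receives the retry count derived above (maxRetries minus the attempts remaining) and can implement, say, exponential backoff.

package main

import (
	"time"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Transaction.Retry.Max = 5
	// 100ms, 200ms, 400ms, ... per retry; maxRetries is unused here.
	cfg.Producer.Transaction.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		return time.Duration(100<<retries) * time.Millisecond
	}
	_ = cfg
}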
+
+// returns true if the txnmgr is transactional.
+func (t *transactionManager) isTransactional() bool {
+	return t.transactionalID != ""
+}
+
+// adds the specified offsets to the current transaction.
+func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
+		return ErrTransactionNotReady
+	}
+
+	if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+		return t.lastError
+	}
+
+	if _, ok := t.offsetsInCurrentTxn[groupId]; !ok {
+		t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{}
+	}
+
+	for topic, offsets := range offsetsToAdd {
+		for _, offset := range offsets {
+			tp := topicPartition{topic: topic, partition: offset.Partition}
+			t.offsetsInCurrentTxn[groupId][tp] = offset
+		}
+	}
+	return nil
+}
+
+// sends the txnmgr's saved offsets to the transaction coordinator.
+func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) {
+	// First AddOffsetsToTxn
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+	exec := func(run func() (bool, error), err error) error {
+		for attemptsRemaining >= 0 {
+			var retry bool
+			retry, err = run()
+			if !retry {
+				return err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return err
+	}
+	lastError := exec(func() (bool, error) {
+		coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+		if err != nil {
+			return true, err
+		}
+		request := &AddOffsetsToTxnRequest{
+			TransactionalID: t.transactionalID,
+			ProducerEpoch:   t.producerEpoch,
+			ProducerID:      t.producerID,
+			GroupID:         groupId,
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		response, err := coordinator.AddOffsetsToTxn(request)
+		if err != nil {
+			// If an error occurred try to refresh current transaction coordinator.
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			return true, err
+		}
+		if response == nil {
+			// If no response is returned just retry.
+			return true, ErrTxnUnableToParseResponse
+		}
+		if response.Err == ErrNoError {
+			DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n",
+				t.transactionalID, groupId, response)
+			// If no error, just exit.
+			return false, nil
+		}
+		switch response.Err {
+		case ErrConsumerCoordinatorNotAvailable:
+			fallthrough
+		case ErrNotCoordinatorForConsumer:
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			fallthrough
+		case ErrOffsetsLoadInProgress:
+			fallthrough
+		case ErrConcurrentTransactions:
+			// Retry
+		case ErrUnknownProducerID:
+			fallthrough
+		case ErrInvalidProducerIDMapping:
+			return false, t.abortableErrorIfPossible(response.Err)
+		case ErrGroupAuthorizationFailed:
+			return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
+		default:
+			// Others are fatal
+			return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+		}
+		return true, response.Err
+	}, nil)
+
+	if lastError != nil {
+		return offsets, lastError
+	}
+
+	resultOffsets := offsets
+	// Then TxnOffsetCommit
+	// note the result is not completed until the TxnOffsetCommit returns
+	attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max
+	execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) {
+		var r topicPartitionOffsets
+		for attemptsRemaining >= 0 {
+			var retry bool
+			r, retry, err = run()
+			if !retry {
+				return r, err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return r, err
+	}
+	return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) {
+		consumerGroupCoordinator, err := t.client.Coordinator(groupId)
+		if err != nil {
+			return resultOffsets, true, err
+		}
+		request := &TxnOffsetCommitRequest{
+			TransactionalID: t.transactionalID,
+			ProducerEpoch:   t.producerEpoch,
+			ProducerID:      t.producerID,
+			GroupID:         groupId,
+			Topics:          offsets.mapToRequest(),
+		}
+		if t.client.Config().Version.IsAtLeast(V2_1_0_0) {
+			// Version 2 adds the committed leader epoch.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		responses, err := consumerGroupCoordinator.TxnOffsetCommit(request)
+		if err != nil {
+			_ = consumerGroupCoordinator.Close()
+			_ = t.client.RefreshCoordinator(groupId)
+			return resultOffsets, true, err
+		}
+
+		if responses == nil {
+			return resultOffsets, true, ErrTxnUnableToParseResponse
+		}
+
+		var responseErrors []error
+		failedTxn := topicPartitionOffsets{}
+		for topic, partitionErrors := range responses.Topics {
+			for _, partitionError := range partitionErrors {
+				switch partitionError.Err {
+				case ErrNoError:
+					continue
+				// If the topic is unknown or the coordinator is loading, retry with the current coordinator
+				case ErrRequestTimedOut:
+					fallthrough
+				case ErrConsumerCoordinatorNotAvailable:
+					fallthrough
+				case ErrNotCoordinatorForConsumer:
+					_ = consumerGroupCoordinator.Close()
+					_ = t.client.RefreshCoordinator(groupId)
+					fallthrough
+				case ErrUnknownTopicOrPartition:
+					fallthrough
+				case ErrOffsetsLoadInProgress:
+					// Do nothing just retry
+				case ErrIllegalGeneration:
+					fallthrough
+				case ErrUnknownMemberId:
+					fallthrough
+				case ErrFencedInstancedId:
+					fallthrough
+				case ErrGroupAuthorizationFailed:
+					return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err)
+				default:
+					// Others are fatal
+					return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err)
+				}
+				tp := topicPartition{topic: topic, partition: partitionError.Partition}
+				failedTxn[tp] = offsets[tp]
+				responseErrors = append(responseErrors, partitionError.Err)
+			}
+		}
+
+		resultOffsets = failedTxn
+
+		if len(resultOffsets) == 0 {
+			DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n",
+				t.transactionalID, groupId)
+			return resultOffsets, false, nil
+		}
+		return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...)
+	}, nil)
+}
+
+func (t *transactionManager) initProducerId() (int64, int16, error) {
+	isEpochBump := false
+
+	req := &InitProducerIDRequest{}
+	if t.isTransactional() {
+		req.TransactionalID = &t.transactionalID
+		req.TransactionTimeout = t.transactionTimeout
+	}
+
+	if t.client.Config().Version.IsAtLeast(V2_5_0_0) {
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 4 adds the support for new error code PRODUCER_FENCED.
+			req.Version = 4
+		} else {
+			// Version 3 adds ProducerId and ProducerEpoch, allowing producers to try
+			// to resume after an INVALID_PRODUCER_EPOCH error
+			req.Version = 3
+		}
+		isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch
+		t.coordinatorSupportsBumpingEpoch = true
+		req.ProducerID = t.producerID
+		req.ProducerEpoch = t.producerEpoch
+	} else if t.client.Config().Version.IsAtLeast(V2_4_0_0) {
+		// Version 2 is the first flexible version.
+		req.Version = 2
+	} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+		// Version 1 is the same as version 0.
+		req.Version = 1
+	}
+
+	if isEpochBump {
+		err := t.transitionTo(ProducerTxnFlagInitializing, nil)
+		if err != nil {
+			return -1, -1, err
+		}
+		DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n",
+			t.transactionalID, t.producerID, t.producerEpoch)
+	} else {
+		DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n",
+			t.transactionalID)
+	}
+
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+	exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) {
+		pid := int64(-1)
+		pepoch := int16(-1)
+		for attemptsRemaining >= 0 {
+			var retry bool
+			pid, pepoch, retry, err = run()
+			if !retry {
+				return pid, pepoch, err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return -1, -1, err
+	}
+	return exec(func() (int64, int16, bool, error) {
+		var err error
+		var coordinator *Broker
+		if t.isTransactional() {
+			coordinator, err = t.client.TransactionCoordinator(t.transactionalID)
+		} else {
+			coordinator = t.client.LeastLoadedBroker()
+		}
+		if err != nil {
+			return -1, -1, true, err
+		}
+		response, err := coordinator.InitProducerID(req)
+		if err != nil {
+			if t.isTransactional() {
+				_ = coordinator.Close()
+				_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			}
+			return -1, -1, true, err
+		}
+		if response == nil {
+			return -1, -1, true, ErrTxnUnableToParseResponse
+		}
+		if response.Err == ErrNoError {
+			if isEpochBump {
+				t.sequenceNumbers = make(map[string]int32)
+			}
+			err := t.transitionTo(ProducerTxnFlagReady, nil)
+			if err != nil {
+				return -1, -1, true, err
+			}
+			DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n",
+				t.transactionalID, response)
+			return response.ProducerID, response.ProducerEpoch, false, nil
+		}
+		switch response.Err {
+		// Retriable errors
+		case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress:
+			if t.isTransactional() {
+				_ = coordinator.Close()
+				_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			}
+		// Fatal errors
+		default:
+			return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+		}
+		return -1, -1, true, response.Err
+	}, nil)
+}
+
+// if the kafka cluster is at least 2.5.0, mark the txnmgr to bump the epoch; otherwise mark the error as fatal.
+func (t *transactionManager) abortableErrorIfPossible(err error) error {
+	if t.coordinatorSupportsBumpingEpoch {
+		t.epochBumpRequired = true
+		return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
+	}
+	return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
+}
+
+// End current transaction.
+func (t *transactionManager) completeTransaction() error {
+	if t.epochBumpRequired {
+		err := t.transitionTo(ProducerTxnFlagInitializing, nil)
+		if err != nil {
+			return err
+		}
+	} else {
+		err := t.transitionTo(ProducerTxnFlagReady, nil)
+		if err != nil {
+			return err
+		}
+	}
+
+	t.lastError = nil
+	t.epochBumpRequired = false
+	t.partitionsInCurrentTxn = topicPartitionSet{}
+	t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
+	t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{}
+
+	return nil
+}
+
+// send an EndTxn request with the commit flag (true when committing, false when aborting).
+func (t *transactionManager) endTxn(commit bool) error {
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+	exec := func(run func() (bool, error), err error) error {
+		for attemptsRemaining >= 0 {
+			var retry bool
+			retry, err = run()
+			if !retry {
+				return err
+			}
+			backoff := t.computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+				t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return err
+	}
+	return exec(func() (bool, error) {
+		coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+		if err != nil {
+			return true, err
+		}
+		request := &EndTxnRequest{
+			TransactionalID:   t.transactionalID,
+			ProducerEpoch:     t.producerEpoch,
+			ProducerID:        t.producerID,
+			TransactionResult: commit,
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		response, err := coordinator.EndTxn(request)
+		if err != nil {
+			// Always retry on network error
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			return true, err
+		}
+		if response == nil {
+			return true, ErrTxnUnableToParseResponse
+		}
+		if response.Err == ErrNoError {
+			DebugLogger.Printf("txnmgr/endtxn [%s] successful to end txn %+v\n",
+				t.transactionalID, response)
+			return false, t.completeTransaction()
+		}
+		switch response.Err {
+		// Need to refresh coordinator
+		case ErrConsumerCoordinatorNotAvailable:
+			fallthrough
+		case ErrNotCoordinatorForConsumer:
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			fallthrough
+		case ErrOffsetsLoadInProgress:
+			fallthrough
+		case ErrConcurrentTransactions:
+			// Just retry
+		case ErrUnknownProducerID:
+			fallthrough
+		case ErrInvalidProducerIDMapping:
+			return false, t.abortableErrorIfPossible(response.Err)
+		// Fatal errors
+		default:
+			return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+		}
+		return true, response.Err
+	}, nil)
+}
+
+// We will try to publish the associated offsets for each group,
+// then send an EndTxn request to mark the transaction as finished.
+func (t *transactionManager) finishTransaction(commit bool) error {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+
+	// Ensure no error when committing or aborting
+	if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+		return t.lastError
+	} else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+		return t.lastError
+	}
+
+	// if no records have been sent, don't do anything.
+	if len(t.partitionsInCurrentTxn) == 0 {
+		return t.completeTransaction()
+	}
+
+	epochBump := t.epochBumpRequired
+	// If we're aborting the transaction, there is no need to add offsets.
+	if commit && len(t.offsetsInCurrentTxn) > 0 {
+		for group, offsets := range t.offsetsInCurrentTxn {
+			newOffsets, err := t.publishOffsetsToTxn(offsets, group)
+			if err != nil {
+				t.offsetsInCurrentTxn[group] = newOffsets
+				return err
+			}
+			delete(t.offsetsInCurrentTxn, group)
+		}
+	}
+
+	if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+		return t.lastError
+	}
+
+	if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) {
+		err := t.endTxn(commit)
+		if err != nil {
+			return err
+		}
+		if !epochBump {
+			return nil
+		}
+	}
+	// reset pid and epoch if needed.
+	return t.initializeTransactions()
+}
+
+// called before sending any transactional record;
+// does nothing if the current topic-partition has already been added to the transaction.
+func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) {
+	if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+		return
+	}
+
+	tp := topicPartition{topic: topic, partition: partition}
+
+	t.partitionInTxnLock.Lock()
+	defer t.partitionInTxnLock.Unlock()
+	if _, ok := t.partitionsInCurrentTxn[tp]; ok {
+		// partition is already added
+		return
+	}
+
+	t.pendingPartitionsInCurrentTxn[tp] = struct{}{}
+}
+
+// Makes a request to kafka to add a list of partitions to the current transaction.
+func (t *transactionManager) publishTxnPartitions() error {
+	t.partitionInTxnLock.Lock()
+	defer t.partitionInTxnLock.Unlock()
+
+	if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+		return t.lastError
+	}
+
+	if len(t.pendingPartitionsInCurrentTxn) == 0 {
+		return nil
+	}
+
+	// Remove the partitions from the pending set regardless of the result. We use the presence
+	// of partitions in the pending set to know when it is not safe to send batches. However, if
+	// the partitions failed to be added and we enter an error state, we expect the batches to be
+	// aborted anyway. In this case, we must be able to continue sending the batches which are in
+	// retry for partitions that were successfully added.
+	removeAllPartitionsOnFatalOrAbortedError := func() {
+		t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
+	}
+
+	// We only want to reduce the backoff when retrying the first AddPartition which errored out due to a
+	// CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and
+	// we don't want to wait too long before trying to start the new one.
+	//
+	// This is only a temporary fix, the long term solution is being tracked in
+	// https://issues.apache.org/jira/browse/KAFKA-5482
+	retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff
+	computeBackoff := func(attemptsRemaining int) time.Duration {
+		if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
+			maxRetries := t.client.Config().Producer.Transaction.Retry.Max
+			retries := maxRetries - attemptsRemaining
+			return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
+		}
+		return retryBackoff
+	}
+	attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+
+	exec := func(run func() (bool, error), err error) error {
+		for attemptsRemaining >= 0 {
+			var retry bool
+			retry, err = run()
+			if !retry {
+				return err
+			}
+			backoff := computeBackoff(attemptsRemaining)
+			Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... (%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err)
+			time.Sleep(backoff)
+			attemptsRemaining--
+		}
+		return err
+	}
+	return exec(func() (bool, error) {
+		coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+		if err != nil {
+			return true, err
+		}
+		request := &AddPartitionsToTxnRequest{
+			TransactionalID: t.transactionalID,
+			ProducerID:      t.producerID,
+			ProducerEpoch:   t.producerEpoch,
+			TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(),
+		}
+		if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+			// Version 2 adds the support for new error code PRODUCER_FENCED.
+			request.Version = 2
+		} else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+			// Version 1 is the same as version 0.
+			request.Version = 1
+		}
+		addPartResponse, err := coordinator.AddPartitionsToTxn(request)
+		if err != nil {
+			_ = coordinator.Close()
+			_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+			return true, err
+		}
+
+		if addPartResponse == nil {
+			return true, ErrTxnUnableToParseResponse
+		}
+
+		// remove from the list partitions that have been successfully updated
+		var responseErrors []error
+		for topic, results := range addPartResponse.Errors {
+			for _, response := range results {
+				tp := topicPartition{topic: topic, partition: response.Partition}
+				switch response.Err {
+				case ErrNoError:
+					// Mark partition as added to transaction
+					t.partitionsInCurrentTxn[tp] = struct{}{}
+					delete(t.pendingPartitionsInCurrentTxn, tp)
+					continue
+				case ErrConsumerCoordinatorNotAvailable:
+					fallthrough
+				case ErrNotCoordinatorForConsumer:
+					_ = coordinator.Close()
+					_ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+					fallthrough
+				case ErrUnknownTopicOrPartition:
+					fallthrough
+				case ErrOffsetsLoadInProgress:
+					// Retry topicPartition
+				case ErrConcurrentTransactions:
+					if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff {
+						retryBackoff = addPartitionsRetryBackoff
+					}
+				case ErrOperationNotAttempted:
+					fallthrough
+				case ErrTopicAuthorizationFailed:
+					removeAllPartitionsOnFatalOrAbortedError()
+					return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
+				case ErrUnknownProducerID:
+					fallthrough
+				case ErrInvalidProducerIDMapping:
+					removeAllPartitionsOnFatalOrAbortedError()
+					return false, t.abortableErrorIfPossible(response.Err)
+				// Fatal errors
+				default:
+					removeAllPartitionsOnFatalOrAbortedError()
+					return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+				}
+				responseErrors = append(responseErrors, response.Err)
+			}
+		}
+
+		// handle end
+		if len(t.pendingPartitionsInCurrentTxn) == 0 {
+			DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successful to add partitions txn %+v\n",
+				t.transactionalID, addPartResponse)
+			return false, nil
+		}
+		return true, Wrap(ErrAddPartitionsToTxn, responseErrors...)
+	}, nil)
+}
+
+// Build a new transaction manager sharing the producer's client.
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+	txnmgr := &transactionManager{
+		producerID:                    noProducerID,
+		producerEpoch:                 noProducerEpoch,
+		client:                        client,
+		pendingPartitionsInCurrentTxn: topicPartitionSet{},
+		partitionsInCurrentTxn:        topicPartitionSet{},
+		offsetsInCurrentTxn:           make(map[string]topicPartitionOffsets),
+		status:                        ProducerTxnFlagUninitialized,
+	}
+
+	if conf.Producer.Idempotent {
+		txnmgr.transactionalID = conf.Producer.Transaction.ID
+		txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout
+		txnmgr.sequenceNumbers = make(map[string]int32)
+		txnmgr.mutex = sync.Mutex{}
+
+		var err error
+		txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId()
+		if err != nil {
+			return nil, err
+		}
+		Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n",
+			txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch)
+	}
+
+	return txnmgr, nil
+}
+
+// re-init producer-id and producer-epoch if needed.
+func (t *transactionManager) initializeTransactions() (err error) {
+	t.producerID, t.producerEpoch, err = t.initProducerId()
+	return
+}
diff --git a/vendor/github.com/IBM/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
new file mode 100644
index 0000000..f390172
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
@@ -0,0 +1,166 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+	Version         int16
+	TransactionalID string
+	GroupID         string
+	ProducerID      int64
+	ProducerEpoch   int16
+	Topics          map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) setVersion(v int16) {
+	t.Version = v
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(t.TransactionalID); err != nil {
+		return err
+	}
+	if err := pe.putString(t.GroupID); err != nil {
+		return err
+	}
+	pe.putInt64(t.ProducerID)
+	pe.putInt16(t.ProducerEpoch)
+
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+	for topic, partitions := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(partitions)); err != nil {
+			return err
+		}
+		for _, partition := range partitions {
+			if err := partition.encode(pe, t.Version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
+	if t.TransactionalID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.GroupID, err = pd.getString(); err != nil {
+		return err
+	}
+	if t.ProducerID, err = pd.getInt64(); err != nil {
+		return err
+	}
+	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+		return err
+	}
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionOffsetMetadata)
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+		for j := 0; j < m; j++ {
+			partitionOffsetMetadata := new(PartitionOffsetMetadata)
+			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+				return err
+			}
+			t.Topics[topic][j] = partitionOffsetMetadata
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+	return apiKeyTxnOffsetCommit
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+	return a.Version
+}
+
+func (a *TxnOffsetCommitRequest) headerVersion() int16 {
+	return 1
+}
+
+func (a *TxnOffsetCommitRequest) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_1_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+type PartitionOffsetMetadata struct {
+	// Partition contains the index of the partition within the topic.
+	Partition int32
+	// Offset contains the message offset to be committed.
+	Offset int64
+	// LeaderEpoch contains the leader epoch of the last consumed record.
+	LeaderEpoch int32
+	// Metadata contains any associated metadata the client wants to keep.
+	Metadata *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error {
+	pe.putInt32(p.Partition)
+	pe.putInt64(p.Offset)
+
+	if version >= 2 {
+		pe.putInt32(p.LeaderEpoch)
+	}
+
+	if err := pe.putNullableString(p.Metadata); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+	if p.Partition, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if p.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if version >= 2 {
+		if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+			return err
+		}
+	}
+
+	if p.Metadata, err = pd.getNullableString(); err != nil {
+		return err
+	}
+
+	return nil
+}
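This request is normally assembled by the transaction manager (e.g. when the producer's AddOffsetsToTxn is used), but the types are exported; a hand-rolled sketch with illustrative IDs and offsets:

```go
meta := "checkpoint-1" // illustrative consumer metadata
req := &sarama.TxnOffsetCommitRequest{
	Version:         2, // LeaderEpoch is only written to the wire from v2 onward
	TransactionalID: "example-txn-id",
	GroupID:         "example-group",
	ProducerID:      4000,
	ProducerEpoch:   1,
	Topics: map[string][]*sarama.PartitionOffsetMetadata{
		"events": {
			{Partition: 0, Offset: 42, LeaderEpoch: 3, Metadata: &meta},
		},
	},
}
_ = req // in practice this is sent to the transaction coordinator broker
```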
diff --git a/vendor/github.com/IBM/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
new file mode 100644
index 0000000..19bcad3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import (
+	"time"
+)
+
+type TxnOffsetCommitResponse struct {
+	Version      int16
+	ThrottleTime time.Duration
+	Topics       map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) setVersion(v int16) {
+	t.Version = v
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+	pe.putDurationMs(t.ThrottleTime)
+	if err := pe.putArrayLength(len(t.Topics)); err != nil {
+		return err
+	}
+
+	for topic, e := range t.Topics {
+		if err := pe.putString(topic); err != nil {
+			return err
+		}
+		if err := pe.putArrayLength(len(e)); err != nil {
+			return err
+		}
+		for _, partitionError := range e {
+			if err := partitionError.encode(pe); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+	t.Version = version
+	throttleTime, err := pd.getInt32()
+	if err != nil {
+		return err
+	}
+	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+	n, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+
+	t.Topics = make(map[string][]*PartitionError)
+
+	for i := 0; i < n; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+
+		m, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+
+		t.Topics[topic] = make([]*PartitionError, m)
+
+		for j := 0; j < m; j++ {
+			t.Topics[topic][j] = new(PartitionError)
+			if err := t.Topics[topic][j].decode(pd, version); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+	return apiKeyTxnOffsetCommit
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+	return a.Version
+}
+
+func (a *TxnOffsetCommitResponse) headerVersion() int16 {
+	return 0
+}
+
+func (a *TxnOffsetCommitResponse) isValidVersion() bool {
+	return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+	switch a.Version {
+	case 2:
+		return V2_1_0_0
+	case 1:
+		return V2_0_0_0
+	case 0:
+		return V0_11_0_0
+	default:
+		return V2_1_0_0
+	}
+}
+
+func (r *TxnOffsetCommitResponse) throttleTime() time.Duration {
+	return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go
new file mode 100644
index 0000000..9b87cb8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/utils.go
@@ -0,0 +1,393 @@
+package sarama
+
+import (
+	"bufio"
+	"fmt"
+	"math/rand"
+	"net"
+	"regexp"
+	"time"
+)
+
+const (
+	defaultRetryBackoff    = 100 * time.Millisecond
+	defaultRetryMaxBackoff = 1000 * time.Millisecond
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+	return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+	return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+	ret := make([]int32, 0, len(input))
+	ret = append(ret, input...)
+	return ret
+}
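int32Slice implements sort.Interface, so package code can order partition IDs in place; a minimal sketch, assuming "sort" is imported:

```go
partitions := []int32{3, 1, 2}
sort.Sort(int32Slice(partitions))
// partitions is now [1 2 3]
```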
+
+func withRecover(fn func()) {
+	defer func() {
+		handler := PanicHandler
+		if handler != nil {
+			if err := recover(); err != nil {
+				handler(err)
+			}
+		}
+	}()
+
+	fn()
+}
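withRecover forwards panics from sarama's internal goroutines to the package-level PanicHandler when one is installed; a sketch of a handler that logs instead of crashing the process, assuming the standard "log" package:

```go
sarama.PanicHandler = func(v interface{}) {
	log.Printf("sarama goroutine panicked: %v", v)
}
```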
+
+func safeAsyncClose(b *Broker) {
+	go withRecover(func() {
+		if connected, _ := b.Connected(); connected {
+			if err := b.Close(); err != nil {
+				Logger.Println("Error closing broker", b.ID(), ":", err)
+			}
+		}
+	})
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in Kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
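Both encoders plug straight into ProducerMessage; a sketch with an illustrative topic and payload:

```go
msg := &sarama.ProducerMessage{
	Topic: "events", // illustrative
	Key:   sarama.StringEncoder("user-42"),
	Value: sarama.ByteEncoder([]byte(`{"action":"login"}`)),
}
```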
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//
+//	V1.IsAtLeast(V2) // false
+//	V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
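IsAtLeast is the usual way to gate features on the configured broker version; a sketch using record headers, which require 0.11+ (the header key and value are illustrative):

```go
if config.Version.IsAtLeast(sarama.V0_11_0_0) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("trace-id"),
		Value: []byte("abc123"),
	})
}
```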
+
+// Effectively constant values defining the supported Kafka versions.
+var (
+	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+	V0_10_2_2 = newKafkaVersion(0, 10, 2, 2)
+	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
+	V1_0_1_0  = newKafkaVersion(1, 0, 1, 0)
+	V1_0_2_0  = newKafkaVersion(1, 0, 2, 0)
+	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
+	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
+	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
+	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
+	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
+	V2_1_1_0  = newKafkaVersion(2, 1, 1, 0)
+	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
+	V2_2_1_0  = newKafkaVersion(2, 2, 1, 0)
+	V2_2_2_0  = newKafkaVersion(2, 2, 2, 0)
+	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
+	V2_3_1_0  = newKafkaVersion(2, 3, 1, 0)
+	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
+	V2_4_1_0  = newKafkaVersion(2, 4, 1, 0)
+	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
+	V2_5_1_0  = newKafkaVersion(2, 5, 1, 0)
+	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
+	V2_6_1_0  = newKafkaVersion(2, 6, 1, 0)
+	V2_6_2_0  = newKafkaVersion(2, 6, 2, 0)
+	V2_6_3_0  = newKafkaVersion(2, 6, 3, 0)
+	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
+	V2_7_1_0  = newKafkaVersion(2, 7, 1, 0)
+	V2_7_2_0  = newKafkaVersion(2, 7, 2, 0)
+	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
+	V2_8_1_0  = newKafkaVersion(2, 8, 1, 0)
+	V2_8_2_0  = newKafkaVersion(2, 8, 2, 0)
+	V3_0_0_0  = newKafkaVersion(3, 0, 0, 0)
+	V3_0_1_0  = newKafkaVersion(3, 0, 1, 0)
+	V3_0_2_0  = newKafkaVersion(3, 0, 2, 0)
+	V3_1_0_0  = newKafkaVersion(3, 1, 0, 0)
+	V3_1_1_0  = newKafkaVersion(3, 1, 1, 0)
+	V3_1_2_0  = newKafkaVersion(3, 1, 2, 0)
+	V3_2_0_0  = newKafkaVersion(3, 2, 0, 0)
+	V3_2_1_0  = newKafkaVersion(3, 2, 1, 0)
+	V3_2_2_0  = newKafkaVersion(3, 2, 2, 0)
+	V3_2_3_0  = newKafkaVersion(3, 2, 3, 0)
+	V3_3_0_0  = newKafkaVersion(3, 3, 0, 0)
+	V3_3_1_0  = newKafkaVersion(3, 3, 1, 0)
+	V3_3_2_0  = newKafkaVersion(3, 3, 2, 0)
+	V3_4_0_0  = newKafkaVersion(3, 4, 0, 0)
+	V3_4_1_0  = newKafkaVersion(3, 4, 1, 0)
+	V3_5_0_0  = newKafkaVersion(3, 5, 0, 0)
+	V3_5_1_0  = newKafkaVersion(3, 5, 1, 0)
+	V3_5_2_0  = newKafkaVersion(3, 5, 2, 0)
+	V3_6_0_0  = newKafkaVersion(3, 6, 0, 0)
+	V3_6_1_0  = newKafkaVersion(3, 6, 1, 0)
+	V3_6_2_0  = newKafkaVersion(3, 6, 2, 0)
+	V3_7_0_0  = newKafkaVersion(3, 7, 0, 0)
+	V3_7_1_0  = newKafkaVersion(3, 7, 1, 0)
+	V3_7_2_0  = newKafkaVersion(3, 7, 2, 0)
+	V3_8_0_0  = newKafkaVersion(3, 8, 0, 0)
+	V3_8_1_0  = newKafkaVersion(3, 8, 1, 0)
+	V3_9_0_0  = newKafkaVersion(3, 9, 0, 0)
+	V3_9_1_0  = newKafkaVersion(3, 9, 1, 0)
+	V4_0_0_0  = newKafkaVersion(4, 0, 0, 0)
+	V4_1_0_0  = newKafkaVersion(4, 1, 0, 0)
+
+	SupportedVersions = []KafkaVersion{
+		V0_8_2_0,
+		V0_8_2_1,
+		V0_8_2_2,
+		V0_9_0_0,
+		V0_9_0_1,
+		V0_10_0_0,
+		V0_10_0_1,
+		V0_10_1_0,
+		V0_10_1_1,
+		V0_10_2_0,
+		V0_10_2_1,
+		V0_10_2_2,
+		V0_11_0_0,
+		V0_11_0_1,
+		V0_11_0_2,
+		V1_0_0_0,
+		V1_0_1_0,
+		V1_0_2_0,
+		V1_1_0_0,
+		V1_1_1_0,
+		V2_0_0_0,
+		V2_0_1_0,
+		V2_1_0_0,
+		V2_1_1_0,
+		V2_2_0_0,
+		V2_2_1_0,
+		V2_2_2_0,
+		V2_3_0_0,
+		V2_3_1_0,
+		V2_4_0_0,
+		V2_4_1_0,
+		V2_5_0_0,
+		V2_5_1_0,
+		V2_6_0_0,
+		V2_6_1_0,
+		V2_6_2_0,
+		V2_6_3_0,
+		V2_7_0_0,
+		V2_7_1_0,
+		V2_7_2_0,
+		V2_8_0_0,
+		V2_8_1_0,
+		V2_8_2_0,
+		V3_0_0_0,
+		V3_0_1_0,
+		V3_0_2_0,
+		V3_1_0_0,
+		V3_1_1_0,
+		V3_1_2_0,
+		V3_2_0_0,
+		V3_2_1_0,
+		V3_2_2_0,
+		V3_2_3_0,
+		V3_3_0_0,
+		V3_3_1_0,
+		V3_3_2_0,
+		V3_4_0_0,
+		V3_4_1_0,
+		V3_5_0_0,
+		V3_5_1_0,
+		V3_5_2_0,
+		V3_6_0_0,
+		V3_6_1_0,
+		V3_6_2_0,
+		V3_7_0_0,
+		V3_7_1_0,
+		V3_7_2_0,
+		V3_8_0_0,
+		V3_8_1_0,
+		V3_9_0_0,
+		V3_9_1_0,
+		V4_0_0_0,
+		V4_1_0_0,
+	}
+	MinVersion     = V0_8_2_0
+	MaxVersion     = V4_1_0_0
+	DefaultVersion = V2_1_0_0
+
+	// reduced set of protocol versions to matrix test
+	fvtRangeVersions = []KafkaVersion{
+		V0_8_2_2,
+		V0_10_2_2,
+		V1_0_2_0,
+		V1_1_1_0,
+		V2_0_1_0,
+		V2_2_2_0,
+		V2_4_1_0,
+		V2_6_3_0,
+		V2_8_2_0,
+		V3_1_2_0,
+		V3_3_2_0,
+		V3_6_2_0,
+	}
+)
+
+var (
+	// This regex validates that a string complies with the pre-Kafka 1.0.0 four-part version format, for example 0.11.0.3
+	validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`)
+
+	// This regex validates that a string complies with the Kafka 1.0.0-and-later three-part format, for example 1.0.0
+	validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
+)
+
+// ParseKafkaVersion parses a version string and returns the corresponding KafkaVersion, or an error if the string is not a valid version.
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+	if len(s) < 5 {
+		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
+	}
+	var major, minor, veryMinor, patch uint
+	var err error
+	if s[0] == '0' {
+		err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+	} else {
+		err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+	}
+	if err != nil {
+		return DefaultVersion, err
+	}
+	return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error {
+	if !pattern.MatchString(s) {
+		return fmt.Errorf("invalid version `%s`", s)
+	}
+	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+	return err
+}
+
+func (v KafkaVersion) String() string {
+	if v.version[0] == 0 {
+		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+	}
+
+	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+}
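Parsing and printing round-trip through the two formats; a small sketch, assuming "fmt" and "log" are imported:

```go
v, err := sarama.ParseKafkaVersion("2.8.1")
if err != nil {
	log.Fatal(err)
}
fmt.Println(v)                            // 2.8.1
fmt.Println(v.IsAtLeast(sarama.V2_1_0_0)) // true

// Pre-1.0 releases keep the four-part form.
old, _ := sarama.ParseKafkaVersion("0.11.0.2")
fmt.Println(old) // 0.11.0.2
```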
+
+// NewExponentialBackoff returns a function that implements an exponential backoff strategy with jitter.
+// It follows KIP-580, implementing the formula:
+// MIN(retry.backoff.max.ms, (retry.backoff.ms * 2**(failures - 1)) * random(0.8, 1.2))
+// This ensures retries start with `backoff` and exponentially increase until `maxBackoff`, with added jitter.
+// The behavior when `failures = 0` is not explicitly defined in KIP-580 and is left to implementation discretion.
+//
+// Example usage:
+//
+//	backoffFunc := sarama.NewExponentialBackoff(config.Producer.Retry.Backoff, 2*time.Second)
+//	config.Producer.Retry.BackoffFunc = backoffFunc
+func NewExponentialBackoff(backoff time.Duration, maxBackoff time.Duration) func(retries, maxRetries int) time.Duration {
+	if backoff <= 0 {
+		backoff = defaultRetryBackoff
+	}
+	if maxBackoff <= 0 {
+		maxBackoff = defaultRetryMaxBackoff
+	}
+
+	if backoff > maxBackoff {
+		Logger.Println("Warning: backoff is greater than maxBackoff, using maxBackoff instead.")
+		backoff = maxBackoff
+	}
+
+	return func(retries, maxRetries int) time.Duration {
+		if retries <= 0 {
+			return backoff
+		}
+
+		calculatedBackoff := backoff * time.Duration(1<<(retries-1))
+		jitter := 0.8 + 0.4*rand.Float64()
+		calculatedBackoff = time.Duration(float64(calculatedBackoff) * jitter)
+
+		return min(calculatedBackoff, maxBackoff)
+	}
+}
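To make the formula concrete: with backoff = 100ms and retries = 3, the raw value is 100ms * 2^(3-1) = 400ms, and the 0.8-1.2 jitter factor lands the result between roughly 320ms and 480ms, capped at maxBackoff. The doc comment above shows the producer hook; a sketch of adapting the same strategy to the consumer's one-argument hook:

```go
backoff := sarama.NewExponentialBackoff(100*time.Millisecond, 2*time.Second)
config := sarama.NewConfig()
config.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
	return backoff(retries, 0) // maxRetries is ignored by this strategy
}
```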
diff --git a/vendor/github.com/IBM/sarama/version.go b/vendor/github.com/IBM/sarama/version.go
new file mode 100644
index 0000000..d3b9d53
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/version.go
@@ -0,0 +1,27 @@
+package sarama
+
+import (
+	"runtime/debug"
+	"sync"
+)
+
+var (
+	v     string
+	vOnce sync.Once
+)
+
+func version() string {
+	vOnce.Do(func() {
+		bi, ok := debug.ReadBuildInfo()
+		if ok {
+			v = bi.Main.Version
+		}
+		if v == "" || v == "(devel)" {
+			// If we can't read a Go module version, then they're using a git
+			// clone or a vendored module, so all we can do is report "dev" for
+			// the version to make a valid ApiVersions request.
+			v = "dev"
+		}
+	})
+	return v
+}
diff --git a/vendor/github.com/IBM/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go
new file mode 100644
index 0000000..6073ce7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/zstd.go
@@ -0,0 +1,74 @@
+package sarama
+
+import (
+	"sync"
+
+	"github.com/klauspost/compress/zstd"
+)
+
+// zstdMaxBufferedEncoders is the maximum number of not-in-use zstd encoders kept pooled.
+// If the pool of encoders is exhausted then new encoders will be created on the fly
+const zstdMaxBufferedEncoders = 1
+
+type ZstdEncoderParams struct {
+	Level int
+}
+type ZstdDecoderParams struct {
+}
+
+var zstdDecMap sync.Map
+
+var zstdAvailableEncoders sync.Map
+
+func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder {
+	if c, ok := zstdAvailableEncoders.Load(params); ok {
+		return c.(chan *zstd.Encoder)
+	}
+	c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan *zstd.Encoder, zstdMaxBufferedEncoders))
+	return c.(chan *zstd.Encoder)
+}
+
+func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder {
+	select {
+	case enc := <-getZstdEncoderChannel(params):
+		return enc
+	default:
+		encoderLevel := zstd.SpeedDefault
+		if params.Level != CompressionLevelDefault {
+			encoderLevel = zstd.EncoderLevelFromZstd(params.Level)
+		}
+		zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true),
+			zstd.WithEncoderLevel(encoderLevel),
+			zstd.WithEncoderConcurrency(1))
+		return zstdEnc
+	}
+}
+
+func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) {
+	select {
+	case getZstdEncoderChannel(params) <- enc:
+	default:
+	}
+}
+
+func getDecoder(params ZstdDecoderParams) *zstd.Decoder {
+	if ret, ok := zstdDecMap.Load(params); ok {
+		return ret.(*zstd.Decoder)
+	}
+	// It's possible to race and create multiple new readers.
+	// Only one will survive GC after use.
+	zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+	zstdDecMap.Store(params, zstdDec)
+	return zstdDec
+}
+
+func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) {
+	return getDecoder(params).DecodeAll(src, dst)
+}
+
+func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) {
+	enc := getZstdEncoder(params)
+	out := enc.EncodeAll(src, dst)
+	releaseEncoder(params, enc)
+	return out, nil
+}
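The functions above are package-internal, but their net effect is a simple pooled round-trip; a sketch from inside package sarama, with illustrative data and "fmt" assumed imported:

```go
params := ZstdEncoderParams{Level: CompressionLevelDefault}
compressed, _ := zstdCompress(params, nil, []byte("hello, kafka"))
plain, err := zstdDecompress(ZstdDecoderParams{}, nil, compressed)
if err == nil {
	fmt.Printf("%s\n", plain) // hello, kafka
}
```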
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
deleted file mode 100644
index 2c9adc2..0000000
--- a/vendor/github.com/Shopify/sarama/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-*.test
-
-# Folders
-_obj
-_test
-.vagrant
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-coverage.txt
-profile.out
-
-simplest-uncommitted-msg-0.1-jar-with-dependencies.jar
diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml
deleted file mode 100644
index 09e5c46..0000000
--- a/vendor/github.com/Shopify/sarama/.golangci.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-run:
-  timeout: 5m
-  deadline: 10m
-
-linters-settings:
-  govet:
-    check-shadowing: false
-  golint:
-    min-confidence: 0
-  gocyclo:
-    min-complexity: 99
-  maligned:
-    suggest-new: true
-  dupl:
-    threshold: 100
-  goconst:
-    min-len: 2
-    min-occurrences: 3
-  misspell:
-    locale: US
-  goimports:
-    local-prefixes: github.com/Shopify/sarama
-  gocritic:
-    enabled-tags:
-      - diagnostic
-      - experimental
-      - opinionated
-      - performance
-      - style
-    disabled-checks:
-      - wrapperFunc
-      - ifElseChain
-  funlen:
-    lines: 300
-    statements: 300
-
-linters:
-  disable-all: true
-  enable:
-    - bodyclose
-    - deadcode
-    - depguard
-    - dogsled
-    # - dupl
-    - errcheck
-    - funlen
-    - gochecknoinits
-    # - goconst
-    # - gocritic
-    - gocyclo
-    - gofmt
-    - goimports
-    # - golint
-    - gosec
-    # - gosimple
-    - govet
-    # - ineffassign
-    # - misspell
-    # - nakedret
-    # - scopelint
-    - staticcheck
-    - structcheck
-    # - stylecheck
-    - typecheck
-    - unconvert
-    - unused
-    - varcheck
-    - whitespace
-
-issues:
-  exclude:
-    - "G404: Use of weak random number generator"
-  # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
-  max-same-issues: 0
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
deleted file mode 100644
index 59ccd1d..0000000
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ /dev/null
@@ -1,1019 +0,0 @@
-# Changelog
-
-#### Unreleased
-
-#### Version 1.28.0 (2021-02-15)
-
-**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
-
-- #1870 - @kvch - Update Kerberos library to latest major
-- #1876 - @bai - Update docs, reference pkg.go.dev
-- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
-- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
-- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
-- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
-- #1862 - @bai - Fix CI setenv permissions issues
-- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
-- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
-
-#### Version 1.27.2 (2020-10-21)
-
-# Improvements
-
-#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
-
-# Fixes
-
-#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
-
-#### Version 1.27.1 (2020-10-07)
-
-# Improvements
-
-#1775 - @d1egoaz - Adds a Producer Interceptor example
-#1781 - @justin-chen - Refresh brokers given list of seed brokers
-#1784 - @justin-chen - Add randomize seed broker method
-#1790 - @d1egoaz - remove example binary
-#1798 - @bai - Test against Go 1.15
-#1785 - @justin-chen - Add private method to Client interface to prevent implementation
-#1802 - @uvw - Support Go 1.13 error unwrapping
-
-# Fixes
-
-#1791 - @stanislavkozlovski - bump default version to 1.0.0
-
-#### Version 1.27.0 (2020-08-11)
-
-# Improvements
-
-#1466 - @rubenvp8510  - Expose kerberos fast negotiation configuration
-#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
-#1699 - @wclaeys  - Consumer group support for manually committing offsets
-#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
-#1726 - @d1egoaz - Include zstd on the functional tests
-#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
-#1738 - @varun06 - fixed variable names that clashed with some std lib package names
-#1741 - @varun06 - updated zstd dependency to latest v1.10.10
-#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
-#1763 - @alrs - remove deprecated tls options from test
-#1769 - @bai - Add support for Kafka 2.6.0
-
-# Fixes
-
-#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
-#1744 - @alrs  - Fix isBalanced Function Signature
-
-#### Version 1.26.4 (2020-05-19)
-
-# Fixes
-
-- #1701 - @d1egoaz - Set server name only for the current broker
-- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
-
-#### Version 1.26.3 (2020-05-07)
-
-# Fixes
-
-- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
-
-#### Version 1.26.2 (2020-05-06)
-
-# ⚠️ Known Issues
-
-This release has been marked as not ready for production and may be unstable, please use v1.26.4.
-
-# Improvements
-
-- #1560 - @iyacontrol - add sync pool for gzip 1-9
-- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
-- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs
-- #1632 - @bai - Add support for Go 1.14
-- #1640 - @random-dwi - Feature/fix list partition reassignments
-- #1646 - @mimaison - Add DescribeLogDirs to admin client
-- #1667 - @bai - Add support for kafka 2.5.0
-
-# Fixes
-
-- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
-- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
-- #1602 - @d1egoaz - adds a note about consumer groups Consume method
-- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
-- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
-- #1614 - @alrs - produce_response.go: Remove Unused Functions
-- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
-- #1639 - @agriffaut - Handle errors with no message but error code
-- #1643 - @kzinglzy - fix `config.net.keepalive`
-- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
-- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
-- #1650 - @lavoiesl - Return the response error in heartbeatLoop
-- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
-- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
-
-#### Version 1.26.1 (2020-02-04)
-
-Improvements:
-- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
-- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
-- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
-- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
-
-Bug Fixes:
-- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
-- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
-
-#### Version 1.26.0 (2020-01-24)
-
-New Features:
-- Enable zstd compression
-  ([1574](https://github.com/Shopify/sarama/pull/1574),
-  [1582](https://github.com/Shopify/sarama/pull/1582))
-- Support headers in tools kafka-console-producer
-  ([1549](https://github.com/Shopify/sarama/pull/1549))
-
-Improvements:
-- Add SASL AuthIdentity to SASL frames (authzid)
-  ([1585](https://github.com/Shopify/sarama/pull/1585)).
-
-Bug Fixes:
-- Sending messages with ZStd compression enabled fails in multiple ways
-  ([1252](https://github.com/Shopify/sarama/issues/1252)).
-- Use the broker for any admin on BrokerConfig
-  ([1571](https://github.com/Shopify/sarama/pull/1571)).
-- Set DescribeConfigRequest Version field
-  ([1576](https://github.com/Shopify/sarama/pull/1576)).
-- ConsumerGroup flooding logs with client/metadata update req
-  ([1578](https://github.com/Shopify/sarama/pull/1578)).
-- MetadataRequest version in DescribeCluster
-  ([1580](https://github.com/Shopify/sarama/pull/1580)).
-- Fix deadlock in consumer group handleError
-  ([1581](https://github.com/Shopify/sarama/pull/1581))
-- Fill in the Fetch{Request,Response} protocol
-  ([1582](https://github.com/Shopify/sarama/pull/1582)).
-- Retry topic request on ControllerNotAvailable
-  ([1586](https://github.com/Shopify/sarama/pull/1586)).
-
-#### Version 1.25.0 (2020-01-13)
-
-New Features:
-- Support TLS protocol in kafka-producer-performance
-  ([1538](https://github.com/Shopify/sarama/pull/1538)).
-- Add support for kafka 2.4.0
-  ([1552](https://github.com/Shopify/sarama/pull/1552)).
-
-Improvements:
-- Allow the Consumer to disable auto-commit offsets
-  ([1164](https://github.com/Shopify/sarama/pull/1164)).
-- Produce records with consistent timestamps
-  ([1455](https://github.com/Shopify/sarama/pull/1455)).
-
-Bug Fixes:
-- Fix incorrect SetTopicMetadata name mentions
-  ([1534](https://github.com/Shopify/sarama/pull/1534)).
-- Fix client.tryRefreshMetadata Println
-  ([1535](https://github.com/Shopify/sarama/pull/1535)).
-- Fix panic on calling updateMetadata on closed client
-  ([1531](https://github.com/Shopify/sarama/pull/1531)).
-- Fix possible faulty metrics in TestFuncProducing
-  ([1545](https://github.com/Shopify/sarama/pull/1545)).
-
-#### Version 1.24.1 (2019-10-31)
-
-New Features:
-- Add DescribeLogDirs Request/Response pair
-  ([1520](https://github.com/Shopify/sarama/pull/1520)).
-
-Bug Fixes:
-- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
-  ([1518](https://github.com/Shopify/sarama/pull/1518)).
-- Fix issue with consumergroup not rebalancing when new partition is added
-  ([1525](https://github.com/Shopify/sarama/pull/1525)).
-- Ensure consistent use of read/write deadlines
-  ([1529](https://github.com/Shopify/sarama/pull/1529)).
-
-#### Version 1.24.0 (2019-10-09)
-
-New Features:
-- Add sticky partition assignor
-  ([1416](https://github.com/Shopify/sarama/pull/1416)).
-- Switch from cgo zstd package to pure Go implementation
-  ([1477](https://github.com/Shopify/sarama/pull/1477)).
-
-Improvements:
-- Allow creating ClusterAdmin from client
-  ([1415](https://github.com/Shopify/sarama/pull/1415)).
-- Set KafkaVersion in ListAcls method
-  ([1452](https://github.com/Shopify/sarama/pull/1452)).
-- Set request version in CreateACL ClusterAdmin method
-  ([1458](https://github.com/Shopify/sarama/pull/1458)).
-- Set request version in DeleteACL ClusterAdmin method
-  ([1461](https://github.com/Shopify/sarama/pull/1461)).
-- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
-  ([1464](https://github.com/Shopify/sarama/pull/1464)).
-- Remove direct usage of gofork
-  ([1465](https://github.com/Shopify/sarama/pull/1465)).
-- Add support for Go 1.13
-  ([1478](https://github.com/Shopify/sarama/pull/1478)).
-- Improve behavior of NewMockListAclsResponse
-  ([1481](https://github.com/Shopify/sarama/pull/1481)).
-
-Bug Fixes:
-- Fix race condition in consumergroup example
-  ([1434](https://github.com/Shopify/sarama/pull/1434)).
-- Fix brokerProducer goroutine leak
-  ([1442](https://github.com/Shopify/sarama/pull/1442)).
-- Use released version of lz4 library
-  ([1469](https://github.com/Shopify/sarama/pull/1469)).
-- Set correct version in MockDeleteTopicsResponse
-  ([1484](https://github.com/Shopify/sarama/pull/1484)).
-- Fix CLI help message typo
-  ([1494](https://github.com/Shopify/sarama/pull/1494)).
-
-Known Issues:
-- Please **don't** use Zstd, as it doesn't work right now.
-  See https://github.com/Shopify/sarama/issues/1252
-
-#### Version 1.23.1 (2019-07-22)
-
-Bug Fixes:
-- Fix fetch delete bug record
-  ([1425](https://github.com/Shopify/sarama/pull/1425)).
-- Handle SASL/OAUTHBEARER token rejection
-  ([1428](https://github.com/Shopify/sarama/pull/1428)).
-
-#### Version 1.23.0 (2019-07-02)
-
-New Features:
-- Add support for Kafka 2.3.0
-  ([1418](https://github.com/Shopify/sarama/pull/1418)).
-- Add support for ListConsumerGroupOffsets v2
-  ([1374](https://github.com/Shopify/sarama/pull/1374)).
-- Add support for DeleteConsumerGroup
-  ([1417](https://github.com/Shopify/sarama/pull/1417)).
-- Add support for SASLVersion configuration
-  ([1410](https://github.com/Shopify/sarama/pull/1410)).
-- Add kerberos support
-  ([1366](https://github.com/Shopify/sarama/pull/1366)).
-
-Improvements:
-- Improve sasl_scram_client example
-  ([1406](https://github.com/Shopify/sarama/pull/1406)).
-- Fix shutdown and race-condition in consumer-group example
-  ([1404](https://github.com/Shopify/sarama/pull/1404)).
-- Add support for error codes 77—81
-  ([1397](https://github.com/Shopify/sarama/pull/1397)).
-- Pool internal objects allocated per message
-  ([1385](https://github.com/Shopify/sarama/pull/1385)).
-- Reduce packet decoder allocations
-  ([1373](https://github.com/Shopify/sarama/pull/1373)).
-- Support timeout when fetching metadata
-  ([1359](https://github.com/Shopify/sarama/pull/1359)).
-
-Bug Fixes:
-- Fix fetch size integer overflow
-  ([1376](https://github.com/Shopify/sarama/pull/1376)).
-- Handle and log throttled FetchResponses
-  ([1383](https://github.com/Shopify/sarama/pull/1383)).
-- Refactor misspelled word Resouce to Resource
-  ([1368](https://github.com/Shopify/sarama/pull/1368)).
-
-#### Version 1.22.1 (2019-04-29)
-
-Improvements:
-- Use zstd 1.3.8
-  ([1350](https://github.com/Shopify/sarama/pull/1350)).
-- Add support for SaslHandshakeRequest v1
-  ([1354](https://github.com/Shopify/sarama/pull/1354)).
-
-Bug Fixes:
-- Fix V5 MetadataRequest nullable topics array
-  ([1353](https://github.com/Shopify/sarama/pull/1353)).
-- Use a different SCRAM client for each broker connection
-  ([1349](https://github.com/Shopify/sarama/pull/1349)).
-- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
-  ([1344](https://github.com/Shopify/sarama/pull/1344)).
-
-#### Version 1.22.0 (2019-04-09)
-
-New Features:
-- Add Offline Replicas Operation to Client
-  ([1318](https://github.com/Shopify/sarama/pull/1318)).
-- Allow using proxy when connecting to broker
-  ([1326](https://github.com/Shopify/sarama/pull/1326)).
-- Implement ReadCommitted
-  ([1307](https://github.com/Shopify/sarama/pull/1307)).
-- Add support for Kafka 2.2.0
-  ([1331](https://github.com/Shopify/sarama/pull/1331)).
-- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
-  ([1331](https://github.com/Shopify/sarama/pull/1295)).
-
-Improvements:
-- Unregister all broker metrics on broker stop
-  ([1232](https://github.com/Shopify/sarama/pull/1232)).
-- Add SCRAM authentication example
-  ([1303](https://github.com/Shopify/sarama/pull/1303)).
-- Add consumergroup examples
-  ([1304](https://github.com/Shopify/sarama/pull/1304)).
-- Expose consumer batch size metric
-  ([1296](https://github.com/Shopify/sarama/pull/1296)).
-- Add TLS options to console producer and consumer
-  ([1300](https://github.com/Shopify/sarama/pull/1300)).
-- Reduce client close bookkeeping
-  ([1297](https://github.com/Shopify/sarama/pull/1297)).
-- Satisfy error interface in create responses
-  ([1154](https://github.com/Shopify/sarama/pull/1154)).
-- Please lint gods
-  ([1346](https://github.com/Shopify/sarama/pull/1346)).
-
-Bug Fixes:
-- Fix multi consumer group instance crash
-  ([1338](https://github.com/Shopify/sarama/pull/1338)).
-- Update lz4 to latest version
-  ([1347](https://github.com/Shopify/sarama/pull/1347)).
-- Retry ErrNotCoordinatorForConsumer in new consumergroup session
-  ([1231](https://github.com/Shopify/sarama/pull/1231)).
-- Fix cleanup error handler
-  ([1332](https://github.com/Shopify/sarama/pull/1332)).
-- Fix race condition in PartitionConsumer
-  ([1156](https://github.com/Shopify/sarama/pull/1156)).
-
-#### Version 1.21.0 (2019-02-24)
-
-New Features:
-- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
-  ([1236](https://github.com/Shopify/sarama/pull/1236)).
-- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
-  ([1178](https://github.com/Shopify/sarama/pull/1178)).
-- Implement SASL/OAUTHBEARER
-  ([1240](https://github.com/Shopify/sarama/pull/1240)).
-
-Improvements:
-- Add Go mod support
-  ([1282](https://github.com/Shopify/sarama/pull/1282)).
-- Add error codes 73—76
-  ([1239](https://github.com/Shopify/sarama/pull/1239)).
-- Add retry backoff function
-  ([1160](https://github.com/Shopify/sarama/pull/1160)).
-- Maintain metadata in the producer even when retries are disabled
-  ([1189](https://github.com/Shopify/sarama/pull/1189)).
-- Include ReplicaAssignment in ListTopics
-  ([1274](https://github.com/Shopify/sarama/pull/1274)).
-- Add producer performance tool
-  ([1222](https://github.com/Shopify/sarama/pull/1222)).
-- Add support LogAppend timestamps
-  ([1258](https://github.com/Shopify/sarama/pull/1258)).
-
-Bug Fixes:
-- Fix potential deadlock when a heartbeat request fails
-  ([1286](https://github.com/Shopify/sarama/pull/1286)).
-- Fix consuming compacted topic
-  ([1227](https://github.com/Shopify/sarama/pull/1227)).
-- Set correct Kafka version for DescribeConfigsRequest v1
-  ([1277](https://github.com/Shopify/sarama/pull/1277)).
-- Update kafka test version
-  ([1273](https://github.com/Shopify/sarama/pull/1273)).
-
-#### Version 1.20.1 (2019-01-10)
-
-New Features:
-- Add optional replica id in offset request
-  ([1100](https://github.com/Shopify/sarama/pull/1100)).
-
-Improvements:
-- Implement DescribeConfigs Request + Response v1 & v2
-  ([1230](https://github.com/Shopify/sarama/pull/1230)).
-- Reuse compression objects
-  ([1185](https://github.com/Shopify/sarama/pull/1185)).
-- Switch from png to svg for GoDoc link in README
-  ([1243](https://github.com/Shopify/sarama/pull/1243)).
-- Fix typo in deprecation notice for FetchResponseBlock.Records
-  ([1242](https://github.com/Shopify/sarama/pull/1242)).
-- Fix typos in consumer metadata response file
-  ([1244](https://github.com/Shopify/sarama/pull/1244)).
-
-Bug Fixes:
-- Revert to individual msg retries for non-idempotent
-  ([1203](https://github.com/Shopify/sarama/pull/1203)).
-- Respect MaxMessageBytes limit for uncompressed messages
-  ([1141](https://github.com/Shopify/sarama/pull/1141)).
-
-#### Version 1.20.0 (2018-12-10)
-
-New Features:
- - Add support for zstd compression
-   ([#1170](https://github.com/Shopify/sarama/pull/1170)).
- - Add support for Idempotent Producer
-   ([#1152](https://github.com/Shopify/sarama/pull/1152)).
- - Add support for Kafka 2.1.0
-   ([#1229](https://github.com/Shopify/sarama/pull/1229)).
- - Add support for OffsetCommit request/response pairs versions v1 to v5
-   ([#1201](https://github.com/Shopify/sarama/pull/1201)).
- - Add support for OffsetFetch request/response pair up to version v5
-   ([#1198](https://github.com/Shopify/sarama/pull/1198)).
-
-Improvements:
- - Export broker's Rack setting
-   ([#1173](https://github.com/Shopify/sarama/pull/1173)).
- - Always use latest patch version of Go on CI
-   ([#1202](https://github.com/Shopify/sarama/pull/1202)).
- - Add error codes 61 to 72
-   ([#1195](https://github.com/Shopify/sarama/pull/1195)).
-
-Bug Fixes:
- - Fix build without cgo
-   ([#1182](https://github.com/Shopify/sarama/pull/1182)).
- - Fix go vet suggestion in consumer group file
-   ([#1209](https://github.com/Shopify/sarama/pull/1209)).
- - Fix typos in code and comments
-   ([#1228](https://github.com/Shopify/sarama/pull/1228)).
-
-#### Version 1.19.0 (2018-09-27)
-
-New Features:
- - Implement a higher-level consumer group
-   ([#1099](https://github.com/Shopify/sarama/pull/1099)).
-
-Improvements:
- - Add support for Go 1.11
-   ([#1176](https://github.com/Shopify/sarama/pull/1176)).
-
-Bug Fixes:
- - Fix encoding of `MetadataResponse` with version 2 and higher
-   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
- - Fix race condition in mock async producer
-   ([#1174](https://github.com/Shopify/sarama/pull/1174)).
-
-#### Version 1.18.0 (2018-09-07)
-
-New Features:
- - Make `Partitioner.RequiresConsistency` vary per-message
-   ([#1112](https://github.com/Shopify/sarama/pull/1112)).
- - Add customizable partitioner
-   ([#1118](https://github.com/Shopify/sarama/pull/1118)).
- - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
-   `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
-   ([#1055](https://github.com/Shopify/sarama/pull/1055)).
-
-Improvements:
- - Add support for Kafka 2.0.0
-   ([#1149](https://github.com/Shopify/sarama/pull/1149)).
- - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
-   ([#1123](https://github.com/Shopify/sarama/pull/1123)).
- - Simpler offset management
-   ([#1127](https://github.com/Shopify/sarama/pull/1127)).
-
-Bug Fixes:
- - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
-   ([#1110](https://github.com/Shopify/sarama/pull/1110)).
- - Fix consumer block when response did not contain all the
-   expected topic/partition blocks
-   ([#1086](https://github.com/Shopify/sarama/pull/1086)).
- - Fix consumer block when response contains only control messages
-   ([#1115](https://github.com/Shopify/sarama/pull/1115)).
- - Add timeout config for ClusterAdmin requests
-   ([#1142](https://github.com/Shopify/sarama/pull/1142)).
- - Add version check when producing message with headers
-   ([#1117](https://github.com/Shopify/sarama/pull/1117)).
- - Fix `MetadataRequest` for empty list of topics
-   ([#1132](https://github.com/Shopify/sarama/pull/1132)).
- - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
-   ([#1125](https://github.com/Shopify/sarama/pull/1125)).
-
-#### Version 1.17.0 (2018-05-30)
-
-New Features:
- - Add support for gzip compression levels
-   ([#1044](https://github.com/Shopify/sarama/pull/1044)).
- - Add support for Metadata request/response pairs versions v1 to v5
-   ([#1047](https://github.com/Shopify/sarama/pull/1047),
-    [#1069](https://github.com/Shopify/sarama/pull/1069)).
- - Add versioning to JoinGroup request/response pairs
-   ([#1098](https://github.com/Shopify/sarama/pull/1098))
- - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
-   ([#1065](https://github.com/Shopify/sarama/pull/1065),
-    [#1096](https://github.com/Shopify/sarama/pull/1096),
-    [#1027](https://github.com/Shopify/sarama/pull/1027)).
- - Add `Controller()` method to Client interface
-   ([#1063](https://github.com/Shopify/sarama/pull/1063)).
-
-Improvements:
- - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
-   ([#1010](https://github.com/Shopify/sarama/pull/1010)).
- - Expose missing protocol parts: `msgSet` and `recordBatch`
-   ([#1049](https://github.com/Shopify/sarama/pull/1049)).
- - Add support for v1 DeleteTopics Request
-   ([#1052](https://github.com/Shopify/sarama/pull/1052)).
- - Add support for Go 1.10
-   ([#1064](https://github.com/Shopify/sarama/pull/1064)).
- - Claim support for Kafka 1.1.0
-   ([#1073](https://github.com/Shopify/sarama/pull/1073)).
-
-Bug Fixes:
- - Fix FindCoordinatorResponse.encode to allow nil Coordinator
-   ([#1050](https://github.com/Shopify/sarama/pull/1050),
-    [#1051](https://github.com/Shopify/sarama/pull/1051)).
- - Clear all metadata when we have the latest topic info
-   ([#1033](https://github.com/Shopify/sarama/pull/1033)).
- - Make `PartitionConsumer.Close` idempotent
-   ([#1092](https://github.com/Shopify/sarama/pull/1092)).
-
-#### Version 1.16.0 (2018-02-12)
-
-New Features:
- - Add support for the Create/Delete Topics request/response pairs
-   ([#1007](https://github.com/Shopify/sarama/pull/1007),
-    [#1008](https://github.com/Shopify/sarama/pull/1008)).
- - Add support for the Describe/Create/Delete ACL request/response pairs
-   ([#1009](https://github.com/Shopify/sarama/pull/1009)).
- - Add support for the five transaction-related request/response pairs
-   ([#1016](https://github.com/Shopify/sarama/pull/1016)).
-
-Improvements:
- - Permit setting version on mock producer responses
-   ([#999](https://github.com/Shopify/sarama/pull/999)).
- - Add `NewMockBrokerListener` helper for testing TLS connections
-   ([#1019](https://github.com/Shopify/sarama/pull/1019)).
- - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
-   which results in much higher throughput in most cases
-   ([#1024](https://github.com/Shopify/sarama/pull/1024)).
- - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
-   reduce CPU and memory usage when processing many partitions
-   ([#1028](https://github.com/Shopify/sarama/pull/1028)).
- - Assign relative offsets to messages in the producer to save the brokers a
-   recompression pass
-   ([#1002](https://github.com/Shopify/sarama/pull/1002),
-    [#1015](https://github.com/Shopify/sarama/pull/1015)).
-
-Bug Fixes:
- - Fix producing uncompressed batches with the new protocol format
-   ([#1032](https://github.com/Shopify/sarama/issues/1032)).
- - Fix consuming compacted topics with the new protocol format
-   ([#1005](https://github.com/Shopify/sarama/issues/1005)).
- - Fix consuming topics with a mix of protocol formats
-   ([#1021](https://github.com/Shopify/sarama/issues/1021)).
- - Fix consuming when the broker includes multiple batches in a single response
-   ([#1022](https://github.com/Shopify/sarama/issues/1022)).
- - Fix detection of `PartialTrailingMessage` when the partial message was
-   truncated before the magic value indicating its version
-   ([#1030](https://github.com/Shopify/sarama/pull/1030)).
- - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
-   ([#1035](https://github.com/Shopify/sarama/pull/1035)).
-
-#### Version 1.15.0 (2017-12-08)
-
-New Features:
- - Claim official support for Kafka 1.0, though it did already work
-   ([#984](https://github.com/Shopify/sarama/pull/984)).
- - Helper methods for Kafka version numbers to/from strings
-   ([#989](https://github.com/Shopify/sarama/pull/989)).
- - Implement CreatePartitions request/response
-   ([#985](https://github.com/Shopify/sarama/pull/985)).
-
-Improvements:
- - Add error codes 45-60
-   ([#986](https://github.com/Shopify/sarama/issues/986)).
-
-Bug Fixes:
- - Fix slow consuming for certain Kafka 0.11/1.0 configurations
-   ([#982](https://github.com/Shopify/sarama/pull/982)).
- - Correctly determine when a FetchResponse contains the new message format
-   ([#990](https://github.com/Shopify/sarama/pull/990)).
- - Fix producing with multiple headers
-   ([#996](https://github.com/Shopify/sarama/pull/996)).
- - Fix handling of truncated record batches
-   ([#998](https://github.com/Shopify/sarama/pull/998)).
- - Fix leaking metrics when closing brokers
-   ([#991](https://github.com/Shopify/sarama/pull/991)).
-
-#### Version 1.14.0 (2017-11-13)
-
-New Features:
- - Add support for the new Kafka 0.11 record-batch format, including the wire
-   protocol and the necessary behavioural changes in the producer and consumer.
-   Transactions and idempotency are not yet supported, but producing and
-   consuming should work with all the existing bells and whistles (batching,
-   compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
-   of Arista Networks for this work. Part of
-   ([#901](https://github.com/Shopify/sarama/issues/901)).
-
-Bug Fixes:
- - Fix encoding of ProduceResponse versions in test
-   ([#970](https://github.com/Shopify/sarama/pull/970)).
- - Return partial replicas list when we have it
-   ([#975](https://github.com/Shopify/sarama/pull/975)).
-
-#### Version 1.13.0 (2017-10-04)
-
-New Features:
- - Support for FetchRequest version 3
-   ([#905](https://github.com/Shopify/sarama/pull/905)).
- - Permit setting version on mock FetchResponses
-   ([#939](https://github.com/Shopify/sarama/pull/939)).
- - Add a configuration option to support storing only minimal metadata for
-   extremely large clusters
-   ([#937](https://github.com/Shopify/sarama/pull/937)).
- - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
-   ([#932](https://github.com/Shopify/sarama/pull/932)).
-
-Improvements:
- - Provide the block-level timestamp when consuming compressed messages
-   ([#885](https://github.com/Shopify/sarama/issues/885)).
- - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
-   by the broker, which can be meaningful
-   ([#930](https://github.com/Shopify/sarama/pull/930)).
- - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
-   variance in the actual timeout
-   ([#933](https://github.com/Shopify/sarama/pull/933)).
-
-Bug Fixes:
- - Gracefully handle messages with negative timestamps
-   ([#907](https://github.com/Shopify/sarama/pull/907)).
- - Raise a proper error when encountering an unknown message version
-   ([#940](https://github.com/Shopify/sarama/pull/940)).
-
-#### Version 1.12.0 (2017-05-08)
-
-New Features:
- - Added support for the `ApiVersions` request and response pair, and Kafka
-   version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
-   that you still need to specify the Kafka version in the Sarama configuration
-   for the time being.
- - Added a `Brokers` method to the Client which returns the complete set of
-   active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- - Added an `InSyncReplicas` method to the Client which returns the set of all
-   in-sync broker IDs for the given partition, now that the Kafka versions for
-   which this was misleading are no longer in our supported set
-   ([#872](https://github.com/Shopify/sarama/pull/872)).
- - Added a `NewCustomHashPartitioner` method which allows constructing a hash
-   partitioner with a custom hash method in case the default (FNV-1a) is not
-   suitable
-   ([#837](https://github.com/Shopify/sarama/pull/837),
-    [#841](https://github.com/Shopify/sarama/pull/841)).
-
-Improvements:
- - Recognize more Kafka error codes
-   ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-Bug Fixes:
- - Fix an issue where decoding a malformed FetchRequest would not return the
-   correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- - Respect ordering of group protocols in JoinGroupRequests. This fix is
-   transparent if you're using the `AddGroupProtocol` or
-   `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
-   the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
-   ([#812](https://github.com/Shopify/sarama/issues/812)).
- - Fix an alignment-related issue with atomics on 32-bit architectures
-   ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-#### Version 1.11.0 (2016-12-20)
-
-_Important:_ As of Sarama 1.11 it is necessary to set the config value of
-`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
-versions would silently override this value when instantiating a SyncProducer
-which led to unexpected values and data races.
-
-New Features:
- - Metrics! Thanks to Sébastien Launay for all his work on this feature
-   ([#701](https://github.com/Shopify/sarama/pull/701),
-    [#746](https://github.com/Shopify/sarama/pull/746),
-    [#766](https://github.com/Shopify/sarama/pull/766)).
- - Add support for LZ4 compression
-   ([#786](https://github.com/Shopify/sarama/pull/786)).
- - Add support for ListOffsetRequest v1 and Kafka 0.10.1
-   ([#775](https://github.com/Shopify/sarama/pull/775)).
- - Added a `HighWaterMarks` method to the Consumer which aggregates the
-   `HighWaterMarkOffset` values of its child topic/partitions
-   ([#769](https://github.com/Shopify/sarama/pull/769)).
-
-Bug Fixes:
- - Fixed producing when using timestamps, compression and Kafka 0.10
-   ([#759](https://github.com/Shopify/sarama/pull/759)).
- - Added missing decoder methods to DescribeGroups response
-   ([#756](https://github.com/Shopify/sarama/pull/756)).
- - Fix producer shutdown when `Return.Errors` is disabled
-   ([#787](https://github.com/Shopify/sarama/pull/787)).
- - Don't mutate configuration in SyncProducer
-   ([#790](https://github.com/Shopify/sarama/pull/790)).
- - Fix crash on SASL initialization failure
-   ([#795](https://github.com/Shopify/sarama/pull/795)).
-
-#### Version 1.10.1 (2016-08-30)
-
-Bug Fixes:
- - Fix the documentation for `HashPartitioner` which was incorrect
-   ([#717](https://github.com/Shopify/sarama/pull/717)).
- - Permit client creation even when it is limited by ACLs
-   ([#722](https://github.com/Shopify/sarama/pull/722)).
- - Several fixes to the consumer timer optimization code, regressions introduced
-   in v1.10.0. Go's timers are finicky
-   ([#730](https://github.com/Shopify/sarama/pull/730),
-    [#733](https://github.com/Shopify/sarama/pull/733),
-    [#734](https://github.com/Shopify/sarama/pull/734)).
- - Handle consuming compressed relative offsets with Kafka 0.10
-   ([#735](https://github.com/Shopify/sarama/pull/735)).
-
-#### Version 1.10.0 (2016-08-02)
-
-_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
-Kafka you are running against (via the `config.Version` value) in order to use
-features that may not be compatible with old Kafka versions. If you don't
-specify this value it will default to 0.8.2 (the minimum supported), and trying
-to use more recent features (like the offset manager) will fail with an error.
-
-_Also:_ The offset-manager's behaviour has been changed to match the upstream
-java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
-[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
-offset-manager, please ensure that you are committing one *greater* than the
-last consumed message offset or else you may end up consuming duplicate
-messages.
-
-New Features:
- - Support for Kafka 0.10
-   ([#672](https://github.com/Shopify/sarama/pull/672),
-    [#678](https://github.com/Shopify/sarama/pull/678),
-    [#681](https://github.com/Shopify/sarama/pull/681), and others).
- - Support for configuring the target Kafka version
-   ([#676](https://github.com/Shopify/sarama/pull/676)).
- - Batch producing support in the SyncProducer
-   ([#677](https://github.com/Shopify/sarama/pull/677)).
- - Extend producer mock to allow setting expectations on message contents
-   ([#667](https://github.com/Shopify/sarama/pull/667)).
-
-Improvements:
- - Support `nil` compressed messages for deleting in compacted topics
-   ([#634](https://github.com/Shopify/sarama/pull/634)).
- - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
-   misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- - Re-use consumer expiry timers, removing one allocation per consumed message
-   ([#707](https://github.com/Shopify/sarama/pull/707)).
-
-Bug Fixes:
- - Actually default the client ID to "sarama" like we say we do
-   ([#664](https://github.com/Shopify/sarama/pull/664)).
- - Fix a rare issue where `Client.Leader` could return the wrong error
-   ([#685](https://github.com/Shopify/sarama/pull/685)).
- - Fix a possible tight loop in the consumer
-   ([#693](https://github.com/Shopify/sarama/pull/693)).
- - Match upstream's offset-tracking behaviour
-   ([#705](https://github.com/Shopify/sarama/pull/705)).
- - Report UnknownTopicOrPartition errors from the offset manager
-   ([#706](https://github.com/Shopify/sarama/pull/706)).
- - Fix possible negative partition value from the HashPartitioner
-   ([#709](https://github.com/Shopify/sarama/pull/709)).
-
-#### Version 1.9.0 (2016-05-16)
-
-New Features:
- - Add support for custom offset manager retention durations
-   ([#602](https://github.com/Shopify/sarama/pull/602)).
- - Publish low-level mocks to enable testing of third-party producer/consumer
-   implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- - Declare support for Golang 1.6
-   ([#611](https://github.com/Shopify/sarama/pull/611)).
- - Support for SASL plain-text auth
-   ([#648](https://github.com/Shopify/sarama/pull/648)).
-
-Improvements:
- - Simplified broker locking scheme slightly
-   ([#604](https://github.com/Shopify/sarama/pull/604)).
- - Documentation cleanup
-   ([#605](https://github.com/Shopify/sarama/pull/605),
-    [#621](https://github.com/Shopify/sarama/pull/621),
-    [#654](https://github.com/Shopify/sarama/pull/654)).
-
-Bug Fixes:
- - Fix race condition shutting down the OffsetManager
-   ([#658](https://github.com/Shopify/sarama/pull/658)).
-
-#### Version 1.8.0 (2016-02-01)
-
-New Features:
- - Full support for Kafka 0.9:
-   - All protocol messages and fields
-   ([#586](https://github.com/Shopify/sarama/pull/586),
-   [#588](https://github.com/Shopify/sarama/pull/588),
-   [#590](https://github.com/Shopify/sarama/pull/590)).
-   - Verified that TLS support works
-   ([#581](https://github.com/Shopify/sarama/pull/581)).
-   - Fixed the OffsetManager compatibility
-   ([#585](https://github.com/Shopify/sarama/pull/585)).
-
-Improvements:
- - Optimize for fewer system calls when reading from the network
-   ([#584](https://github.com/Shopify/sarama/pull/584)).
- - Automatically retry `InvalidMessage` errors to match upstream behaviour
-   ([#589](https://github.com/Shopify/sarama/pull/589)).
-
-#### Version 1.7.0 (2015-12-11)
-
-New Features:
- - Preliminary support for Kafka 0.9
-   ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
-   caveats:
-   - Protocol-layer support is mostly in place
-     ([#577](https://github.com/Shopify/sarama/pull/577)); however, Kafka 0.9
-     renamed some messages and fields, which we did not rename in order to
-     preserve API compatibility.
-   - The producer and consumer work against 0.9, but the offset manager does
-     not ([#573](https://github.com/Shopify/sarama/pull/573)).
-   - TLS support may or may not work
-     ([#581](https://github.com/Shopify/sarama/pull/581)).
-
-Improvements:
- - Don't wait for request timeouts on dead brokers, greatly speeding recovery
-   when the TCP connection is left hanging
-   ([#548](https://github.com/Shopify/sarama/pull/548)).
- - Refactored part of the producer. The new version provides a much more elegant
-   solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
-   slightly more efficient, and much more precise in calculating batch sizes
-   when compression is used
-   ([#549](https://github.com/Shopify/sarama/pull/549),
-   [#550](https://github.com/Shopify/sarama/pull/550),
-   [#551](https://github.com/Shopify/sarama/pull/551)).
-
-Bug Fixes:
- - Fix race condition in consumer test mock
-   ([#553](https://github.com/Shopify/sarama/pull/553)).
-
-#### Version 1.6.1 (2015-09-25)
-
-Bug Fixes:
- - Fix panic that could occur if a user-supplied message value failed to encode
-   ([#449](https://github.com/Shopify/sarama/pull/449)).
-
-#### Version 1.6.0 (2015-09-04)
-
-New Features:
- - Implementation of a consumer offset manager using the APIs introduced in
-   Kafka 0.8.2. The API is designed mainly for integration into a future
-   high-level consumer, not for direct use, although it is *possible* to use it
-   directly.
-   ([#461](https://github.com/Shopify/sarama/pull/461)).
-
-Improvements:
- - CRC32 calculation is much faster on machines with SSE4.2 instructions,
-   removing a major hotspot from most profiles
-   ([#255](https://github.com/Shopify/sarama/pull/255)).
-
-Bug Fixes:
- - Make protocol decoding more robust against some malformed packets generated
-   by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
-   [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
-   ([#528](https://github.com/Shopify/sarama/pull/528)).
- - Fix a potential race condition panic in the consumer on shutdown
-   ([#529](https://github.com/Shopify/sarama/pull/529)).
-
-#### Version 1.5.0 (2015-08-17)
-
-New Features:
- - TLS-encrypted network connections are now supported. This feature is subject
-   to change when Kafka releases built-in TLS support, but for now this is
-   enough to work with TLS-terminating proxies; see the sketch below for how to
-   enable it ([#154](https://github.com/Shopify/sarama/pull/154)).
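-
-A hedged sketch of enabling this; the field names follow the current `Config`
-layout (which may differ from the 1.5.0 API) and the `tls.Config` contents are
-placeholders:
-
-```go
-package main
-
-import (
-	"crypto/tls"
-
-	"github.com/Shopify/sarama"
-)
-
-func main() {
-	config := sarama.NewConfig()
-	config.Net.TLS.Enable = true
-	config.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}
-	_ = config // pass to sarama.NewClient / NewSyncProducer as usual
-}
-```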
-
-Improvements:
- - The consumer will not block if a single partition is not drained by the user;
-   all other partitions will continue to consume normally
-   ([#485](https://github.com/Shopify/sarama/pull/485)).
- - Formatting of error strings has been much improved
-   ([#495](https://github.com/Shopify/sarama/pull/495)).
- - Internal refactoring of the producer for code cleanliness and to enable
-   future work ([#300](https://github.com/Shopify/sarama/pull/300)).
-
-Bug Fixes:
- - Fix a potential deadlock in the consumer on shutdown
-   ([#475](https://github.com/Shopify/sarama/pull/475)).
-
-#### Version 1.4.3 (2015-07-21)
-
-Bug Fixes:
- - Don't include the partitioner in the producer's "fetch partitions"
-   circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- - Don't retry messages until the broker is closed when abandoning a broker in
-   the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- - Update the import path for snappy-go; it has moved again and the API has
-   changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
-
-#### Version 1.4.2 (2015-05-27)
-
-Bug Fixes:
- - Update the import path for snappy-go; it has moved from google code to github
-   ([#456](https://github.com/Shopify/sarama/pull/456)).
-
-#### Version 1.4.1 (2015-05-25)
-
-Improvements:
- - Optimizations when decoding snappy messages, thanks to John Potocny
-   ([#446](https://github.com/Shopify/sarama/pull/446)).
-
-Bug Fixes:
- - Fix hypothetical race conditions on producer shutdown
-   ([#450](https://github.com/Shopify/sarama/pull/450),
-   [#451](https://github.com/Shopify/sarama/pull/451)).
-
-#### Version 1.4.0 (2015-05-01)
-
-New Features:
- - The consumer now implements `Topics()` and `Partitions()` methods to enable
-   users to dynamically choose what topics/partitions to consume without
-   instantiating a full client
-   ([#431](https://github.com/Shopify/sarama/pull/431)).
- - The partition-consumer now exposes the high water mark offset value returned
-   by the broker via the `HighWaterMarkOffset()` method, as sketched after this
-   list ([#339](https://github.com/Shopify/sarama/pull/339)).
- - Added a `kafka-console-consumer` tool capable of handling multiple
-   partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
-   ([#439](https://github.com/Shopify/sarama/pull/439),
-   [#442](https://github.com/Shopify/sarama/pull/442)).
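-
-As an illustration of what the new method enables, `consumerLag` below is a
-hypothetical helper, not part of Sarama:
-
-```go
-// consumerLag reports how far the consumer is behind the log head: the high
-// water mark is the offset of the *next* message the broker will store, so
-// after processing msg the lag is hwm - (msg.Offset + 1).
-func consumerLag(pc sarama.PartitionConsumer, msg *sarama.ConsumerMessage) int64 {
-	return pc.HighWaterMarkOffset() - msg.Offset - 1
-}
-```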
-
-Improvements:
- - The producer's logging during retry scenarios is more consistent, more
-   useful, and slightly less verbose
-   ([#429](https://github.com/Shopify/sarama/pull/429)).
- - The client now shuffles its initial list of seed brokers in order to prevent
-   thundering herd on the first broker in the list
-   ([#441](https://github.com/Shopify/sarama/pull/441)).
-
-Bug Fixes:
- - The producer now correctly manages its state if retries occur when it is
-   shutting down, fixing several instances of confusing behaviour and at least
-   one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- - The consumer now handles messages for different partitions asynchronously,
-   making it much more resilient to specific user code ordering
-   ([#325](https://github.com/Shopify/sarama/pull/325)).
-
-#### Version 1.3.0 (2015-04-16)
-
-New Features:
- - The client now tracks consumer group coordinators using
-   ConsumerMetadataRequests similar to how it tracks partition leadership using
-   regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
-   This adds two methods to the client API (usage sketched below):
-   - `Coordinator(consumerGroup string) (*Broker, error)`
-   - `RefreshCoordinator(consumerGroup string) error`
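-
-   A minimal sketch, where `findCoordinator` is a hypothetical helper and
-   `example-group` is a placeholder:
-
-   ```go
-   func findCoordinator(client sarama.Client) (*sarama.Broker, error) {
-       // The client caches coordinator metadata; call RefreshCoordinator to
-       // force a re-fetch after errors such as a coordinator move.
-       return client.Coordinator("example-group")
-   }
-   ```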
-
-Improvements:
- - ConsumerMetadataResponses now automatically create a Broker object out of the
-   ID/address/port combination for the Coordinator; accessing the fields
-   individually has been deprecated
-   ([#413](https://github.com/Shopify/sarama/pull/413)).
- - Much improved handling of `OffsetOutOfRange` errors in the consumer.
-   Consumers will fail to start if the provided offset is out of range
-   ([#418](https://github.com/Shopify/sarama/pull/418))
-   and they will automatically shut down if the offset falls out of range
-   ([#424](https://github.com/Shopify/sarama/pull/424)).
- - Small performance improvement in encoding and decoding protocol messages
-   ([#427](https://github.com/Shopify/sarama/pull/427)).
-
-Bug Fixes:
- - Fix a rare race condition in the client's background metadata refresher if
-   it happens to be activated while the client is being closed
-   ([#422](https://github.com/Shopify/sarama/pull/422)).
-
-#### Version 1.2.0 (2015-04-07)
-
-Improvements:
- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
-   ([#389](https://github.com/Shopify/sarama/pull/389)).
- - The producer is now somewhat more memory-efficient during and after retrying
-   messages due to an improved queue implementation
-   ([#396](https://github.com/Shopify/sarama/pull/396)).
- - The consumer produces much more useful logging output when leadership
-   changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- - The client's `GetOffset` method will now automatically refresh metadata and
-   retry once in the event of stale information or similar
-   ([#394](https://github.com/Shopify/sarama/pull/394)).
- - Broker connections now have support for using TCP keepalives
-   ([#407](https://github.com/Shopify/sarama/issues/407)).
-
-Bug Fixes:
- - The OffsetCommitRequest message now correctly implements all three possible
-   API versions ([#390](https://github.com/Shopify/sarama/pull/390),
-   [#400](https://github.com/Shopify/sarama/pull/400)).
-
-#### Version 1.1.0 (2015-03-20)
-
-Improvements:
- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
-   broken topics don't choke throughput
-   ([#373](https://github.com/Shopify/sarama/pull/373)).
-
-Bug Fixes:
- - Fix the producer's internal reference counting in certain unusual scenarios
-   ([#367](https://github.com/Shopify/sarama/pull/367)).
- - Fix the consumer's internal reference counting in certain unusual scenarios
-   ([#369](https://github.com/Shopify/sarama/pull/369)).
- - Fix a condition where the producer's internal control messages could have
-   gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- - Fix an issue where invalid partition lists would be cached when asking for
-   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
-
-
-#### Version 1.0.0 (2015-03-17)
-
-Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
-
-- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking (see the sketch after this list).
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
-- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
-- All the configuration values have been unified in the `Config` struct.
-- Much improved test suite.
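-
-A sketch of the blocking flavour (`example-topic` and the broker address are
-placeholders); the `AsyncProducer` is the same idea driven through its
-`Input()`, `Successes()` and `Errors()` channels:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/Shopify/sarama"
-)
-
-func main() {
-	config := sarama.NewConfig()
-	config.Producer.Return.Successes = true // the SyncProducer requires this
-	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
-	if err != nil {
-		panic(err)
-	}
-	defer producer.Close()
-
-	// SendMessage blocks until the broker acknowledges the message.
-	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
-		Topic: "example-topic",
-		Value: sarama.StringEncoder("hello"),
-	})
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("stored at partition %d, offset %d\n", partition, offset)
-}
-```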
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
deleted file mode 100644
index d2bf435..0000000
--- a/vendor/github.com/Shopify/sarama/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013 Shopify
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
deleted file mode 100644
index 4714d77..0000000
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-default: fmt get update test lint
-
-GO       := go
-GOBUILD  := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
-GOTEST   := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
-
-FILES    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
-TESTS    := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
-
-get:
-	$(GO) get ./...
-	$(GO) mod verify
-	$(GO) mod tidy
-
-update:
-	$(GO) get -u -v ./...
-	$(GO) mod verify
-	$(GO) mod tidy
-
-fmt:
-	gofmt -s -l -w $(FILES) $(TESTS)
-
-lint:
-	GOFLAGS="-tags=functional" golangci-lint run
-
-test:
-	$(GOTEST) ./...
-
-.PHONY: test_functional
-test_functional:
-	$(GOTEST) -tags=functional ./...
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
deleted file mode 100644
index f2beb73..0000000
--- a/vendor/github.com/Shopify/sarama/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# sarama
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/Shopify/sarama.svg)](https://pkg.go.dev/github.com/Shopify/sarama)
-[![Build Status](https://travis-ci.org/Shopify/sarama.svg?branch=master)](https://travis-ci.org/Shopify/sarama)
-[![Coverage](https://codecov.io/gh/Shopify/sarama/branch/master/graph/badge.svg)](https://codecov.io/gh/Shopify/sarama)
-
-Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
-
-## Getting started
-
-- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama).
-- Mocks for testing are available in the [mocks](./mocks) subpackage.
-- The [examples](./examples) directory contains more elaborate example applications.
-- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
-
-You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
-
-## Compatibility and API stability
-
-Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
-the two latest stable releases of Kafka and Go, and we provide a two month
-grace period for older releases. This means we currently officially support
-Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are
-still likely to work.
-
-Sarama follows semantic versioning and provides API stability via the gopkg.in service.
-You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
-A changelog is available [here](CHANGELOG.md).
-
-## Contributing
-
-- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
-- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
-- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
-- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-- If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
deleted file mode 100644
index 449102f..0000000
--- a/vendor/github.com/Shopify/sarama/acl_create_request.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-// CreateAclsRequest is an acl creation request
-type CreateAclsRequest struct {
-	Version      int16
-	AclCreations []*AclCreation
-}
-
-func (c *CreateAclsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
-		return err
-	}
-
-	for _, aclCreation := range c.AclCreations {
-		if err := aclCreation.encode(pe, c.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
-	c.Version = version
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.AclCreations = make([]*AclCreation, n)
-
-	for i := 0; i < n; i++ {
-		c.AclCreations[i] = new(AclCreation)
-		if err := c.AclCreations[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsRequest) key() int16 {
-	return 30
-}
-
-func (c *CreateAclsRequest) version() int16 {
-	return c.Version
-}
-
-func (c *CreateAclsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
-	switch c.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
-
-// AclCreation is a wrapper around Resource and Acl type
-type AclCreation struct {
-	Resource
-	Acl
-}
-
-func (a *AclCreation) encode(pe packetEncoder, version int16) error {
-	if err := a.Resource.encode(pe, version); err != nil {
-		return err
-	}
-	if err := a.Acl.encode(pe); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
-	if err := a.Resource.decode(pd, version); err != nil {
-		return err
-	}
-	if err := a.Acl.decode(pd, version); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
deleted file mode 100644
index 21d6c34..0000000
--- a/vendor/github.com/Shopify/sarama/acl_create_response.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import "time"
-
-// CreateAclsResponse is an acl creation response type
-type CreateAclsResponse struct {
-	ThrottleTime         time.Duration
-	AclCreationResponses []*AclCreationResponse
-}
-
-func (c *CreateAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
-		return err
-	}
-
-	for _, aclCreationResponse := range c.AclCreationResponses {
-		if err := aclCreationResponse.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.AclCreationResponses = make([]*AclCreationResponse, n)
-	for i := 0; i < n; i++ {
-		c.AclCreationResponses[i] = new(AclCreationResponse)
-		if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateAclsResponse) key() int16 {
-	return 30
-}
-
-func (c *CreateAclsResponse) version() int16 {
-	return 0
-}
-
-func (c *CreateAclsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-// AclCreationResponse is an acl creation response type
-type AclCreationResponse struct {
-	Err    KError
-	ErrMsg *string
-}
-
-func (a *AclCreationResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(a.Err))
-
-	if err := pe.putNullableString(a.ErrMsg); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	a.Err = KError(kerr)
-
-	if a.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
deleted file mode 100644
index 5e5c03b..0000000
--- a/vendor/github.com/Shopify/sarama/acl_delete_request.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package sarama
-
-// DeleteAclsRequest is a delete acl request
-type DeleteAclsRequest struct {
-	Version int
-	Filters []*AclFilter
-}
-
-func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(d.Filters)); err != nil {
-		return err
-	}
-
-	for _, filter := range d.Filters {
-		filter.Version = d.Version
-		if err := filter.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
-	d.Version = int(version)
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	d.Filters = make([]*AclFilter, n)
-	for i := 0; i < n; i++ {
-		d.Filters[i] = new(AclFilter)
-		d.Filters[i].Version = int(version)
-		if err := d.Filters[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsRequest) key() int16 {
-	return 31
-}
-
-func (d *DeleteAclsRequest) version() int16 {
-	return int16(d.Version)
-}
-
-func (d *DeleteAclsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
deleted file mode 100644
index cd33749..0000000
--- a/vendor/github.com/Shopify/sarama/acl_delete_response.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package sarama
-
-import "time"
-
-// DeleteAclsResponse is a delete acl response
-type DeleteAclsResponse struct {
-	Version         int16
-	ThrottleTime    time.Duration
-	FilterResponses []*FilterResponse
-}
-
-func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
-		return err
-	}
-
-	for _, filterResponse := range d.FilterResponses {
-		if err := filterResponse.encode(pe, d.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	d.FilterResponses = make([]*FilterResponse, n)
-
-	for i := 0; i < n; i++ {
-		d.FilterResponses[i] = new(FilterResponse)
-		if err := d.FilterResponses[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteAclsResponse) key() int16 {
-	return 31
-}
-
-func (d *DeleteAclsResponse) version() int16 {
-	return d.Version
-}
-
-func (d *DeleteAclsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-// FilterResponse is a filter response type
-type FilterResponse struct {
-	Err          KError
-	ErrMsg       *string
-	MatchingAcls []*MatchingAcl
-}
-
-func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
-	pe.putInt16(int16(f.Err))
-	if err := pe.putNullableString(f.ErrMsg); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
-		return err
-	}
-	for _, matchingAcl := range f.MatchingAcls {
-		if err := matchingAcl.encode(pe, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	f.Err = KError(kerr)
-
-	if f.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	f.MatchingAcls = make([]*MatchingAcl, n)
-	for i := 0; i < n; i++ {
-		f.MatchingAcls[i] = new(MatchingAcl)
-		if err := f.MatchingAcls[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// MatchingAcl is a matching acl type
-type MatchingAcl struct {
-	Err    KError
-	ErrMsg *string
-	Resource
-	Acl
-}
-
-func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
-	pe.putInt16(int16(m.Err))
-	if err := pe.putNullableString(m.ErrMsg); err != nil {
-		return err
-	}
-
-	if err := m.Resource.encode(pe, version); err != nil {
-		return err
-	}
-
-	if err := m.Acl.encode(pe); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	m.Err = KError(kerr)
-
-	if m.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	if err := m.Resource.decode(pd, version); err != nil {
-		return err
-	}
-
-	if err := m.Acl.decode(pd, version); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
deleted file mode 100644
index e0fe902..0000000
--- a/vendor/github.com/Shopify/sarama/acl_describe_request.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package sarama
-
-// DescribeAclsRequest is a describe acl request type
-type DescribeAclsRequest struct {
-	Version int
-	AclFilter
-}
-
-func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
-	d.AclFilter.Version = d.Version
-	return d.AclFilter.encode(pe)
-}
-
-func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
-	d.Version = int(version)
-	d.AclFilter.Version = int(version)
-	return d.AclFilter.decode(pd, version)
-}
-
-func (d *DescribeAclsRequest) key() int16 {
-	return 29
-}
-
-func (d *DescribeAclsRequest) version() int16 {
-	return int16(d.Version)
-}
-
-func (d *DescribeAclsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
deleted file mode 100644
index 3255fd4..0000000
--- a/vendor/github.com/Shopify/sarama/acl_describe_response.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package sarama
-
-import "time"
-
-// DescribeAclsResponse is a describe acl response type
-type DescribeAclsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Err          KError
-	ErrMsg       *string
-	ResourceAcls []*ResourceAcls
-}
-
-func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(d.Err))
-
-	if err := pe.putNullableString(d.ErrMsg); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
-		return err
-	}
-
-	for _, resourceAcl := range d.ResourceAcls {
-		if err := resourceAcl.encode(pe, d.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	d.Err = KError(kerr)
-
-	errmsg, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	if errmsg != "" {
-		d.ErrMsg = &errmsg
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	d.ResourceAcls = make([]*ResourceAcls, n)
-
-	for i := 0; i < n; i++ {
-		d.ResourceAcls[i] = new(ResourceAcls)
-		if err := d.ResourceAcls[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *DescribeAclsResponse) key() int16 {
-	return 29
-}
-
-func (d *DescribeAclsResponse) version() int16 {
-	return d.Version
-}
-
-func (d *DescribeAclsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
deleted file mode 100644
index c3ba8dd..0000000
--- a/vendor/github.com/Shopify/sarama/acl_types.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"strings"
-)
-
-type (
-	AclOperation int
-
-	AclPermissionType int
-
-	AclResourceType int
-
-	AclResourcePatternType int
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
-const (
-	AclOperationUnknown AclOperation = iota
-	AclOperationAny
-	AclOperationAll
-	AclOperationRead
-	AclOperationWrite
-	AclOperationCreate
-	AclOperationDelete
-	AclOperationAlter
-	AclOperationDescribe
-	AclOperationClusterAction
-	AclOperationDescribeConfigs
-	AclOperationAlterConfigs
-	AclOperationIdempotentWrite
-)
-
-func (a *AclOperation) String() string {
-	mapping := map[AclOperation]string{
-		AclOperationUnknown:         "Unknown",
-		AclOperationAny:             "Any",
-		AclOperationAll:             "All",
-		AclOperationRead:            "Read",
-		AclOperationWrite:           "Write",
-		AclOperationCreate:          "Create",
-		AclOperationDelete:          "Delete",
-		AclOperationAlter:           "Alter",
-		AclOperationDescribe:        "Describe",
-		AclOperationClusterAction:   "ClusterAction",
-		AclOperationDescribeConfigs: "DescribeConfigs",
-		AclOperationAlterConfigs:    "AlterConfigs",
-		AclOperationIdempotentWrite: "IdempotentWrite",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclOperationUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclOperation (name without prefix)
-func (a *AclOperation) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
-func (a *AclOperation) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclOperation{
-		"unknown":         AclOperationUnknown,
-		"any":             AclOperationAny,
-		"all":             AclOperationAll,
-		"read":            AclOperationRead,
-		"write":           AclOperationWrite,
-		"create":          AclOperationCreate,
-		"delete":          AclOperationDelete,
-		"alter":           AclOperationAlter,
-		"describe":        AclOperationDescribe,
-		"clusteraction":   AclOperationClusterAction,
-		"describeconfigs": AclOperationDescribeConfigs,
-		"alterconfigs":    AclOperationAlterConfigs,
-		"idempotentwrite": AclOperationIdempotentWrite,
-	}
-	ao, ok := mapping[normalized]
-	if !ok {
-		*a = AclOperationUnknown
-		return fmt.Errorf("no acl operation with name %s", normalized)
-	}
-	*a = ao
-	return nil
-}
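-
-// Illustrative sketch (not part of the upstream file): AclOperation
-// round-trips through its text form, which is how it appears when
-// marshalled to JSON:
-//
-//	var op AclOperation
-//	_ = op.UnmarshalText([]byte("read")) // matching is case-insensitive
-//	b, _ := op.MarshalText()             // b == []byte("Read")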
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
-const (
-	AclPermissionUnknown AclPermissionType = iota
-	AclPermissionAny
-	AclPermissionDeny
-	AclPermissionAllow
-)
-
-func (a *AclPermissionType) String() string {
-	mapping := map[AclPermissionType]string{
-		AclPermissionUnknown: "Unknown",
-		AclPermissionAny:     "Any",
-		AclPermissionDeny:    "Deny",
-		AclPermissionAllow:   "Allow",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclPermissionUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclPermissionType (name without prefix)
-func (a *AclPermissionType) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
-func (a *AclPermissionType) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclPermissionType{
-		"unknown": AclPermissionUnknown,
-		"any":     AclPermissionAny,
-		"deny":    AclPermissionDeny,
-		"allow":   AclPermissionAllow,
-	}
-
-	apt, ok := mapping[normalized]
-	if !ok {
-		*a = AclPermissionUnknown
-		return fmt.Errorf("no acl permission with name %s", normalized)
-	}
-	*a = apt
-	return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
-const (
-	AclResourceUnknown AclResourceType = iota
-	AclResourceAny
-	AclResourceTopic
-	AclResourceGroup
-	AclResourceCluster
-	AclResourceTransactionalID
-	AclResourceDelegationToken
-)
-
-func (a *AclResourceType) String() string {
-	mapping := map[AclResourceType]string{
-		AclResourceUnknown:         "Unknown",
-		AclResourceAny:             "Any",
-		AclResourceTopic:           "Topic",
-		AclResourceGroup:           "Group",
-		AclResourceCluster:         "Cluster",
-		AclResourceTransactionalID: "TransactionalID",
-		AclResourceDelegationToken: "DelegationToken",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclResourceUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclResourceType (name without prefix)
-func (a *AclResourceType) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
-func (a *AclResourceType) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclResourceType{
-		"unknown":         AclResourceUnknown,
-		"any":             AclResourceAny,
-		"topic":           AclResourceTopic,
-		"group":           AclResourceGroup,
-		"cluster":         AclResourceCluster,
-		"transactionalid": AclResourceTransactionalID,
-		"delegationtoken": AclResourceDelegationToken,
-	}
-
-	art, ok := mapping[normalized]
-	if !ok {
-		*a = AclResourceUnknown
-		return fmt.Errorf("no acl resource with name %s", normalized)
-	}
-	*a = art
-	return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
-const (
-	AclPatternUnknown AclResourcePatternType = iota
-	AclPatternAny
-	AclPatternMatch
-	AclPatternLiteral
-	AclPatternPrefixed
-)
-
-func (a *AclResourcePatternType) String() string {
-	mapping := map[AclResourcePatternType]string{
-		AclPatternUnknown:  "Unknown",
-		AclPatternAny:      "Any",
-		AclPatternMatch:    "Match",
-		AclPatternLiteral:  "Literal",
-		AclPatternPrefixed: "Prefixed",
-	}
-	s, ok := mapping[*a]
-	if !ok {
-		s = mapping[AclPatternUnknown]
-	}
-	return s
-}
-
-// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
-func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
-	return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
-func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
-	normalized := strings.ToLower(string(text))
-	mapping := map[string]AclResourcePatternType{
-		"unknown":  AclPatternUnknown,
-		"any":      AclPatternAny,
-		"match":    AclPatternMatch,
-		"literal":  AclPatternLiteral,
-		"prefixed": AclPatternPrefixed,
-	}
-
-	arpt, ok := mapping[normalized]
-	if !ok {
-		*a = AclPatternUnknown
-		return fmt.Errorf("no acl resource pattern with name %s", normalized)
-	}
-	*a = arpt
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
deleted file mode 100644
index a96af93..0000000
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package sarama
-
-// AddOffsetsToTxnRequest is the request to add offsets to a transaction
-type AddOffsetsToTxnRequest struct {
-	TransactionalID string
-	ProducerID      int64
-	ProducerEpoch   int16
-	GroupID         string
-}
-
-func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(a.TransactionalID); err != nil {
-		return err
-	}
-
-	pe.putInt64(a.ProducerID)
-
-	pe.putInt16(a.ProducerEpoch)
-
-	if err := pe.putString(a.GroupID); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
-	if a.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if a.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-	if a.GroupID, err = pd.getString(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *AddOffsetsToTxnRequest) key() int16 {
-	return 25
-}
-
-func (a *AddOffsetsToTxnRequest) version() int16 {
-	return 0
-}
-
-func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
deleted file mode 100644
index bb61973..0000000
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-// AddOffsetsToTxnResponse is a response type for adding offsets to txns
-type AddOffsetsToTxnResponse struct {
-	ThrottleTime time.Duration
-	Err          KError
-}
-
-func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(a.Err))
-	return nil
-}
-
-func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	a.Err = KError(kerr)
-
-	return nil
-}
-
-func (a *AddOffsetsToTxnResponse) key() int16 {
-	return 25
-}
-
-func (a *AddOffsetsToTxnResponse) version() int16 {
-	return 0
-}
-
-func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
deleted file mode 100644
index 57ecf64..0000000
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sarama
-
-// AddPartitionsToTxnRequest is an add partitions to txn request
-type AddPartitionsToTxnRequest struct {
-	TransactionalID string
-	ProducerID      int64
-	ProducerEpoch   int16
-	TopicPartitions map[string][]int32
-}
-
-func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(a.TransactionalID); err != nil {
-		return err
-	}
-	pe.putInt64(a.ProducerID)
-	pe.putInt16(a.ProducerEpoch)
-
-	if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
-		return err
-	}
-	for topic, partitions := range a.TopicPartitions {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putInt32Array(partitions); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
-	if a.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if a.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.TopicPartitions = make(map[string][]int32)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		partitions, err := pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-
-		a.TopicPartitions[topic] = partitions
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnRequest) key() int16 {
-	return 24
-}
-
-func (a *AddPartitionsToTxnRequest) version() int16 {
-	return 0
-}
-
-func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
deleted file mode 100644
index 0989565..0000000
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-// AddPartitionsToTxnResponse is the response to adding partitions to a transaction, mapping topics to per-partition errors
-type AddPartitionsToTxnResponse struct {
-	ThrottleTime time.Duration
-	Errors       map[string][]*PartitionError
-}
-
-func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-	if err := pe.putArrayLength(len(a.Errors)); err != nil {
-		return err
-	}
-
-	for topic, e := range a.Errors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(e)); err != nil {
-			return err
-		}
-		for _, partitionError := range e {
-			if err := partitionError.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Errors = make(map[string][]*PartitionError)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		m, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		a.Errors[topic] = make([]*PartitionError, m)
-
-		for j := 0; j < m; j++ {
-			a.Errors[topic][j] = new(PartitionError)
-			if err := a.Errors[topic][j].decode(pd, version); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (a *AddPartitionsToTxnResponse) key() int16 {
-	return 24
-}
-
-func (a *AddPartitionsToTxnResponse) version() int16 {
-	return 0
-}
-
-func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-// PartitionError is a partition error type
-type PartitionError struct {
-	Partition int32
-	Err       KError
-}
-
-func (p *PartitionError) encode(pe packetEncoder) error {
-	pe.putInt32(p.Partition)
-	pe.putInt16(int16(p.Err))
-	return nil
-}
-
-func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
-	if p.Partition, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	p.Err = KError(kerr)
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
deleted file mode 100644
index abe18b1..0000000
--- a/vendor/github.com/Shopify/sarama/admin.go
+++ /dev/null
@@ -1,1005 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-	"math/rand"
-	"strconv"
-	"sync"
-	"time"
-)
-
-// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
-// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
-// Methods with stricter requirements will specify the minimum broker version required.
-// You MUST call Close() on a client to avoid leaks
-type ClusterAdmin interface {
-	// Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
-	// It may take several seconds after CreateTopic returns success for all the brokers
-	// to become aware that the topic has been created. During this time, listTopics
-	// may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
-	CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
-
-	// List the topics available in the cluster with the default options.
-	ListTopics() (map[string]TopicDetail, error)
-
-	// Describe some topics in the cluster.
-	DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
-
-	// Delete a topic. It may take several seconds after DeleteTopic returns success
-	// for all the brokers to become aware that the topic is gone.
-	// During this time, listTopics may continue to return information about the deleted topic.
-	// If delete.topic.enable is false on the brokers, deleteTopic will mark
-	// the topic for deletion, but not actually delete it.
-	// This operation is supported by brokers with version 0.10.1.0 or higher.
-	DeleteTopic(topic string) error
-
-	// Increase the number of partitions of the topics according to the corresponding values.
-	// If partitions are increased for a topic that has a key, the partition logic or ordering of
-	// the messages will be affected. It may take several seconds after this method returns
-	// success for all the brokers to become aware that the partitions have been created.
-	// During this time, ClusterAdmin#describeTopics may not return information about the
-	// new partitions. This operation is supported by brokers with version 1.0.0 or higher.
-	CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
-
-	// Alter the replica assignment for partitions.
-	// This operation is supported by brokers with version 2.4.0.0 or higher.
-	AlterPartitionReassignments(topic string, assignment [][]int32) error
-
-	// Provides info on ongoing partition replica reassignments.
-	// This operation is supported by brokers with version 2.4.0.0 or higher.
-	ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
-
-	// Delete records whose offset is smaller than the given offset of the corresponding partition.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	DeleteRecords(topic string, partitionOffsets map[int32]int64) error
-
-	// Get the configuration for the specified resources.
-	// The returned configuration includes default values, and entries where Default
-	// is true can be used to distinguish them from user-supplied values.
-	// Config entries where ReadOnly is true cannot be updated.
-	// The value of config entries where Sensitive is true is always nil so
-	// sensitive information is not disclosed.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
-
-	// Update the configuration for the specified resources with the default options.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	// Currently topic is the only resource type whose configs can be updated.
-	// Updates are not transactional, so they may succeed for some resources
-	// while failing for others. The configs for a particular resource are updated atomically.
-	AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
-
-	// Creates access control lists (ACLs) which are bound to specific resources.
-	// This operation is not transactional so it may succeed for some ACLs while fail for others.
-	// If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
-	// no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
-	CreateACL(resource Resource, acl Acl) error
-
-	// Lists access control lists (ACLs) according to the supplied filter.
-	// It may take some time for changes made by CreateACL or DeleteACL to be reflected in the output of ListAcls.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	ListAcls(filter AclFilter) ([]ResourceAcls, error)
-
-	// Deletes access control lists (ACLs) according to the supplied filters.
-	// This operation is not transactional so it may succeed for some ACLs while fail for others.
-	// This operation is supported by brokers with version 0.11.0.0 or higher.
-	DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
-
-	// List the consumer groups available in the cluster.
-	ListConsumerGroups() (map[string]string, error)
-
-	// Describe the given consumer groups.
-	DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
-
-	// List the consumer group offsets available in the cluster.
-	ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
-
-	// Delete a consumer group.
-	DeleteConsumerGroup(group string) error
-
-	// Get information about the nodes in the cluster
-	DescribeCluster() (brokers []*Broker, controllerID int32, err error)
-
-	// Get information about all log directories on the given set of brokers
-	DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
-
-	// Get information about SCRAM users
-	DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
-
-	// Delete SCRAM users
-	DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
-
-	// Upsert SCRAM users
-	UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
-
-	// Close shuts down the admin and closes underlying client.
-	Close() error
-}
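-
-// Illustrative sketch (not part of the upstream file): the expected lifecycle
-// is create, use, and always Close; Close also closes the underlying client.
-// The broker address is a placeholder.
-//
-//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, NewConfig())
-//	if err != nil {
-//		panic(err)
-//	}
-//	defer admin.Close()
-//	topics, err := admin.ListTopics()
-//	_ = topics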
-
-type clusterAdmin struct {
-	client Client
-	conf   *Config
-}
-
-// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
-func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
-	client, err := NewClient(addrs, conf)
-	if err != nil {
-		return nil, err
-	}
-	return NewClusterAdminFromClient(client)
-}
-
-// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
-// Note that underlying client will also be closed on admin's Close() call.
-func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
-	// make sure we can retrieve the controller
-	_, err := client.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	ca := &clusterAdmin{
-		client: client,
-		conf:   client.Config(),
-	}
-	return ca, nil
-}
-
-func (ca *clusterAdmin) Close() error {
-	return ca.client.Close()
-}
-
-func (ca *clusterAdmin) Controller() (*Broker, error) {
-	return ca.client.Controller()
-}
-
-func (ca *clusterAdmin) refreshController() (*Broker, error) {
-	return ca.client.RefreshController()
-}
-
-// isErrNoController returns `true` if the given error type unwraps to an
-// `ErrNotController` response from Kafka
-func isErrNoController(err error) bool {
-	switch e := err.(type) {
-	case *TopicError:
-		return e.Err == ErrNotController
-	case *TopicPartitionError:
-		return e.Err == ErrNotController
-	case KError:
-		return e == ErrNotController
-	}
-	return false
-}
-
-// retryOnError will repeatedly call the given (error-returning) func in the
-// case that its response is non-nil and retryable (as determined by the
-// provided retryable func) up to the maximum number of tries permitted by
-// the admin client configuration
-func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
-	var err error
-	for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
-		err = fn()
-		if err == nil || !retryable(err) {
-			return err
-		}
-		Logger.Printf(
-			"admin/request retrying after %dms... (%d attempts remaining)\n",
-			ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
-		time.Sleep(ca.conf.Admin.Retry.Backoff)
-		continue
-	}
-	return err
-}
-
-func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	if detail == nil {
-		return errors.New("you must specify topic details")
-	}
-
-	topicDetails := make(map[string]*TopicDetail)
-	topicDetails[topic] = detail
-
-	request := &CreateTopicsRequest{
-		TopicDetails: topicDetails,
-		ValidateOnly: validateOnly,
-		Timeout:      ca.conf.Admin.Timeout,
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 1
-	}
-	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
-		request.Version = 2
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		rsp, err := b.CreateTopics(request)
-		if err != nil {
-			return err
-		}
-
-		topicErr, ok := rsp.TopicErrors[topic]
-		if !ok {
-			return ErrIncompleteResponse
-		}
-
-		if topicErr.Err != ErrNoError {
-			if topicErr.Err == ErrNotController {
-				_, _ = ca.refreshController()
-			}
-			return topicErr
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
-	controller, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	request := &MetadataRequest{
-		Topics:                 topics,
-		AllowAutoTopicCreation: false,
-	}
-
-	if ca.conf.Version.IsAtLeast(V1_0_0_0) {
-		request.Version = 5
-	} else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 4
-	}
-
-	response, err := controller.GetMetadata(request)
-	if err != nil {
-		return nil, err
-	}
-	return response.Topics, nil
-}
-
-func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
-	controller, err := ca.Controller()
-	if err != nil {
-		return nil, int32(0), err
-	}
-
-	request := &MetadataRequest{
-		Topics: []string{},
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_10_0_0) {
-		request.Version = 1
-	}
-
-	response, err := controller.GetMetadata(request)
-	if err != nil {
-		return nil, int32(0), err
-	}
-
-	return response.Brokers, response.ControllerID, nil
-}
-
-func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
-	brokers := ca.client.Brokers()
-	for _, b := range brokers {
-		if b.ID() == id {
-			return b, nil
-		}
-	}
-	return nil, fmt.Errorf("could not find broker id %d", id)
-}
-
-func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
-	brokers := ca.client.Brokers()
-	if len(brokers) > 0 {
-		index := rand.Intn(len(brokers))
-		return brokers[index], nil
-	}
-	return nil, errors.New("no available broker")
-}
-
-func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
-	// In order to build TopicDetails we need to first get the list of all
-	// topics using a MetadataRequest and then get their configs using a
-	// DescribeConfigsRequest request. To avoid sending many requests to the
-	// broker, we use a single DescribeConfigsRequest.
-
-	// Send the all-topic MetadataRequest
-	b, err := ca.findAnyBroker()
-	if err != nil {
-		return nil, err
-	}
-	_ = b.Open(ca.client.Config())
-
-	metadataReq := &MetadataRequest{}
-	metadataResp, err := b.GetMetadata(metadataReq)
-	if err != nil {
-		return nil, err
-	}
-
-	topicsDetailsMap := make(map[string]TopicDetail)
-
-	var describeConfigsResources []*ConfigResource
-
-	for _, topic := range metadataResp.Topics {
-		topicDetails := TopicDetail{
-			NumPartitions: int32(len(topic.Partitions)),
-		}
-		if len(topic.Partitions) > 0 {
-			topicDetails.ReplicaAssignment = map[int32][]int32{}
-			for _, partition := range topic.Partitions {
-				topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
-			}
-			topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
-		}
-		topicsDetailsMap[topic.Name] = topicDetails
-
-		// we populate the resources we want to describe from the MetadataResponse
-		topicResource := ConfigResource{
-			Type: TopicResource,
-			Name: topic.Name,
-		}
-		describeConfigsResources = append(describeConfigsResources, &topicResource)
-	}
-
-	// Send the DescribeConfigsRequest
-	describeConfigsReq := &DescribeConfigsRequest{
-		Resources: describeConfigsResources,
-	}
-
-	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
-		describeConfigsReq.Version = 1
-	}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		describeConfigsReq.Version = 2
-	}
-
-	describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, resource := range describeConfigsResp.Resources {
-		topicDetails := topicsDetailsMap[resource.Name]
-		topicDetails.ConfigEntries = make(map[string]*string)
-
-		for _, entry := range resource.Configs {
-			// only include non-default non-sensitive config
-			// (don't actually think topic config will ever be sensitive)
-			if entry.Default || entry.Sensitive {
-				continue
-			}
-			topicDetails.ConfigEntries[entry.Name] = &entry.Value
-		}
-
-		topicsDetailsMap[resource.Name] = topicDetails
-	}
-
-	return topicsDetailsMap, nil
-}
-
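ListTopics above builds every TopicDetail from one all-topic MetadataRequest plus a single batched DescribeConfigsRequest. A minimal usage sketch, assuming a broker at localhost:9092 and using the IBM/sarama import path this change migrates to (the address, version, and logging are illustrative):

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V2_0_0_0 // selects DescribeConfigs v2 in the code above

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	topics, err := admin.ListTopics()
	if err != nil {
		log.Fatal(err)
	}
	for name, detail := range topics {
		// ConfigEntries carries only non-default, non-sensitive overrides,
		// per the filtering loop above
		log.Printf("%s: partitions=%d rf=%d overrides=%d",
			name, detail.NumPartitions, detail.ReplicationFactor, len(detail.ConfigEntries))
	}
}
```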
-func (ca *clusterAdmin) DeleteTopic(topic string) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	request := &DeleteTopicsRequest{
-		Topics:  []string{topic},
-		Timeout: ca.conf.Admin.Timeout,
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 1
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		rsp, err := b.DeleteTopics(request)
-		if err != nil {
-			return err
-		}
-
-		topicErr, ok := rsp.TopicErrorCodes[topic]
-		if !ok {
-			return ErrIncompleteResponse
-		}
-
-		if topicErr != ErrNoError {
-			if topicErr == ErrNotController {
-				_, _ = ca.refreshController()
-			}
-			return topicErr
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	topicPartitions := make(map[string]*TopicPartition)
-	topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
-
-	request := &CreatePartitionsRequest{
-		TopicPartitions: topicPartitions,
-		Timeout:         ca.conf.Admin.Timeout,
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		rsp, err := b.CreatePartitions(request)
-		if err != nil {
-			return err
-		}
-
-		topicErr, ok := rsp.TopicPartitionErrors[topic]
-		if !ok {
-			return ErrIncompleteResponse
-		}
-
-		if topicErr.Err != ErrNoError {
-			if topicErr.Err == ErrNotController {
-				_, _ = ca.refreshController()
-			}
-			return topicErr
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-
-	request := &AlterPartitionReassignmentsRequest{
-		TimeoutMs: int32(60000),
-		Version:   int16(0),
-	}
-
-	for i := 0; i < len(assignment); i++ {
-		request.AddBlock(topic, int32(i), assignment[i])
-	}
-
-	return ca.retryOnError(isErrNoController, func() error {
-		b, err := ca.Controller()
-		if err != nil {
-			return err
-		}
-
-		errs := make([]error, 0)
-
-		rsp, err := b.AlterPartitionReassignments(request)
-
-		if err != nil {
-			errs = append(errs, err)
-		} else {
-			if rsp.ErrorCode > 0 {
-				errs = append(errs, errors.New(rsp.ErrorCode.Error()))
-			}
-
-			for topic, topicErrors := range rsp.Errors {
-				for partition, partitionError := range topicErrors {
-					if partitionError.errorCode != ErrNoError {
-						errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
-						errs = append(errs, errors.New(errStr))
-					}
-				}
-			}
-		}
-
-		if len(errs) > 0 {
-			return ErrReassignPartitions{MultiError{&errs}}
-		}
-
-		return nil
-	})
-}
-
-func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
-	if topic == "" {
-		return nil, ErrInvalidTopic
-	}
-
-	request := &ListPartitionReassignmentsRequest{
-		TimeoutMs: int32(60000),
-		Version:   int16(0),
-	}
-
-	request.AddBlock(topic, partitions)
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-	_ = b.Open(ca.client.Config())
-
-	rsp, err := b.ListPartitionReassignments(request)
-	if err == nil && rsp != nil {
-		return rsp.TopicStatus, nil
-	}
-	return nil, err
-}
-
-func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
-	if topic == "" {
-		return ErrInvalidTopic
-	}
-	partitionPerBroker := make(map[*Broker][]int32)
-	for partition := range partitionOffsets {
-		broker, err := ca.client.Leader(topic, partition)
-		if err != nil {
-			return err
-		}
-		partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
-	}
-	errs := make([]error, 0)
-	for broker, partitions := range partitionPerBroker {
-		topics := make(map[string]*DeleteRecordsRequestTopic)
-		recordsToDelete := make(map[int32]int64)
-		for _, p := range partitions {
-			recordsToDelete[p] = partitionOffsets[p]
-		}
-		topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
-		request := &DeleteRecordsRequest{
-			Topics:  topics,
-			Timeout: ca.conf.Admin.Timeout,
-		}
-
-		rsp, err := broker.DeleteRecords(request)
-		if err != nil {
-			errs = append(errs, err)
-		} else {
-			deleteRecordsResponseTopic, ok := rsp.Topics[topic]
-			if !ok {
-				errs = append(errs, ErrIncompleteResponse)
-			} else {
-				for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
-					if deleteRecordsResponsePartition.Err != ErrNoError {
-						errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
-					}
-				}
-			}
-		}
-	}
-	if len(errs) > 0 {
-		return ErrDeleteRecords{MultiError{&errs}}
-	}
-	// TODO: since we are dealing with multiple partitions it would be good to
-	// return a slice of errors, one per partition, instead of a single error
-	return nil
-}
-
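DeleteRecords above groups the requested partitions by their leader broker and issues one DeleteRecordsRequest per leader, folding any failures into a single ErrDeleteRecords. A usage sketch (the topic name and offsets are illustrative; `admin` is a ClusterAdmin as in the earlier sketch):

```go
// truncate partitions 0 and 1 of "events": records before these offsets are deleted
offsets := map[int32]int64{
	0: 1000,
	1: 2500,
}
if err := admin.DeleteRecords("events", offsets); err != nil {
	// one ErrDeleteRecords may wrap several per-partition errors,
	// as the TODO in the implementation above notes
	log.Printf("delete records failed: %v", err)
}
```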
-// dependsOnSpecificNode returns a bool indicating whether the resource
-// request needs to go to a specific broker
-func dependsOnSpecificNode(resource ConfigResource) bool {
-	return (resource.Type == BrokerResource && resource.Name != "") ||
-		resource.Type == BrokerLoggerResource
-}
-
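The rule above routes config requests for a named broker (or broker logger) to that broker itself, with the resource Name parsed as an int32 broker id; every other resource can be served by any broker. An illustrative sketch using DescribeConfig (`admin` as before; the broker id and topic name are examples):

```go
// routed to broker 1 specifically: Name must parse as the broker id
brokerCfg, err := admin.DescribeConfig(sarama.ConfigResource{
	Type: sarama.BrokerResource,
	Name: "1",
})
if err != nil {
	log.Fatal(err)
}
log.Printf("broker 1: %d config entries", len(brokerCfg))

// routed to any available broker
topicCfg, err := admin.DescribeConfig(sarama.ConfigResource{
	Type: sarama.TopicResource,
	Name: "events",
})
if err != nil {
	log.Fatal(err)
}
log.Printf("topic events: %d config entries", len(topicCfg))
```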
-func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
-	var entries []ConfigEntry
-	var resources []*ConfigResource
-	resources = append(resources, &resource)
-
-	request := &DescribeConfigsRequest{
-		Resources: resources,
-	}
-
-	if ca.conf.Version.IsAtLeast(V1_1_0_0) {
-		request.Version = 1
-	}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 2
-	}
-
-	var (
-		b   *Broker
-		err error
-	)
-
-	// DescribeConfig of broker/broker logger must be sent to the broker in question
-	if dependsOnSpecificNode(resource) {
-		var id int64
-		id, err = strconv.ParseInt(resource.Name, 10, 32)
-		if err != nil {
-			return nil, err
-		}
-		b, err = ca.findBroker(int32(id))
-	} else {
-		b, err = ca.findAnyBroker()
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	_ = b.Open(ca.client.Config())
-	rsp, err := b.DescribeConfigs(request)
-	if err != nil {
-		return nil, err
-	}
-
-	for _, rspResource := range rsp.Resources {
-		if rspResource.Name == resource.Name {
-			if rspResource.ErrorMsg != "" {
-				return nil, errors.New(rspResource.ErrorMsg)
-			}
-			if rspResource.ErrorCode != 0 {
-				return nil, KError(rspResource.ErrorCode)
-			}
-			for _, cfgEntry := range rspResource.Configs {
-				entries = append(entries, *cfgEntry)
-			}
-		}
-	}
-	return entries, nil
-}
-
-func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
-	var resources []*AlterConfigsResource
-	resources = append(resources, &AlterConfigsResource{
-		Type:          resourceType,
-		Name:          name,
-		ConfigEntries: entries,
-	})
-
-	request := &AlterConfigsRequest{
-		Resources:    resources,
-		ValidateOnly: validateOnly,
-	}
-
-	var (
-		b   *Broker
-		err error
-	)
-
-	// AlterConfig of broker/broker logger must be sent to the broker in question
-	if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
-		var id int64
-		id, err = strconv.ParseInt(name, 10, 32)
-		if err != nil {
-			return err
-		}
-		b, err = ca.findBroker(int32(id))
-	} else {
-		b, err = ca.findAnyBroker()
-	}
-	if err != nil {
-		return err
-	}
-
-	_ = b.Open(ca.client.Config())
-	rsp, err := b.AlterConfigs(request)
-	if err != nil {
-		return err
-	}
-
-	for _, rspResource := range rsp.Resources {
-		if rspResource.Name == name {
-			if rspResource.ErrorMsg != "" {
-				return errors.New(rspResource.ErrorMsg)
-			}
-			if rspResource.ErrorCode != 0 {
-				return KError(rspResource.ErrorCode)
-			}
-		}
-	}
-	return nil
-}
-
-func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
-	var acls []*AclCreation
-	acls = append(acls, &AclCreation{resource, acl})
-	request := &CreateAclsRequest{AclCreations: acls}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 1
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return err
-	}
-
-	_, err = b.CreateAcls(request)
-	return err
-}
-
-func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
-	request := &DescribeAclsRequest{AclFilter: filter}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 1
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.DescribeAcls(request)
-	if err != nil {
-		return nil, err
-	}
-
-	var lAcls []ResourceAcls
-	for _, rAcl := range rsp.ResourceAcls {
-		lAcls = append(lAcls, *rAcl)
-	}
-	return lAcls, nil
-}
-
-func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
-	var filters []*AclFilter
-	filters = append(filters, &filter)
-	request := &DeleteAclsRequest{Filters: filters}
-
-	if ca.conf.Version.IsAtLeast(V2_0_0_0) {
-		request.Version = 1
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.DeleteAcls(request)
-	if err != nil {
-		return nil, err
-	}
-
-	var mAcls []MatchingAcl
-	for _, fr := range rsp.FilterResponses {
-		for _, mACL := range fr.MatchingAcls {
-			mAcls = append(mAcls, *mACL)
-		}
-	}
-	return mAcls, nil
-}
-
-func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
-	groupsPerBroker := make(map[*Broker][]string)
-
-	for _, group := range groups {
-		controller, err := ca.client.Coordinator(group)
-		if err != nil {
-			return nil, err
-		}
-		groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
-	}
-
-	for broker, brokerGroups := range groupsPerBroker {
-		response, err := broker.DescribeGroups(&DescribeGroupsRequest{
-			Groups: brokerGroups,
-		})
-		if err != nil {
-			return nil, err
-		}
-
-		result = append(result, response.Groups...)
-	}
-	return result, nil
-}
-
-func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
-	allGroups = make(map[string]string)
-
-	// Query brokers in parallel, since we have to query *all* brokers
-	brokers := ca.client.Brokers()
-	groupMaps := make(chan map[string]string, len(brokers))
-	errChan := make(chan error, len(brokers))
-	wg := sync.WaitGroup{}
-
-	for _, b := range brokers {
-		wg.Add(1)
-		go func(b *Broker, conf *Config) {
-			defer wg.Done()
-			_ = b.Open(conf) // Ensure that broker is opened
-
-			response, err := b.ListGroups(&ListGroupsRequest{})
-			if err != nil {
-				errChan <- err
-				return
-			}
-
-			groups := make(map[string]string)
-			for group, typ := range response.Groups {
-				groups[group] = typ
-			}
-
-			groupMaps <- groups
-		}(b, ca.conf)
-	}
-
-	wg.Wait()
-	close(groupMaps)
-	close(errChan)
-
-	for groupMap := range groupMaps {
-		for group, protocolType := range groupMap {
-			allGroups[group] = protocolType
-		}
-	}
-
-	// Intentionally return only the first error for simplicity
-	err = <-errChan
-	return
-}
-
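Because each coordinator only knows its own groups, ListConsumerGroups above fans out to every broker in parallel, merges the per-broker maps, and surfaces at most the first error. Usage sketch (`admin` as before):

```go
groups, err := admin.ListConsumerGroups()
if err != nil {
	log.Fatal(err)
}
for group, protocolType := range groups {
	// protocolType is typically "consumer" for ordinary consumer groups
	log.Printf("group %q (protocol type %q)", group, protocolType)
}
```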
-func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
-	coordinator, err := ca.client.Coordinator(group)
-	if err != nil {
-		return nil, err
-	}
-
-	request := &OffsetFetchRequest{
-		ConsumerGroup: group,
-		partitions:    topicPartitions,
-	}
-
-	if ca.conf.Version.IsAtLeast(V0_10_2_0) {
-		request.Version = 2
-	} else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
-		request.Version = 1
-	}
-
-	return coordinator.FetchOffset(request)
-}
-
-func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
-	coordinator, err := ca.client.Coordinator(group)
-	if err != nil {
-		return err
-	}
-
-	request := &DeleteGroupsRequest{
-		Groups: []string{group},
-	}
-
-	resp, err := coordinator.DeleteGroups(request)
-	if err != nil {
-		return err
-	}
-
-	groupErr, ok := resp.GroupErrorCodes[group]
-	if !ok {
-		return ErrIncompleteResponse
-	}
-
-	if groupErr != ErrNoError {
-		return groupErr
-	}
-
-	return nil
-}
-
-func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
-	allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
-
-	// Query brokers in parallel, since we may have to query multiple brokers
-	logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
-	errChan := make(chan error, len(brokerIds))
-	wg := sync.WaitGroup{}
-
-	for _, b := range brokerIds {
-		broker, err := ca.findBroker(b)
-		if err != nil {
-			Logger.Printf("Unable to find broker with ID = %v\n", b)
-			continue
-		}
-		// only count the goroutine once we know it will run, so a missing
-		// broker cannot leave wg.Wait() hanging
-		wg.Add(1)
-		go func(b *Broker, conf *Config) {
-			defer wg.Done()
-			_ = b.Open(conf) // Ensure that broker is opened
-
-			response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
-			if err != nil {
-				errChan <- err
-				return
-			}
-			logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
-			logDirs[b.ID()] = response.LogDirs
-			logDirsMaps <- logDirs
-		}(broker, ca.conf)
-	}
-
-	wg.Wait()
-	close(logDirsMaps)
-	close(errChan)
-
-	for logDirsMap := range logDirsMaps {
-		for id, logDirs := range logDirsMap {
-			allLogDirs[id] = logDirs
-		}
-	}
-
-	// Intentionally return only the first error for simplicity
-	err = <-errChan
-	return
-}
-
-func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
-	req := &DescribeUserScramCredentialsRequest{}
-	for _, u := range users {
-		req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
-			Name: u,
-		})
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.DescribeUserScramCredentials(req)
-	if err != nil {
-		return nil, err
-	}
-
-	return rsp.Results, nil
-}
-
-func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
-	res, err := ca.AlterUserScramCredentials(upsert, nil)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
-	res, err := ca.AlterUserScramCredentials(nil, delete)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
-	req := &AlterUserScramCredentialsRequest{
-		Deletions:  d,
-		Upsertions: u,
-	}
-
-	b, err := ca.Controller()
-	if err != nil {
-		return nil, err
-	}
-
-	rsp, err := b.AlterUserScramCredentials(req)
-	if err != nil {
-		return nil, err
-	}
-
-	return rsp.Results, nil
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
deleted file mode 100644
index 8b94b1f..0000000
--- a/vendor/github.com/Shopify/sarama/alter_configs_request.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package sarama
-
-// AlterConfigsRequest is an alter config request type
-type AlterConfigsRequest struct {
-	Resources    []*AlterConfigsResource
-	ValidateOnly bool
-}
-
-// AlterConfigsResource is an alter config resource type
-type AlterConfigsResource struct {
-	Type          ConfigResourceType
-	Name          string
-	ConfigEntries map[string]*string
-}
-
-func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, r := range a.Resources {
-		if err := r.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putBool(a.ValidateOnly)
-	return nil
-}
-
-func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
-	resourceCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*AlterConfigsResource, resourceCount)
-	for i := range a.Resources {
-		r := &AlterConfigsResource{}
-		err = r.decode(pd, version)
-		if err != nil {
-			return err
-		}
-		a.Resources[i] = r
-	}
-
-	validateOnly, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-
-	a.ValidateOnly = validateOnly
-
-	return nil
-}
-
-func (a *AlterConfigsResource) encode(pe packetEncoder) error {
-	pe.putInt8(int8(a.Type))
-
-	if err := pe.putString(a.Name); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
-		return err
-	}
-	for configKey, configValue := range a.ConfigEntries {
-		if err := pe.putString(configKey); err != nil {
-			return err
-		}
-		if err := pe.putNullableString(configValue); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.Name = name
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		a.ConfigEntries = make(map[string]*string, n)
-		for i := 0; i < n; i++ {
-			configKey, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
-				return err
-			}
-		}
-	}
-	return err
-}
-
-func (a *AlterConfigsRequest) key() int16 {
-	return 33
-}
-
-func (a *AlterConfigsRequest) version() int16 {
-	return 0
-}
-
-func (a *AlterConfigsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
deleted file mode 100644
index cfb6369..0000000
--- a/vendor/github.com/Shopify/sarama/alter_configs_response.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package sarama
-
-import "time"
-
-// AlterConfigsResponse is a response type for alter config
-type AlterConfigsResponse struct {
-	ThrottleTime time.Duration
-	Resources    []*AlterConfigsResourceResponse
-}
-
-// AlterConfigsResourceResponse is a response type for alter config resource
-type AlterConfigsResourceResponse struct {
-	ErrorCode int16
-	ErrorMsg  string
-	Type      ConfigResourceType
-	Name      string
-}
-
-func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, v := range a.Resources {
-		if err := v.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	responseCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
-	for i := range a.Resources {
-		a.Resources[i] = new(AlterConfigsResourceResponse)
-
-		if err := a.Resources[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
-	pe.putInt16(a.ErrorCode)
-	err := pe.putString(a.ErrorMsg)
-	if err != nil {
-		return err
-	}
-	pe.putInt8(int8(a.Type))
-	err = pe.putString(a.Name)
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
-	errCode, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	a.ErrorCode = errCode
-
-	e, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.ErrorMsg = e
-
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.Name = name
-
-	return nil
-}
-
-func (a *AlterConfigsResponse) key() int16 {
-	return 32
-}
-
-func (a *AlterConfigsResponse) version() int16 {
-	return 0
-}
-
-func (a *AlterConfigsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
deleted file mode 100644
index f0a2f9d..0000000
--- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-type alterPartitionReassignmentsBlock struct {
-	replicas []int32
-}
-
-func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
-	if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
-		return err
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
-	if b.replicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-	return nil
-}
-
-type AlterPartitionReassignmentsRequest struct {
-	TimeoutMs int32
-	blocks    map[string]map[int32]*alterPartitionReassignmentsBlock
-	Version   int16
-}
-
-func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
-	pe.putInt32(r.TimeoutMs)
-
-	pe.putCompactArrayLength(len(r.blocks))
-
-	for topic, partitions := range r.blocks {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-		pe.putCompactArrayLength(len(partitions))
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.TimeoutMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	topicCount, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount > 0 {
-		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
-		for i := 0; i < topicCount; i++ {
-			topic, err := pd.getCompactString()
-			if err != nil {
-				return err
-			}
-			partitionCount, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-			r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
-			for j := 0; j < partitionCount; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				block := &alterPartitionReassignmentsBlock{}
-				if err := block.decode(pd); err != nil {
-					return err
-				}
-				r.blocks[topic][partition] = block
-
-				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-					return err
-				}
-			}
-			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return
-}
-
-func (r *AlterPartitionReassignmentsRequest) key() int16 {
-	return 45
-}
-
-func (r *AlterPartitionReassignmentsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
-
-func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
-	}
-
-	r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
deleted file mode 100644
index b3f9a15..0000000
--- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package sarama
-
-type alterPartitionReassignmentsErrorBlock struct {
-	errorCode    KError
-	errorMessage *string
-}
-
-func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
-	pe.putInt16(int16(b.errorCode))
-	if err := pe.putNullableCompactString(b.errorMessage); err != nil {
-		return err
-	}
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
-	errorCode, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.errorCode = KError(errorCode)
-	b.errorMessage, err = pd.getCompactNullableString()
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return err
-}
-
-type AlterPartitionReassignmentsResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	ErrorCode      KError
-	ErrorMessage   *string
-	Errors         map[string]map[int32]*alterPartitionReassignmentsErrorBlock
-}
-
-func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
-	if r.Errors == nil {
-		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
-	}
-	partitions := r.Errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
-		r.Errors[topic] = partitions
-	}
-
-	partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
-}
-
-func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(r.ThrottleTimeMs)
-	pe.putInt16(int16(r.ErrorCode))
-	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
-		return err
-	}
-
-	pe.putCompactArrayLength(len(r.Errors))
-	for topic, partitions := range r.Errors {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-		pe.putCompactArrayLength(len(partitions))
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.ErrorCode = KError(kerr)
-
-	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-		return err
-	}
-
-	numTopics, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if numTopics > 0 {
-		r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
-		for i := 0; i < numTopics; i++ {
-			topic, err := pd.getCompactString()
-			if err != nil {
-				return err
-			}
-
-			ongoingPartitionReassignments, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-
-			r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
-
-			for j := 0; j < ongoingPartitionReassignments; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				block := &alterPartitionReassignmentsErrorBlock{}
-				if err := block.decode(pd); err != nil {
-					return err
-				}
-
-				r.Errors[topic][partition] = block
-			}
-			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *AlterPartitionReassignmentsResponse) key() int16 {
-	return 45
-}
-
-func (r *AlterPartitionReassignmentsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
-	return 1
-}
-
-func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
deleted file mode 100644
index 0530d89..0000000
--- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package sarama
-
-type AlterUserScramCredentialsRequest struct {
-	Version int16
-
-	// Deletions represents the list of SCRAM credentials to remove
-	Deletions []AlterUserScramCredentialsDelete
-
-	// Upsertions represents the list of SCRAM credentials to update or insert
-	Upsertions []AlterUserScramCredentialsUpsert
-}
-
-type AlterUserScramCredentialsDelete struct {
-	Name      string
-	Mechanism ScramMechanismType
-}
-
-type AlterUserScramCredentialsUpsert struct {
-	Name           string
-	Mechanism      ScramMechanismType
-	Iterations     int32
-	Salt           []byte
-	saltedPassword []byte
-
-	// This field is never transmitted over the wire
-	// @see: https://tools.ietf.org/html/rfc5802
-	Password []byte
-}
-
-func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
-	pe.putCompactArrayLength(len(r.Deletions))
-	for _, d := range r.Deletions {
-		if err := pe.putCompactString(d.Name); err != nil {
-			return err
-		}
-		pe.putInt8(int8(d.Mechanism))
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putCompactArrayLength(len(r.Upsertions))
-	for _, u := range r.Upsertions {
-		if err := pe.putCompactString(u.Name); err != nil {
-			return err
-		}
-		pe.putInt8(int8(u.Mechanism))
-		pe.putInt32(u.Iterations)
-
-		if err := pe.putCompactBytes(u.Salt); err != nil {
-			return err
-		}
-
-		// do not transmit the password over the wire
-		formatter := scramFormatter{mechanism: u.Mechanism}
-		salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
-		if err != nil {
-			return err
-		}
-
-		if err := pe.putCompactBytes(salted); err != nil {
-			return err
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
-	numDeletions, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
-	for i := 0; i < numDeletions; i++ {
-		r.Deletions[i] = AlterUserScramCredentialsDelete{}
-		if r.Deletions[i].Name, err = pd.getCompactString(); err != nil {
-			return err
-		}
-		mechanism, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
-		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	numUpsertions, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
-	for i := 0; i < numUpsertions; i++ {
-		r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
-		if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil {
-			return err
-		}
-		mechanism, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-
-		r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
-		if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
-			return err
-		}
-		if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil {
-			return err
-		}
-		if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil {
-			return err
-		}
-		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *AlterUserScramCredentialsRequest) key() int16 {
-	return 51
-}
-
-func (r *AlterUserScramCredentialsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
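Note that encode above never puts the clear-text Password on the wire; it transmits only the RFC 5802 salted password derived via scramFormatter. A sketch of driving this through the admin API (the user name, iteration count, and salt size are illustrative; SCRAM_MECHANISM_SHA_512 is sarama's mechanism constant, and `admin` is as in the earlier sketches):

```go
// generate a random salt with crypto/rand
salt := make([]byte, 16)
if _, err := rand.Read(salt); err != nil {
	log.Fatal(err)
}

results, err := admin.UpsertUserScramCredentials([]sarama.AlterUserScramCredentialsUpsert{{
	Name:       "alice",
	Mechanism:  sarama.SCRAM_MECHANISM_SHA_512,
	Iterations: 8192,
	Salt:       salt,
	Password:   []byte("s3cret"), // salted client-side before transmission
}})
if err != nil {
	log.Fatal(err)
}
for _, r := range results {
	log.Printf("user %s: %v", r.User, r.ErrorCode)
}
```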
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
deleted file mode 100644
index 31e167b..0000000
--- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import "time"
-
-type AlterUserScramCredentialsResponse struct {
-	Version int16
-
-	ThrottleTime time.Duration
-
-	Results []*AlterUserScramCredentialsResult
-}
-
-type AlterUserScramCredentialsResult struct {
-	User string
-
-	ErrorCode    KError
-	ErrorMessage *string
-}
-
-func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	pe.putCompactArrayLength(len(r.Results))
-
-	for _, u := range r.Results {
-		if err := pe.putCompactString(u.User); err != nil {
-			return err
-		}
-		pe.putInt16(int16(u.ErrorCode))
-		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
-			return err
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	numResults, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if numResults > 0 {
-		r.Results = make([]*AlterUserScramCredentialsResult, numResults)
-		for i := 0; i < numResults; i++ {
-			r.Results[i] = &AlterUserScramCredentialsResult{}
-			if r.Results[i].User, err = pd.getCompactString(); err != nil {
-				return err
-			}
-
-			kerr, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-
-			r.Results[i].ErrorCode = KError(kerr)
-			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-				return err
-			}
-			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *AlterUserScramCredentialsResponse) key() int16 {
-	return 51
-}
-
-func (r *AlterUserScramCredentialsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
-	return 2
-}
-
-func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
deleted file mode 100644
index bee92c0..0000000
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package sarama
-
-// ApiVersionsRequest is the request type used to query a broker for the range of API versions it supports
-type ApiVersionsRequest struct{}
-
-func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
-	return nil
-}
-
-func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
-	return nil
-}
-
-func (a *ApiVersionsRequest) key() int16 {
-	return 18
-}
-
-func (a *ApiVersionsRequest) version() int16 {
-	return 0
-}
-
-func (a *ApiVersionsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
deleted file mode 100644
index 0e72e39..0000000
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package sarama
-
-// ApiVersionsResponseBlock is an api version response block type
-type ApiVersionsResponseBlock struct {
-	ApiKey     int16
-	MinVersion int16
-	MaxVersion int16
-}
-
-func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
-	pe.putInt16(b.ApiKey)
-	pe.putInt16(b.MinVersion)
-	pe.putInt16(b.MaxVersion)
-	return nil
-}
-
-func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
-	var err error
-
-	if b.ApiKey, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	if b.MinVersion, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	if b.MaxVersion, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ApiVersionsResponse is an api version response type
-type ApiVersionsResponse struct {
-	Err         KError
-	ApiVersions []*ApiVersionsResponseBlock
-}
-
-func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
-		return err
-	}
-	for _, apiVersion := range r.ApiVersions {
-		if err := apiVersion.encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	numBlocks, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
-	for i := 0; i < numBlocks; i++ {
-		block := new(ApiVersionsResponseBlock)
-		if err := block.decode(pd); err != nil {
-			return err
-		}
-		r.ApiVersions[i] = block
-	}
-
-	return nil
-}
-
-func (r *ApiVersionsResponse) key() int16 {
-	return 18
-}
-
-func (r *ApiVersionsResponse) version() int16 {
-	return 0
-}
-
-func (r *ApiVersionsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
deleted file mode 100644
index 5911f7b..0000000
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ /dev/null
@@ -1,1161 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/eapache/go-resiliency/breaker"
-	"github.com/eapache/queue"
-)
-
-// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
-// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
-// and parses responses for errors. You must read from the Errors() channel or the
-// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
-// leaks: it will not be garbage-collected automatically when it passes out of
-// scope.
-type AsyncProducer interface {
-
-	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
-	// when both the Errors and Successes channels have been closed. When calling
-	// AsyncClose, you *must* continue to read from those channels in order to
-	// drain the results of any messages in flight.
-	AsyncClose()
-
-	// Close shuts down the producer and waits for any buffered messages to be
-	// flushed. You must call this function before a producer object passes out of
-	// scope, as it may otherwise leak memory. You must call this before calling
-	// Close on the underlying client.
-	Close() error
-
-	// Input is the input channel to which the user writes messages they
-	// wish to send.
-	Input() chan<- *ProducerMessage
-
-	// Successes is the success output channel back to the user when Return.Successes is
-	// enabled. If Return.Successes is true, you MUST read from this channel or the
-	// Producer will deadlock. It is suggested that you send and read messages
-	// together in a single select statement.
-	Successes() <-chan *ProducerMessage
-
-	// Errors is the error output channel back to the user. You MUST read from this
-	// channel or the Producer will deadlock when the channel is full. Alternatively,
-	// you can set Producer.Return.Errors in your config to false, which prevents
-	// errors from being returned.
-	Errors() <-chan *ProducerError
-}
-
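The contract above (always drain Errors, and Successes when Return.Successes is enabled, or the producer deadlocks) is usually satisfied with a select that reads both channels. A minimal sketch, assuming a broker at localhost:9092:

```go
config := sarama.NewConfig()
config.Producer.Return.Successes = true

producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
if err != nil {
	log.Fatal(err)
}
defer producer.Close() // Close drains the Successes channel itself, per the implementation below

producer.Input() <- &sarama.ProducerMessage{
	Topic: "events",
	Value: sarama.StringEncoder("hello"),
}

// stay ready to read both channels so the producer can never block
select {
case s := <-producer.Successes():
	log.Printf("stored at %s/%d@%d", s.Topic, s.Partition, s.Offset)
case e := <-producer.Errors():
	log.Printf("delivery failed: %v", e.Err)
}
```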
-// transactionManager keeps the state necessary to ensure idempotent production
-type transactionManager struct {
-	producerID      int64
-	producerEpoch   int16
-	sequenceNumbers map[string]int32
-	mutex           sync.Mutex
-}
-
-const (
-	noProducerID    = -1
-	noProducerEpoch = -1
-)
-
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
-	key := fmt.Sprintf("%s-%d", topic, partition)
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	sequence := t.sequenceNumbers[key]
-	t.sequenceNumbers[key] = sequence + 1
-	return sequence, t.producerEpoch
-}
-
-func (t *transactionManager) bumpEpoch() {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	t.producerEpoch++
-	for k := range t.sequenceNumbers {
-		t.sequenceNumbers[k] = 0
-	}
-}
-
-func (t *transactionManager) getProducerID() (int64, int16) {
-	t.mutex.Lock()
-	defer t.mutex.Unlock()
-	return t.producerID, t.producerEpoch
-}
-
-func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
-	txnmgr := &transactionManager{
-		producerID:    noProducerID,
-		producerEpoch: noProducerEpoch,
-	}
-
-	if conf.Producer.Idempotent {
-		initProducerIDResponse, err := client.InitProducerID()
-		if err != nil {
-			return nil, err
-		}
-		txnmgr.producerID = initProducerIDResponse.ProducerID
-		txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
-		txnmgr.sequenceNumbers = make(map[string]int32)
-		txnmgr.mutex = sync.Mutex{}
-
-		Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
-	}
-
-	return txnmgr, nil
-}
-
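newTransactionManager above only calls InitProducerID when idempotent production is turned on. A config sketch for enabling it; the accompanying settings mirror what sarama's config validation requires for idempotency:

```go
config := sarama.NewConfig()
config.Version = sarama.V0_11_0_0 // idempotency needs at least Kafka 0.11
config.Producer.Idempotent = true // triggers InitProducerID in newTransactionManager
config.Producer.RequiredAcks = sarama.WaitForAll
config.Net.MaxOpenRequests = 1 // keeps ordering intact for the sequence numbers above
```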
-type asyncProducer struct {
-	client Client
-	conf   *Config
-
-	errors                    chan *ProducerError
-	input, successes, retries chan *ProducerMessage
-	inFlight                  sync.WaitGroup
-
-	brokers    map[*Broker]*brokerProducer
-	brokerRefs map[*brokerProducer]int
-	brokerLock sync.Mutex
-
-	txnmgr *transactionManager
-}
-
-// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
-func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
-	client, err := NewClient(addrs, conf)
-	if err != nil {
-		return nil, err
-	}
-	return newAsyncProducer(client)
-}
-
-// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
-	// For a client passed in by the caller, ensure we don't
-	// call Close() on it.
-	cli := &nopCloserClient{client}
-	return newAsyncProducer(cli)
-}
-
-func newAsyncProducer(client Client) (AsyncProducer, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	txnmgr, err := newTransactionManager(client.Config(), client)
-	if err != nil {
-		return nil, err
-	}
-
-	p := &asyncProducer{
-		client:     client,
-		conf:       client.Config(),
-		errors:     make(chan *ProducerError),
-		input:      make(chan *ProducerMessage),
-		successes:  make(chan *ProducerMessage),
-		retries:    make(chan *ProducerMessage),
-		brokers:    make(map[*Broker]*brokerProducer),
-		brokerRefs: make(map[*brokerProducer]int),
-		txnmgr:     txnmgr,
-	}
-
-	// launch our singleton dispatchers
-	go withRecover(p.dispatcher)
-	go withRecover(p.retryHandler)
-
-	return p, nil
-}
-
-type flagSet int8
-
-const (
-	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
-	fin                          // final message from partitionProducer to brokerProducer and back
-	shutdown                     // start the shutdown process
-)
-
-// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
-type ProducerMessage struct {
-	Topic string // The Kafka topic for this message.
-	// The partitioning key for this message. Pre-existing Encoders include
-	// StringEncoder and ByteEncoder.
-	Key Encoder
-	// The actual message to store in Kafka. Pre-existing Encoders include
-	// StringEncoder and ByteEncoder.
-	Value Encoder
-
-	// The headers are key-value pairs that are transparently passed
-	// by Kafka between producers and consumers.
-	Headers []RecordHeader
-
-	// This field is used to hold arbitrary data you wish to include so it
-	// will be available when receiving on the Successes and Errors channels.
-	// Sarama completely ignores this field; it is only to be used for
-	// pass-through data.
-	Metadata interface{}
-
-	// Below this point are filled in by the producer as the message is processed
-
-	// Offset is the offset of the message stored on the broker. This is only
-	// guaranteed to be defined if the message was successfully delivered and
-	// RequiredAcks is not NoResponse.
-	Offset int64
-	// Partition is the partition that the message was sent to. This is only
-	// guaranteed to be defined if the message was successfully delivered.
-	Partition int32
-	// Timestamp behaviour varies depending on the broker configuration, which
-	// selects either CreateTime or LogAppendTime mode (default CreateTime), and
-	// requires Kafka version 0.10.0 or later.
-	//
-	// When configured to CreateTime, the timestamp is specified by the producer
-	// either by explicitly setting this field, or when the message is added
-	// to a produce set.
-	//
-	// When configured to LogAppendTime, the timestamp is assigned to the message
-	// by the broker. This is only guaranteed to be defined if the message was
-	// successfully delivered and RequiredAcks is not NoResponse.
-	Timestamp time.Time
-
-	retries        int
-	flags          flagSet
-	expectation    chan *ProducerError
-	sequenceNumber int32
-	producerEpoch  int16
-	hasSequence    bool
-}
-
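A sketch of populating the caller-settable fields above; Offset, Partition, and Timestamp are left for the producer to fill in on delivery (the topic, key, and payload are illustrative, and `producer` is as in the earlier sketch):

```go
msg := &sarama.ProducerMessage{
	Topic: "events",
	Key:   sarama.StringEncoder("user-42"), // drives partition selection
	Value: sarama.ByteEncoder(`{"ok":1}`),
	Headers: []sarama.RecordHeader{
		{Key: []byte("trace-id"), Value: []byte("abc123")},
	},
	Metadata: "request-7", // opaque pass-through, echoed back on Successes/Errors
}
producer.Input() <- msg
```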
-const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
-
-func (m *ProducerMessage) byteSize(version int) int {
-	var size int
-	if version >= 2 {
-		size = maximumRecordOverhead
-		for _, h := range m.Headers {
-			size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
-		}
-	} else {
-		size = producerMessageOverhead
-	}
-	if m.Key != nil {
-		size += m.Key.Length()
-	}
-	if m.Value != nil {
-		size += m.Value.Length()
-	}
-	return size
-}
-
-func (m *ProducerMessage) clear() {
-	m.flags = 0
-	m.retries = 0
-	m.sequenceNumber = 0
-	m.producerEpoch = 0
-	m.hasSequence = false
-}
-
-// ProducerError is the type of error generated when the producer fails to deliver a message.
-// It contains the original ProducerMessage as well as the actual error value.
-type ProducerError struct {
-	Msg *ProducerMessage
-	Err error
-}
-
-func (pe ProducerError) Error() string {
-	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
-}
-
-func (pe ProducerError) Unwrap() error {
-	return pe.Err
-}
-
-// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
-// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
-// when closing a producer.
-type ProducerErrors []*ProducerError
-
-func (pe ProducerErrors) Error() string {
-	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
-}
-
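Since Close can hand back the batched ProducerErrors defined above, per-message failures are recoverable with errors.As. A sketch (`producer` as in the earlier example):

```go
if err := producer.Close(); err != nil {
	var pErrs sarama.ProducerErrors
	if errors.As(err, &pErrs) { // errors.As from the standard library errors package
		for _, pe := range pErrs {
			log.Printf("undelivered message on topic %s: %v", pe.Msg.Topic, pe.Err)
		}
	}
}
```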
-func (p *asyncProducer) Errors() <-chan *ProducerError {
-	return p.errors
-}
-
-func (p *asyncProducer) Successes() <-chan *ProducerMessage {
-	return p.successes
-}
-
-func (p *asyncProducer) Input() chan<- *ProducerMessage {
-	return p.input
-}
-
-func (p *asyncProducer) Close() error {
-	p.AsyncClose()
-
-	if p.conf.Producer.Return.Successes {
-		go withRecover(func() {
-			for range p.successes {
-			}
-		})
-	}
-
-	var errors ProducerErrors
-	if p.conf.Producer.Return.Errors {
-		for event := range p.errors {
-			errors = append(errors, event)
-		}
-	} else {
-		<-p.errors
-	}
-
-	if len(errors) > 0 {
-		return errors
-	}
-	return nil
-}
-
-func (p *asyncProducer) AsyncClose() {
-	go withRecover(p.shutdown)
-}
-
-// singleton
-// dispatches messages by topic
-func (p *asyncProducer) dispatcher() {
-	handlers := make(map[string]chan<- *ProducerMessage)
-	shuttingDown := false
-
-	for msg := range p.input {
-		if msg == nil {
-			Logger.Println("Something tried to send a nil message; it was ignored.")
-			continue
-		}
-
-		if msg.flags&shutdown != 0 {
-			shuttingDown = true
-			p.inFlight.Done()
-			continue
-		} else if msg.retries == 0 {
-			if shuttingDown {
-				// we can't just call returnError here because that decrements the wait group,
-				// which hasn't been incremented yet for this message, and shouldn't be
-				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
-				if p.conf.Producer.Return.Errors {
-					p.errors <- pErr
-				} else {
-					Logger.Println(pErr)
-				}
-				continue
-			}
-			p.inFlight.Add(1)
-		}
-
-		for _, interceptor := range p.conf.Producer.Interceptors {
-			msg.safelyApplyInterceptor(interceptor)
-		}
-
-		version := 1
-		if p.conf.Version.IsAtLeast(V0_11_0_0) {
-			version = 2
-		} else if msg.Headers != nil {
-			p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
-			continue
-		}
-		if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
-			p.returnError(msg, ErrMessageSizeTooLarge)
-			continue
-		}
-
-		handler := handlers[msg.Topic]
-		if handler == nil {
-			handler = p.newTopicProducer(msg.Topic)
-			handlers[msg.Topic] = handler
-		}
-
-		handler <- msg
-	}
-
-	for _, handler := range handlers {
-		close(handler)
-	}
-}
-
-// one per topic
-// partitions messages, then dispatches them by partition
-type topicProducer struct {
-	parent *asyncProducer
-	topic  string
-	input  <-chan *ProducerMessage
-
-	breaker     *breaker.Breaker
-	handlers    map[int32]chan<- *ProducerMessage
-	partitioner Partitioner
-}
-
-func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
-	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
-	tp := &topicProducer{
-		parent:      p,
-		topic:       topic,
-		input:       input,
-		breaker:     breaker.New(3, 1, 10*time.Second),
-		handlers:    make(map[int32]chan<- *ProducerMessage),
-		partitioner: p.conf.Producer.Partitioner(topic),
-	}
-	go withRecover(tp.dispatch)
-	return input
-}
-
-func (tp *topicProducer) dispatch() {
-	for msg := range tp.input {
-		if msg.retries == 0 {
-			if err := tp.partitionMessage(msg); err != nil {
-				tp.parent.returnError(msg, err)
-				continue
-			}
-		}
-
-		handler := tp.handlers[msg.Partition]
-		if handler == nil {
-			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
-			tp.handlers[msg.Partition] = handler
-		}
-
-		handler <- msg
-	}
-
-	for _, handler := range tp.handlers {
-		close(handler)
-	}
-}
-
-func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
-	var partitions []int32
-
-	err := tp.breaker.Run(func() (err error) {
-		requiresConsistency := false
-		if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
-			requiresConsistency = ep.MessageRequiresConsistency(msg)
-		} else {
-			requiresConsistency = tp.partitioner.RequiresConsistency()
-		}
-
-		if requiresConsistency {
-			partitions, err = tp.parent.client.Partitions(msg.Topic)
-		} else {
-			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
-		}
-		return
-	})
-	if err != nil {
-		return err
-	}
-
-	numPartitions := int32(len(partitions))
-
-	if numPartitions == 0 {
-		return ErrLeaderNotAvailable
-	}
-
-	choice, err := tp.partitioner.Partition(msg, numPartitions)
-
-	if err != nil {
-		return err
-	} else if choice < 0 || choice >= numPartitions {
-		return ErrInvalidPartition
-	}
-
-	msg.Partition = partitions[choice]
-
-	return nil
-}
-
-// one per partition per topic
-// dispatches messages to the appropriate broker
-// also responsible for maintaining message order during retries
-type partitionProducer struct {
-	parent    *asyncProducer
-	topic     string
-	partition int32
-	input     <-chan *ProducerMessage
-
-	leader         *Broker
-	breaker        *breaker.Breaker
-	brokerProducer *brokerProducer
-
-	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
-	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering.
-	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
-	// therefore whether our buffer is complete and safe to flush)
-	highWatermark int
-	retryState    []partitionRetryState
-}
-
-type partitionRetryState struct {
-	buf          []*ProducerMessage
-	expectChaser bool
-}
-
-func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
-	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
-	pp := &partitionProducer{
-		parent:    p,
-		topic:     topic,
-		partition: partition,
-		input:     input,
-
-		breaker:    breaker.New(3, 1, 10*time.Second),
-		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
-	}
-	go withRecover(pp.dispatch)
-	return input
-}
-
-func (pp *partitionProducer) backoff(retries int) {
-	var backoff time.Duration
-	if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
-		maxRetries := pp.parent.conf.Producer.Retry.Max
-		backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
-	} else {
-		backoff = pp.parent.conf.Producer.Retry.Backoff
-	}
-	if backoff > 0 {
-		time.Sleep(backoff)
-	}
-}
-
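The helper above prefers the caller-supplied Retry.BackoffFunc over the static Retry.Backoff, so retry schedules are pluggable. A sketch of a capped exponential policy (the base delay and cap are illustrative):

```go
config.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
	// 100ms, 200ms, 400ms, ... capped at 10s
	d := 100 * time.Millisecond << uint(retries)
	if d > 10*time.Second {
		d = 10 * time.Second
	}
	return d
}
```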
-func (pp *partitionProducer) dispatch() {
-	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
-	// on the first message
-	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
-	if pp.leader != nil {
-		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
-		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
-		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-	}
-
-	defer func() {
-		if pp.brokerProducer != nil {
-			pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
-		}
-	}()
-
-	for msg := range pp.input {
-		if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
-			select {
-			case <-pp.brokerProducer.abandoned:
-				// a message on the abandoned channel means that our current broker selection is out of date
-				Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-				pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
-				pp.brokerProducer = nil
-				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
-			default:
-				// producer connection is still open.
-			}
-		}
-
-		if msg.retries > pp.highWatermark {
-			// a new, higher, retry level; handle it and then back off
-			pp.newHighWatermark(msg.retries)
-			pp.backoff(msg.retries)
-		} else if pp.highWatermark > 0 {
-			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
-			if msg.retries < pp.highWatermark {
-				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
-				if msg.flags&fin == fin {
-					pp.retryState[msg.retries].expectChaser = false
-					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
-				} else {
-					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
-				}
-				continue
-			} else if msg.flags&fin == fin {
-				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
-				// meaning this retry level is done and we can go down (at least) one level and flush that
-				pp.retryState[pp.highWatermark].expectChaser = false
-				pp.flushRetryBuffers()
-				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
-				continue
-			}
-		}
-
-		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
-		// without breaking any of our ordering guarantees
-
-		if pp.brokerProducer == nil {
-			if err := pp.updateLeader(); err != nil {
-				pp.parent.returnError(msg, err)
-				pp.backoff(msg.retries)
-				continue
-			}
-			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-		}
-
-		// Now that we know we have a broker to actually try and send this message to, generate the sequence
-		// number for it.
-		// All messages being retried (sent or not) have already had their retry count updated
-		// Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
-		if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
-			msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
-			msg.hasSequence = true
-		}
-
-		pp.brokerProducer.input <- msg
-	}
-}
-
-func (pp *partitionProducer) newHighWatermark(hwm int) {
-	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
-	pp.highWatermark = hwm
-
-	// send off a fin so that we know when everything "in between" has made it
-	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
-	pp.retryState[pp.highWatermark].expectChaser = true
-	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
-	pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
-
-	// a new HWM means that our current broker selection is out of date
-	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-	pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
-	pp.brokerProducer = nil
-}
-
-func (pp *partitionProducer) flushRetryBuffers() {
-	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
-	for {
-		pp.highWatermark--
-
-		if pp.brokerProducer == nil {
-			if err := pp.updateLeader(); err != nil {
-				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
-				goto flushDone
-			}
-			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
-		}
-
-		for _, msg := range pp.retryState[pp.highWatermark].buf {
-			pp.brokerProducer.input <- msg
-		}
-
-	flushDone:
-		pp.retryState[pp.highWatermark].buf = nil
-		if pp.retryState[pp.highWatermark].expectChaser {
-			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
-			break
-		} else if pp.highWatermark == 0 {
-			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
-			break
-		}
-	}
-}
-
-func (pp *partitionProducer) updateLeader() error {
-	return pp.breaker.Run(func() (err error) {
-		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
-			return err
-		}
-
-		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
-			return err
-		}
-
-		pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
-		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
-		pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-
-		return nil
-	})
-}
-
-// one per broker; also constructs an associated flusher
-func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
-	var (
-		input     = make(chan *ProducerMessage)
-		bridge    = make(chan *produceSet)
-		responses = make(chan *brokerProducerResponse)
-	)
-
-	bp := &brokerProducer{
-		parent:         p,
-		broker:         broker,
-		input:          input,
-		output:         bridge,
-		responses:      responses,
-		stopchan:       make(chan struct{}),
-		buffer:         newProduceSet(p),
-		currentRetries: make(map[string]map[int32]error),
-	}
-	go withRecover(bp.run)
-
-	// minimal bridge to make the network response `select`able
-	go withRecover(func() {
-		for set := range bridge {
-			request := set.buildRequest()
-
-			response, err := broker.Produce(request)
-
-			responses <- &brokerProducerResponse{
-				set: set,
-				err: err,
-				res: response,
-			}
-		}
-		close(responses)
-	})
-
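-	// when retries are disabled, an abandoned channel is created so that
-	// partitionProducers can detect a stale broker selection immediately
-	// (see dispatch and abandonBrokerConnection)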
-	if p.conf.Producer.Retry.Max <= 0 {
-		bp.abandoned = make(chan struct{})
-	}
-
-	return bp
-}
-
-type brokerProducerResponse struct {
-	set *produceSet
-	err error
-	res *ProduceResponse
-}
-
-// brokerProducer groups messages together into appropriately-sized batches for sending to the broker.
-// It also handles state related to retries etc.
-type brokerProducer struct {
-	parent *asyncProducer
-	broker *Broker
-
-	input     chan *ProducerMessage
-	output    chan<- *produceSet
-	responses <-chan *brokerProducerResponse
-	abandoned chan struct{}
-	stopchan  chan struct{}
-
-	buffer     *produceSet
-	timer      <-chan time.Time
-	timerFired bool
-
-	closing        error
-	currentRetries map[string]map[int32]error
-}
-
-func (bp *brokerProducer) run() {
-	var output chan<- *produceSet
-	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
-
-	for {
-		select {
-		case msg, ok := <-bp.input:
-			if !ok {
-				Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
-				bp.shutdown()
-				return
-			}
-
-			if msg == nil {
-				continue
-			}
-
-			if msg.flags&syn == syn {
-				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
-					bp.broker.ID(), msg.Topic, msg.Partition)
-				if bp.currentRetries[msg.Topic] == nil {
-					bp.currentRetries[msg.Topic] = make(map[int32]error)
-				}
-				bp.currentRetries[msg.Topic][msg.Partition] = nil
-				bp.parent.inFlight.Done()
-				continue
-			}
-
-			if reason := bp.needsRetry(msg); reason != nil {
-				bp.parent.retryMessage(msg, reason)
-
-				if bp.closing == nil && msg.flags&fin == fin {
-					// we were retrying this partition but we can start processing again
-					delete(bp.currentRetries[msg.Topic], msg.Partition)
-					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
-						bp.broker.ID(), msg.Topic, msg.Partition)
-				}
-
-				continue
-			}
-
-			if bp.buffer.wouldOverflow(msg) {
-				Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-				if err := bp.waitForSpace(msg, false); err != nil {
-					bp.parent.retryMessage(msg, err)
-					continue
-				}
-			}
-
-			if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
-				// The epoch was reset, need to roll the buffer over
-				Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
-				if err := bp.waitForSpace(msg, true); err != nil {
-					bp.parent.retryMessage(msg, err)
-					continue
-				}
-			}
-			if err := bp.buffer.add(msg); err != nil {
-				bp.parent.returnError(msg, err)
-				continue
-			}
-
-			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
-				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
-			}
-		case <-bp.timer:
-			bp.timerFired = true
-		case output <- bp.buffer:
-			bp.rollOver()
-		case response, ok := <-bp.responses:
-			if ok {
-				bp.handleResponse(response)
-			}
-		case <-bp.stopchan:
-			Logger.Printf(
-				"producer/broker/%d run loop asked to stop\n", bp.broker.ID())
-			return
-		}
-
-		if bp.timerFired || bp.buffer.readyToFlush() {
-			output = bp.output
-		} else {
-			output = nil
-		}
-	}
-}
-
-func (bp *brokerProducer) shutdown() {
-	for !bp.buffer.empty() {
-		select {
-		case response := <-bp.responses:
-			bp.handleResponse(response)
-		case bp.output <- bp.buffer:
-			bp.rollOver()
-		}
-	}
-	close(bp.output)
-	for response := range bp.responses {
-		bp.handleResponse(response)
-	}
-	close(bp.stopchan)
-	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
-}
-
-func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
-	if bp.closing != nil {
-		return bp.closing
-	}
-
-	return bp.currentRetries[msg.Topic][msg.Partition]
-}
-
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
-	for {
-		select {
-		case response := <-bp.responses:
-			bp.handleResponse(response)
-			// handling a response can change our state, so re-check some things
-			if reason := bp.needsRetry(msg); reason != nil {
-				return reason
-			} else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
-				return nil
-			}
-		case bp.output <- bp.buffer:
-			bp.rollOver()
-			return nil
-		}
-	}
-}
-
-func (bp *brokerProducer) rollOver() {
-	bp.timer = nil
-	bp.timerFired = false
-	bp.buffer = newProduceSet(bp.parent)
-}
-
-func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
-	if response.err != nil {
-		bp.handleError(response.set, response.err)
-	} else {
-		bp.handleSuccess(response.set, response.res)
-	}
-
-	if bp.buffer.empty() {
-		bp.rollOver() // this can happen if the response invalidated our buffer
-	}
-}
-
-func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
-	// we iterate through the blocks in the request set, not the response, so that we notice
-	// if the response is missing a block completely
-	var retryTopics []string
-	sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-		if response == nil {
-			// this only happens when RequiredAcks is NoResponse, so we have to assume success
-			bp.parent.returnSuccesses(pSet.msgs)
-			return
-		}
-
-		block := response.GetBlock(topic, partition)
-		if block == nil {
-			bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
-			return
-		}
-
-		switch block.Err {
-		// Success
-		case ErrNoError:
-			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
-				for _, msg := range pSet.msgs {
-					msg.Timestamp = block.Timestamp
-				}
-			}
-			for i, msg := range pSet.msgs {
-				msg.Offset = block.Offset + int64(i)
-			}
-			bp.parent.returnSuccesses(pSet.msgs)
-		// Duplicate
-		case ErrDuplicateSequenceNumber:
-			bp.parent.returnSuccesses(pSet.msgs)
-		// Retriable errors
-		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
-			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
-			if bp.parent.conf.Producer.Retry.Max <= 0 {
-				bp.parent.abandonBrokerConnection(bp.broker)
-				bp.parent.returnErrors(pSet.msgs, block.Err)
-			} else {
-				retryTopics = append(retryTopics, topic)
-			}
-		// Other non-retriable errors
-		default:
-			if bp.parent.conf.Producer.Retry.Max <= 0 {
-				bp.parent.abandonBrokerConnection(bp.broker)
-			}
-			bp.parent.returnErrors(pSet.msgs, block.Err)
-		}
-	})
-
-	if len(retryTopics) > 0 {
-		if bp.parent.conf.Producer.Idempotent {
-			err := bp.parent.client.RefreshMetadata(retryTopics...)
-			if err != nil {
-				Logger.Printf("Failed refreshing metadata because of %v\n", err)
-			}
-		}
-
-		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			block := response.GetBlock(topic, partition)
-			if block == nil {
-				// handled in the previous "eachPartition" loop
-				return
-			}
-
-			switch block.Err {
-			case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
-				ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
-				Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
-					bp.broker.ID(), topic, partition, block.Err)
-				if bp.currentRetries[topic] == nil {
-					bp.currentRetries[topic] = make(map[int32]error)
-				}
-				bp.currentRetries[topic][partition] = block.Err
-				if bp.parent.conf.Producer.Idempotent {
-					go bp.parent.retryBatch(topic, partition, pSet, block.Err)
-				} else {
-					bp.parent.retryMessages(pSet.msgs, block.Err)
-				}
-				// dropping the following messages has the side effect of incrementing their retry count
-				bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
-			}
-		})
-	}
-}
-
-func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
-	Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
-	produceSet := newProduceSet(p)
-	produceSet.msgs[topic] = make(map[int32]*partitionSet)
-	produceSet.msgs[topic][partition] = pSet
-	produceSet.bufferBytes += pSet.bufferBytes
-	produceSet.bufferCount += len(pSet.msgs)
-	for _, msg := range pSet.msgs {
-		if msg.retries >= p.conf.Producer.Retry.Max {
-			p.returnError(msg, kerr)
-			return
-		}
-		msg.retries++
-	}
-
-	// it's expected that a metadata refresh has been requested prior to calling retryBatch
-	leader, err := p.client.Leader(topic, partition)
-	if err != nil {
-		Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up the new leader\n", topic, partition, err)
-		for _, msg := range pSet.msgs {
-			p.returnError(msg, kerr)
-		}
-		return
-	}
-	bp := p.getBrokerProducer(leader)
-	bp.output <- produceSet
-}
-
-func (bp *brokerProducer) handleError(sent *produceSet, err error) {
-	switch err.(type) {
-	case PacketEncodingError:
-		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			bp.parent.returnErrors(pSet.msgs, err)
-		})
-	default:
-		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
-		bp.parent.abandonBrokerConnection(bp.broker)
-		_ = bp.broker.Close()
-		bp.closing = err
-		sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			bp.parent.retryMessages(pSet.msgs, err)
-		})
-		bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
-			bp.parent.retryMessages(pSet.msgs, err)
-		})
-		bp.rollOver()
-	}
-}
-
-// singleton
-// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
-// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
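-// It buffers retried messages in an unbounded in-memory queue (github.com/eapache/queue)
-// so that a send to p.retries can always be drained and never deadlocks the flushers.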
-func (p *asyncProducer) retryHandler() {
-	var msg *ProducerMessage
-	buf := queue.New()
-
-	for {
-		if buf.Length() == 0 {
-			msg = <-p.retries
-		} else {
-			select {
-			case msg = <-p.retries:
-			case p.input <- buf.Peek().(*ProducerMessage):
-				buf.Remove()
-				continue
-			}
-		}
-
-		if msg == nil {
-			return
-		}
-
-		buf.Add(msg)
-	}
-}
-
-// utility functions
-
-func (p *asyncProducer) shutdown() {
-	Logger.Println("Producer shutting down.")
-	p.inFlight.Add(1)
-	p.input <- &ProducerMessage{flags: shutdown}
-
-	p.inFlight.Wait()
-
-	err := p.client.Close()
-	if err != nil {
-		Logger.Println("producer/shutdown failed to close the embedded client:", err)
-	}
-
-	close(p.input)
-	close(p.retries)
-	close(p.errors)
-	close(p.successes)
-}
-
-func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
-	// We need to reset the producer ID epoch if we set a sequence number on it, because the broker
-	// will never see a message with this number, so we can never continue the sequence.
-	if msg.hasSequence {
-		Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
-		p.txnmgr.bumpEpoch()
-	}
-	msg.clear()
-	pErr := &ProducerError{Msg: msg, Err: err}
-	if p.conf.Producer.Return.Errors {
-		p.errors <- pErr
-	} else {
-		Logger.Println(pErr)
-	}
-	p.inFlight.Done()
-}
-
-func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
-	for _, msg := range batch {
-		p.returnError(msg, err)
-	}
-}
-
-func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
-	for _, msg := range batch {
-		if p.conf.Producer.Return.Successes {
-			msg.clear()
-			p.successes <- msg
-		}
-		p.inFlight.Done()
-	}
-}
-
-func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
-	if msg.retries >= p.conf.Producer.Retry.Max {
-		p.returnError(msg, err)
-	} else {
-		msg.retries++
-		p.retries <- msg
-	}
-}
-
-func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
-	for _, msg := range batch {
-		p.retryMessage(msg, err)
-	}
-}
-
-func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
-	p.brokerLock.Lock()
-	defer p.brokerLock.Unlock()
-
-	bp := p.brokers[broker]
-
-	if bp == nil {
-		bp = p.newBrokerProducer(broker)
-		p.brokers[broker] = bp
-		p.brokerRefs[bp] = 0
-	}
-
-	p.brokerRefs[bp]++
-
-	return bp
-}
-
-func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
-	p.brokerLock.Lock()
-	defer p.brokerLock.Unlock()
-
-	p.brokerRefs[bp]--
-	if p.brokerRefs[bp] == 0 {
-		close(bp.input)
-		delete(p.brokerRefs, bp)
-
-		if p.brokers[broker] == bp {
-			delete(p.brokers, broker)
-		}
-	}
-}
-
-func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
-	p.brokerLock.Lock()
-	defer p.brokerLock.Unlock()
-
-	bc, ok := p.brokers[broker]
-	if ok && bc.abandoned != nil {
-		close(bc.abandoned)
-	}
-
-	delete(p.brokers, broker)
-}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
deleted file mode 100644
index 9855bf4..0000000
--- a/vendor/github.com/Shopify/sarama/balance_strategy.go
+++ /dev/null
@@ -1,1136 +0,0 @@
-package sarama
-
-import (
-	"container/heap"
-	"errors"
-	"fmt"
-	"math"
-	"sort"
-	"strings"
-)
-
-const (
-	// RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
-	RangeBalanceStrategyName = "range"
-
-	// RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
-	RoundRobinBalanceStrategyName = "roundrobin"
-
-	// StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
-	StickyBalanceStrategyName = "sticky"
-
-	defaultGeneration = -1
-)
-
-// BalanceStrategyPlan is the result of any BalanceStrategy.Plan attempt.
-// It contains an allocation of topic/partitions by memberID in the form of
-// a `memberID -> topic -> partitions` map.
-type BalanceStrategyPlan map[string]map[string][]int32
-
-// Add assigns a topic with a number of partitions to a member.
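-// For example, starting from an empty plan, p.Add("m1", "t1", 0, 1) yields p["m1"]["t1"] == []int32{0, 1}.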
-func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
-	if len(partitions) == 0 {
-		return
-	}
-	if _, ok := p[memberID]; !ok {
-		p[memberID] = make(map[string][]int32, 1)
-	}
-	p[memberID][topic] = append(p[memberID][topic], partitions...)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategy is used to balance topics and partitions
-// across members of a consumer group
-type BalanceStrategy interface {
-	// Name uniquely identifies the strategy.
-	Name() string
-
-	// Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
-	// and returns a distribution plan.
-	Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
-
-	// AssignmentData returns the serialized assignment data for the specified
-	// memberID
-	AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
-// Example with one topic T with six partitions (0..5) and two members (M1, M2):
-//   M1: {T: [0, 1, 2]}
-//   M2: {T: [3, 4, 5]}
-var BalanceStrategyRange = &balanceStrategy{
-	name: RangeBalanceStrategyName,
-	coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
-		step := float64(len(partitions)) / float64(len(memberIDs))
-
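-		// member i receives the partitions in the half-open interval [round(i*step), round((i+1)*step))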
-		for i, memberID := range memberIDs {
-			pos := float64(i)
-			min := int(math.Floor(pos*step + 0.5))
-			max := int(math.Floor((pos+1)*step + 0.5))
-			plan.Add(memberID, topic, partitions[min:max]...)
-		}
-	},
-}
-
-// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
-// while maintaining a balanced partition distribution.
-// Example with topic T with six partitions (0..5) and two members (M1, M2):
-//   M1: {T: [0, 2, 4]}
-//   M2: {T: [1, 3, 5]}
-//
-// On reassignment with an additional consumer, you might get an assignment plan like:
-//   M1: {T: [0, 2]}
-//   M2: {T: [1, 3]}
-//   M3: {T: [4, 5]}
-//
-var BalanceStrategySticky = &stickyBalanceStrategy{}
-
-// --------------------------------------------------------------------
-
-type balanceStrategy struct {
-	name   string
-	coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
-}
-
-// Name implements BalanceStrategy.
-func (s *balanceStrategy) Name() string { return s.name }
-
-// Plan implements BalanceStrategy.
-func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
-	// Build members by topic map
-	mbt := make(map[string][]string)
-	for memberID, meta := range members {
-		for _, topic := range meta.Topics {
-			mbt[topic] = append(mbt[topic], memberID)
-		}
-	}
-
-	// Sort members for each topic
-	for topic, memberIDs := range mbt {
-		sort.Sort(&balanceStrategySortable{
-			topic:     topic,
-			memberIDs: memberIDs,
-		})
-	}
-
-	// Assemble plan
-	plan := make(BalanceStrategyPlan, len(members))
-	for topic, memberIDs := range mbt {
-		s.coreFn(plan, memberIDs, topic, topics[topic])
-	}
-	return plan, nil
-}
-
-// AssignmentData returns nil; simple strategies do not require any shared assignment data.
-func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
-	return nil, nil
-}
-
-type balanceStrategySortable struct {
-	topic     string
-	memberIDs []string
-}
-
-func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
-func (p balanceStrategySortable) Swap(i, j int) {
-	p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
-}
-
-func (p balanceStrategySortable) Less(i, j int) bool {
-	return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
-}
-
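-// balanceStrategyHashValue computes an FNV-1a-style 32-bit hash over the concatenated input strings.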
-func balanceStrategyHashValue(vv ...string) uint32 {
-	h := uint32(2166136261)
-	for _, s := range vv {
-		for _, c := range s {
-			h ^= uint32(c)
-			h *= 16777619
-		}
-	}
-	return h
-}
-
-type stickyBalanceStrategy struct {
-	movements partitionMovements
-}
-
-// Name implements BalanceStrategy.
-func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
-
-// Plan implements BalanceStrategy.
-func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
-	// track partition movements during generation of the partition assignment plan
-	s.movements = partitionMovements{
-		Movements:                 make(map[topicPartitionAssignment]consumerPair),
-		PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
-	}
-
-	// prepopulate the current assignment state from userdata on the consumer group members
-	currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
-	if err != nil {
-		return nil, err
-	}
-
-	// determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
-	isFreshAssignment := len(currentAssignment) == 0
-
-	// create a mapping of all current topic partitions and the consumers that can be assigned to them
-	partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
-	for topic, partitions := range topics {
-		for _, partition := range partitions {
-			partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
-		}
-	}
-
-	// create a mapping of all consumers to all potential topic partitions that can be assigned to them
-	// also, populate the mapping of partitions to potential consumers
-	consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
-	for memberID, meta := range members {
-		consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
-		for _, topicSubscription := range meta.Topics {
-			// only evaluate topic subscriptions that are present in the supplied topics map
-			if _, found := topics[topicSubscription]; found {
-				for _, partition := range topics[topicSubscription] {
-					topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
-					consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
-					partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
-				}
-			}
-		}
-
-		// add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
-		if _, exists := currentAssignment[memberID]; !exists {
-			currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
-		}
-	}
-
-	// create a mapping of each partition to its current consumer, where possible
-	currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
-	unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
-	for partition := range partition2AllPotentialConsumers {
-		unvisitedPartitions[partition] = true
-	}
-	var unassignedPartitions []topicPartitionAssignment
-	for memberID, partitions := range currentAssignment {
-		var keepPartitions []topicPartitionAssignment
-		for _, partition := range partitions {
-			// If this partition no longer exists at all, likely due to the
-			// topic being deleted, we remove the partition from the member.
-			if _, exists := partition2AllPotentialConsumers[partition]; !exists {
-				continue
-			}
-			delete(unvisitedPartitions, partition)
-			currentPartitionConsumers[partition] = memberID
-
-			if !strsContains(members[memberID].Topics, partition.Topic) {
-				unassignedPartitions = append(unassignedPartitions, partition)
-				continue
-			}
-			keepPartitions = append(keepPartitions, partition)
-		}
-		currentAssignment[memberID] = keepPartitions
-	}
-	for unvisited := range unvisitedPartitions {
-		unassignedPartitions = append(unassignedPartitions, unvisited)
-	}
-
-	// sort the topic partitions in order of priority for reassignment
-	sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
-
-	// at this point we have preserved all valid topic partition to consumer assignments and removed
-	// all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
-	// to consumers so that the topic partition assignments are as balanced as possible.
-
-	// an ascending sorted set of consumers based on how many topic partitions are already assigned to them
-	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
-	s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
-
-	// Assemble plan
-	plan := make(BalanceStrategyPlan, len(currentAssignment))
-	for memberID, assignments := range currentAssignment {
-		if len(assignments) == 0 {
-			plan[memberID] = make(map[string][]int32)
-		} else {
-			for _, assignment := range assignments {
-				plan.Add(memberID, assignment.Topic, assignment.Partition)
-			}
-		}
-	}
-	return plan, nil
-}
-
-// AssignmentData serializes the set of topics currently assigned to the
-// specified member as part of the supplied balance plan
-func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
-	return encode(&StickyAssignorUserDataV1{
-		Topics:     topics,
-		Generation: generationID,
-	}, nil)
-}
-
-func strsContains(s []string, value string) bool {
-	for _, entry := range s {
-		if entry == value {
-			return true
-		}
-	}
-	return false
-}
-
-// Balance assignments across consumers for maximum fairness and stickiness.
-func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
-	initializing := len(sortedCurrentSubscriptions) == 0 ||
-		len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0
-
-	// assign all unassigned partitions
-	for _, partition := range unassignedPartitions {
-		// skip if there is no potential consumer for the partition
-		if len(partition2AllPotentialConsumers[partition]) == 0 {
-			continue
-		}
-		sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
-	}
-
-	// narrow down the reassignment scope to only those partitions that can actually be reassigned
-	for partition := range partition2AllPotentialConsumers {
-		if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
-			sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
-		}
-	}
-
-	// narrow down the reassignment scope to only those consumers that are subject to reassignment
-	fixedAssignments := make(map[string][]topicPartitionAssignment)
-	for memberID := range consumer2AllPotentialPartitions {
-		if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
-			fixedAssignments[memberID] = currentAssignment[memberID]
-			delete(currentAssignment, memberID)
-			sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
-		}
-	}
-
-	// create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
-	preBalanceAssignment := deepCopyAssignment(currentAssignment)
-	preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
-	for k, v := range currentPartitionConsumer {
-		preBalancePartitionConsumers[k] = v
-	}
-
-	reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
-
-	// if we are not preserving existing assignments and we have made changes to the current assignment,
-	// make sure we are getting a more balanced assignment; otherwise, revert to the previous assignment
-	if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
-		currentAssignment = deepCopyAssignment(preBalanceAssignment)
-		currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
-		for k, v := range preBalancePartitionConsumers {
-			currentPartitionConsumer[k] = v
-		}
-	}
-
-	// add the fixed assignments (those that could not change) back
-	for consumer, assignments := range fixedAssignments {
-		currentAssignment[consumer] = assignments
-	}
-}
-
-// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
-// For example, given two topics (t0, t1), two consumers (M0, M1), and three partitions per topic (p0, p1, p2):
-// M0: [t0p0, t0p2, t1p1]
-// M1: [t0p1, t1p0, t1p2]
-var BalanceStrategyRoundRobin = new(roundRobinBalancer)
-
-type roundRobinBalancer struct{}
-
-func (b *roundRobinBalancer) Name() string {
-	return RoundRobinBalanceStrategyName
-}
-
-func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
-	if len(memberAndMetadata) == 0 || len(topics) == 0 {
-		return nil, errors.New("members and topics are not provided")
-	}
-	// sort partitions
-	var topicPartitions []topicAndPartition
-	for topic, partitions := range topics {
-		for _, partition := range partitions {
-			topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
-		}
-	}
-	sort.SliceStable(topicPartitions, func(i, j int) bool {
-		pi := topicPartitions[i]
-		pj := topicPartitions[j]
-		return pi.comparedValue() < pj.comparedValue()
-	})
-
-	// sort members
-	var members []memberAndTopic
-	for memberID, meta := range memberAndMetadata {
-		m := memberAndTopic{
-			memberID: memberID,
-			topics:   make(map[string]struct{}),
-		}
-		for _, t := range meta.Topics {
-			m.topics[t] = struct{}{}
-		}
-		members = append(members, m)
-	}
-	sort.SliceStable(members, func(i, j int) bool {
-		mi := members[i]
-		mj := members[j]
-		return mi.memberID < mj.memberID
-	})
-
-	// assign partitions
-	plan := make(BalanceStrategyPlan, len(members))
-	i := 0
-	n := len(members)
-	for _, tp := range topicPartitions {
-		m := members[i%n]
-		for !m.hasTopic(tp.topic) {
-			i++
-			m = members[i%n]
-		}
-		plan.Add(m.memberID, tp.topic, tp.partition)
-		i++
-	}
-	return plan, nil
-}
-
-func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
-	return nil, nil // do nothing for now
-}
-
-type topicAndPartition struct {
-	topic     string
-	partition int32
-}
-
-func (tp *topicAndPartition) comparedValue() string {
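-	// note: the resulting order is lexicographic, so e.g. "t-10" sorts before "t-2"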
-	return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
-}
-
-type memberAndTopic struct {
-	memberID string
-	topics   map[string]struct{}
-}
-
-func (m *memberAndTopic) hasTopic(topic string) bool {
-	_, ok := m.topics[topic]
-	return ok
-}
-
-// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs.
-// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
-// Lower balance score indicates a more balanced assignment.
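-// For example, assignment sizes {3, 1} give a score of |3-1| = 2, while {2, 2} give 0.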
-func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
-	consumer2AssignmentSize := make(map[string]int, len(assignment))
-	for memberID, partitions := range assignment {
-		consumer2AssignmentSize[memberID] = len(partitions)
-	}
-
-	var score float64
-	for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
-		delete(consumer2AssignmentSize, memberID)
-		for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
-			score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
-		}
-	}
-	return int(score)
-}
-
-// Determine whether the current assignment plan is balanced.
-func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
-	sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
-	min := len(currentAssignment[sortedCurrentSubscriptions[0]])
-	max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
-	if min >= max-1 {
-		// if the minimum and maximum numbers of partitions assigned to consumers differ by at most one, return true
-		return true
-	}
-
-	// create a mapping from partitions to the consumer assigned to them
-	allPartitions := make(map[topicPartitionAssignment]string)
-	for memberID, partitions := range currentAssignment {
-		for _, partition := range partitions {
-			if _, exists := allPartitions[partition]; exists {
-				Logger.Printf("Topic %s Partition %d is assigned to more than one consumer", partition.Topic, partition.Partition)
-			}
-			allPartitions[partition] = memberID
-		}
-	}
-
-	// for each consumer that does not have all the topic partitions it can get, make sure none of the topic partitions it
-	// could have gotten (but did not) can be moved to it (because that would break the balance)
-	for _, memberID := range sortedCurrentSubscriptions {
-		consumerPartitions := currentAssignment[memberID]
-		consumerPartitionCount := len(consumerPartitions)
-
-		// skip if this consumer already has all the topic partitions it can get
-		if consumerPartitionCount == len(allSubscriptions[memberID]) {
-			continue
-		}
-
-		// otherwise make sure it cannot get any more
-		potentialTopicPartitions := allSubscriptions[memberID]
-		for _, partition := range potentialTopicPartitions {
-			if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
-				otherConsumer := allPartitions[partition]
-				otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
-				if consumerPartitionCount < otherConsumerPartitionCount {
-					return false
-				}
-			}
-		}
-	}
-	return true
-}
-
-// Reassign all topic partitions that need reassignment until balanced.
-func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
-	reassignmentPerformed := false
-	modified := false
-
-	// repeat reassignment until no partition can be moved to improve the balance
-	for {
-		modified = false
-		// reassign all reassignable partitions (starting from the partition with the fewest potential consumers), if needed,
-		// until the full list is processed or a balance is achieved
-		for _, partition := range reassignablePartitions {
-			if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
-				break
-			}
-
-			// the partition must have at least two consumers
-			if len(partition2AllPotentialConsumers[partition]) <= 1 {
-				Logger.Printf("Expected more than one potential consumer for topic %s partition %d", partition.Topic, partition.Partition)
-			}
-
-			// the partition must have a consumer
-			consumer := currentPartitionConsumer[partition]
-			if consumer == "" {
-				Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
-			}
-
-			if _, exists := prevAssignment[partition]; exists {
-				if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
-					sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
-					reassignmentPerformed = true
-					modified = true
-					continue
-				}
-			}
-
-			// check if a better-suited consumer exists for the partition; if so, reassign it
-			for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
-				if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
-					sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
-					reassignmentPerformed = true
-					modified = true
-					break
-				}
-			}
-		}
-		if !modified {
-			return reassignmentPerformed
-		}
-	}
-}
-
-// Identify a new consumer for a topic partition and reassign it.
-func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
-	for _, anotherConsumer := range sortedCurrentSubscriptions {
-		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
-			return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
-		}
-	}
-	return sortedCurrentSubscriptions
-}
-
-// Reassign a specific partition to a new consumer
-func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
-	consumer := currentPartitionConsumer[partition]
-	// find the correct partition movement considering the stickiness requirement
-	partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
-	return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
-}
-
-// Track the movement of a topic partition after assignment
-func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
-	oldConsumer := currentPartitionConsumer[partition]
-	s.movements.movePartition(partition, oldConsumer, newConsumer)
-
-	currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
-	currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
-	currentPartitionConsumer[partition] = newConsumer
-	return sortMemberIDsByPartitionAssignments(currentAssignment)
-}
-
-// Determine whether a specific consumer should be considered for topic partition assignment.
-func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
-	currentPartitions := currentAssignment[memberID]
-	currentAssignmentSize := len(currentPartitions)
-	maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
-	if currentAssignmentSize > maxAssignmentSize {
-		Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
-	}
-	if currentAssignmentSize < maxAssignmentSize {
-		// if a consumer is not assigned all its potential partitions it is subject to reassignment
-		return true
-	}
-	for _, partition := range currentPartitions {
-		if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
-			return true
-		}
-	}
-	return false
-}
-
-// Only consider reassigning those topic partitions that have two or more potential consumers.
-func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
-	return len(partition2AllPotentialConsumers[partition]) >= 2
-}
-
-// The assignment should improve the overall balance of the partition assignments to consumers.
-func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
-	for _, memberID := range sortedCurrentSubscriptions {
-		if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
-			currentAssignment[memberID] = append(currentAssignment[memberID], partition)
-			currentPartitionConsumer[partition] = memberID
-			break
-		}
-	}
-	return sortMemberIDsByPartitionAssignments(currentAssignment)
-}
-
-// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
-func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
-	userDataV1 := &StickyAssignorUserDataV1{}
-	if err := decode(userDataBytes, userDataV1); err != nil {
-		userDataV0 := &StickyAssignorUserDataV0{}
-		if err := decode(userDataBytes, userDataV0); err != nil {
-			return nil, err
-		}
-		return userDataV0, nil
-	}
-	return userDataV1, nil
-}
-
-// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
-// to those topic partitions currently reported by the Kafka cluster.
-func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
-	assignments := deepCopyAssignment(currentAssignment)
-	for memberID, partitions := range assignments {
-		// perform in-place filtering
-		i := 0
-		for _, partition := range partitions {
-			if _, exists := partition2AllPotentialConsumers[partition]; exists {
-				partitions[i] = partition
-				i++
-			}
-		}
-		assignments[memberID] = partitions[:i]
-	}
-	return assignments
-}
-
-func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
-	for i, assignment := range assignments {
-		if assignment == topic {
-			return append(assignments[:i], assignments[i+1:]...)
-		}
-	}
-	return assignments
-}
-
-func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
-	for _, assignment := range assignments {
-		if assignment == topic {
-			return true
-		}
-	}
-	return false
-}
-
-func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
-	unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
-	for partition := range partition2AllPotentialConsumers {
-		unassignedPartitions[partition] = true
-	}
-
-	sortedPartitions := make([]topicPartitionAssignment, 0)
-	if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
-		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics)
-		// then we simply list partitions in a round-robin fashion (from consumers with
-		// the most assigned partitions to those with the fewest)
-		assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
-
-		// use priority-queue to evaluate consumer group members in descending-order based on
-		// the number of topic partition assignments (i.e. consumers with most assignments first)
-		pq := make(assignmentPriorityQueue, len(assignments))
-		i := 0
-		for consumerID, consumerAssignments := range assignments {
-			pq[i] = &consumerGroupMember{
-				id:          consumerID,
-				assignments: consumerAssignments,
-			}
-			i++
-		}
-		heap.Init(&pq)
-
-		// loop until no consumer-group members remain
-		for pq.Len() > 0 {
-			member := pq[0]
-
-			// partitions that were assigned to a different consumer last time
-			var prevPartitionIndex int
-			for i, partition := range member.assignments {
-				if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
-					prevPartitionIndex = i
-					break
-				}
-			}
-
-			if len(member.assignments) > 0 {
-				partition := member.assignments[prevPartitionIndex]
-				sortedPartitions = append(sortedPartitions, partition)
-				delete(unassignedPartitions, partition)
-				if prevPartitionIndex == 0 {
-					member.assignments = member.assignments[1:]
-				} else {
-					member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
-				}
-				heap.Fix(&pq, 0)
-			} else {
-				heap.Pop(&pq)
-			}
-		}
-
-		for partition := range unassignedPartitions {
-			sortedPartitions = append(sortedPartitions, partition)
-		}
-	} else {
-		// an ascending sorted set of topic partitions based on how many consumers can potentially use them
-		sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
-	}
-	return sortedPartitions
-}
-
-func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
-	// sort the members by the number of partition assignments in ascending order
-	sortedMemberIDs := make([]string, 0, len(assignments))
-	for memberID := range assignments {
-		sortedMemberIDs = append(sortedMemberIDs, memberID)
-	}
-	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
-		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
-		if ret == 0 {
-			return sortedMemberIDs[i] < sortedMemberIDs[j]
-		}
-		return ret < 0
-	})
-	return sortedMemberIDs
-}
-
-func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
-	// sort the partitions by the number of potential consumers in ascending order
-	sortedPartitionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
-	i := 0
-	for partition := range partition2AllPotentialConsumers {
-		sortedPartitionIDs[i] = partition
-		i++
-	}
-	sort.Slice(sortedPartitionIDs, func(i, j int) bool {
-		if len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartitionIDs[j]]) {
-			ret := strings.Compare(sortedPartitionIDs[i].Topic, sortedPartitionIDs[j].Topic)
-			if ret == 0 {
-				return sortedPartitionIDs[i].Partition < sortedPartitionIDs[j].Partition
-			}
-			return ret < 0
-		}
-		return len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartitionIDs[j]])
-	})
-	return sortedPartitionIDs
-}
-
-func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
-	m := make(map[string][]topicPartitionAssignment, len(assignment))
-	for memberID, subscriptions := range assignment {
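-		// appending to a zero-length, zero-capacity slice forces allocation of a fresh backing array, i.e. a real copy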
-		m[memberID] = append(subscriptions[:0:0], subscriptions...)
-	}
-	return m
-}
-
-func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
-	curMembers := make(map[string]int)
-	for _, cur := range partition2AllPotentialConsumers {
-		if len(curMembers) == 0 {
-			for _, curMembersElem := range cur {
-				curMembers[curMembersElem]++
-			}
-			continue
-		}
-
-		if len(curMembers) != len(cur) {
-			return false
-		}
-
-		yMap := make(map[string]int)
-		for _, yElem := range cur {
-			yMap[yElem]++
-		}
-
-		for curMembersMapKey, curMembersMapVal := range curMembers {
-			if yMap[curMembersMapKey] != curMembersMapVal {
-				return false
-			}
-		}
-	}
-
-	curPartitions := make(map[topicPartitionAssignment]int)
-	for _, cur := range consumer2AllPotentialPartitions {
-		if len(curPartitions) == 0 {
-			for _, curPartitionElem := range cur {
-				curPartitions[curPartitionElem]++
-			}
-			continue
-		}
-
-		if len(curPartitions) != len(cur) {
-			return false
-		}
-
-		yMap := make(map[topicPartitionAssignment]int)
-		for _, yElem := range cur {
-			yMap[yElem]++
-		}
-
-		for curMembersMapKey, curMembersMapVal := range curPartitions {
-			if yMap[curMembersMapKey] != curMembersMapVal {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// We need to process subscriptions' user data with each consumer's reported generation in mind;
-// higher generations overwrite lower generations in case of a conflict.
-// Note that a conflict could exist only if the user data is for different generations.
-func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
-	currentAssignment := make(map[string][]topicPartitionAssignment)
-	prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
-
-	// for each partition we collect its consumers keyed by generation (sorted below)
-	sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
-	for memberID, meta := range members {
-		consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
-		if err != nil {
-			return nil, nil, err
-		}
-		for _, partition := range consumerUserData.partitions() {
-			if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
-				if consumerUserData.hasGeneration() {
-					if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
-						// same partition is assigned to two consumers during the same rebalance.
-						// log a warning and skip this record
-						Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
-						continue
-					} else {
-						consumers[consumerUserData.generation()] = memberID
-					}
-				} else {
-					consumers[defaultGeneration] = memberID
-				}
-			} else {
-				generation := defaultGeneration
-				if consumerUserData.hasGeneration() {
-					generation = consumerUserData.generation()
-				}
-				sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
-			}
-		}
-	}
-
-	// prevAssignment holds the prior ConsumerGenerationPair (before the current one) for each partition;
-	// the current and previous consumers are the last two consumers of each partition in the above map
-	for partition, consumers := range sortedPartitionConsumersByGeneration {
-		// sort consumers by generation in decreasing order
-		var generations []int
-		for generation := range consumers {
-			generations = append(generations, generation)
-		}
-		sort.Sort(sort.Reverse(sort.IntSlice(generations)))
-
-		consumer := consumers[generations[0]]
-		if _, exists := currentAssignment[consumer]; !exists {
-			currentAssignment[consumer] = []topicPartitionAssignment{partition}
-		} else {
-			currentAssignment[consumer] = append(currentAssignment[consumer], partition)
-		}
-
-		// check for previous assignment, if any
-		if len(generations) > 1 {
-			prevAssignment[partition] = consumerGenerationPair{
-				MemberID:   consumers[generations[1]],
-				Generation: generations[1],
-			}
-		}
-	}
-	return currentAssignment, prevAssignment, nil
-}
-
-type consumerGenerationPair struct {
-	MemberID   string
-	Generation int
-}
-
-// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
-type consumerPair struct {
-	SrcMemberID string
-	DstMemberID string
-}
-
-// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
-type partitionMovements struct {
-	PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
-	Movements                 map[topicPartitionAssignment]consumerPair
-}
-
-func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
-	pair := p.Movements[partition]
-	delete(p.Movements, partition)
-
-	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
-	delete(partitionMovementsForThisTopic[pair], partition)
-	if len(partitionMovementsForThisTopic[pair]) == 0 {
-		delete(partitionMovementsForThisTopic, pair)
-	}
-	if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
-		delete(p.PartitionMovementsByTopic, partition.Topic)
-	}
-	return pair
-}
-
-func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
-	p.Movements[partition] = pair
-	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
-		p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
-	}
-	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
-	if _, exists := partitionMovementsForThisTopic[pair]; !exists {
-		partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
-	}
-	partitionMovementsForThisTopic[pair][partition] = true
-}
-
-func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
-	pair := consumerPair{
-		SrcMemberID: oldConsumer,
-		DstMemberID: newConsumer,
-	}
-	if _, exists := p.Movements[partition]; exists {
-		// this partition has previously moved
-		existingPair := p.removeMovementRecordOfPartition(partition)
-		if existingPair.DstMemberID != oldConsumer {
-			Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
-		}
-		if existingPair.SrcMemberID != newConsumer {
-			// the partition is not moving back to its previous consumer
-			p.addPartitionMovementRecord(partition, consumerPair{
-				SrcMemberID: existingPair.SrcMemberID,
-				DstMemberID: newConsumer,
-			})
-		}
-	} else {
-		p.addPartitionMovementRecord(partition, pair)
-	}
-}
-
-func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
-	if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
-		return partition
-	}
-	if _, exists := p.Movements[partition]; exists {
-		// this partition has previously moved
-		if oldConsumer != p.Movements[partition].DstMemberID {
-			Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
-		}
-		oldConsumer = p.Movements[partition].SrcMemberID
-	}
-
-	partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
-	reversePair := consumerPair{
-		SrcMemberID: newConsumer,
-		DstMemberID: oldConsumer,
-	}
-	if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
-		return partition
-	}
-	var reversePairPartition topicPartitionAssignment
-	for otherPartition := range partitionMovementsForThisTopic[reversePair] {
-		reversePairPartition = otherPartition
-	}
-	return reversePairPartition
-}
-
-func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
-	if src == dst {
-		return currentPath, false
-	}
-	if len(pairs) == 0 {
-		return currentPath, false
-	}
-	for _, pair := range pairs {
-		if src == pair.SrcMemberID && dst == pair.DstMemberID {
-			currentPath = append(currentPath, src, dst)
-			return currentPath, true
-		}
-	}
-
-	for _, pair := range pairs {
-		if pair.SrcMemberID == src {
-			// create a deep copy of the pairs, excluding the current pair
-			reducedSet := make([]consumerPair, len(pairs)-1)
-			i := 0
-			for _, p := range pairs {
-				if p != pair {
-					reducedSet[i] = p
-					i++
-				}
-			}
-
-			currentPath = append(currentPath, pair.SrcMemberID)
-			return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
-		}
-	}
-	return currentPath, false
-}
-
-func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
-	superCycle := make([]string, len(cycle)-1)
-	for i := 0; i < len(cycle)-1; i++ {
-		superCycle[i] = cycle[i]
-	}
-	superCycle = append(superCycle, cycle...)
-	for _, foundCycle := range cycles {
-		if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
-			return true
-		}
-	}
-	return false
-}
-
-func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
-	cycles := make([][]string, 0)
-	for _, pair := range pairs {
-		// create a deep copy of the pairs, excluding the current pair
-		reducedPairs := make([]consumerPair, len(pairs)-1)
-		i := 0
-		for _, p := range pairs {
-			if p != pair {
-				reducedPairs[i] = p
-				i++
-			}
-		}
-		if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
-			if !p.in(path, cycles) {
-				cycles = append(cycles, path)
-				Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
-			}
-		}
-	}
-
-	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
-	// the odds of finding a cycle among more than two consumers seem to be low enough (according to various randomized
-	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
-	for _, cycle := range cycles {
-		if len(cycle) == 3 {
-			return true
-		}
-	}
-	return false
-}
-
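To make the length-3 check concrete: a back-and-forth movement between two consumers yields the path ["A", "B", "A"], the only cycle shape this code treats as a stickiness violation. A hypothetical in-package sketch (assumes `fmt` is imported):

```go
// Hypothetical sketch: two consumers trading partitions of one topic
// form the length-3 path ("A", "B", "A"), so hasCycles reports true.
func exampleHasCycles() {
	pairs := []consumerPair{
		{SrcMemberID: "A", DstMemberID: "B"},
		{SrcMemberID: "B", DstMemberID: "A"},
	}
	pm := &partitionMovements{}
	fmt.Println(pm.hasCycles(pairs)) // true
}
```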
-func (p *partitionMovements) isSticky() bool {
-	for topic, movements := range p.PartitionMovementsByTopic {
-		movementPairs := make([]consumerPair, len(movements))
-		i := 0
-		for pair := range movements {
-			movementPairs[i] = pair
-			i++
-		}
-		if p.hasCycles(movementPairs) {
-			Logger.Printf("Stickiness is violated for topic %s", topic)
-			Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
-			return false
-		}
-	}
-	return true
-}
-
-func indexOfSubList(source []string, target []string) int {
-	targetSize := len(target)
-	maxCandidate := len(source) - targetSize
-nextCand:
-	for candidate := 0; candidate <= maxCandidate; candidate++ {
-		j := candidate
-		for i := 0; i < targetSize; i++ {
-			if target[i] != source[j] {
-				// Element mismatch, try next candidate
-				continue nextCand
-			}
-			j++
-		}
-		// All elements of candidate matched target
-		return candidate
-	}
-	return -1
-}
-
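indexOfSubList mirrors Java's Collections.indexOfSubList: it returns the starting index of the first occurrence of target within source, or -1 if target never appears. A hypothetical in-package sketch (assumes `fmt` is imported):

```go
func exampleIndexOfSubList() {
	fmt.Println(indexOfSubList([]string{"a", "b", "a", "b"}, []string{"b", "a"})) // 1
	fmt.Println(indexOfSubList([]string{"a", "b"}, []string{"c"}))                // -1
}
```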
-type consumerGroupMember struct {
-	id          string
-	assignments []topicPartitionAssignment
-}
-
-// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
-// in descending order (most assignments to least assignments).
-type assignmentPriorityQueue []*consumerGroupMember
-
-func (pq assignmentPriorityQueue) Len() int { return len(pq) }
-
-func (pq assignmentPriorityQueue) Less(i, j int) bool {
-	// order assignment priority queue in descending order using assignment-count/member-id
-	if len(pq[i].assignments) == len(pq[j].assignments) {
-		return strings.Compare(pq[i].id, pq[j].id) > 0
-	}
-	return len(pq[i].assignments) > len(pq[j].assignments)
-}
-
-func (pq assignmentPriorityQueue) Swap(i, j int) {
-	pq[i], pq[j] = pq[j], pq[i]
-}
-
-func (pq *assignmentPriorityQueue) Push(x interface{}) {
-	member := x.(*consumerGroupMember)
-	*pq = append(*pq, member)
-}
-
-func (pq *assignmentPriorityQueue) Pop() interface{} {
-	old := *pq
-	n := len(old)
-	member := old[n-1]
-	*pq = old[0 : n-1]
-	return member
-}
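The methods above satisfy container/heap's heap.Interface, so the standard heap package can repeatedly hand back the member that currently holds the most assignments. A hypothetical in-package sketch (assumes "container/heap" and "fmt" are imported):

```go
func exampleAssignmentQueue() {
	pq := assignmentPriorityQueue{
		{id: "m1", assignments: make([]topicPartitionAssignment, 2)},
		{id: "m2", assignments: make([]topicPartitionAssignment, 5)},
	}
	heap.Init(&pq)
	// Less sorts by descending assignment count, so the most-loaded
	// member surfaces first.
	busiest := heap.Pop(&pq).(*consumerGroupMember)
	fmt.Println(busiest.id) // m2
}
```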
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
deleted file mode 100644
index dd01e4e..0000000
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ /dev/null
@@ -1,1475 +0,0 @@
-package sarama
-
-import (
-	"crypto/tls"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"net"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
-type Broker struct {
-	conf *Config
-	rack *string
-
-	id            int32
-	addr          string
-	correlationID int32
-	conn          net.Conn
-	connErr       error
-	lock          sync.Mutex
-	opened        int32
-	responses     chan responsePromise
-	done          chan bool
-
-	registeredMetrics []string
-
-	incomingByteRate       metrics.Meter
-	requestRate            metrics.Meter
-	requestSize            metrics.Histogram
-	requestLatency         metrics.Histogram
-	outgoingByteRate       metrics.Meter
-	responseRate           metrics.Meter
-	responseSize           metrics.Histogram
-	requestsInFlight       metrics.Counter
-	brokerIncomingByteRate metrics.Meter
-	brokerRequestRate      metrics.Meter
-	brokerRequestSize      metrics.Histogram
-	brokerRequestLatency   metrics.Histogram
-	brokerOutgoingByteRate metrics.Meter
-	brokerResponseRate     metrics.Meter
-	brokerResponseSize     metrics.Histogram
-	brokerRequestsInFlight metrics.Counter
-
-	kerberosAuthenticator GSSAPIKerberosAuth
-}
-
-// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
-type SASLMechanism string
-
-const (
-	// SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
-	SASLTypeOAuth = "OAUTHBEARER"
-	// SASLTypePlaintext represents the SASL/PLAIN mechanism
-	SASLTypePlaintext = "PLAIN"
-	// SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
-	SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
-	// SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
-	SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
-	SASLTypeGSSAPI      = "GSSAPI"
-	// SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
-	// server negotiate SASL auth using opaque packets.
-	SASLHandshakeV0 = int16(0)
-	// SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
-	// server negotiate SASL by wrapping tokens with Kafka protocol headers.
-	SASLHandshakeV1 = int16(1)
-	// SASLExtKeyAuth is the reserved extension key name sent as part of the
-	// SASL/OAUTHBEARER initial client response
-	SASLExtKeyAuth = "auth"
-)
-
-// AccessToken contains an access token used to authenticate a
-// SASL/OAUTHBEARER client along with associated metadata.
-type AccessToken struct {
-	// Token is the access token payload.
-	Token string
-	// Extensions is an optional map of arbitrary key-value pairs that can be
-	// sent with the SASL/OAUTHBEARER initial client response. These values are
-	// ignored by the SASL server if they are unexpected. This feature is only
-	// supported by Kafka >= 2.1.0.
-	Extensions map[string]string
-}
-
-// AccessTokenProvider is the interface that encapsulates how implementors
-// can generate access tokens for Kafka broker authentication.
-type AccessTokenProvider interface {
-	// Token returns an access token. The implementation should ensure token
-	// reuse so that multiple calls at connect time do not create multiple
-	// tokens. The implementation should also periodically refresh the token in
-	// order to guarantee that each call returns an unexpired token.  This
-	// method should not block indefinitely--a timeout error should be returned
-	// after a short period of inactivity so that the broker connection logic
-	// can log debugging information and retry.
-	Token() (*AccessToken, error)
-}
-
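A minimal conforming implementation just hands back a pre-fetched token; real providers should cache and refresh as the contract above demands. A hypothetical sketch (the staticTokenProvider name is invented; the Config fields in the trailing comment match this package):

```go
type staticTokenProvider struct{ token string }

func (s *staticTokenProvider) Token() (*AccessToken, error) {
	return &AccessToken{Token: s.token}, nil
}

// Wiring it into a client configuration:
//   conf := NewConfig()
//   conf.Net.SASL.Enable = true
//   conf.Net.SASL.Mechanism = SASLTypeOAuth
//   conf.Net.SASL.TokenProvider = &staticTokenProvider{token: "my-jwt"}
```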
-// SCRAMClient is an interface to a SCRAM
-// client implementation.
-type SCRAMClient interface {
-	// Begin prepares the client for the SCRAM exchange
-	// with the server with a user name and a password
-	Begin(userName, password, authzID string) error
-	// Step steps client through the SCRAM exchange. It is
-	// called repeatedly until it errors or `Done` returns true.
-	Step(challenge string) (response string, err error)
-	// Done should return true when the SCRAM conversation
-	// is over.
-	Done() bool
-}
-
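One common implementation, adapted from sarama's own examples directory, wraps github.com/xdg-go/scram; this sketch assumes that package's API and import:

```go
type xdgSCRAMClient struct {
	*scram.Client
	*scram.ClientConversation
	scram.HashGeneratorFcn
}

func (x *xdgSCRAMClient) Begin(userName, password, authzID string) (err error) {
	x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
	if err != nil {
		return err
	}
	x.ClientConversation = x.Client.NewConversation()
	return nil
}

func (x *xdgSCRAMClient) Step(challenge string) (string, error) {
	return x.ClientConversation.Step(challenge)
}

func (x *xdgSCRAMClient) Done() bool {
	return x.ClientConversation.Done()
}

// Registered via the config:
//   conf.Net.SASL.SCRAMClientGeneratorFunc = func() SCRAMClient {
//       return &xdgSCRAMClient{HashGeneratorFcn: scram.SHA512}
//   }
```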
-type responsePromise struct {
-	requestTime   time.Time
-	correlationID int32
-	headerVersion int16
-	packets       chan []byte
-	errors        chan error
-}
-
-// NewBroker creates and returns a Broker targeting the given host:port address.
-// This does not attempt to actually connect; you have to call Open() for that.
-func NewBroker(addr string) *Broker {
-	return &Broker{id: -1, addr: addr}
-}
-
-// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
-// waiting for the connection to complete. This means that any subsequent operations on the broker will
-// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
-// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
-// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
-func (b *Broker) Open(conf *Config) error {
-	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
-		return ErrAlreadyConnected
-	}
-
-	if conf == nil {
-		conf = NewConfig()
-	}
-
-	err := conf.Validate()
-	if err != nil {
-		return err
-	}
-
-	b.lock.Lock()
-
-	go withRecover(func() {
-		defer b.lock.Unlock()
-
-		dialer := conf.getDialer()
-		b.conn, b.connErr = dialer.Dial("tcp", b.addr)
-		if b.connErr != nil {
-			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
-			b.conn = nil
-			atomic.StoreInt32(&b.opened, 0)
-			return
-		}
-		if conf.Net.TLS.Enable {
-			b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
-		}
-
-		b.conn = newBufConn(b.conn)
-		b.conf = conf
-
-		// Create or reuse the global metrics shared between brokers
-		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
-		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
-		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
-		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
-		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
-		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
-		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
-		b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry)
-		// Do not gather per-broker metrics for seed brokers (only used during bootstrap) because they
-		// share the same id (-1) and are already exposed through the global metrics above
-		if b.id >= 0 {
-			b.registerMetrics()
-		}
-
-		if conf.Net.SASL.Enable {
-			b.connErr = b.authenticateViaSASL()
-
-			if b.connErr != nil {
-				err = b.conn.Close()
-				if err == nil {
-					Logger.Printf("Closed connection to broker %s\n", b.addr)
-				} else {
-					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
-				}
-				b.conn = nil
-				atomic.StoreInt32(&b.opened, 0)
-				return
-			}
-		}
-
-		b.done = make(chan bool)
-		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
-
-		if b.id >= 0 {
-			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
-		} else {
-			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
-		}
-		go withRecover(b.responseReceiver)
-	})
-
-	return nil
-}
-
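Because Open only kicks off the dial in a goroutine, a synchronous connect is Open followed by Connected, exactly as the doc comment above suggests. A hypothetical external-usage sketch (broker address is invented; the import path matches this now-deprecated vendored package):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092") // hypothetical address
	if err := broker.Open(nil); err != nil {     // nil falls back to NewConfig()
		log.Fatal(err)
	}
	defer broker.Close()

	// Connected blocks until the asynchronous dial has succeeded or failed.
	if ok, err := broker.Connected(); err != nil || !ok {
		log.Fatalf("broker not connected: %v", err)
	}
	log.Println("connected")
}
```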
-// Connected returns true if the broker is connected and false otherwise. If the broker is not
-// connected but it had tried to connect, the error from that connection attempt is also returned.
-func (b *Broker) Connected() (bool, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	return b.conn != nil, b.connErr
-}
-
-// Close closes the broker resources
-func (b *Broker) Close() error {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.conn == nil {
-		return ErrNotConnected
-	}
-
-	close(b.responses)
-	<-b.done
-
-	err := b.conn.Close()
-
-	b.conn = nil
-	b.connErr = nil
-	b.done = nil
-	b.responses = nil
-
-	b.unregisterMetrics()
-
-	if err == nil {
-		Logger.Printf("Closed connection to broker %s\n", b.addr)
-	} else {
-		Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
-	}
-
-	atomic.StoreInt32(&b.opened, 0)
-
-	return err
-}
-
-// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
-func (b *Broker) ID() int32 {
-	return b.id
-}
-
-// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
-func (b *Broker) Addr() string {
-	return b.addr
-}
-
-// Rack returns the broker's rack as retrieved from Kafka's metadata or the
-// empty string if it is not known.  The returned value corresponds to the
-// broker's broker.rack configuration setting.  Requires protocol version to be
-// at least v0.10.0.0.
-func (b *Broker) Rack() string {
-	if b.rack == nil {
-		return ""
-	}
-	return *b.rack
-}
-
-// GetMetadata sends a metadata request and returns a metadata response or error
-func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
-	response := new(MetadataResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
-func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
-	response := new(ConsumerMetadataResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// FindCoordinator sends a find coordinator request and returns a response or error
-func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
-	response := new(FindCoordinatorResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// GetAvailableOffsets returns an offset response or error
-func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
-	response := new(OffsetResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// Produce returns a produce response or error
-func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
-	var (
-		response *ProduceResponse
-		err      error
-	)
-
-	if request.RequiredAcks == NoResponse {
-		err = b.sendAndReceive(request, nil)
-	} else {
-		response = new(ProduceResponse)
-		err = b.sendAndReceive(request, response)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// Fetch returns a FetchResponse or error
-func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
-	response := new(FetchResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CommitOffset returns an offset commit response or error
-func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
-	response := new(OffsetCommitResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// FetchOffset returns an offset fetch response or error
-func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
-	response := new(OffsetFetchResponse)
-	response.Version = request.Version // needed to handle the two header versions
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// JoinGroup returns a join group response or error
-func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
-	response := new(JoinGroupResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// SyncGroup returns a sync group response or error
-func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
-	response := new(SyncGroupResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// LeaveGroup returns a leave group response or error
-func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
-	response := new(LeaveGroupResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// Heartbeat returns a heartbeat response or error
-func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
-	response := new(HeartbeatResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// ListGroups returns a list groups response or error
-func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
-	response := new(ListGroupsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeGroups returns a describe groups response or error
-func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
-	response := new(DescribeGroupsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// ApiVersions returns an API versions response or error
-func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
-	response := new(ApiVersionsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CreateTopics sends a create topics request and returns a create topics response or error
-func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
-	response := new(CreateTopicsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteTopics sends a delete topics request and returns a delete topics response or error
-func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
-	response := new(DeleteTopicsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CreatePartitions sends a create partitions request and returns a create
-// partitions response or error
-func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
-	response := new(CreatePartitionsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AlterPartitionReassignments sends an alter partition reassignments request and
-// returns an alter partition reassignments response or error
-func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
-	response := new(AlterPartitionReassignmentsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// ListPartitionReassignments sends a list partition reassignments request and
-// returns a list partition reassignments response or error
-func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
-	response := new(ListPartitionReassignmentsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteRecords sends a request to delete records and returns a delete records
-// response or error
-func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
-	response := new(DeleteRecordsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeAcls sends a describe acl request and returns a response or error
-func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
-	response := new(DescribeAclsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// CreateAcls sends a create acl request and returns a response or error
-func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
-	response := new(CreateAclsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteAcls sends a delete acl request and returns a response or error
-func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
-	response := new(DeleteAclsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// InitProducerID sends an init producer request and returns a response or error
-func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
-	response := new(InitProducerIDResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AddPartitionsToTxn sends a request to add partitions to a txn and returns
-// a response or error
-func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
-	response := new(AddPartitionsToTxnResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
-// or error
-func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
-	response := new(AddOffsetsToTxnResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// EndTxn sends a request to end txn and returns a response or error
-func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
-	response := new(EndTxnResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// TxnOffsetCommit sends a request to commit transaction offsets and returns
-// a response or error
-func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
-	response := new(TxnOffsetCommitResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeConfigs sends a request to describe config and returns a response or
-// error
-func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
-	response := new(DescribeConfigsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// AlterConfigs sends a request to alter config and returns a response or error
-func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
-	response := new(AlterConfigsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// IncrementalAlterConfigs sends a request to incrementally alter configs and returns a response or error
-func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
-	response := new(IncrementalAlterConfigsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DeleteGroups sends a request to delete groups and returns a response or error
-func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
-	response := new(DeleteGroupsResponse)
-
-	if err := b.sendAndReceive(request, response); err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
-func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
-	response := new(DescribeLogDirsResponse)
-
-	err := b.sendAndReceive(request, response)
-	if err != nil {
-		return nil, err
-	}
-
-	return response, nil
-}
-
-// DescribeUserScramCredentials sends a request to get SCRAM users
-func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
-	res := new(DescribeUserScramCredentialsResponse)
-
-	err := b.sendAndReceive(req, res)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
-	res := new(AlterUserScramCredentialsResponse)
-
-	err := b.sendAndReceive(req, res)
-	if err != nil {
-		return nil, err
-	}
-
-	return res, nil
-}
-
-// readFull ensures the conn ReadDeadline has been set up before making a
-// call to io.ReadFull
-func (b *Broker) readFull(buf []byte) (n int, err error) {
-	if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
-		return 0, err
-	}
-
-	return io.ReadFull(b.conn, buf)
-}
-
-// write ensures the conn WriteDeadline has been set up before making a
-// call to conn.Write
-func (b *Broker) write(buf []byte) (n int, err error) {
-	if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
-		return 0, err
-	}
-
-	return b.conn.Write(buf)
-}
-
-func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) {
-	b.lock.Lock()
-	defer b.lock.Unlock()
-
-	if b.conn == nil {
-		if b.connErr != nil {
-			return nil, b.connErr
-		}
-		return nil, ErrNotConnected
-	}
-
-	if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
-		return nil, ErrUnsupportedVersion
-	}
-
-	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return nil, err
-	}
-
-	requestTime := time.Now()
-	// Will be decremented in responseReceiver (except error or request with NoResponse)
-	b.addRequestInFlightMetrics(1)
-	bytes, err := b.write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		return nil, err
-	}
-	b.correlationID++
-
-	if !promiseResponse {
-		// Record request latency without the response
-		b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
-		return nil, nil
-	}
-
-	promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)}
-	b.responses <- promise
-
-	return &promise, nil
-}
-
-func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
-	responseHeaderVersion := int16(-1)
-	if res != nil {
-		responseHeaderVersion = res.headerVersion()
-	}
-
-	promise, err := b.send(req, res != nil, responseHeaderVersion)
-	if err != nil {
-		return err
-	}
-
-	if promise == nil {
-		return nil
-	}
-
-	select {
-	case buf := <-promise.packets:
-		return versionedDecode(buf, res, req.version())
-	case err = <-promise.errors:
-		return err
-	}
-}
-
-func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
-	b.id, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	host, err := pd.getString()
-	if err != nil {
-		return err
-	}
-
-	port, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		b.rack, err = pd.getNullableString()
-		if err != nil {
-			return err
-		}
-	}
-
-	b.addr = net.JoinHostPort(host, fmt.Sprint(port))
-	if _, _, err := net.SplitHostPort(b.addr); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
-	host, portstr, err := net.SplitHostPort(b.addr)
-	if err != nil {
-		return err
-	}
-
-	port, err := strconv.ParseInt(portstr, 10, 32)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt32(b.id)
-
-	err = pe.putString(host)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt32(int32(port))
-
-	if version >= 1 {
-		err = pe.putNullableString(b.rack)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (b *Broker) responseReceiver() {
-	var dead error
-
-	for response := range b.responses {
-		if dead != nil {
-			// This was previously incremented in send() and
-			// we are not calling updateIncomingCommunicationMetrics()
-			b.addRequestInFlightMetrics(-1)
-			response.errors <- dead
-			continue
-		}
-
-		headerLength := getHeaderLength(response.headerVersion)
-		header := make([]byte, headerLength)
-
-		bytesReadHeader, err := b.readFull(header)
-		requestLatency := time.Since(response.requestTime)
-		if err != nil {
-			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
-			dead = err
-			response.errors <- err
-			continue
-		}
-
-		decodedHeader := responseHeader{}
-		err = versionedDecode(header, &decodedHeader, response.headerVersion)
-		if err != nil {
-			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
-			dead = err
-			response.errors <- err
-			continue
-		}
-		if decodedHeader.correlationID != response.correlationID {
-			b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
-			// TODO if decoded ID < cur ID, discard until we catch up
-			// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
-			dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
-			response.errors <- dead
-			continue
-		}
-
-		buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
-		bytesReadBody, err := b.readFull(buf)
-		b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
-		if err != nil {
-			dead = err
-			response.errors <- err
-			continue
-		}
-
-		response.packets <- buf
-	}
-	close(b.done)
-}
-
-func getHeaderLength(headerVersion int16) int8 {
-	if headerVersion < 1 {
-		return 8
-	} else {
-		// header contains additional tagged field length (0); we don't support actual tags yet.
-		return 9
-	}
-}
-
-func (b *Broker) authenticateViaSASL() error {
-	switch b.conf.Net.SASL.Mechanism {
-	case SASLTypeOAuth:
-		return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
-	case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
-		return b.sendAndReceiveSASLSCRAMv1()
-	case SASLTypeGSSAPI:
-		return b.sendAndReceiveKerberos()
-	default:
-		return b.sendAndReceiveSASLPlainAuth()
-	}
-}
-
-func (b *Broker) sendAndReceiveKerberos() error {
-	b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
-	if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
-		b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
-	}
-	return b.kerberosAuthenticator.Authorize(b)
-}
-
-func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
-	rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
-
-	req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return err
-	}
-
-	requestTime := time.Now()
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	bytes, err := b.write(buf)
-	b.updateOutgoingCommunicationMetrics(bytes)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
-		return err
-	}
-	b.correlationID++
-
-	header := make([]byte, 8) // response header
-	_, err = b.readFull(header)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
-		return err
-	}
-
-	length := binary.BigEndian.Uint32(header[:4])
-	payload := make([]byte, length-4)
-	n, err := b.readFull(payload)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
-		return err
-	}
-
-	b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
-	res := &SaslHandshakeResponse{}
-
-	err = versionedDecode(payload, res, 0)
-	if err != nil {
-		Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
-		return err
-	}
-
-	if res.Err != ErrNoError {
-		Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
-		return res.Err
-	}
-
-	Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
-	return nil
-}
-
-// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
-// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
-// wraps the SASL flow in the Kafka protocol and allows meaningful errors to be
-// returned on authentication failure.
-//
-// In SASL Plain, Kafka expects the auth header to be in the following format
-// Message format (from https://tools.ietf.org/html/rfc4616):
-//
-//   message   = [authzid] UTF8NUL authcid UTF8NUL passwd
-//   authcid   = 1*SAFE ; MUST accept up to 255 octets
-//   authzid   = 1*SAFE ; MUST accept up to 255 octets
-//   passwd    = 1*SAFE ; MUST accept up to 255 octets
-//   UTF8NUL   = %x00 ; UTF-8 encoded NUL character
-//
-//   SAFE      = UTF1 / UTF2 / UTF3 / UTF4
-//                  ;; any UTF-8 encoded Unicode character except NUL
-//
-// With SASL v0 handshake and auth then:
-// When credentials are valid, Kafka returns a 4 byte array of null characters.
-// When credentials are invalid, Kafka closes the connection.
-//
-// With SASL v1 handshake and auth then:
-// When credentials are invalid, Kafka replies with a SaslAuthenticate response
-// containing an error code and message detailing the authentication failure.
-func (b *Broker) sendAndReceiveSASLPlainAuth() error {
-	// default to V0 to allow for backward compatibility when SASL is enabled
-	// but not the handshake
-	if b.conf.Net.SASL.Handshake {
-		handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
-		if handshakeErr != nil {
-			Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
-			return handshakeErr
-		}
-	}
-
-	if b.conf.Net.SASL.Version == SASLHandshakeV1 {
-		return b.sendAndReceiveV1SASLPlainAuth()
-	}
-	return b.sendAndReceiveV0SASLPlainAuth()
-}
-
-// sendAndReceiveV0SASLPlainAuth performs the v0 SASL PLAIN auth flow, NOT wrapped in the Kafka protocol
-func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
-	length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
-	authBytes := make([]byte, length+4) // 4 byte length header + auth data
-	binary.BigEndian.PutUint32(authBytes, uint32(length))
-	copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
-
-	requestTime := time.Now()
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	bytesWritten, err := b.write(authBytes)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	header := make([]byte, 4)
-	n, err := b.readFull(header)
-	b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
-	// If the credentials are valid, we would get a 4 byte response filled with null characters.
-	// Otherwise, the broker closes the connection and we get an EOF
-	if err != nil {
-		Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
-	return nil
-}
-
-// sendAndReceiveV1SASLPlainAuth performs the v1 SASL PLAIN authentication flow using the Kafka protocol
-func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
-	correlationID := b.correlationID
-
-	requestTime := time.Now()
-
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	b.correlationID++
-
-	bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
-	b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
-
-	// With v1 sasl we get an error message set in the response we can return
-	if err != nil {
-		Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
-		return err
-	}
-
-	return nil
-}
-
-// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
-// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
-func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
-	if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
-		return err
-	}
-
-	token, err := provider.Token()
-	if err != nil {
-		return err
-	}
-
-	message, err := buildClientFirstMessage(token)
-	if err != nil {
-		return err
-	}
-
-	challenged, err := b.sendClientMessage(message)
-	if err != nil {
-		return err
-	}
-
-	if challenged {
-		// Abort the token exchange. The broker returns the failure code.
-		_, err = b.sendClientMessage([]byte("\x01"))
-	}
-
-	return err
-}
-
-// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
-// if the broker responds with a challenge, in which case the token is
-// rejected.
-func (b *Broker) sendClientMessage(message []byte) (bool, error) {
-	requestTime := time.Now()
-	// Will be decremented in updateIncomingCommunicationMetrics (except error)
-	b.addRequestInFlightMetrics(1)
-	correlationID := b.correlationID
-
-	bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
-	b.updateOutgoingCommunicationMetrics(bytesWritten)
-	if err != nil {
-		b.addRequestInFlightMetrics(-1)
-		return false, err
-	}
-
-	b.correlationID++
-
-	res := &SaslAuthenticateResponse{}
-	bytesRead, err := b.receiveSASLServerResponse(res, correlationID)
-
-	requestLatency := time.Since(requestTime)
-	b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
-
-	isChallenge := len(res.SaslAuthBytes) > 0
-
-	if isChallenge && err != nil {
-		Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes)
-	}
-
-	return isChallenge, err
-}
-
-func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
-	if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
-		return err
-	}
-
-	scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
-	if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
-		return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
-	}
-
-	msg, err := scramClient.Step("")
-	if err != nil {
-		return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
-	}
-
-	for !scramClient.Done() {
-		requestTime := time.Now()
-		// Will be decremented in updateIncomingCommunicationMetrics (except error)
-		b.addRequestInFlightMetrics(1)
-		correlationID := b.correlationID
-		bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
-		b.updateOutgoingCommunicationMetrics(bytesWritten)
-		if err != nil {
-			b.addRequestInFlightMetrics(-1)
-			Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
-			return err
-		}
-
-		b.correlationID++
-		challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
-		if err != nil {
-			b.addRequestInFlightMetrics(-1)
-			Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
-			return err
-		}
-
-		b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
-		msg, err = scramClient.Step(string(challenge))
-		if err != nil {
-			Logger.Println("SASL authentication failed", err)
-			return err
-		}
-	}
-
-	Logger.Println("SASL authentication succeeded")
-	return nil
-}
-
-func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
-	rb := &SaslAuthenticateRequest{msg}
-	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return 0, err
-	}
-
-	return b.write(buf)
-}
-
-func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
-	buf := make([]byte, responseLengthSize+correlationIDSize)
-	_, err := b.readFull(buf)
-	if err != nil {
-		return nil, err
-	}
-
-	header := responseHeader{}
-	err = versionedDecode(buf, &header, 0)
-	if err != nil {
-		return nil, err
-	}
-
-	if header.correlationID != correlationID {
-		return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
-	}
-
-	buf = make([]byte, header.length-correlationIDSize)
-	_, err = b.readFull(buf)
-	if err != nil {
-		return nil, err
-	}
-
-	res := &SaslAuthenticateResponse{}
-	if err := versionedDecode(buf, res, 0); err != nil {
-		return nil, err
-	}
-	if res.Err != ErrNoError {
-		return nil, res.Err
-	}
-	return res.SaslAuthBytes, nil
-}
-
-// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
-// https://tools.ietf.org/html/rfc7628
-func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
-	var ext string
-
-	if len(token.Extensions) > 0 {
-		if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
-			return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
-		}
-		ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
-	}
-
-	resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext))
-
-	return resp, nil
-}
-
-// mapToString returns a list of key-value pairs ordered by key.
-// keyValSep separates the key from the value. elemSep separates each pair.
-func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
-	buf := make([]string, 0, len(extensions))
-
-	for k, v := range extensions {
-		buf = append(buf, k+keyValSep+v)
-	}
-
-	sort.Strings(buf)
-
-	return strings.Join(buf, elemSep)
-}
-
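Putting buildClientFirstMessage and mapToString together, the wire format with one extension looks like this (hypothetical in-package sketch; assumes `fmt` is imported, and \x01 is the SOH control character RFC 7628 uses as a separator):

```go
func exampleClientFirstMessage() {
	token := &AccessToken{
		Token:      "abc",
		Extensions: map[string]string{"traceId": "123"},
	}
	msg, _ := buildClientFirstMessage(token)
	fmt.Printf("%q\n", msg) // "n,,\x01auth=Bearer abc\x01traceId=123\x01\x01"
}
```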
-func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
-	authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
-	rb := &SaslAuthenticateRequest{authBytes}
-	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return 0, err
-	}
-
-	return b.write(buf)
-}
-
-func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
-	rb := &SaslAuthenticateRequest{initialResp}
-
-	req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-
-	buf, err := encode(req, b.conf.MetricRegistry)
-	if err != nil {
-		return 0, err
-	}
-
-	return b.write(buf)
-}
-
-func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
-	buf := make([]byte, responseLengthSize+correlationIDSize)
-	bytesRead, err := b.readFull(buf)
-	if err != nil {
-		return bytesRead, err
-	}
-
-	header := responseHeader{}
-	err = versionedDecode(buf, &header, 0)
-	if err != nil {
-		return bytesRead, err
-	}
-
-	if header.correlationID != correlationID {
-		return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
-	}
-
-	buf = make([]byte, header.length-correlationIDSize)
-	c, err := b.readFull(buf)
-	bytesRead += c
-	if err != nil {
-		return bytesRead, err
-	}
-
-	if err := versionedDecode(buf, res, 0); err != nil {
-		return bytesRead, err
-	}
-
-	if res.Err != ErrNoError {
-		return bytesRead, res.Err
-	}
-
-	return bytesRead, nil
-}
-
-func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
-	b.updateRequestLatencyAndInFlightMetrics(requestLatency)
-	b.responseRate.Mark(1)
-
-	if b.brokerResponseRate != nil {
-		b.brokerResponseRate.Mark(1)
-	}
-
-	responseSize := int64(bytes)
-	b.incomingByteRate.Mark(responseSize)
-	if b.brokerIncomingByteRate != nil {
-		b.brokerIncomingByteRate.Mark(responseSize)
-	}
-
-	b.responseSize.Update(responseSize)
-	if b.brokerResponseSize != nil {
-		b.brokerResponseSize.Update(responseSize)
-	}
-}
-
-func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
-	requestLatencyInMs := int64(requestLatency / time.Millisecond)
-	b.requestLatency.Update(requestLatencyInMs)
-
-	if b.brokerRequestLatency != nil {
-		b.brokerRequestLatency.Update(requestLatencyInMs)
-	}
-
-	b.addRequestInFlightMetrics(-1)
-}
-
-func (b *Broker) addRequestInFlightMetrics(i int64) {
-	b.requestsInFlight.Inc(i)
-	if b.brokerRequestsInFlight != nil {
-		b.brokerRequestsInFlight.Inc(i)
-	}
-}
-
-func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
-	b.requestRate.Mark(1)
-	if b.brokerRequestRate != nil {
-		b.brokerRequestRate.Mark(1)
-	}
-
-	requestSize := int64(bytes)
-	b.outgoingByteRate.Mark(requestSize)
-	if b.brokerOutgoingByteRate != nil {
-		b.brokerOutgoingByteRate.Mark(requestSize)
-	}
-
-	b.requestSize.Update(requestSize)
-	if b.brokerRequestSize != nil {
-		b.brokerRequestSize.Update(requestSize)
-	}
-}
-
-func (b *Broker) registerMetrics() {
-	b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
-	b.brokerRequestRate = b.registerMeter("request-rate")
-	b.brokerRequestSize = b.registerHistogram("request-size")
-	b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
-	b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
-	b.brokerResponseRate = b.registerMeter("response-rate")
-	b.brokerResponseSize = b.registerHistogram("response-size")
-	b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
-}
-
-func (b *Broker) unregisterMetrics() {
-	for _, name := range b.registeredMetrics {
-		b.conf.MetricRegistry.Unregister(name)
-	}
-	b.registeredMetrics = nil
-}
-
-func (b *Broker) registerMeter(name string) metrics.Meter {
-	nameForBroker := getMetricNameForBroker(name, b)
-	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
-	return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerHistogram(name string) metrics.Histogram {
-	nameForBroker := getMetricNameForBroker(name, b)
-	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
-	return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerCounter(name string) metrics.Counter {
-	nameForBroker := getMetricNameForBroker(name, b)
-	b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
-	return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry)
-}
-
-func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		cfg = &tls.Config{
-			MinVersion: tls.VersionTLS12,
-		}
-	}
-	if cfg.ServerName != "" {
-		return cfg
-	}
-
-	c := cfg.Clone()
-	sn, _, err := net.SplitHostPort(addr)
-	if err != nil {
-		Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
-	}
-	c.ServerName = sn
-	return c
-}
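In effect, when the config carries no explicit ServerName, the host portion of the broker address is used for TLS certificate verification, and a safe TLS 1.2 floor is applied to a nil config. A hypothetical in-package sketch (assumes `fmt` and `crypto/tls` are imported):

```go
func exampleValidServerNameTLS() {
	cfg := validServerNameTLS("kafka-1.example.com:9093", nil)
	fmt.Println(cfg.ServerName)                     // kafka-1.example.com
	fmt.Println(cfg.MinVersion == tls.VersionTLS12) // true: default floor applied
}
```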
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
deleted file mode 100644
index c0918ba..0000000
--- a/vendor/github.com/Shopify/sarama/client.go
+++ /dev/null
@@ -1,1112 +0,0 @@
-package sarama
-
-import (
-	"math/rand"
-	"sort"
-	"sync"
-	"time"
-)
-
-// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
-// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
-// automatically when it passes out of scope. It is safe to share a client amongst many
-// users, however Kafka will process requests from a single client strictly in serial,
-// so it is generally more efficient to use the default one client per producer/consumer.
-type Client interface {
-	// Config returns the Config struct of the client. This struct should not be
-	// altered after it has been created.
-	Config() *Config
-
-	// Controller returns the cluster controller broker. It will return a
-	// locally cached value if it's available. You can call RefreshController
-	// to update the cached value. Requires Kafka 0.10 or higher.
-	Controller() (*Broker, error)
-
-	// RefreshController retrieves the cluster controller from fresh metadata
-	// and stores it in the local cache. Requires Kafka 0.10 or higher.
-	RefreshController() (*Broker, error)
-
-	// Brokers returns the current set of active brokers as retrieved from cluster metadata.
-	Brokers() []*Broker
-
-	// Broker returns the active Broker, if available, for the given broker ID.
-	Broker(brokerID int32) (*Broker, error)
-
-	// Topics returns the set of available topics as retrieved from cluster metadata.
-	Topics() ([]string, error)
-
-	// Partitions returns the sorted list of all partition IDs for the given topic.
-	Partitions(topic string) ([]int32, error)
-
-	// WritablePartitions returns the sorted list of all writable partition IDs for
-	// the given topic, where "writable" means "having a valid leader accepting
-	// writes".
-	WritablePartitions(topic string) ([]int32, error)
-
-	// Leader returns the broker object that is the leader of the current
-	// topic/partition, as determined by querying the cluster metadata.
-	Leader(topic string, partitionID int32) (*Broker, error)
-
-	// Replicas returns the set of all replica IDs for the given partition.
-	Replicas(topic string, partitionID int32) ([]int32, error)
-
-	// InSyncReplicas returns the set of all in-sync replica IDs for the given
-	// partition. In-sync replicas are replicas which are fully caught up with
-	// the partition leader.
-	InSyncReplicas(topic string, partitionID int32) ([]int32, error)
-
-	// OfflineReplicas returns the set of all offline replica IDs for the given
-	// partition. Offline replicas are replicas which are offline
-	OfflineReplicas(topic string, partitionID int32) ([]int32, error)
-
-	// RefreshBrokers takes a list of addresses to be used as seed brokers.
-	// Existing broker connections are closed and the updated list of seed brokers
-	// will be used for the next metadata fetch.
-	RefreshBrokers(addrs []string) error
-
-	// RefreshMetadata takes a list of topics and queries the cluster to refresh the
-	// available metadata for those topics. If no topics are provided, it will refresh
-	// metadata for all topics.
-	RefreshMetadata(topics ...string) error
-
-	// GetOffset queries the cluster to get the most recent available offset at the
-	// given time (in milliseconds) on the topic/partition combination.
-	// Time should be OffsetOldest for the earliest available offset,
-	// OffsetNewest for the offset of the message that will be produced next, or a time.
-	GetOffset(topic string, partitionID int32, time int64) (int64, error)
-
-	// Coordinator returns the coordinating broker for a consumer group. It will
-	// return a locally cached value if it's available. You can call
-	// RefreshCoordinator to update the cached value. This function only works on
-	// Kafka 0.8.2 and higher.
-	Coordinator(consumerGroup string) (*Broker, error)
-
-	// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
-	// in local cache. This function only works on Kafka 0.8.2 and higher.
-	RefreshCoordinator(consumerGroup string) error
-
-	// InitProducerID retrieves information required for Idempotent Producer
-	InitProducerID() (*InitProducerIDResponse, error)
-
-	// Close shuts down all broker connections managed by this client. It is required
-	// to call this function before a client object passes out of scope, as it will
-	// otherwise leak memory. You must close any Producers or Consumers using a client
-	// before you close the client.
-	Close() error
-
-	// Closed returns true if the client has already had Close called on it
-	Closed() bool
-}
-
-const (
-	// OffsetNewest stands for the log head offset, i.e. the offset that will be
-	// assigned to the next message that will be produced to the partition. You
-	// can send this to a client's GetOffset method to get this offset, or when
-	// calling ConsumePartition to start consuming new messages.
-	OffsetNewest int64 = -1
-	// OffsetOldest stands for the oldest offset available on the broker for a
-	// partition. You can send this to a client's GetOffset method to get this
-	// offset, or when calling ConsumePartition to start consuming from the
-	// oldest offset that is still available on the broker.
-	OffsetOldest int64 = -2
-)
-
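These sentinels are passed where a concrete offset would otherwise go. A hypothetical usage sketch (topic name and partition are invented; assumes the deprecated Shopify import path matching this file):

```go
// nextOffset resolves the offset the next produced message will receive
// on partition 0 of the "events" topic.
func nextOffset(client sarama.Client) (int64, error) {
	return client.GetOffset("events", 0, sarama.OffsetNewest)
}
```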
-type client struct {
-	conf           *Config
-	closer, closed chan none // for shutting down background metadata updater
-
-	// the broker addresses given to us through the constructor are not guaranteed to be returned in
-	// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
-	// so we store them separately
-	seedBrokers []*Broker
-	deadSeeds   []*Broker
-
-	controllerID   int32                                   // cluster controller broker id
-	brokers        map[int32]*Broker                       // maps broker ids to brokers
-	metadata       map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
-	metadataTopics map[string]none                         // topics that need to collect metadata
-	coordinators   map[string]int32                        // Maps consumer group names to coordinating broker IDs
-
-	// If the number of partitions is large, we can get some churn calling cachedPartitions,
-	// so the result is cached.  It is important to update this value whenever metadata is changed
-	cachedPartitionsResults map[string][maxPartitionIndex][]int32
-
-	lock sync.RWMutex // protects access to the maps that hold cluster state.
-}
-
-// NewClient creates a new Client. It connects to one of the given broker addresses
-// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
-// be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(addrs []string, conf *Config) (Client, error) {
-	Logger.Println("Initializing new client")
-
-	if conf == nil {
-		conf = NewConfig()
-	}
-
-	if err := conf.Validate(); err != nil {
-		return nil, err
-	}
-
-	if len(addrs) < 1 {
-		return nil, ConfigurationError("You must provide at least one broker address")
-	}
-
-	client := &client{
-		conf:                    conf,
-		closer:                  make(chan none),
-		closed:                  make(chan none),
-		brokers:                 make(map[int32]*Broker),
-		metadata:                make(map[string]map[int32]*PartitionMetadata),
-		metadataTopics:          make(map[string]none),
-		cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
-		coordinators:            make(map[string]int32),
-	}
-
-	client.randomizeSeedBrokers(addrs)
-
-	if conf.Metadata.Full {
-		// do an initial fetch of all cluster metadata by specifying an empty list of topics
-		err := client.RefreshMetadata()
-		switch err {
-		case nil:
-			break
-		case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
-			// indicates that maybe part of the cluster is down, but is not fatal to creating the client
-			Logger.Println(err)
-		default:
-			close(client.closed) // we haven't started the background updater yet, so we have to do this manually
-			_ = client.Close()
-			return nil, err
-		}
-	}
-	go withRecover(client.backgroundMetadataUpdater)
-
-	Logger.Println("Successfully initialized new client")
-
-	return client, nil
-}
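-
-// A minimal usage sketch (added for illustration; the broker address is a
-// placeholder): build a config, create the client, and close it when done.
-//
-//	conf := NewConfig()
-//	client, err := NewClient([]string{"localhost:9092"}, conf)
-//	if err != nil {
-//		panic(err)
-//	}
-//	defer client.Close()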
-
-func (client *client) Config() *Config {
-	return client.conf
-}
-
-func (client *client) Brokers() []*Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	brokers := make([]*Broker, 0, len(client.brokers))
-	for _, broker := range client.brokers {
-		brokers = append(brokers, broker)
-	}
-	return brokers
-}
-
-func (client *client) Broker(brokerID int32) (*Broker, error) {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	broker, ok := client.brokers[brokerID]
-	if !ok {
-		return nil, ErrBrokerNotFound
-	}
-	_ = broker.Open(client.conf)
-	return broker, nil
-}
-
-func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
-	var err error
-	for broker := client.any(); broker != nil; broker = client.any() {
-		req := &InitProducerIDRequest{}
-
-		response, err := broker.InitProducerID(req)
-		switch err.(type) {
-		case nil:
-			return response, nil
-		default:
-			// some error, remove that broker and try again
-			Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
-			_ = broker.Close()
-			client.deregisterBroker(broker)
-		}
-	}
-	return nil, err
-}
-
-func (client *client) Close() error {
-	if client.Closed() {
-		// Chances are this is being called from a defer() and the error will go unobserved
-		// so we go ahead and log the event in this case.
-		Logger.Printf("Close() called on already closed client")
-		return ErrClosedClient
-	}
-
-	// shutdown and wait for the background thread before we take the lock, to avoid races
-	close(client.closer)
-	<-client.closed
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	Logger.Println("Closing Client")
-
-	for _, broker := range client.brokers {
-		safeAsyncClose(broker)
-	}
-
-	for _, broker := range client.seedBrokers {
-		safeAsyncClose(broker)
-	}
-
-	client.brokers = nil
-	client.metadata = nil
-	client.metadataTopics = nil
-
-	return nil
-}
-
-func (client *client) Closed() bool {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	return client.brokers == nil
-}
-
-func (client *client) Topics() ([]string, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	ret := make([]string, 0, len(client.metadata))
-	for topic := range client.metadata {
-		ret = append(ret, topic)
-	}
-
-	return ret, nil
-}
-
-func (client *client) MetadataTopics() ([]string, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	ret := make([]string, 0, len(client.metadataTopics))
-	for topic := range client.metadataTopics {
-		ret = append(ret, topic)
-	}
-
-	return ret, nil
-}
-
-func (client *client) Partitions(topic string) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	partitions := client.cachedPartitions(topic, allPartitions)
-
-	if len(partitions) == 0 {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		partitions = client.cachedPartitions(topic, allPartitions)
-	}
-
-	// no partitions found even after refreshing metadata
-	if len(partitions) == 0 {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	return partitions, nil
-}
-
-func (client *client) WritablePartitions(topic string) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	partitions := client.cachedPartitions(topic, writablePartitions)
-
-	// len==0 catches both a nil slice (no such topic) and the odd case in which every single
-	// partition is undergoing leader election simultaneously. Callers must be able to handle
-	// an empty slice (it is a valid return value), but catching it here on the first pass
-	// (note we *don't* catch it below, where we return ErrUnknownTopicOrPartition) triggers
-	// a metadata refresh as a nicety, so callers can simply try again instead of having to
-	// trigger a refresh manually (otherwise they would just keep getting a stale cached copy).
-	if len(partitions) == 0 {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		partitions = client.cachedPartitions(topic, writablePartitions)
-	}
-
-	if partitions == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	return partitions, nil
-}
-
-func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	metadata := client.cachedMetadata(topic, partitionID)
-
-	if metadata == nil {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		metadata = client.cachedMetadata(topic, partitionID)
-	}
-
-	if metadata == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	if metadata.Err == ErrReplicaNotAvailable {
-		return dupInt32Slice(metadata.Replicas), metadata.Err
-	}
-	return dupInt32Slice(metadata.Replicas), nil
-}
-
-func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	metadata := client.cachedMetadata(topic, partitionID)
-
-	if metadata == nil {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		metadata = client.cachedMetadata(topic, partitionID)
-	}
-
-	if metadata == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	if metadata.Err == ErrReplicaNotAvailable {
-		return dupInt32Slice(metadata.Isr), metadata.Err
-	}
-	return dupInt32Slice(metadata.Isr), nil
-}
-
-func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	metadata := client.cachedMetadata(topic, partitionID)
-
-	if metadata == nil {
-		err := client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		metadata = client.cachedMetadata(topic, partitionID)
-	}
-
-	if metadata == nil {
-		return nil, ErrUnknownTopicOrPartition
-	}
-
-	if metadata.Err == ErrReplicaNotAvailable {
-		return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
-	}
-	return dupInt32Slice(metadata.OfflineReplicas), nil
-}
-
-func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	leader, err := client.cachedLeader(topic, partitionID)
-
-	if leader == nil {
-		err = client.RefreshMetadata(topic)
-		if err != nil {
-			return nil, err
-		}
-		leader, err = client.cachedLeader(topic, partitionID)
-	}
-
-	return leader, err
-}
-
-func (client *client) RefreshBrokers(addrs []string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	for _, broker := range client.brokers {
-		_ = broker.Close()
-		delete(client.brokers, broker.ID())
-	}
-
-	client.seedBrokers = nil
-	client.deadSeeds = nil
-
-	client.randomizeSeedBrokers(addrs)
-
-	return nil
-}
-
-func (client *client) RefreshMetadata(topics ...string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
-	// error. This handles the case by returning an error instead of sending it
-	// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
-	for _, topic := range topics {
-		if topic == "" {
-			return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
-		}
-	}
-
-	deadline := time.Time{}
-	if client.conf.Metadata.Timeout > 0 {
-		deadline = time.Now().Add(client.conf.Metadata.Timeout)
-	}
-	return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
-}
-
-func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
-	if client.Closed() {
-		return -1, ErrClosedClient
-	}
-
-	offset, err := client.getOffset(topic, partitionID, time)
-	if err != nil {
-		if err := client.RefreshMetadata(topic); err != nil {
-			return -1, err
-		}
-		return client.getOffset(topic, partitionID, time)
-	}
-
-	return offset, err
-}
-
-func (client *client) Controller() (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	if !client.conf.Version.IsAtLeast(V0_10_0_0) {
-		return nil, ErrUnsupportedVersion
-	}
-
-	controller := client.cachedController()
-	if controller == nil {
-		if err := client.refreshMetadata(); err != nil {
-			return nil, err
-		}
-		controller = client.cachedController()
-	}
-
-	if controller == nil {
-		return nil, ErrControllerNotAvailable
-	}
-
-	_ = controller.Open(client.conf)
-	return controller, nil
-}
-
-// deregisterController removes the cached controllerID
-func (client *client) deregisterController() {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	delete(client.brokers, client.controllerID)
-}
-
-// RefreshController retrieves the cluster controller from fresh metadata
-// and stores it in the local cache. Requires Kafka 0.10 or higher.
-func (client *client) RefreshController() (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	client.deregisterController()
-
-	if err := client.refreshMetadata(); err != nil {
-		return nil, err
-	}
-
-	controller := client.cachedController()
-	if controller == nil {
-		return nil, ErrControllerNotAvailable
-	}
-
-	_ = controller.Open(client.conf)
-	return controller, nil
-}
-
-func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	coordinator := client.cachedCoordinator(consumerGroup)
-
-	if coordinator == nil {
-		if err := client.RefreshCoordinator(consumerGroup); err != nil {
-			return nil, err
-		}
-		coordinator = client.cachedCoordinator(consumerGroup)
-	}
-
-	if coordinator == nil {
-		return nil, ErrConsumerCoordinatorNotAvailable
-	}
-
-	_ = coordinator.Open(client.conf)
-	return coordinator, nil
-}
-
-func (client *client) RefreshCoordinator(consumerGroup string) error {
-	if client.Closed() {
-		return ErrClosedClient
-	}
-
-	response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
-	if err != nil {
-		return err
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-	client.registerBroker(response.Coordinator)
-	client.coordinators[consumerGroup] = response.Coordinator.ID()
-	return nil
-}
-
-// private broker management helpers
-
-func (client *client) randomizeSeedBrokers(addrs []string) {
-	random := rand.New(rand.NewSource(time.Now().UnixNano()))
-	for _, index := range random.Perm(len(addrs)) {
-		client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
-	}
-}
-
-func (client *client) updateBroker(brokers []*Broker) {
-	currentBroker := make(map[int32]*Broker, len(brokers))
-
-	for _, broker := range brokers {
-		currentBroker[broker.ID()] = broker
-		if client.brokers[broker.ID()] == nil { // add new broker
-			client.brokers[broker.ID()] = broker
-			Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
-		} else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
-			safeAsyncClose(client.brokers[broker.ID()])
-			client.brokers[broker.ID()] = broker
-			Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
-		}
-	}
-
-	for id, broker := range client.brokers {
-		if _, exist := currentBroker[id]; !exist { // remove old broker
-			safeAsyncClose(broker)
-			delete(client.brokers, id)
-			Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
-		}
-	}
-}
-
-// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
-// in the brokers map. If a broker with the same ID is already registered under a different address,
-// the stale instance is closed and replaced. You must hold the write lock before calling this function.
-func (client *client) registerBroker(broker *Broker) {
-	if client.brokers == nil {
-		Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
-		return
-	}
-
-	if client.brokers[broker.ID()] == nil {
-		client.brokers[broker.ID()] = broker
-		Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
-	} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
-		safeAsyncClose(client.brokers[broker.ID()])
-		client.brokers[broker.ID()] = broker
-		Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
-	}
-}
-
-// deregisterBroker moves a broker from the head of the seedBrokers list to deadSeeds, or,
-// if it is not a seed broker, removes it from the brokers map completely.
-func (client *client) deregisterBroker(broker *Broker) {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
-		client.deadSeeds = append(client.deadSeeds, broker)
-		client.seedBrokers = client.seedBrokers[1:]
-	} else {
-		// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
-		// but we really shouldn't have to; once that loop is made better this case can be
-		// removed, and the function generally can be renamed from `deregisterBroker` to
-		// `nextSeedBroker` or something
-		Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
-		delete(client.brokers, broker.ID())
-	}
-}
-
-func (client *client) resurrectDeadBrokers() {
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
-	client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
-	client.deadSeeds = nil
-}
-
-func (client *client) any() *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	if len(client.seedBrokers) > 0 {
-		_ = client.seedBrokers[0].Open(client.conf)
-		return client.seedBrokers[0]
-	}
-
-	// not guaranteed to be random *or* deterministic
-	for _, broker := range client.brokers {
-		_ = broker.Open(client.conf)
-		return broker
-	}
-
-	return nil
-}
-
-// private caching/lazy metadata helpers
-
-type partitionType int
-
-const (
-	allPartitions partitionType = iota
-	writablePartitions
-	// If you add any more types, update the partition cache in updateMetadata()
-
-	// Ensure this is the last partition type value
-	maxPartitionIndex
-)
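-
-// Illustrative note (added, not original source): these constants index the
-// per-topic arrays held in cachedPartitionsResults, so a cache lookup is
-// effectively:
-//
-//	ids := client.cachedPartitionsResults["my-topic"][writablePartitions]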
-
-func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions := client.metadata[topic]
-	if partitions != nil {
-		return partitions[partitionID]
-	}
-
-	return nil
-}
-
-func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions, exists := client.cachedPartitionsResults[topic]
-
-	if !exists {
-		return nil
-	}
-	return partitions[partitionSet]
-}
-
-func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
-	partitions := client.metadata[topic]
-
-	if partitions == nil {
-		return nil
-	}
-
-	ret := make([]int32, 0, len(partitions))
-	for _, partition := range partitions {
-		if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
-			continue
-		}
-		ret = append(ret, partition.ID)
-	}
-
-	sort.Sort(int32Slice(ret))
-	return ret
-}
-
-func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	partitions := client.metadata[topic]
-	if partitions != nil {
-		metadata, ok := partitions[partitionID]
-		if ok {
-			if metadata.Err == ErrLeaderNotAvailable {
-				return nil, ErrLeaderNotAvailable
-			}
-			b := client.brokers[metadata.Leader]
-			if b == nil {
-				return nil, ErrLeaderNotAvailable
-			}
-			_ = b.Open(client.conf)
-			return b, nil
-		}
-	}
-
-	return nil, ErrUnknownTopicOrPartition
-}
-
-func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
-	broker, err := client.Leader(topic, partitionID)
-	if err != nil {
-		return -1, err
-	}
-
-	request := &OffsetRequest{}
-	if client.conf.Version.IsAtLeast(V0_10_1_0) {
-		request.Version = 1
-	}
-	request.AddBlock(topic, partitionID, time, 1)
-
-	response, err := broker.GetAvailableOffsets(request)
-	if err != nil {
-		_ = broker.Close()
-		return -1, err
-	}
-
-	block := response.GetBlock(topic, partitionID)
-	if block == nil {
-		_ = broker.Close()
-		return -1, ErrIncompleteResponse
-	}
-	if block.Err != ErrNoError {
-		return -1, block.Err
-	}
-	if len(block.Offsets) != 1 {
-		return -1, ErrOffsetOutOfRange
-	}
-
-	return block.Offsets[0], nil
-}
-
-// core metadata update logic
-
-func (client *client) backgroundMetadataUpdater() {
-	defer close(client.closed)
-
-	if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
-		return
-	}
-
-	ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			if err := client.refreshMetadata(); err != nil {
-				Logger.Println("Client background metadata update:", err)
-			}
-		case <-client.closer:
-			return
-		}
-	}
-}
-
-func (client *client) refreshMetadata() error {
-	var topics []string
-
-	if !client.conf.Metadata.Full {
-		if specificTopics, err := client.MetadataTopics(); err != nil {
-			return err
-		} else if len(specificTopics) == 0 {
-			return ErrNoTopicsToUpdateMetadata
-		} else {
-			topics = specificTopics
-		}
-	}
-
-	if err := client.RefreshMetadata(topics...); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
-	pastDeadline := func(backoff time.Duration) bool {
-		if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
-			// we are past the deadline
-			return true
-		}
-		return false
-	}
-	retry := func(err error) error {
-		if attemptsRemaining > 0 {
-			backoff := client.computeBackoff(attemptsRemaining)
-			if pastDeadline(backoff) {
-				Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
-				return err
-			}
-			Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
-			if backoff > 0 {
-				time.Sleep(backoff)
-			}
-			return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
-		}
-		return err
-	}
-
-	broker := client.any()
-	for ; broker != nil && !pastDeadline(0); broker = client.any() {
-		allowAutoTopicCreation := true
-		if len(topics) > 0 {
-			Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
-		} else {
-			allowAutoTopicCreation = false
-			Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
-		}
-
-		req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
-		if client.conf.Version.IsAtLeast(V1_0_0_0) {
-			req.Version = 5
-		} else if client.conf.Version.IsAtLeast(V0_10_0_0) {
-			req.Version = 1
-		}
-		response, err := broker.GetMetadata(req)
-		switch err := err.(type) {
-		case nil:
-			allKnownMetaData := len(topics) == 0
-			// valid response, use it
-			shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
-			if shouldRetry {
-				Logger.Println("client/metadata found some partitions to be leaderless")
-				return retry(err) // note: err can be nil
-			}
-			return err
-
-		case PacketEncodingError:
-			// didn't even send, return the error
-			return err
-
-		case KError:
-			// if this is a SASL auth error, return it immediately as it _should_ be non-retryable for all brokers
-			if err == ErrSASLAuthenticationFailed {
-				Logger.Println("client/metadata failed SASL authentication")
-				return err
-			}
-
-			if err == ErrTopicAuthorizationFailed {
-				Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
-				return err
-			}
-			// else remove that broker and try again
-			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
-			_ = broker.Close()
-			client.deregisterBroker(broker)
-
-		default:
-			// some other error, remove that broker and try again
-			Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
-			_ = broker.Close()
-			client.deregisterBroker(broker)
-		}
-	}
-
-	if broker != nil {
-		Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
-		return retry(ErrOutOfBrokers)
-	}
-
-	Logger.Println("client/metadata no available broker to send metadata request to")
-	client.resurrectDeadBrokers()
-	return retry(ErrOutOfBrokers)
-}
-
-// updateMetadata caches the given metadata response. If there is no fatal error, it reports
-// whether the caller should retry because some topics or partitions are leaderless (ErrLeaderNotAvailable).
-func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
-	if client.Closed() {
-		return
-	}
-
-	client.lock.Lock()
-	defer client.lock.Unlock()
-
-	// For all the brokers we received:
-	// - if it is a new ID, save it
-	// - if it is an existing ID, but the address we have is stale, discard the old one and save it
-	// - if a previously known broker is absent from the response, remove it
-	// - otherwise ignore it, replacing our existing one would just bounce the connection
-	client.updateBroker(data.Brokers)
-
-	client.controllerID = data.ControllerID
-
-	if allKnownMetaData {
-		client.metadata = make(map[string]map[int32]*PartitionMetadata)
-		client.metadataTopics = make(map[string]none)
-		client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
-	}
-	for _, topic := range data.Topics {
-		// topics must be added to `metadataTopics` first, to guarantee that all
-		// requested topics are recorded and remain trackable for the periodic
-		// metadata refresh.
-		if _, exists := client.metadataTopics[topic.Name]; !exists {
-			client.metadataTopics[topic.Name] = none{}
-		}
-		delete(client.metadata, topic.Name)
-		delete(client.cachedPartitionsResults, topic.Name)
-
-		switch topic.Err {
-		case ErrNoError:
-			// no-op
-		case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
-			err = topic.Err
-			continue
-		case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
-			err = topic.Err
-			retry = true
-			continue
-		case ErrLeaderNotAvailable: // retry, but store partial partition results
-			retry = true
-		default: // don't retry, don't store partial results
-			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
-			err = topic.Err
-			continue
-		}
-
-		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
-		for _, partition := range topic.Partitions {
-			client.metadata[topic.Name][partition.ID] = partition
-			if partition.Err == ErrLeaderNotAvailable {
-				retry = true
-			}
-		}
-
-		var partitionCache [maxPartitionIndex][]int32
-		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
-		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
-		client.cachedPartitionsResults[topic.Name] = partitionCache
-	}
-
-	return
-}
-
-func (client *client) cachedCoordinator(consumerGroup string) *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
-		return client.brokers[coordinatorID]
-	}
-	return nil
-}
-
-func (client *client) cachedController() *Broker {
-	client.lock.RLock()
-	defer client.lock.RUnlock()
-
-	return client.brokers[client.controllerID]
-}
-
-func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
-	if client.conf.Metadata.Retry.BackoffFunc != nil {
-		maxRetries := client.conf.Metadata.Retry.Max
-		retries := maxRetries - attemptsRemaining
-		return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
-	}
-	return client.conf.Metadata.Retry.Backoff
-}
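-
-// For illustration only (not original source): computeBackoff prefers
-// Config.Metadata.Retry.BackoffFunc over the static Backoff value, so a
-// caller could plug in an exponential strategy like this sketch:
-//
-//	conf.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
-//		return time.Duration(100<<retries) * time.Millisecond // 100ms, 200ms, 400ms, ...
-//	}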
-
-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
-	retry := func(err error) (*FindCoordinatorResponse, error) {
-		if attemptsRemaining > 0 {
-			backoff := client.computeBackoff(attemptsRemaining)
-			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
-			time.Sleep(backoff)
-			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
-		}
-		return nil, err
-	}
-
-	for broker := client.any(); broker != nil; broker = client.any() {
-		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
-
-		request := new(FindCoordinatorRequest)
-		request.CoordinatorKey = consumerGroup
-		request.CoordinatorType = CoordinatorGroup
-
-		response, err := broker.FindCoordinator(request)
-		if err != nil {
-			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
-
-			switch err.(type) {
-			case PacketEncodingError:
-				return nil, err
-			default:
-				_ = broker.Close()
-				client.deregisterBroker(broker)
-				continue
-			}
-		}
-
-		switch response.Err {
-		case ErrNoError:
-			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
-			return response, nil
-
-		case ErrConsumerCoordinatorNotAvailable:
-			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
-
-			// This is very ugly, but this scenario will only happen once per cluster.
-			// The __consumer_offsets topic only has to be created one time.
-			// The number of partitions is not configurable, but partition 0 should always exist.
-			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
-				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
-				time.Sleep(2 * time.Second)
-			}
-
-			return retry(ErrConsumerCoordinatorNotAvailable)
-		case ErrGroupAuthorizationFailed:
-			Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
-			return retry(ErrGroupAuthorizationFailed)
-
-		default:
-			return nil, response.Err
-		}
-	}
-
-	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
-	client.resurrectDeadBrokers()
-	return retry(ErrOutOfBrokers)
-}
-
-// nopCloserClient embeds an existing Client, but disables
-// the Close method (yet all other methods pass
-// through unchanged). This is for use in larger structs
-// where it is undesirable to close the client that was
-// passed in by the caller.
-type nopCloserClient struct {
-	Client
-}
-
-// Close intercepts and purposely does not call the underlying
-// client's Close() method.
-func (ncc *nopCloserClient) Close() error {
-	return nil
-}
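-
-// Usage sketch (an assumption for illustration, not original source): wrap a
-// caller-owned Client so an inner component that closes its client cannot
-// close this one.
-//
-//	inner := &nopCloserClient{Client: existingClient}
-//	_ = inner.Close() // no-op; existingClient remains open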
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
deleted file mode 100644
index 12cd7c3..0000000
--- a/vendor/github.com/Shopify/sarama/compress.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"sync"
-
-	snappy "github.com/eapache/go-xerial-snappy"
-	"github.com/pierrec/lz4"
-)
-
-var (
-	lz4WriterPool = sync.Pool{
-		New: func() interface{} {
-			return lz4.NewWriter(nil)
-		},
-	}
-
-	gzipWriterPool = sync.Pool{
-		New: func() interface{} {
-			return gzip.NewWriter(nil)
-		},
-	}
-	gzipWriterPoolForCompressionLevel1 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 1)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel2 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 2)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel3 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 3)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel4 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 4)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel5 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 5)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel6 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 6)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel7 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 7)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel8 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 8)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-	gzipWriterPoolForCompressionLevel9 = sync.Pool{
-		New: func() interface{} {
-			gz, err := gzip.NewWriterLevel(nil, 9)
-			if err != nil {
-				panic(err)
-			}
-			return gz
-		},
-	}
-)
-
-func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
-	switch cc {
-	case CompressionNone:
-		return data, nil
-	case CompressionGZIP:
-		var (
-			err    error
-			buf    bytes.Buffer
-			writer *gzip.Writer
-		)
-
-		switch level {
-		case CompressionLevelDefault:
-			writer = gzipWriterPool.Get().(*gzip.Writer)
-			defer gzipWriterPool.Put(writer)
-			writer.Reset(&buf)
-		case 1:
-			writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel1.Put(writer)
-			writer.Reset(&buf)
-		case 2:
-			writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel2.Put(writer)
-			writer.Reset(&buf)
-		case 3:
-			writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel3.Put(writer)
-			writer.Reset(&buf)
-		case 4:
-			writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel4.Put(writer)
-			writer.Reset(&buf)
-		case 5:
-			writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel5.Put(writer)
-			writer.Reset(&buf)
-		case 6:
-			writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel6.Put(writer)
-			writer.Reset(&buf)
-		case 7:
-			writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel7.Put(writer)
-			writer.Reset(&buf)
-		case 8:
-			writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel8.Put(writer)
-			writer.Reset(&buf)
-		case 9:
-			writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
-			defer gzipWriterPoolForCompressionLevel9.Put(writer)
-			writer.Reset(&buf)
-		default:
-			writer, err = gzip.NewWriterLevel(&buf, level)
-			if err != nil {
-				return nil, err
-			}
-		}
-		if _, err := writer.Write(data); err != nil {
-			return nil, err
-		}
-		if err := writer.Close(); err != nil {
-			return nil, err
-		}
-		return buf.Bytes(), nil
-	case CompressionSnappy:
-		return snappy.Encode(data), nil
-	case CompressionLZ4:
-		writer := lz4WriterPool.Get().(*lz4.Writer)
-		defer lz4WriterPool.Put(writer)
-
-		var buf bytes.Buffer
-		writer.Reset(&buf)
-
-		if _, err := writer.Write(data); err != nil {
-			return nil, err
-		}
-		if err := writer.Close(); err != nil {
-			return nil, err
-		}
-		return buf.Bytes(), nil
-	case CompressionZSTD:
-		return zstdCompress(nil, data)
-	default:
-		return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
-	}
-}
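-
-// Illustrative sketch (added): package-internal callers invoke compress with
-// a codec, a level and the payload, e.g.
-//
-//	out, err := compress(CompressionGZIP, CompressionLevelDefault, []byte("payload"))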
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
deleted file mode 100644
index 43e739c..0000000
--- a/vendor/github.com/Shopify/sarama/config.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package sarama
-
-import (
-	"compress/gzip"
-	"crypto/tls"
-	"fmt"
-	"io/ioutil"
-	"net"
-	"regexp"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-	"golang.org/x/net/proxy"
-)
-
-const defaultClientID = "sarama"
-
-var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
-
-// Config is used to pass multiple configuration options to Sarama's constructors.
-type Config struct {
-	// Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
-	Admin struct {
-		Retry struct {
-			// The total number of times to retry sending (retriable) admin requests (default 5).
-			// Similar to the `retries` setting of the JVM AdminClientConfig.
-			Max int
-			// Backoff time between retries of a failed request (default 100ms)
-			Backoff time.Duration
-		}
-		// The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
-		// including topics, brokers, configurations and ACLs (defaults to 3 seconds).
-		Timeout time.Duration
-	}
-
-	// Net is the namespace for network-level properties used by the Broker, and
-	// shared by the Client/Producer/Consumer.
-	Net struct {
-		// How many outstanding requests a connection is allowed to have before
-		// sending on it blocks (default 5).
-		MaxOpenRequests int
-
-		// All three of the below configurations are similar to the
-		// `socket.timeout.ms` setting in JVM kafka. All of them default
-		// to 30 seconds.
-		DialTimeout  time.Duration // How long to wait for the initial connection.
-		ReadTimeout  time.Duration // How long to wait for a response.
-		WriteTimeout time.Duration // How long to wait for a transmit.
-
-		TLS struct {
-			// Whether or not to use TLS when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// The TLS configuration to use for secure connections if
-			// enabled (defaults to nil).
-			Config *tls.Config
-		}
-
-		// SASL based authentication with broker. The mechanisms handled by this
-		// implementation are SASL/PLAIN, SASL/SCRAM, SASL/OAUTHBEARER and GSSAPI (Kerberos).
-		SASL struct {
-			// Whether or not to use SASL authentication when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// SASLMechanism is the name of the enabled SASL mechanism.
-			// Possible values: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, GSSAPI (defaults to PLAIN).
-			Mechanism SASLMechanism
-			// Version is the SASL Protocol Version to use
-			// Kafka > 1.x should use V1, except on Azure EventHub which use V0
-			Version int16
-			// Whether or not to send the Kafka SASL handshake first if enabled
-			// (defaults to true). You should only set this to false if you're using
-			// a non-Kafka SASL proxy.
-			Handshake bool
-			// AuthIdentity is an (optional) authorization identity (authzid) to
-			// use for SASL/PLAIN authentication (if different from User) when
-			// an authenticated user is permitted to act as the presented
-			// alternative user. See RFC4616 for details.
-			AuthIdentity string
-			// User is the authentication identity (authcid) to present for
-			// SASL/PLAIN or SASL/SCRAM authentication
-			User string
-			// Password for SASL/PLAIN authentication
-			Password string
-			// authz id used for SASL/SCRAM authentication
-			SCRAMAuthzID string
-			// SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
-			// client used to perform the SCRAM exchange with the server.
-			SCRAMClientGeneratorFunc func() SCRAMClient
-			// TokenProvider is a user-defined callback for generating
-			// access tokens for SASL/OAUTHBEARER auth. See the
-			// AccessTokenProvider interface docs for proper implementation
-			// guidelines.
-			TokenProvider AccessTokenProvider
-
-			GSSAPI GSSAPIConfig
-		}
-
-		// KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
-		// If zero or positive, keep-alives are enabled.
-		// If negative, keep-alives are disabled.
-		KeepAlive time.Duration
-
-		// LocalAddr is the local address to use when dialing an
-		// address. The address must be of a compatible type for the
-		// network being dialed.
-		// If nil, a local address is automatically chosen.
-		LocalAddr net.Addr
-
-		Proxy struct {
-			// Whether or not to use proxy when connecting to the broker
-			// (defaults to false).
-			Enable bool
-			// The proxy dialer to use when the proxy is enabled (defaults to nil).
-			Dialer proxy.Dialer
-		}
-	}
-
-	// Metadata is the namespace for metadata management properties used by the
-	// Client, and shared by the Producer/Consumer.
-	Metadata struct {
-		Retry struct {
-			// The total number of times to retry a metadata request when the
-			// cluster is in the middle of a leader election (default 3).
-			Max int
-			// How long to wait for leader election to occur before retrying
-			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
-			Backoff time.Duration
-			// Called to compute backoff time dynamically. Useful for implementing
-			// more sophisticated backoff strategies. This takes precedence over
-			// `Backoff` if set.
-			BackoffFunc func(retries, maxRetries int) time.Duration
-		}
-		// How frequently to refresh the cluster metadata in the background.
-		// Defaults to 10 minutes. Set to 0 to disable. Similar to
-		// `topic.metadata.refresh.interval.ms` in the JVM version.
-		RefreshFrequency time.Duration
-
-		// Whether to maintain a full set of metadata for all topics, or just
-		// the minimal set that has been necessary so far. The full set is simpler
-		// and usually more convenient, but can take up a substantial amount of
-		// memory if you have many topics and partitions. Defaults to true.
-		Full bool
-
-		// How long to wait for a successful metadata response.
-		// Disabled by default which means a metadata request against an unreachable
-		// cluster (all brokers are unreachable or unresponsive) can take up to
-		// `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
-		// to fail.
-		Timeout time.Duration
-	}
-
-	// Producer is the namespace for configuration related to producing messages,
-	// used by the Producer.
-	Producer struct {
-		// The maximum permitted size of a message (defaults to 1000000). Should be
-		// set equal to or smaller than the broker's `message.max.bytes`.
-		MaxMessageBytes int
-		// The level of acknowledgement reliability needed from the broker (defaults
-		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
-		// JVM producer.
-		RequiredAcks RequiredAcks
-		// The maximum duration the broker will wait for the receipt of the number of
-		// RequiredAcks (defaults to 10 seconds). This is only relevant when
-		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
-		// millisecond resolution, nanoseconds will be truncated. Equivalent to
-		// the JVM producer's `request.timeout.ms` setting.
-		Timeout time.Duration
-		// The type of compression to use on messages (defaults to no compression).
-		// Similar to `compression.codec` setting of the JVM producer.
-		Compression CompressionCodec
-		// The level of compression to use on messages. The meaning depends
-		// on the actual compression type used and defaults to default compression
-		// level for the codec.
-		CompressionLevel int
-		// Generates partitioners for choosing the partition to send messages to
-		// (defaults to hashing the message key). Similar to the `partitioner.class`
-		// setting for the JVM producer.
-		Partitioner PartitionerConstructor
-		// If enabled, the producer will ensure that exactly one copy of each message is
-		// written.
-		Idempotent bool
-
-		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from the respective channels to prevent deadlock. If,
-		// however, this config is used to create a `SyncProducer`, both must be set
-		// to true and you shall not read from the channels since the producer does
-		// this internally.
-		Return struct {
-			// If enabled, successfully delivered messages will be returned on the
-			// Successes channel (default disabled).
-			Successes bool
-
-			// If enabled, messages that failed to deliver will be returned on the
-			// Errors channel, including error (default enabled).
-			Errors bool
-		}
-
-		// The following config options control how often messages are batched up and
-		// sent to the broker. By default, messages are sent as fast as possible, and
-		// all messages received while the current batch is in-flight are placed
-		// into the subsequent batch.
-		Flush struct {
-			// The best-effort number of bytes needed to trigger a flush. Use the
-			// global sarama.MaxRequestSize to set a hard upper limit.
-			Bytes int
-			// The best-effort number of messages needed to trigger a flush. Use
-			// `MaxMessages` to set a hard upper limit.
-			Messages int
-			// The best-effort frequency of flushes. Equivalent to
-			// `queue.buffering.max.ms` setting of JVM producer.
-			Frequency time.Duration
-			// The maximum number of messages the producer will send in a single
-			// broker request. Defaults to 0 for unlimited. Similar to
-			// `queue.buffering.max.messages` in the JVM producer.
-			MaxMessages int
-		}
-
-		Retry struct {
-			// The total number of times to retry sending a message (default 3).
-			// Similar to the `message.send.max.retries` setting of the JVM producer.
-			Max int
-			// How long to wait for the cluster to settle between retries
-			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
-			// JVM producer.
-			Backoff time.Duration
-			// Called to compute backoff time dynamically. Useful for implementing
-			// more sophisticated backoff strategies. This takes precedence over
-			// `Backoff` if set.
-			BackoffFunc func(retries, maxRetries int) time.Duration
-		}
-
-		// Interceptors to be called when the producer dispatcher reads the
-		// message for the first time. Interceptors allow you to intercept and
-		// possibly mutate the message before it is published to the Kafka
-		// cluster. The *ProducerMessage modified by the first interceptor's
-		// OnSend() is passed to the second interceptor's OnSend(), and so on in
-		// the interceptor chain.
-		Interceptors []ProducerInterceptor
-	}
-
-	// Consumer is the namespace for configuration related to consuming messages,
-	// used by the Consumer.
-	Consumer struct {
-
-		// Group is the namespace for configuring consumer group.
-		Group struct {
-			Session struct {
-				// The timeout used to detect consumer failures when using Kafka's group management facility.
-				// The consumer sends periodic heartbeats to indicate its liveness to the broker.
-				// If no heartbeats are received by the broker before the expiration of this session timeout,
-				// then the broker will remove this consumer from the group and initiate a rebalance.
-				// Note that the value must be in the allowable range as configured in the broker configuration
-				// by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
-				Timeout time.Duration
-			}
-			Heartbeat struct {
-				// The expected time between heartbeats to the consumer coordinator when using Kafka's group
-				// management facilities. Heartbeats are used to ensure that the consumer's session stays active and
-				// to facilitate rebalancing when new consumers join or leave the group.
-				// The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
-				// higher than 1/3 of that value.
-				// It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
-				Interval time.Duration
-			}
-			Rebalance struct {
-				// Strategy for allocating topic partitions to members (default BalanceStrategyRange)
-				Strategy BalanceStrategy
-				// The maximum allowed time for each worker to join the group once a rebalance has begun.
-				// This is basically a limit on the amount of time needed for all tasks to flush any pending
-				// data and commit offsets. If the timeout is exceeded, then the worker will be removed from
-				// the group, which will cause offset commit failures (default 60s).
-				Timeout time.Duration
-
-				Retry struct {
-					// When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
-					// the load to assign partitions to each consumer. If the set of consumers changes while
-					// this assignment is taking place the rebalance will fail and retry. This setting controls
-					// the maximum number of attempts before giving up (default 4).
-					Max int
-					// Backoff time between retries during rebalance (default 2s)
-					Backoff time.Duration
-				}
-			}
-			Member struct {
-				// Custom metadata to include when joining the group. The user data for all joined members
-				// can be retrieved by sending a DescribeGroupRequest to the broker that is the
-				// coordinator for the group.
-				UserData []byte
-			}
-		}
-
-		Retry struct {
-			// How long to wait after failing to read from a partition before
-			// trying again (default 2s).
-			Backoff time.Duration
-			// Called to compute backoff time dynamically. Useful for implementing
-			// more sophisticated backoff strategies. This takes precedence over
-			// `Backoff` if set.
-			BackoffFunc func(retries int) time.Duration
-		}
-
-		// Fetch is the namespace for controlling how many bytes are retrieved by any
-		// given request.
-		Fetch struct {
-			// The minimum number of message bytes to fetch in a request - the broker
-			// will wait until at least this many are available. The default is 1,
-			// as 0 causes the consumer to spin when no messages are available.
-			// Equivalent to the JVM's `fetch.min.bytes`.
-			Min int32
-			// The default number of message bytes to fetch from the broker in each
-			// request (default 1MB). This should be larger than the majority of
-			// your messages, or else the consumer will spend a lot of time
-			// negotiating sizes and not actually consuming. Similar to the JVM's
-			// `fetch.message.max.bytes`.
-			Default int32
-			// The maximum number of message bytes to fetch from the broker in a
-			// single request. Messages larger than this will return
-			// ErrMessageTooLarge and will not be consumable, so you must be sure
-			// this is at least as large as your largest message. Defaults to 0
-			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
-			// global `sarama.MaxResponseSize` still applies.
-			Max int32
-		}
-		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
-		// bytes to become available before it returns fewer than that anyway. The
-		// default is 250ms, since 0 causes the consumer to spin when no events are
-		// available. 100-500ms is a reasonable range for most cases. Kafka only
-		// supports precision up to milliseconds; nanoseconds will be truncated.
-		// Equivalent to the JVM's `fetch.wait.max.ms`.
-		MaxWaitTime time.Duration
-
-		// The maximum amount of time the consumer expects a message takes to
-		// process for the user. If writing to the Messages channel takes longer
-		// than this, that partition will stop fetching more messages until it
-		// can proceed again.
-		// Note that, since the Messages channel is buffered, the actual grace time is
-		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
-		// If a message is not written to the Messages channel between two ticks
-		// of the expiryTicker then a timeout is detected.
-		// Using a ticker instead of a timer to detect timeouts should typically
-		// result in many fewer calls to Timer functions which may result in a
-		// significant performance improvement if many messages are being sent
-		// and timeouts are infrequent.
-		// The disadvantage of using a ticker instead of a timer is that
-		// timeouts will be less accurate. That is, the effective timeout could
-		// be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
-		// example, if `MaxProcessingTime` is 100ms then a delay of 180ms
-		// between two messages being sent may not be recognized as a timeout.
-		MaxProcessingTime time.Duration
-
-		// Return specifies what channels will be populated. If they are set to true,
-		// you must read from them to prevent deadlock.
-		Return struct {
-			// If enabled, any errors that occurred while consuming are returned on
-			// the Errors channel (default disabled).
-			Errors bool
-		}
-
-		// Offsets specifies configuration for how and when to commit consumed
-		// offsets. This currently requires the manual use of an OffsetManager
-		// but will eventually be automated.
-		Offsets struct {
-			// Deprecated: CommitInterval exists for historical compatibility
-			// and should not be used. Please use Consumer.Offsets.AutoCommit
-			CommitInterval time.Duration
-
-			// AutoCommit specifies configuration for committing offsets automatically.
-			AutoCommit struct {
-				// Whether or not to auto-commit updated offsets back to the broker.
-				// (default enabled).
-				Enable bool
-
-				// How frequently to commit updated offsets. Ineffective unless
-				// auto-commit is enabled (default 1s)
-				Interval time.Duration
-			}
-
-			// The initial offset to use if no offset was previously committed.
-			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
-			Initial int64
-
-			// The retention duration for committed offsets. If zero, disabled
-			// (in which case the `offsets.retention.minutes` option on the
-			// broker will be used).  Kafka only supports precision up to
-			// milliseconds; nanoseconds will be truncated. Requires Kafka
-			// broker version 0.9.0 or later.
-			// (default is 0: disabled).
-			Retention time.Duration
-
-			Retry struct {
-				// The total number of times to retry failing commit
-				// requests during OffsetManager shutdown (default 3).
-				Max int
-			}
-		}
-
-		// IsolationLevel supports two modes:
-		// 	- use `ReadUncommitted` (default) to consume and return all messages in the message channel
-		//	- use `ReadCommitted` to hide messages that are part of an aborted transaction
-		IsolationLevel IsolationLevel
-
-		// Interceptors to be called just before the record is sent to the
-		// messages channel. Interceptors allow you to intercept and possibly
-		// mutate the message before it is returned to the client. The
-		// *ConsumerMessage modified by the first interceptor's OnConsume() is
-		// passed to the second interceptor's OnConsume(), and so on in the
-		// interceptor chain.
-		Interceptors []ConsumerInterceptor
-	}
-
-	// A user-provided string sent with every request to the brokers for logging,
-	// debugging, and auditing purposes. Defaults to "sarama", but you should
-	// probably set it to something specific to your application.
-	ClientID string
-	// A rack identifier for this client. This can be any string value which
-	// indicates where this client is physically located.
-	// It corresponds with the broker config 'broker.rack'
-	RackID string
-	// The number of events to buffer in internal and external channels. This
-	// permits the producer and consumer to continue processing some messages
-	// in the background while user code is working, greatly improving throughput.
-	// Defaults to 256.
-	ChannelBufferSize int
-	// The version of Kafka that Sarama will assume it is running against.
-	// Defaults to the oldest supported stable version. Since Kafka provides
-	// backwards-compatibility, setting it to a version older than you have
-	// will not break anything, although it may prevent you from using the
-	// latest features. Setting it to a version greater than you are actually
-	// running may lead to random breakage.
-	Version KafkaVersion
-	// The registry to define metrics into.
-	// Defaults to a local registry.
-	// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
-	// prior to starting Sarama.
-	// See Examples on how to use the metrics registry
-	MetricRegistry metrics.Registry
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
-	c := &Config{}
-
-	c.Admin.Retry.Max = 5
-	c.Admin.Retry.Backoff = 100 * time.Millisecond
-	c.Admin.Timeout = 3 * time.Second
-
-	c.Net.MaxOpenRequests = 5
-	c.Net.DialTimeout = 30 * time.Second
-	c.Net.ReadTimeout = 30 * time.Second
-	c.Net.WriteTimeout = 30 * time.Second
-	c.Net.SASL.Handshake = true
-	c.Net.SASL.Version = SASLHandshakeV0
-
-	c.Metadata.Retry.Max = 3
-	c.Metadata.Retry.Backoff = 250 * time.Millisecond
-	c.Metadata.RefreshFrequency = 10 * time.Minute
-	c.Metadata.Full = true
-
-	c.Producer.MaxMessageBytes = 1000000
-	c.Producer.RequiredAcks = WaitForLocal
-	c.Producer.Timeout = 10 * time.Second
-	c.Producer.Partitioner = NewHashPartitioner
-	c.Producer.Retry.Max = 3
-	c.Producer.Retry.Backoff = 100 * time.Millisecond
-	c.Producer.Return.Errors = true
-	c.Producer.CompressionLevel = CompressionLevelDefault
-
-	c.Consumer.Fetch.Min = 1
-	c.Consumer.Fetch.Default = 1024 * 1024
-	c.Consumer.Retry.Backoff = 2 * time.Second
-	c.Consumer.MaxWaitTime = 250 * time.Millisecond
-	c.Consumer.MaxProcessingTime = 100 * time.Millisecond
-	c.Consumer.Return.Errors = false
-	c.Consumer.Offsets.AutoCommit.Enable = true
-	c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
-	c.Consumer.Offsets.Initial = OffsetNewest
-	c.Consumer.Offsets.Retry.Max = 3
-
-	c.Consumer.Group.Session.Timeout = 10 * time.Second
-	c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
-	c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
-	c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
-	c.Consumer.Group.Rebalance.Retry.Max = 4
-	c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
-
-	c.ClientID = defaultClientID
-	c.ChannelBufferSize = 256
-	c.Version = DefaultVersion
-	c.MetricRegistry = metrics.NewRegistry()
-
-	return c
-}
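-
-// A hedged usage sketch (illustration only; field values are placeholders):
-// start from the defaults returned by NewConfig and override selected fields
-// before validating.
-//
-//	conf := NewConfig()
-//	conf.ClientID = "my-service"
-//	conf.Producer.Return.Successes = true
-//	if err := conf.Validate(); err != nil {
-//		panic(err)
-//	}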
-
-// Validate checks a Config instance. It will return a
-// ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
-	// some configuration values should be warned on but not fail completely, do those first
-	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
-		Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
-	}
-	if !c.Net.SASL.Enable {
-		if c.Net.SASL.User != "" {
-			Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
-		}
-		if c.Net.SASL.Password != "" {
-			Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
-		}
-	}
-	if c.Producer.RequiredAcks > 1 {
-		Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
-	}
-	if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
-		Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
-	}
-	if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
-		Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
-	}
-	if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
-		Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
-	}
-	if c.Producer.Timeout%time.Millisecond != 0 {
-		Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
-	}
-	if c.Consumer.MaxWaitTime < 100*time.Millisecond {
-		Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
-	}
-	if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
-		Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
-		Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
-		Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
-		Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
-		Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.ClientID == defaultClientID {
-		Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
-	}
-
-	// validate Net values
-	switch {
-	case c.Net.MaxOpenRequests <= 0:
-		return ConfigurationError("Net.MaxOpenRequests must be > 0")
-	case c.Net.DialTimeout <= 0:
-		return ConfigurationError("Net.DialTimeout must be > 0")
-	case c.Net.ReadTimeout <= 0:
-		return ConfigurationError("Net.ReadTimeout must be > 0")
-	case c.Net.WriteTimeout <= 0:
-		return ConfigurationError("Net.WriteTimeout must be > 0")
-	case c.Net.SASL.Enable:
-		if c.Net.SASL.Mechanism == "" {
-			c.Net.SASL.Mechanism = SASLTypePlaintext
-		}
-
-		switch c.Net.SASL.Mechanism {
-		case SASLTypePlaintext:
-			if c.Net.SASL.User == "" {
-				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
-			}
-			if c.Net.SASL.Password == "" {
-				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
-			}
-		case SASLTypeOAuth:
-			if c.Net.SASL.TokenProvider == nil {
-				return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
-			}
-		case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
-			if c.Net.SASL.User == "" {
-				return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
-			}
-			if c.Net.SASL.Password == "" {
-				return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
-			}
-			if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
-				return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
-			}
-		case SASLTypeGSSAPI:
-			if c.Net.SASL.GSSAPI.ServiceName == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
-			}
-
-			if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
-				if c.Net.SASL.GSSAPI.Password == "" {
-					return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
-						"mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
-				}
-			} else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
-				if c.Net.SASL.GSSAPI.KeyTabPath == "" {
-					return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
-						" and  Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
-				}
-			} else {
-				return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
-			}
-			if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
-			}
-			if c.Net.SASL.GSSAPI.Username == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
-			}
-			if c.Net.SASL.GSSAPI.Realm == "" {
-				return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
-			}
-		default:
-			msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
-				SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
-			return ConfigurationError(msg)
-		}
-	}
-
-	// validate the Admin values
-	switch {
-	case c.Admin.Timeout <= 0:
-		return ConfigurationError("Admin.Timeout must be > 0")
-	}
-
-	// validate the Metadata values
-	switch {
-	case c.Metadata.Retry.Max < 0:
-		return ConfigurationError("Metadata.Retry.Max must be >= 0")
-	case c.Metadata.Retry.Backoff < 0:
-		return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
-	case c.Metadata.RefreshFrequency < 0:
-		return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
-	}
-
-	// validate the Producer values
-	switch {
-	case c.Producer.MaxMessageBytes <= 0:
-		return ConfigurationError("Producer.MaxMessageBytes must be > 0")
-	case c.Producer.RequiredAcks < -1:
-		return ConfigurationError("Producer.RequiredAcks must be >= -1")
-	case c.Producer.Timeout <= 0:
-		return ConfigurationError("Producer.Timeout must be > 0")
-	case c.Producer.Partitioner == nil:
-		return ConfigurationError("Producer.Partitioner must not be nil")
-	case c.Producer.Flush.Bytes < 0:
-		return ConfigurationError("Producer.Flush.Bytes must be >= 0")
-	case c.Producer.Flush.Messages < 0:
-		return ConfigurationError("Producer.Flush.Messages must be >= 0")
-	case c.Producer.Flush.Frequency < 0:
-		return ConfigurationError("Producer.Flush.Frequency must be >= 0")
-	case c.Producer.Flush.MaxMessages < 0:
-		return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
-	case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
-		return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
-	case c.Producer.Retry.Max < 0:
-		return ConfigurationError("Producer.Retry.Max must be >= 0")
-	case c.Producer.Retry.Backoff < 0:
-		return ConfigurationError("Producer.Retry.Backoff must be >= 0")
-	}
-
-	if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
-		return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
-	}
-
-	if c.Producer.Compression == CompressionGZIP {
-		if c.Producer.CompressionLevel != CompressionLevelDefault {
-			if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
-				return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
-			}
-		}
-	}
-
-	if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
-		return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
-	}
-
-	if c.Producer.Idempotent {
-		if !c.Version.IsAtLeast(V0_11_0_0) {
-			return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
-		}
-		if c.Producer.Retry.Max == 0 {
-			return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
-		}
-		if c.Producer.RequiredAcks != WaitForAll {
-			return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
-		}
-		if c.Net.MaxOpenRequests > 1 {
-			return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
-		}
-	}
-
-	// validate the Consumer values
-	switch {
-	case c.Consumer.Fetch.Min <= 0:
-		return ConfigurationError("Consumer.Fetch.Min must be > 0")
-	case c.Consumer.Fetch.Default <= 0:
-		return ConfigurationError("Consumer.Fetch.Default must be > 0")
-	case c.Consumer.Fetch.Max < 0:
-		return ConfigurationError("Consumer.Fetch.Max must be >= 0")
-	case c.Consumer.MaxWaitTime < 1*time.Millisecond:
-		return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
-	case c.Consumer.MaxProcessingTime <= 0:
-		return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
-	case c.Consumer.Retry.Backoff < 0:
-		return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
-	case c.Consumer.Offsets.AutoCommit.Interval <= 0:
-		return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
-	case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
-		return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
-	case c.Consumer.Offsets.Retry.Max < 0:
-		return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
-	case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
-		return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
-	}
-
-	if c.Consumer.Offsets.CommitInterval != 0 {
-		Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
-			" and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
-	}
-
-	// validate IsolationLevel
-	if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
-		return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
-	}
-
-	// validate the Consumer Group values
-	switch {
-	case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
-		return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
-	case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
-		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
-	case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
-		return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
-	case c.Consumer.Group.Rebalance.Strategy == nil:
-		return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
-	case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
-		return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
-	case c.Consumer.Group.Rebalance.Retry.Max < 0:
-		return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
-	case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
-		return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
-	}
-
-	// validate misc shared values
-	switch {
-	case c.ChannelBufferSize < 0:
-		return ConfigurationError("ChannelBufferSize must be >= 0")
-	case !validID.MatchString(c.ClientID):
-		return ConfigurationError("ClientID is invalid")
-	}
-
-	return nil
-}
-
-func (c *Config) getDialer() proxy.Dialer {
-	if c.Net.Proxy.Enable {
-		Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
-		return c.Net.Proxy.Dialer
-	} else {
-		return &net.Dialer{
-			Timeout:   c.Net.DialTimeout,
-			KeepAlive: c.Net.KeepAlive,
-			LocalAddr: c.Net.LocalAddr,
-		}
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
deleted file mode 100644
index f9cd172..0000000
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ /dev/null
@@ -1,949 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// ConsumerMessage encapsulates a Kafka message returned by the consumer.
-type ConsumerMessage struct {
-	Headers        []*RecordHeader // only set if kafka is version 0.11+
-	Timestamp      time.Time       // only set if kafka is version 0.10+, inner message timestamp
-	BlockTimestamp time.Time       // only set if kafka is version 0.10+, outer (compressed) block timestamp
-
-	Key, Value []byte
-	Topic      string
-	Partition  int32
-	Offset     int64
-}
-
-// ConsumerError is what is provided to the user when an error occurs.
-// It wraps an error and includes the topic and partition.
-type ConsumerError struct {
-	Topic     string
-	Partition int32
-	Err       error
-}
-
-func (ce ConsumerError) Error() string {
-	return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
-}
-
-func (ce ConsumerError) Unwrap() error {
-	return ce.Err
-}
-
-// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
-// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
-// when stopping.
-type ConsumerErrors []*ConsumerError
-
-func (ce ConsumerErrors) Error() string {
-	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
-}
-
-// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
-// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
-// scope.
-type Consumer interface {
-	// Topics returns the set of available topics as retrieved from the cluster
-	// metadata. This method is the same as Client.Topics(), and is provided for
-	// convenience.
-	Topics() ([]string, error)
-
-	// Partitions returns the sorted list of all partition IDs for the given topic.
-	// This method is the same as Client.Partitions(), and is provided for convenience.
-	Partitions(topic string) ([]int32, error)
-
-	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
-	// the given offset. It will return an error if this Consumer is already consuming
-	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
-	// or OffsetOldest
-	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
-
-	// HighWaterMarks returns the current high water marks for each topic and partition.
-	// Consistency between partitions is not guaranteed since high water marks are updated separately.
-	HighWaterMarks() map[string]map[int32]int64
-
-	// Close shuts down the consumer. It must be called after all child
-	// PartitionConsumers have already been closed.
-	Close() error
-}
-
-type consumer struct {
-	conf            *Config
-	children        map[string]map[int32]*partitionConsumer
-	brokerConsumers map[*Broker]*brokerConsumer
-	client          Client
-	lock            sync.Mutex
-}
-
-// NewConsumer creates a new consumer using the given broker addresses and configuration.
-func NewConsumer(addrs []string, config *Config) (Consumer, error) {
-	client, err := NewClient(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-	return newConsumer(client)
-}
-
-// NewConsumerFromClient creates a new consumer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-func NewConsumerFromClient(client Client) (Consumer, error) {
-	// For clients passed in by the caller, ensure we don't
-	// call Close() on them.
-	cli := &nopCloserClient{client}
-	return newConsumer(cli)
-}
-
-func newConsumer(client Client) (Consumer, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	c := &consumer{
-		client:          client,
-		conf:            client.Config(),
-		children:        make(map[string]map[int32]*partitionConsumer),
-		brokerConsumers: make(map[*Broker]*brokerConsumer),
-	}
-
-	return c, nil
-}
-
-func (c *consumer) Close() error {
-	return c.client.Close()
-}
-
-func (c *consumer) Topics() ([]string, error) {
-	return c.client.Topics()
-}
-
-func (c *consumer) Partitions(topic string) ([]int32, error) {
-	return c.client.Partitions(topic)
-}
-
-func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
-	child := &partitionConsumer{
-		consumer:  c,
-		conf:      c.conf,
-		topic:     topic,
-		partition: partition,
-		messages:  make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
-		errors:    make(chan *ConsumerError, c.conf.ChannelBufferSize),
-		feeder:    make(chan *FetchResponse, 1),
-		trigger:   make(chan none, 1),
-		dying:     make(chan none),
-		fetchSize: c.conf.Consumer.Fetch.Default,
-	}
-
-	if err := child.chooseStartingOffset(offset); err != nil {
-		return nil, err
-	}
-
-	var leader *Broker
-	var err error
-	if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
-		return nil, err
-	}
-
-	if err := c.addChild(child); err != nil {
-		return nil, err
-	}
-
-	go withRecover(child.dispatcher)
-	go withRecover(child.responseFeeder)
-
-	child.broker = c.refBrokerConsumer(leader)
-	child.broker.input <- child
-
-	return child, nil
-}
-
-func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	hwms := make(map[string]map[int32]int64)
-	for topic, p := range c.children {
-		hwm := make(map[int32]int64, len(p))
-		for partition, pc := range p {
-			hwm[partition] = pc.HighWaterMarkOffset()
-		}
-		hwms[topic] = hwm
-	}
-
-	return hwms
-}
-
-func (c *consumer) addChild(child *partitionConsumer) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	topicChildren := c.children[child.topic]
-	if topicChildren == nil {
-		topicChildren = make(map[int32]*partitionConsumer)
-		c.children[child.topic] = topicChildren
-	}
-
-	if topicChildren[child.partition] != nil {
-		return ConfigurationError("That topic/partition is already being consumed")
-	}
-
-	topicChildren[child.partition] = child
-	return nil
-}
-
-func (c *consumer) removeChild(child *partitionConsumer) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	delete(c.children[child.topic], child.partition)
-}
-
-func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	bc := c.brokerConsumers[broker]
-	if bc == nil {
-		bc = c.newBrokerConsumer(broker)
-		c.brokerConsumers[broker] = bc
-	}
-
-	bc.refs++
-
-	return bc
-}
-
-func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	brokerWorker.refs--
-
-	if brokerWorker.refs == 0 {
-		close(brokerWorker.input)
-		if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
-			delete(c.brokerConsumers, brokerWorker.broker)
-		}
-	}
-}
-
-func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	delete(c.brokerConsumers, brokerWorker.broker)
-}
-
-// PartitionConsumer
-
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
-// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
-// of scope.
-//
-// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
-// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
-// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
-// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
-// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
-// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
-// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
-//
-// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
-// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
-// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
-// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
-// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
-type PartitionConsumer interface {
-	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
-	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
-	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
-	// this before calling Close on the underlying client.
-	AsyncClose()
-
-	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
-	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
-	// the Messages channel when this function is called, you will be competing with Close for messages; consider
-	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
-	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
-	Close() error
-
-	// Messages returns the read channel for the messages that are returned by
-	// the broker.
-	Messages() <-chan *ConsumerMessage
-
-	// Errors returns a read channel of errors that occurred during consuming, if
-	// enabled. By default, errors are logged and not returned over this channel.
-	// If you want to implement any custom error handling, set your config's
-	// Consumer.Return.Errors setting to true, and read from this channel.
-	Errors() <-chan *ConsumerError
-
-	// HighWaterMarkOffset returns the high water mark offset of the partition,
-	// i.e. the offset that will be used for the next message that will be produced.
-	// You can use this to determine how far behind the processing is.
-	HighWaterMarkOffset() int64
-}
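// Editor's sketch, not part of the vendored sources: the for/range plus
// AsyncClose pattern described in the doc comment above. The broker address,
// topic name, and shutdown channel are illustrative assumptions; from outside
// this package each identifier would be sarama-qualified.
func examplePartitionConsumer(shutdown <-chan struct{}) error {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil) // nil config falls back to NewConfig()
	if err != nil {
		return err
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
	if err != nil {
		return err
	}
	go func() {
		<-shutdown
		pc.AsyncClose() // tear-down starts; Messages() is closed once drained
	}()
	for msg := range pc.Messages() { // loop ends after AsyncClose completes
		fmt.Printf("%s/%d offset %d: %s\n", msg.Topic, msg.Partition, msg.Offset, msg.Value)
	}
	return nil
}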
-
-type partitionConsumer struct {
-	highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-
-	consumer *consumer
-	conf     *Config
-	broker   *brokerConsumer
-	messages chan *ConsumerMessage
-	errors   chan *ConsumerError
-	feeder   chan *FetchResponse
-
-	preferredReadReplica int32
-
-	trigger, dying chan none
-	closeOnce      sync.Once
-	topic          string
-	partition      int32
-	responseResult error
-	fetchSize      int32
-	offset         int64
-	retries        int32
-}
-
-var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
-
-func (child *partitionConsumer) sendError(err error) {
-	cErr := &ConsumerError{
-		Topic:     child.topic,
-		Partition: child.partition,
-		Err:       err,
-	}
-
-	if child.conf.Consumer.Return.Errors {
-		child.errors <- cErr
-	} else {
-		Logger.Println(cErr)
-	}
-}
-
-func (child *partitionConsumer) computeBackoff() time.Duration {
-	if child.conf.Consumer.Retry.BackoffFunc != nil {
-		retries := atomic.AddInt32(&child.retries, 1)
-		return child.conf.Consumer.Retry.BackoffFunc(int(retries))
-	}
-	return child.conf.Consumer.Retry.Backoff
-}
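// Editor's sketch, not part of the vendored sources: computeBackoff prefers a
// user-supplied BackoffFunc, so callers can plug in capped exponential backoff
// through the config. The base delay and cap below are illustrative assumptions.
func exampleExponentialBackoff(cfg *Config) {
	cfg.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
		if retries > 6 {
			retries = 6 // bound the exponent so the delay is capped (and cannot overflow)
		}
		return time.Duration(100<<uint(retries)) * time.Millisecond // 200ms, 400ms, ... 6.4s
	}
}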
-
-func (child *partitionConsumer) dispatcher() {
-	for range child.trigger {
-		select {
-		case <-child.dying:
-			close(child.trigger)
-		case <-time.After(child.computeBackoff()):
-			if child.broker != nil {
-				child.consumer.unrefBrokerConsumer(child.broker)
-				child.broker = nil
-			}
-
-			Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
-			if err := child.dispatch(); err != nil {
-				child.sendError(err)
-				child.trigger <- none{}
-			}
-		}
-	}
-
-	if child.broker != nil {
-		child.consumer.unrefBrokerConsumer(child.broker)
-	}
-	child.consumer.removeChild(child)
-	close(child.feeder)
-}
-
-func (child *partitionConsumer) preferredBroker() (*Broker, error) {
-	if child.preferredReadReplica >= 0 {
-		broker, err := child.consumer.client.Broker(child.preferredReadReplica)
-		if err == nil {
-			return broker, nil
-		}
-	}
-
-	// if the preferred replica cannot be found, fall back to the leader
-	return child.consumer.client.Leader(child.topic, child.partition)
-}
-
-func (child *partitionConsumer) dispatch() error {
-	if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
-		return err
-	}
-
-	broker, err := child.preferredBroker()
-	if err != nil {
-		return err
-	}
-
-	child.broker = child.consumer.refBrokerConsumer(broker)
-
-	child.broker.input <- child
-
-	return nil
-}
-
-func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
-	newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
-	if err != nil {
-		return err
-	}
-	oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
-	if err != nil {
-		return err
-	}
-
-	switch {
-	case offset == OffsetNewest:
-		child.offset = newestOffset
-	case offset == OffsetOldest:
-		child.offset = oldestOffset
-	case offset >= oldestOffset && offset <= newestOffset:
-		child.offset = offset
-	default:
-		return ErrOffsetOutOfRange
-	}
-
-	return nil
-}
-
-func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
-	return child.messages
-}
-
-func (child *partitionConsumer) Errors() <-chan *ConsumerError {
-	return child.errors
-}
-
-func (child *partitionConsumer) AsyncClose() {
-	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
-	// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
-	// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
-	// also just close itself)
-	child.closeOnce.Do(func() {
-		close(child.dying)
-	})
-}
-
-func (child *partitionConsumer) Close() error {
-	child.AsyncClose()
-
-	var consumerErrors ConsumerErrors
-	for err := range child.errors {
-		consumerErrors = append(consumerErrors, err)
-	}
-
-	if len(consumerErrors) > 0 {
-		return consumerErrors
-	}
-	return nil
-}
-
-func (child *partitionConsumer) HighWaterMarkOffset() int64 {
-	return atomic.LoadInt64(&child.highWaterMarkOffset)
-}
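// Editor's sketch, not part of the vendored sources: since the high water mark
// is the offset of the next message to be produced, per-partition consumer lag
// can be estimated from the last delivered message.
func exampleLag(pc PartitionConsumer, msg *ConsumerMessage) int64 {
	return pc.HighWaterMarkOffset() - (msg.Offset + 1) // 0 means fully caught up
}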
-
-func (child *partitionConsumer) responseFeeder() {
-	var msgs []*ConsumerMessage
-	expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
-	firstAttempt := true
-
-feederLoop:
-	for response := range child.feeder {
-		msgs, child.responseResult = child.parseResponse(response)
-
-		if child.responseResult == nil {
-			atomic.StoreInt32(&child.retries, 0)
-		}
-
-		for i, msg := range msgs {
-			child.interceptors(msg)
-		messageSelect:
-			select {
-			case <-child.dying:
-				child.broker.acks.Done()
-				continue feederLoop
-			case child.messages <- msg:
-				firstAttempt = true
-			case <-expiryTicker.C:
-				if !firstAttempt {
-					child.responseResult = errTimedOut
-					child.broker.acks.Done()
-				remainingLoop:
-					for _, msg = range msgs[i:] {
-						child.interceptors(msg)
-						select {
-						case child.messages <- msg:
-						case <-child.dying:
-							break remainingLoop
-						}
-					}
-					child.broker.input <- child
-					continue feederLoop
-				} else {
-					// current message has not been sent, return to select
-					// statement
-					firstAttempt = false
-					goto messageSelect
-				}
-			}
-		}
-
-		child.broker.acks.Done()
-	}
-
-	expiryTicker.Stop()
-	close(child.messages)
-	close(child.errors)
-}
-
-func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
-	var messages []*ConsumerMessage
-	for _, msgBlock := range msgSet.Messages {
-		for _, msg := range msgBlock.Messages() {
-			offset := msg.Offset
-			timestamp := msg.Msg.Timestamp
-			if msg.Msg.Version >= 1 {
-				baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
-				offset += baseOffset
-				if msg.Msg.LogAppendTime {
-					timestamp = msgBlock.Msg.Timestamp
-				}
-			}
-			if offset < child.offset {
-				continue
-			}
-			messages = append(messages, &ConsumerMessage{
-				Topic:          child.topic,
-				Partition:      child.partition,
-				Key:            msg.Msg.Key,
-				Value:          msg.Msg.Value,
-				Offset:         offset,
-				Timestamp:      timestamp,
-				BlockTimestamp: msgBlock.Msg.Timestamp,
-			})
-			child.offset = offset + 1
-		}
-	}
-	if len(messages) == 0 {
-		child.offset++
-	}
-	return messages, nil
-}
-
-func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
-	messages := make([]*ConsumerMessage, 0, len(batch.Records))
-
-	for _, rec := range batch.Records {
-		offset := batch.FirstOffset + rec.OffsetDelta
-		if offset < child.offset {
-			continue
-		}
-		timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
-		if batch.LogAppendTime {
-			timestamp = batch.MaxTimestamp
-		}
-		messages = append(messages, &ConsumerMessage{
-			Topic:     child.topic,
-			Partition: child.partition,
-			Key:       rec.Key,
-			Value:     rec.Value,
-			Offset:    offset,
-			Timestamp: timestamp,
-			Headers:   rec.Headers,
-		})
-		child.offset = offset + 1
-	}
-	if len(messages) == 0 {
-		child.offset++
-	}
-	return messages, nil
-}
-
-func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
-	var (
-		metricRegistry          = child.conf.MetricRegistry
-		consumerBatchSizeMetric metrics.Histogram
-	)
-
-	if metricRegistry != nil {
-		consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
-	}
-
-	// If the request was throttled and the response is empty, log and return without error
-	if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
-		Logger.Printf(
-			"consumer/broker/%d FetchResponse throttled %v\n",
-			child.broker.broker.ID(), response.ThrottleTime)
-		return nil, nil
-	}
-
-	block := response.GetBlock(child.topic, child.partition)
-	if block == nil {
-		return nil, ErrIncompleteResponse
-	}
-
-	if block.Err != ErrNoError {
-		return nil, block.Err
-	}
-
-	nRecs, err := block.numRecords()
-	if err != nil {
-		return nil, err
-	}
-
-	consumerBatchSizeMetric.Update(int64(nRecs))
-
-	child.preferredReadReplica = block.PreferredReadReplica
-
-	if nRecs == 0 {
-		partialTrailingMessage, err := block.isPartial()
-		if err != nil {
-			return nil, err
-		}
-		// We got no messages. If we got a trailing one then we need to ask for more data.
-		// Otherwise we just poll again and wait for one to be produced...
-		if partialTrailingMessage {
-			if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
-				// we can't ask for more data, we've hit the configured limit
-				child.sendError(ErrMessageTooLarge)
-				child.offset++ // skip this one so we can keep processing future messages
-			} else {
-				child.fetchSize *= 2
-				// check int32 overflow
-				if child.fetchSize < 0 {
-					child.fetchSize = math.MaxInt32
-				}
-				if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
-					child.fetchSize = child.conf.Consumer.Fetch.Max
-				}
-			}
-		}
-
-		return nil, nil
-	}
-
-	// we got messages, reset our fetch size in case it was increased for a previous request
-	child.fetchSize = child.conf.Consumer.Fetch.Default
-	atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
-
-	// abortedProducerIDs contains producer IDs whose messages should be ignored as uncommitted
-	// - a producerID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
-	// - a producerID is removed when the partitionConsumer iterates over an aborted controlRecord, meaning the aborted transaction for this producer is over
-	abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
-	abortedTransactions := block.getAbortedTransactions()
-
-	var messages []*ConsumerMessage
-	for _, records := range block.RecordsSet {
-		switch records.recordsType {
-		case legacyRecords:
-			messageSetMessages, err := child.parseMessages(records.MsgSet)
-			if err != nil {
-				return nil, err
-			}
-
-			messages = append(messages, messageSetMessages...)
-		case defaultRecords:
-			// Consume remaining abortedTransactions up to the last offset of the current batch
-			for _, txn := range abortedTransactions {
-				if txn.FirstOffset > records.RecordBatch.LastOffset() {
-					break
-				}
-				abortedProducerIDs[txn.ProducerID] = struct{}{}
-				// Pop abortedTransactions so that we never add it again
-				abortedTransactions = abortedTransactions[1:]
-			}
-
-			recordBatchMessages, err := child.parseRecords(records.RecordBatch)
-			if err != nil {
-				return nil, err
-			}
-
-			// Parse and commit offset but do not expose messages that are:
-			// - control records
-			// - part of an aborted transaction when set to `ReadCommitted`
-
-			// control record
-			isControl, err := records.isControl()
-			if err != nil {
-				// It isn't clear why this continued on error to begin with.
-				// The safe bet is to ignore control messages under ReadUncommitted
-				// and to surface the error under ReadCommitted.
-				if child.conf.Consumer.IsolationLevel == ReadCommitted {
-					return nil, err
-				}
-				continue
-			}
-			if isControl {
-				controlRecord, err := records.getControlRecord()
-				if err != nil {
-					return nil, err
-				}
-
-				if controlRecord.Type == ControlRecordAbort {
-					delete(abortedProducerIDs, records.RecordBatch.ProducerID)
-				}
-				continue
-			}
-
-			// filter aborted transactions
-			if child.conf.Consumer.IsolationLevel == ReadCommitted {
-				_, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
-				if records.RecordBatch.IsTransactional && isAborted {
-					continue
-				}
-			}
-
-			messages = append(messages, recordBatchMessages...)
-		default:
-			return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
-		}
-	}
-
-	return messages, nil
-}
-
-func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
-	for _, interceptor := range child.conf.Consumer.Interceptors {
-		msg.safelyApplyInterceptor(interceptor)
-	}
-}
-
-type brokerConsumer struct {
-	consumer         *consumer
-	broker           *Broker
-	input            chan *partitionConsumer
-	newSubscriptions chan []*partitionConsumer
-	subscriptions    map[*partitionConsumer]none
-	wait             chan none
-	acks             sync.WaitGroup
-	refs             int
-}
-
-func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
-	bc := &brokerConsumer{
-		consumer:         c,
-		broker:           broker,
-		input:            make(chan *partitionConsumer),
-		newSubscriptions: make(chan []*partitionConsumer),
-		wait:             make(chan none),
-		subscriptions:    make(map[*partitionConsumer]none),
-		refs:             0,
-	}
-
-	go withRecover(bc.subscriptionManager)
-	go withRecover(bc.subscriptionConsumer)
-
-	return bc
-}
-
-// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
-// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
-// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
-// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
-// so the main goroutine can block waiting for work if it has none.
-func (bc *brokerConsumer) subscriptionManager() {
-	var buffer []*partitionConsumer
-
-	for {
-		if len(buffer) > 0 {
-			select {
-			case event, ok := <-bc.input:
-				if !ok {
-					goto done
-				}
-				buffer = append(buffer, event)
-			case bc.newSubscriptions <- buffer:
-				buffer = nil
-			case bc.wait <- none{}:
-			}
-		} else {
-			select {
-			case event, ok := <-bc.input:
-				if !ok {
-					goto done
-				}
-				buffer = append(buffer, event)
-			case bc.newSubscriptions <- nil:
-			}
-		}
-	}
-
-done:
-	close(bc.wait)
-	if len(buffer) > 0 {
-		bc.newSubscriptions <- buffer
-	}
-	close(bc.newSubscriptions)
-}
-
-// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
-func (bc *brokerConsumer) subscriptionConsumer() {
-	<-bc.wait // wait for our first piece of work
-
-	for newSubscriptions := range bc.newSubscriptions {
-		bc.updateSubscriptions(newSubscriptions)
-
-		if len(bc.subscriptions) == 0 {
-			// We're about to be shut down or we're about to receive more subscriptions.
-			// Either way, the signal just hasn't propagated to our goroutine yet.
-			<-bc.wait
-			continue
-		}
-
-		response, err := bc.fetchNewMessages()
-		if err != nil {
-			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
-			bc.abort(err)
-			return
-		}
-
-		bc.acks.Add(len(bc.subscriptions))
-		for child := range bc.subscriptions {
-			child.feeder <- response
-		}
-		bc.acks.Wait()
-		bc.handleResponses()
-	}
-}
-
-func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
-	for _, child := range newSubscriptions {
-		bc.subscriptions[child] = none{}
-		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
-	}
-
-	for child := range bc.subscriptions {
-		select {
-		case <-child.dying:
-			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
-			close(child.trigger)
-			delete(bc.subscriptions, child)
-		default:
-			// no-op
-		}
-	}
-}
-
-// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
-func (bc *brokerConsumer) handleResponses() {
-	for child := range bc.subscriptions {
-		result := child.responseResult
-		child.responseResult = nil
-
-		if result == nil {
-			if preferredBroker, err := child.preferredBroker(); err == nil {
-				if bc.broker.ID() != preferredBroker.ID() {
-					// not an error, but needs redispatching to consume from the preferred replica
-					child.trigger <- none{}
-					delete(bc.subscriptions, child)
-				}
-			}
-			continue
-		}
-
-		// Discard any replica preference.
-		child.preferredReadReplica = -1
-
-		switch result {
-		case errTimedOut:
-			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
-				bc.broker.ID(), child.topic, child.partition)
-			delete(bc.subscriptions, child)
-		case ErrOffsetOutOfRange:
-			// there's no point in retrying this; it will just fail the same way again
-			// shut it down and force the user to choose what to do
-			child.sendError(result)
-			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
-			close(child.trigger)
-			delete(bc.subscriptions, child)
-		case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
-			// not an error, but does need redispatching
-			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
-				bc.broker.ID(), child.topic, child.partition, result)
-			child.trigger <- none{}
-			delete(bc.subscriptions, child)
-		default:
-			// dunno, tell the user and try redispatching
-			child.sendError(result)
-			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
-				bc.broker.ID(), child.topic, child.partition, result)
-			child.trigger <- none{}
-			delete(bc.subscriptions, child)
-		}
-	}
-}
-
-func (bc *brokerConsumer) abort(err error) {
-	bc.consumer.abandonBrokerConsumer(bc)
-	_ = bc.broker.Close() // we don't care about the error this might return, we already have one
-
-	for child := range bc.subscriptions {
-		child.sendError(err)
-		child.trigger <- none{}
-	}
-
-	for newSubscriptions := range bc.newSubscriptions {
-		if len(newSubscriptions) == 0 {
-			<-bc.wait
-			continue
-		}
-		for _, child := range newSubscriptions {
-			child.sendError(err)
-			child.trigger <- none{}
-		}
-	}
-}
-
-func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
-	request := &FetchRequest{
-		MinBytes:    bc.consumer.conf.Consumer.Fetch.Min,
-		MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
-		request.Version = 1
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
-		request.Version = 2
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
-		request.Version = 3
-		request.MaxBytes = MaxResponseSize
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
-		request.Version = 4
-		request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
-		request.Version = 7
-		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
-		// and the epoch to -1 tells the broker not to generate a session ID that we're
-		// just going to ignore anyway.
-		request.SessionID = 0
-		request.SessionEpoch = -1
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
-		request.Version = 10
-	}
-	if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
-		request.Version = 11
-		request.RackID = bc.consumer.conf.RackID
-	}
-
-	for child := range bc.subscriptions {
-		request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
-	}
-
-	return bc.broker.Fetch(request)
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
deleted file mode 100644
index 2bf236a..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ /dev/null
@@ -1,879 +0,0 @@
-package sarama
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sort"
-	"sync"
-	"time"
-)
-
-// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
-var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
-
-// ConsumerGroup is responsible for dividing up processing of topics and partitions
-// over a collection of processes (the members of the consumer group).
-type ConsumerGroup interface {
-	// Consume joins a cluster of consumers for a given list of topics and
-	// starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
-	//
-	// The life-cycle of a session is represented by the following steps:
-	//
-	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
-	//    and are assigned their "fair share" of partitions, aka 'claims'.
-	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
-	//    of the claims and allow any necessary preparation or alteration of state.
-	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
-	//    in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
-	//    from concurrent reads/writes.
-	// 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
-	//    parent context is cancelled or when a server-side rebalance cycle is initiated.
-	// 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
-	//    to allow the user to perform any final tasks before a rebalance.
-	// 6. Finally, marked offsets are committed one last time before claims are released.
-	//
-	// Please note that once a rebalance is triggered, sessions must be completed within
-	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
-	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
-	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
-	// commit failures.
-	// This method should be called inside an infinite loop; when a
-	// server-side rebalance happens, the consumer session will need to be
-	// recreated to get the new claims.
-	Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
-
-	// Errors returns a read channel of errors that occurred during the consumer life-cycle.
-	// By default, errors are logged and not returned over this channel.
-	// If you want to implement any custom error handling, set your config's
-	// Consumer.Return.Errors setting to true, and read from this channel.
-	Errors() <-chan error
-
-	// Close stops the ConsumerGroup and detaches any running sessions. It is required to call
-	// this function before the object passes out of scope, as it will otherwise leak memory.
-	Close() error
-}
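// Editor's sketch, not part of the vendored sources: the infinite Consume loop
// described in the doc comment above. The broker address, group ID, topic, and
// handler are illustrative assumptions; cfg.Version must be at least V0_10_2_0
// (enforced by newConsumerGroup below).
func exampleConsumeLoop(ctx context.Context, cfg *Config, handler ConsumerGroupHandler) error {
	group, err := NewConsumerGroup([]string{"localhost:9092"}, "my-group", cfg)
	if err != nil {
		return err
	}
	defer group.Close()

	for {
		// Consume blocks for the lifetime of one session and returns when a
		// rebalance ends it, so it is re-invoked to rejoin with fresh claims.
		if err := group.Consume(ctx, []string{"my-topic"}, handler); err != nil {
			Logger.Printf("consume error: %v", err)
		}
		if ctx.Err() != nil { // context cancelled: stop rejoining
			return ctx.Err()
		}
	}
}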
-
-type consumerGroup struct {
-	client Client
-
-	config   *Config
-	consumer Consumer
-	groupID  string
-	memberID string
-	errors   chan error
-
-	lock      sync.Mutex
-	closed    chan none
-	closeOnce sync.Once
-
-	userData []byte
-}
-
-// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
-func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
-	client, err := NewClient(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-
-	c, err := newConsumerGroup(groupID, client)
-	if err != nil {
-		_ = client.Close()
-	}
-	return c, err
-}
-
-// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-// PLEASE NOTE: consumer groups can only re-use but not share clients.
-func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
-	// For clients passed in by the caller, ensure we don't
-	// call Close() on them.
-	cli := &nopCloserClient{client}
-	return newConsumerGroup(groupID, cli)
-}
-
-func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
-	config := client.Config()
-	if !config.Version.IsAtLeast(V0_10_2_0) {
-		return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
-	}
-
-	consumer, err := NewConsumerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-
-	return &consumerGroup{
-		client:   client,
-		consumer: consumer,
-		config:   config,
-		groupID:  groupID,
-		errors:   make(chan error, config.ChannelBufferSize),
-		closed:   make(chan none),
-	}, nil
-}
-
-// Errors implements ConsumerGroup.
-func (c *consumerGroup) Errors() <-chan error { return c.errors }
-
-// Close implements ConsumerGroup.
-func (c *consumerGroup) Close() (err error) {
-	c.closeOnce.Do(func() {
-		close(c.closed)
-
-		// leave group
-		if e := c.leave(); e != nil {
-			err = e
-		}
-
-		// drain errors
-		go func() {
-			close(c.errors)
-		}()
-		for e := range c.errors {
-			err = e
-		}
-
-		if e := c.client.Close(); e != nil {
-			err = e
-		}
-	})
-	return
-}
-
-// Consume implements ConsumerGroup.
-func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
-	// Ensure group is not closed
-	select {
-	case <-c.closed:
-		return ErrClosedConsumerGroup
-	default:
-	}
-
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	// Quick exit when no topics are provided
-	if len(topics) == 0 {
-		return fmt.Errorf("no topics provided")
-	}
-
-	// Refresh metadata for requested topics
-	if err := c.client.RefreshMetadata(topics...); err != nil {
-		return err
-	}
-
-	// Init session
-	sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
-	if err == ErrClosedClient {
-		return ErrClosedConsumerGroup
-	} else if err != nil {
-		return err
-	}
-
-	// Start a loop that checks whether the partition count of any subscribed topic
-	// has changed; any change cancels the session, which triggers a rebalance. The
-	// loop is tied to this session, so calling Consume again does not accumulate
-	// extra loopCheckPartitionNumbers goroutines.
-	go c.loopCheckPartitionNumbers(topics, sess)
-
-	// Wait for session exit signal
-	<-sess.ctx.Done()
-
-	// Gracefully release session claims
-	return sess.release(true)
-}
-
-func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
-	select {
-	case <-c.closed:
-		return nil, ErrClosedConsumerGroup
-	case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
-	}
-
-	if refreshCoordinator {
-		err := c.client.RefreshCoordinator(c.groupID)
-		if err != nil {
-			return c.retryNewSession(ctx, topics, handler, retries, true)
-		}
-	}
-
-	return c.newSession(ctx, topics, handler, retries-1)
-}
-
-func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
-	coordinator, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		if retries <= 0 {
-			return nil, err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, true)
-	}
-
-	// Join consumer group
-	join, err := c.joinGroupRequest(coordinator, topics)
-	if err != nil {
-		_ = coordinator.Close()
-		return nil, err
-	}
-	switch join.Err {
-	case ErrNoError:
-		c.memberID = join.MemberId
-	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
-		c.memberID = ""
-		return c.newSession(ctx, topics, handler, retries)
-	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
-		if retries <= 0 {
-			return nil, join.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, true)
-	case ErrRebalanceInProgress: // retry after backoff
-		if retries <= 0 {
-			return nil, join.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, false)
-	default:
-		return nil, join.Err
-	}
-
-	// Prepare distribution plan if we joined as the leader
-	var plan BalanceStrategyPlan
-	if join.LeaderId == join.MemberId {
-		members, err := join.GetMembers()
-		if err != nil {
-			return nil, err
-		}
-
-		plan, err = c.balance(members)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// Sync consumer group
-	groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
-	if err != nil {
-		_ = coordinator.Close()
-		return nil, err
-	}
-	switch groupRequest.Err {
-	case ErrNoError:
-	case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
-		c.memberID = ""
-		return c.newSession(ctx, topics, handler, retries)
-	case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
-		if retries <= 0 {
-			return nil, groupRequest.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, true)
-	case ErrRebalanceInProgress: // retry after backoff
-		if retries <= 0 {
-			return nil, groupRequest.Err
-		}
-
-		return c.retryNewSession(ctx, topics, handler, retries, false)
-	default:
-		return nil, groupRequest.Err
-	}
-
-	// Retrieve and sort claims
-	var claims map[string][]int32
-	if len(groupRequest.MemberAssignment) > 0 {
-		members, err := groupRequest.GetMemberAssignment()
-		if err != nil {
-			return nil, err
-		}
-		claims = members.Topics
-		c.userData = members.UserData
-
-		for _, partitions := range claims {
-			sort.Sort(int32Slice(partitions))
-		}
-	}
-
-	return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
-}
-
-func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
-	req := &JoinGroupRequest{
-		GroupId:        c.groupID,
-		MemberId:       c.memberID,
-		SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
-		ProtocolType:   "consumer",
-	}
-	if c.config.Version.IsAtLeast(V0_10_1_0) {
-		req.Version = 1
-		req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
-	}
-
-	// use static user-data if configured, otherwise use consumer-group userdata from the last sync
-	userData := c.config.Consumer.Group.Member.UserData
-	if len(userData) == 0 {
-		userData = c.userData
-	}
-	meta := &ConsumerGroupMemberMetadata{
-		Topics:   topics,
-		UserData: userData,
-	}
-	strategy := c.config.Consumer.Group.Rebalance.Strategy
-	if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
-		return nil, err
-	}
-
-	return coordinator.JoinGroup(req)
-}
-
-func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
-	req := &SyncGroupRequest{
-		GroupId:      c.groupID,
-		MemberId:     c.memberID,
-		GenerationId: generationID,
-	}
-	strategy := c.config.Consumer.Group.Rebalance.Strategy
-	for memberID, topics := range plan {
-		assignment := &ConsumerGroupMemberAssignment{Topics: topics}
-		userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
-		if err != nil {
-			return nil, err
-		}
-		assignment.UserData = userDataBytes
-		if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
-			return nil, err
-		}
-	}
-	return coordinator.SyncGroup(req)
-}
-
-func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
-	req := &HeartbeatRequest{
-		GroupId:      c.groupID,
-		MemberId:     memberID,
-		GenerationId: generationID,
-	}
-
-	return coordinator.Heartbeat(req)
-}
-
-func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
-	topics := make(map[string][]int32)
-	for _, meta := range members {
-		for _, topic := range meta.Topics {
-			topics[topic] = nil
-		}
-	}
-
-	for topic := range topics {
-		partitions, err := c.client.Partitions(topic)
-		if err != nil {
-			return nil, err
-		}
-		topics[topic] = partitions
-	}
-
-	strategy := c.config.Consumer.Group.Rebalance.Strategy
-	return strategy.Plan(members, topics)
-}
-
-// Leaves the cluster; called by Close.
-func (c *consumerGroup) leave() error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	if c.memberID == "" {
-		return nil
-	}
-
-	coordinator, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		return err
-	}
-
-	resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
-		GroupId:  c.groupID,
-		MemberId: c.memberID,
-	})
-	if err != nil {
-		_ = coordinator.Close()
-		return err
-	}
-
-	// Unset memberID
-	c.memberID = ""
-
-	// Check response
-	switch resp.Err {
-	case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
-		return nil
-	default:
-		return resp.Err
-	}
-}
-
-func (c *consumerGroup) handleError(err error, topic string, partition int32) {
-	if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
-		err = &ConsumerError{
-			Topic:     topic,
-			Partition: partition,
-			Err:       err,
-		}
-	}
-
-	if !c.config.Consumer.Return.Errors {
-		Logger.Println(err)
-		return
-	}
-
-	select {
-	case <-c.closed:
-		// consumer is closed
-		return
-	default:
-	}
-
-	select {
-	case c.errors <- err:
-	default:
-		// no error listener
-	}
-}
-
-func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
-	pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
-	defer session.cancel()
-	defer pause.Stop()
-	var oldTopicToPartitionNum map[string]int
-	var err error
-	if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
-		return
-	}
-	for {
-		if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
-			return
-		} else {
-			for topic, num := range oldTopicToPartitionNum {
-				if newTopicToPartitionNum[topic] != num {
-					return // trigger the end of the session on exit
-				}
-			}
-		}
-		select {
-		case <-pause.C:
-		case <-session.ctx.Done():
-			Logger.Printf("loop check partition number coroutine will exit, topics %s", topics)
-			// if session closed by other, should be exited
-			return
-		case <-c.closed:
-			return
-		}
-	}
-}
-
-func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
-	topicToPartitionNum := make(map[string]int, len(topics))
-	for _, topic := range topics {
-		if partitionNum, err := c.client.Partitions(topic); err != nil {
-			Logger.Printf("Consumer Group topic %s get partition number failed %v", topic, err)
-			return nil, err
-		} else {
-			topicToPartitionNum[topic] = len(partitionNum)
-		}
-	}
-	return topicToPartitionNum, nil
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupSession represents a consumer group member session.
-type ConsumerGroupSession interface {
-	// Claims returns information about the claimed partitions by topic.
-	Claims() map[string][]int32
-
-	// MemberID returns the cluster member ID.
-	MemberID() string
-
-	// GenerationID returns the current generation ID.
-	GenerationID() int32
-
-	// MarkOffset marks the provided offset, alongside a metadata string
-	// that represents the state of the partition consumer at that point in time. The
-	// metadata string can be used by another consumer to restore that state, so it
-	// can resume consumption.
-	//
-	// To follow upstream conventions, you are expected to mark the offset of the
-	// next message to read, not the last message read. Thus, when calling `MarkOffset`
-	// you should typically add one to the offset of the last consumed message.
-	//
-	// Note: calling MarkOffset does not necessarily commit the offset to the backend
-	// store immediately for efficiency reasons, and it may never be committed if
-	// your application crashes. This means that you may end up processing the same
-	// message twice, and your processing should ideally be idempotent.
-	MarkOffset(topic string, partition int32, offset int64, metadata string)
-
-	// Commit the offset to the backend
-	//
-	// Note: calling Commit performs a blocking synchronous operation.
-	Commit()
-
-	// ResetOffset resets to the provided offset, alongside a metadata string that
-	// represents the state of the partition consumer at that point in time. Reset
-	// acts as a counterpart to MarkOffset, the difference being that it allows
-	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
-	// allows incrementing the offset. See MarkOffset for more details.
-	ResetOffset(topic string, partition int32, offset int64, metadata string)
-
-	// MarkMessage marks a message as consumed.
-	MarkMessage(msg *ConsumerMessage, metadata string)
-
-	// Context returns the session context.
-	Context() context.Context
-}
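
A small, hypothetical helper against this interface, illustrating the "+1" convention from the MarkOffset contract above (MarkMessage applies it for you) and Commit's blocking flush:

package example

import "github.com/IBM/sarama"

// markAndCommit marks a processed message and synchronously flushes offsets.
func markAndCommit(sess sarama.ConsumerGroupSession, msg *sarama.ConsumerMessage) {
	// Mark the offset of the *next* message to read, per the upstream
	// convention; this is exactly what sess.MarkMessage(msg, "") does.
	sess.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")

	// Commit blocks until the offsets have been written to the backend.
	sess.Commit()
}
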
-
-type consumerGroupSession struct {
-	parent       *consumerGroup
-	memberID     string
-	generationID int32
-	handler      ConsumerGroupHandler
-
-	claims  map[string][]int32
-	offsets *offsetManager
-	ctx     context.Context
-	cancel  func()
-
-	waitGroup       sync.WaitGroup
-	releaseOnce     sync.Once
-	hbDying, hbDead chan none
-}
-
-func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
-	// init offset manager
-	offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
-	if err != nil {
-		return nil, err
-	}
-
-	// init context
-	ctx, cancel := context.WithCancel(ctx)
-
-	// init session
-	sess := &consumerGroupSession{
-		parent:       parent,
-		memberID:     memberID,
-		generationID: generationID,
-		handler:      handler,
-		offsets:      offsets,
-		claims:       claims,
-		ctx:          ctx,
-		cancel:       cancel,
-		hbDying:      make(chan none),
-		hbDead:       make(chan none),
-	}
-
-	// start heartbeat loop
-	go sess.heartbeatLoop()
-
-	// create a POM for each claim
-	for topic, partitions := range claims {
-		for _, partition := range partitions {
-			pom, err := offsets.ManagePartition(topic, partition)
-			if err != nil {
-				_ = sess.release(false)
-				return nil, err
-			}
-
-			// handle POM errors
-			go func(topic string, partition int32) {
-				for err := range pom.Errors() {
-					sess.parent.handleError(err, topic, partition)
-				}
-			}(topic, partition)
-		}
-	}
-
-	// perform setup
-	if err := handler.Setup(sess); err != nil {
-		_ = sess.release(true)
-		return nil, err
-	}
-
-	// start consuming
-	for topic, partitions := range claims {
-		for _, partition := range partitions {
-			sess.waitGroup.Add(1)
-
-			go func(topic string, partition int32) {
-				defer sess.waitGroup.Done()
-
-				// cancel the session as soon as the first
-				// goroutine exits
-				defer sess.cancel()
-
-				// consume a single topic/partition, blocking
-				sess.consume(topic, partition)
-			}(topic, partition)
-		}
-	}
-	return sess, nil
-}
-
-func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
-func (s *consumerGroupSession) MemberID() string           { return s.memberID }
-func (s *consumerGroupSession) GenerationID() int32        { return s.generationID }
-
-func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
-	if pom := s.offsets.findPOM(topic, partition); pom != nil {
-		pom.MarkOffset(offset, metadata)
-	}
-}
-
-func (s *consumerGroupSession) Commit() {
-	s.offsets.Commit()
-}
-
-func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
-	if pom := s.offsets.findPOM(topic, partition); pom != nil {
-		pom.ResetOffset(offset, metadata)
-	}
-}
-
-func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
-	s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
-}
-
-func (s *consumerGroupSession) Context() context.Context {
-	return s.ctx
-}
-
-func (s *consumerGroupSession) consume(topic string, partition int32) {
-	// quick exit if rebalance is due
-	select {
-	case <-s.ctx.Done():
-		return
-	case <-s.parent.closed:
-		return
-	default:
-	}
-
-	// get next offset
-	offset := s.parent.config.Consumer.Offsets.Initial
-	if pom := s.offsets.findPOM(topic, partition); pom != nil {
-		offset, _ = pom.NextOffset()
-	}
-
-	// create new claim
-	claim, err := newConsumerGroupClaim(s, topic, partition, offset)
-	if err != nil {
-		s.parent.handleError(err, topic, partition)
-		return
-	}
-
-	// handle errors
-	go func() {
-		for err := range claim.Errors() {
-			s.parent.handleError(err, topic, partition)
-		}
-	}()
-
-	// trigger close when session is done
-	go func() {
-		select {
-		case <-s.ctx.Done():
-		case <-s.parent.closed:
-		}
-		claim.AsyncClose()
-	}()
-
-	// start processing
-	if err := s.handler.ConsumeClaim(s, claim); err != nil {
-		s.parent.handleError(err, topic, partition)
-	}
-
-	// ensure consumer is closed & drained
-	claim.AsyncClose()
-	for _, err := range claim.waitClosed() {
-		s.parent.handleError(err, topic, partition)
-	}
-}
-
-func (s *consumerGroupSession) release(withCleanup bool) (err error) {
-	// signal release, stop heartbeat
-	s.cancel()
-
-	// wait for consumers to exit
-	s.waitGroup.Wait()
-
-	// perform release
-	s.releaseOnce.Do(func() {
-		if withCleanup {
-			if e := s.handler.Cleanup(s); e != nil {
-				s.parent.handleError(e, "", -1)
-				err = e
-			}
-		}
-
-		if e := s.offsets.Close(); e != nil {
-			err = e
-		}
-
-		close(s.hbDying)
-		<-s.hbDead
-	})
-
-	return
-}
-
-func (s *consumerGroupSession) heartbeatLoop() {
-	defer close(s.hbDead)
-	defer s.cancel() // trigger the end of the session on exit
-
-	pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
-	defer pause.Stop()
-
-	retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
-	defer retryBackoff.Stop()
-
-	retries := s.parent.config.Metadata.Retry.Max
-	for {
-		coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
-		if err != nil {
-			if retries <= 0 {
-				s.parent.handleError(err, "", -1)
-				return
-			}
-			retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
-			select {
-			case <-s.hbDying:
-				return
-			case <-retryBackoff.C:
-				retries--
-			}
-			continue
-		}
-
-		resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
-		if err != nil {
-			_ = coordinator.Close()
-
-			if retries <= 0 {
-				s.parent.handleError(err, "", -1)
-				return
-			}
-
-			retries--
-			continue
-		}
-
-		switch resp.Err {
-		case ErrNoError:
-			retries = s.parent.config.Metadata.Retry.Max
-		case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
-			return
-		default:
-			s.parent.handleError(resp.Err, "", -1)
-			return
-		}
-
-		select {
-		case <-pause.C:
-		case <-s.hbDying:
-			return
-		}
-	}
-}
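
Everything in these loops is driven by configuration: the heartbeat ticker by Consumer.Group.Heartbeat.Interval, the retry budget by Metadata.Retry, and loopCheckPartitionNumbers further up by Metadata.RefreshFrequency. A sketch of tuning them; the values are illustrative, not recommendations:

package example

import (
	"time"

	"github.com/IBM/sarama"
)

func newGroupConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Consumer.Group.Heartbeat.Interval = 3 * time.Second // pause between heartbeats
	cfg.Metadata.Retry.Max = 3                              // retry budget for coordinator lookups and heartbeats
	cfg.Metadata.Retry.Backoff = 250 * time.Millisecond     // wait between those retries
	cfg.Metadata.RefreshFrequency = 10 * time.Minute        // partition-count polling interval
	return cfg
}
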
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
-// It also provides hooks for your consumer group session life-cycle and allows you to
-// trigger logic before or after the consume loop(s).
-//
-// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
-// ensure that all state is safely protected against race conditions.
-type ConsumerGroupHandler interface {
-	// Setup is run at the beginning of a new session, before ConsumeClaim.
-	Setup(ConsumerGroupSession) error
-
-	// Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
-	// but before the offsets are committed for the very last time.
-	Cleanup(ConsumerGroupSession) error
-
-	// ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
-	// Once the Messages() channel is closed, the Handler must finish its processing
-	// loop and exit.
-	ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
-}
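
A minimal end-to-end sketch of implementing this interface and running it; the address, topic, and group name are placeholders:

package main

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	// Messages() is closed when a rebalance is due; returning ends the claim.
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "")
	}
	return nil
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_2_0 // consumer groups require at least 0.10.2

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	for {
		// Consume blocks for the lifetime of one session; call it again
		// after each rebalance to join the next session.
		if err := group.Consume(context.Background(), []string{"example-topic"}, handler{}); err != nil {
			log.Fatal(err)
		}
	}
}
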
-
-// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
-type ConsumerGroupClaim interface {
-	// Topic returns the consumed topic name.
-	Topic() string
-
-	// Partition returns the consumed partition.
-	Partition() int32
-
-	// InitialOffset returns the initial offset that was used as a starting point for this claim.
-	InitialOffset() int64
-
-	// HighWaterMarkOffset returns the high water mark offset of the partition,
-	// i.e. the offset that will be used for the next message that will be produced.
-	// You can use this to determine how far behind the processing is.
-	HighWaterMarkOffset() int64
-
-	// Messages returns the read channel for the messages that are returned by
-	// the broker. The messages channel will be closed when a new rebalance cycle
-	// is due. You must finish processing and mark offsets within
-	// Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
-	// re-assigned to another group member.
-	Messages() <-chan *ConsumerMessage
-}
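
HighWaterMarkOffset makes consumer lag easy to estimate. A hypothetical helper:

package example

import "github.com/IBM/sarama"

// claimLag estimates pending messages on a claim: the high water mark is
// the offset of the *next* message to be produced, so subtracting the
// next unread offset yields the backlog size.
func claimLag(claim sarama.ConsumerGroupClaim, nextOffset int64) int64 {
	return claim.HighWaterMarkOffset() - nextOffset
}
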
-
-type consumerGroupClaim struct {
-	topic     string
-	partition int32
-	offset    int64
-	PartitionConsumer
-}
-
-func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
-	pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
-	if err == ErrOffsetOutOfRange {
-		offset = sess.parent.config.Consumer.Offsets.Initial
-		pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	go func() {
-		for err := range pcm.Errors() {
-			sess.parent.handleError(err, topic, partition)
-		}
-	}()
-
-	return &consumerGroupClaim{
-		topic:             topic,
-		partition:         partition,
-		offset:            offset,
-		PartitionConsumer: pcm,
-	}, nil
-}
-
-func (c *consumerGroupClaim) Topic() string        { return c.topic }
-func (c *consumerGroupClaim) Partition() int32     { return c.partition }
-func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
-
-// waitClosed drains messages and errors and ensures the claim is fully closed.
-func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
-	go func() {
-		for range c.Messages() {
-		}
-	}()
-
-	for err := range c.Errors() {
-		errs = append(errs, err)
-	}
-	return
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
deleted file mode 100644
index 21b11e9..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_group_members.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
-type ConsumerGroupMemberMetadata struct {
-	Version  int16
-	Topics   []string
-	UserData []byte
-}
-
-func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
-	pe.putInt16(m.Version)
-
-	if err := pe.putStringArray(m.Topics); err != nil {
-		return err
-	}
-
-	if err := pe.putBytes(m.UserData); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
-	if m.Version, err = pd.getInt16(); err != nil {
-		return
-	}
-
-	if m.Topics, err = pd.getStringArray(); err != nil {
-		return
-	}
-
-	if m.UserData, err = pd.getBytes(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
-type ConsumerGroupMemberAssignment struct {
-	Version  int16
-	Topics   map[string][]int32
-	UserData []byte
-}
-
-func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
-	pe.putInt16(m.Version)
-
-	if err := pe.putArrayLength(len(m.Topics)); err != nil {
-		return err
-	}
-
-	for topic, partitions := range m.Topics {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putInt32Array(partitions); err != nil {
-			return err
-		}
-	}
-
-	if err := pe.putBytes(m.UserData); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
-	if m.Version, err = pd.getInt16(); err != nil {
-		return
-	}
-
-	var topicLen int
-	if topicLen, err = pd.getArrayLength(); err != nil {
-		return
-	}
-
-	m.Topics = make(map[string][]int32, topicLen)
-	for i := 0; i < topicLen; i++ {
-		var topic string
-		if topic, err = pd.getString(); err != nil {
-			return
-		}
-		if m.Topics[topic], err = pd.getInt32Array(); err != nil {
-			return
-		}
-	}
-
-	if m.UserData, err = pd.getBytes(); err != nil {
-		return
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
deleted file mode 100644
index 5c18e04..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-// ConsumerMetadataRequest is used to find the coordinator broker for a consumer group
-type ConsumerMetadataRequest struct {
-	ConsumerGroup string
-}
-
-func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
-	tmp := new(FindCoordinatorRequest)
-	tmp.CoordinatorKey = r.ConsumerGroup
-	tmp.CoordinatorType = CoordinatorGroup
-	return tmp.encode(pe)
-}
-
-func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
-	tmp := new(FindCoordinatorRequest)
-	if err := tmp.decode(pd, version); err != nil {
-		return err
-	}
-	r.ConsumerGroup = tmp.CoordinatorKey
-	return nil
-}
-
-func (r *ConsumerMetadataRequest) key() int16 {
-	return 10
-}
-
-func (r *ConsumerMetadataRequest) version() int16 {
-	return 0
-}
-
-func (r *ConsumerMetadataRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
-	return V0_8_2_0
-}
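
As the wrapper above shows, this request is a thin compatibility shim over FindCoordinator (both use API key 10). A hedged sketch of issuing it directly against a broker; the address and group name are placeholders:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	// Encoded on the wire as a FindCoordinatorRequest, per the encode method above.
	resp, err := broker.GetConsumerMetadata(&sarama.ConsumerMetadataRequest{ConsumerGroup: "example-group"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("coordinator for group: %s", resp.Coordinator.Addr())
}
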
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
deleted file mode 100644
index 7fe0cf9..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package sarama
-
-import (
-	"net"
-	"strconv"
-)
-
-// ConsumerMetadataResponse holds the response for a consumer group metadata request
-type ConsumerMetadataResponse struct {
-	Err             KError
-	Coordinator     *Broker
-	CoordinatorID   int32  // deprecated: use Coordinator.ID()
-	CoordinatorHost string // deprecated: use Coordinator.Addr()
-	CoordinatorPort int32  // deprecated: use Coordinator.Addr()
-}
-
-func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
-	tmp := new(FindCoordinatorResponse)
-
-	if err := tmp.decode(pd, version); err != nil {
-		return err
-	}
-
-	r.Err = tmp.Err
-
-	r.Coordinator = tmp.Coordinator
-	if tmp.Coordinator == nil {
-		return nil
-	}
-
-	// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
-	// backwards compatibility
-	host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
-	if err != nil {
-		return err
-	}
-	port, err := strconv.ParseInt(portstr, 10, 32)
-	if err != nil {
-		return err
-	}
-	r.CoordinatorID = r.Coordinator.ID()
-	r.CoordinatorHost = host
-	r.CoordinatorPort = int32(port)
-
-	return nil
-}
-
-func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
-	if r.Coordinator == nil {
-		r.Coordinator = new(Broker)
-		r.Coordinator.id = r.CoordinatorID
-		r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
-	}
-
-	tmp := &FindCoordinatorResponse{
-		Version:     0,
-		Err:         r.Err,
-		Coordinator: r.Coordinator,
-	}
-
-	if err := tmp.encode(pe); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *ConsumerMetadataResponse) key() int16 {
-	return 10
-}
-
-func (r *ConsumerMetadataResponse) version() int16 {
-	return 0
-}
-
-func (r *ConsumerMetadataResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
-	return V0_8_2_0
-}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
deleted file mode 100644
index 46fb044..0000000
--- a/vendor/github.com/Shopify/sarama/create_partitions_request.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package sarama
-
-import "time"
-
-type CreatePartitionsRequest struct {
-	TopicPartitions map[string]*TopicPartition
-	Timeout         time.Duration
-	ValidateOnly    bool
-}
-
-func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
-		return err
-	}
-
-	for topic, partition := range c.TopicPartitions {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := partition.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putInt32(int32(c.Timeout / time.Millisecond))
-
-	pe.putBool(c.ValidateOnly)
-
-	return nil
-}
-
-func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	c.TopicPartitions = make(map[string]*TopicPartition, n)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicPartitions[topic] = new(TopicPartition)
-		if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.Timeout = time.Duration(timeout) * time.Millisecond
-
-	if c.ValidateOnly, err = pd.getBool(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *CreatePartitionsRequest) key() int16 {
-	return 37
-}
-
-func (r *CreatePartitionsRequest) version() int16 {
-	return 0
-}
-
-func (r *CreatePartitionsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
-
-type TopicPartition struct {
-	Count      int32
-	Assignment [][]int32
-}
-
-func (t *TopicPartition) encode(pe packetEncoder) error {
-	pe.putInt32(t.Count)
-
-	if len(t.Assignment) == 0 {
-		pe.putInt32(-1)
-		return nil
-	}
-
-	if err := pe.putArrayLength(len(t.Assignment)); err != nil {
-		return err
-	}
-
-	for _, assign := range t.Assignment {
-		if err := pe.putInt32Array(assign); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
-	if t.Count, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	n, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if n <= 0 {
-		return nil
-	}
-	t.Assignment = make([][]int32, n)
-
-	for i := 0; i < int(n); i++ {
-		if t.Assignment[i], err = pd.getInt32Array(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
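
Rather than hand-encoding this request, it is normally driven through the higher-level ClusterAdmin API. A sketch with placeholder address and topic; note cfg.Version matches the requiredVersion above:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // CreatePartitions requires Kafka >= 1.0.0

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Grow "example-topic" to 6 partitions and let the brokers choose
	// replica placement (a nil assignment encodes as -1, as above).
	if err := admin.CreatePartitions("example-topic", 6, nil, false); err != nil {
		log.Fatal(err)
	}
}
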
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
deleted file mode 100644
index 12ce788..0000000
--- a/vendor/github.com/Shopify/sarama/create_partitions_response.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-type CreatePartitionsResponse struct {
-	ThrottleTime         time.Duration
-	TopicPartitionErrors map[string]*TopicPartitionError
-}
-
-func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-	if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
-		return err
-	}
-
-	for topic, partitionError := range c.TopicPartitionErrors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := partitionError.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicPartitionErrors[topic] = new(TopicPartitionError)
-		if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *CreatePartitionsResponse) key() int16 {
-	return 37
-}
-
-func (r *CreatePartitionsResponse) version() int16 {
-	return 0
-}
-
-func (r *CreatePartitionsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
-
-type TopicPartitionError struct {
-	Err    KError
-	ErrMsg *string
-}
-
-func (t *TopicPartitionError) Error() string {
-	text := t.Err.Error()
-	if t.ErrMsg != nil {
-		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
-	}
-	return text
-}
-
-func (t *TopicPartitionError) encode(pe packetEncoder) error {
-	pe.putInt16(int16(t.Err))
-
-	if err := pe.putNullableString(t.ErrMsg); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	t.Err = KError(kerr)
-
-	if t.ErrMsg, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
deleted file mode 100644
index 287acd0..0000000
--- a/vendor/github.com/Shopify/sarama/create_topics_request.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type CreateTopicsRequest struct {
-	Version int16
-
-	TopicDetails map[string]*TopicDetail
-	Timeout      time.Duration
-	ValidateOnly bool
-}
-
-func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
-		return err
-	}
-	for topic, detail := range c.TopicDetails {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := detail.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putInt32(int32(c.Timeout / time.Millisecond))
-
-	if c.Version >= 1 {
-		pe.putBool(c.ValidateOnly)
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.TopicDetails = make(map[string]*TopicDetail, n)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicDetails[topic] = new(TopicDetail)
-		if err = c.TopicDetails[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	c.Timeout = time.Duration(timeout) * time.Millisecond
-
-	if version >= 1 {
-		c.ValidateOnly, err = pd.getBool()
-		if err != nil {
-			return err
-		}
-
-		c.Version = version
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsRequest) key() int16 {
-	return 19
-}
-
-func (c *CreateTopicsRequest) version() int16 {
-	return c.Version
-}
-
-func (r *CreateTopicsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
-	switch c.Version {
-	case 2:
-		return V1_0_0_0
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
-
-type TopicDetail struct {
-	NumPartitions     int32
-	ReplicationFactor int16
-	ReplicaAssignment map[int32][]int32
-	ConfigEntries     map[string]*string
-}
-
-func (t *TopicDetail) encode(pe packetEncoder) error {
-	pe.putInt32(t.NumPartitions)
-	pe.putInt16(t.ReplicationFactor)
-
-	if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
-		return err
-	}
-	for partition, assignment := range t.ReplicaAssignment {
-		pe.putInt32(partition)
-		if err := pe.putInt32Array(assignment); err != nil {
-			return err
-		}
-	}
-
-	if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
-		return err
-	}
-	for configKey, configValue := range t.ConfigEntries {
-		if err := pe.putString(configKey); err != nil {
-			return err
-		}
-		if err := pe.putNullableString(configValue); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
-	if t.NumPartitions, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if t.ReplicationFactor, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.ReplicaAssignment = make(map[int32][]int32, n)
-		for i := 0; i < n; i++ {
-			replica, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
-				return err
-			}
-		}
-	}
-
-	n, err = pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.ConfigEntries = make(map[string]*string, n)
-		for i := 0; i < n; i++ {
-			configKey, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
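
The matching high-level call is ClusterAdmin.CreateTopic. A sketch with placeholder names; the retention value is illustrative:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_1_0 // minimum version for CreateTopics, per requiredVersion above

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	retention := "86400000" // config values are nullable strings, hence the pointer
	detail := &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 1,
		ConfigEntries:     map[string]*string{"retention.ms": &retention},
	}
	if err := admin.CreateTopic("example-topic", detail, false); err != nil {
		log.Fatal(err)
	}
}
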
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
deleted file mode 100644
index 7e1448a..0000000
--- a/vendor/github.com/Shopify/sarama/create_topics_response.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-type CreateTopicsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	TopicErrors  map[string]*TopicError
-}
-
-func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
-	if c.Version >= 2 {
-		pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-	}
-
-	if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
-		return err
-	}
-	for topic, topicError := range c.TopicErrors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := topicError.encode(pe, c.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
-	c.Version = version
-
-	if version >= 2 {
-		throttleTime, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	c.TopicErrors = make(map[string]*TopicError, n)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		c.TopicErrors[topic] = new(TopicError)
-		if err := c.TopicErrors[topic].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (c *CreateTopicsResponse) key() int16 {
-	return 19
-}
-
-func (c *CreateTopicsResponse) version() int16 {
-	return c.Version
-}
-
-func (c *CreateTopicsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
-	switch c.Version {
-	case 2:
-		return V1_0_0_0
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
-
-type TopicError struct {
-	Err    KError
-	ErrMsg *string
-}
-
-func (t *TopicError) Error() string {
-	text := t.Err.Error()
-	if t.ErrMsg != nil {
-		text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
-	}
-	return text
-}
-
-func (t *TopicError) encode(pe packetEncoder, version int16) error {
-	pe.putInt16(int16(t.Err))
-
-	if version >= 1 {
-		if err := pe.putNullableString(t.ErrMsg); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
-	kErr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	t.Err = KError(kErr)
-
-	if version >= 1 {
-		if t.ErrMsg, err = pd.getNullableString(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
deleted file mode 100644
index af45fda..0000000
--- a/vendor/github.com/Shopify/sarama/decompress.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io/ioutil"
-	"sync"
-
-	snappy "github.com/eapache/go-xerial-snappy"
-	"github.com/pierrec/lz4"
-)
-
-var (
-	lz4ReaderPool = sync.Pool{
-		New: func() interface{} {
-			return lz4.NewReader(nil)
-		},
-	}
-
-	gzipReaderPool sync.Pool
-)
-
-func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
-	switch cc {
-	case CompressionNone:
-		return data, nil
-	case CompressionGZIP:
-		var err error
-		reader, ok := gzipReaderPool.Get().(*gzip.Reader)
-		if !ok {
-			reader, err = gzip.NewReader(bytes.NewReader(data))
-		} else {
-			err = reader.Reset(bytes.NewReader(data))
-		}
-
-		if err != nil {
-			return nil, err
-		}
-
-		defer gzipReaderPool.Put(reader)
-
-		return ioutil.ReadAll(reader)
-	case CompressionSnappy:
-		return snappy.Decode(data)
-	case CompressionLZ4:
-		reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
-		if !ok {
-			reader = lz4.NewReader(bytes.NewReader(data))
-		} else {
-			reader.Reset(bytes.NewReader(data))
-		}
-		defer lz4ReaderPool.Put(reader)
-
-		return ioutil.ReadAll(reader)
-	case CompressionZSTD:
-		return zstdDecompress(nil, data)
-	default:
-		return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
-	}
-}
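
The pooling pattern above (Get, type-assert, Reset or construct, defer Put) amortizes decompressor allocations across messages. The same idea in a standalone gzip-only sketch, using io.ReadAll in place of the deprecated ioutil.ReadAll:

package example

import (
	"bytes"
	"compress/gzip"
	"io"
	"sync"
)

var gzipReaders sync.Pool // holds *gzip.Reader values

// gunzip decompresses data, reusing pooled readers as decompress does above.
func gunzip(data []byte) ([]byte, error) {
	var err error
	reader, ok := gzipReaders.Get().(*gzip.Reader)
	if !ok {
		reader, err = gzip.NewReader(bytes.NewReader(data))
	} else {
		err = reader.Reset(bytes.NewReader(data))
	}
	if err != nil {
		return nil, err
	}
	defer gzipReaders.Put(reader)
	return io.ReadAll(reader)
}
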
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
deleted file mode 100644
index 4ac8bbe..0000000
--- a/vendor/github.com/Shopify/sarama/delete_groups_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-type DeleteGroupsRequest struct {
-	Groups []string
-}
-
-func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
-	return pe.putStringArray(r.Groups)
-}
-
-func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Groups, err = pd.getStringArray()
-	return
-}
-
-func (r *DeleteGroupsRequest) key() int16 {
-	return 42
-}
-
-func (r *DeleteGroupsRequest) version() int16 {
-	return 0
-}
-
-func (r *DeleteGroupsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
-	return V1_1_0_0
-}
-
-func (r *DeleteGroupsRequest) AddGroup(group string) {
-	r.Groups = append(r.Groups, group)
-}
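
Via ClusterAdmin, a sketch with a placeholder group name; the group must have no active members for deletion to succeed:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_1_0_0 // DeleteGroups requires Kafka >= 1.1.0, per requiredVersion above

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	if err := admin.DeleteConsumerGroup("example-group"); err != nil {
		log.Fatal(err)
	}
}
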
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
deleted file mode 100644
index 5e7b1ed..0000000
--- a/vendor/github.com/Shopify/sarama/delete_groups_response.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type DeleteGroupsResponse struct {
-	ThrottleTime    time.Duration
-	GroupErrorCodes map[string]KError
-}
-
-func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
-		return err
-	}
-	for groupID, errorCode := range r.GroupErrorCodes {
-		if err := pe.putString(groupID); err != nil {
-			return err
-		}
-		pe.putInt16(int16(errorCode))
-	}
-
-	return nil
-}
-
-func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.GroupErrorCodes = make(map[string]KError, n)
-	for i := 0; i < n; i++ {
-		groupID, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		errorCode, err := pd.getInt16()
-		if err != nil {
-			return err
-		}
-
-		r.GroupErrorCodes[groupID] = KError(errorCode)
-	}
-
-	return nil
-}
-
-func (r *DeleteGroupsResponse) key() int16 {
-	return 42
-}
-
-func (r *DeleteGroupsResponse) version() int16 {
-	return 0
-}
-
-func (r *DeleteGroupsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
-	return V1_1_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
deleted file mode 100644
index dc106b1..0000000
--- a/vendor/github.com/Shopify/sarama/delete_records_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"time"
-)
-
-// request message format is:
-// [topic] timeout(int32)
-// where topic is:
-//  name(string) [partition]
-// where partition is:
-//  id(int32) offset(int64)
-
-type DeleteRecordsRequest struct {
-	Topics  map[string]*DeleteRecordsRequestTopic
-	Timeout time.Duration
-}
-
-func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(d.Topics)); err != nil {
-		return err
-	}
-	keys := make([]string, 0, len(d.Topics))
-	for topic := range d.Topics {
-		keys = append(keys, topic)
-	}
-	sort.Strings(keys)
-	for _, topic := range keys {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := d.Topics[topic].encode(pe); err != nil {
-			return err
-		}
-	}
-	pe.putInt32(int32(d.Timeout / time.Millisecond))
-
-	return nil
-}
-
-func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
-		for i := 0; i < n; i++ {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			details := new(DeleteRecordsRequestTopic)
-			if err = details.decode(pd, version); err != nil {
-				return err
-			}
-			d.Topics[topic] = details
-		}
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.Timeout = time.Duration(timeout) * time.Millisecond
-
-	return nil
-}
-
-func (d *DeleteRecordsRequest) key() int16 {
-	return 21
-}
-
-func (d *DeleteRecordsRequest) version() int16 {
-	return 0
-}
-
-func (d *DeleteRecordsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-type DeleteRecordsRequestTopic struct {
-	PartitionOffsets map[int32]int64 // partition => offset
-}
-
-func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
-		return err
-	}
-	keys := make([]int32, 0, len(t.PartitionOffsets))
-	for partition := range t.PartitionOffsets {
-		keys = append(keys, partition)
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
-	for _, partition := range keys {
-		pe.putInt32(partition)
-		pe.putInt64(t.PartitionOffsets[partition])
-	}
-	return nil
-}
-
-func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.PartitionOffsets = make(map[int32]int64, n)
-		for i := 0; i < n; i++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			offset, err := pd.getInt64()
-			if err != nil {
-				return err
-			}
-			t.PartitionOffsets[partition] = offset
-		}
-	}
-
-	return nil
-}
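
Unlike most encoders in this package, these sort map keys before writing, so the wire bytes are deterministic for a given request. The pattern in isolation:

package example

import "sort"

// sortedKeys returns a map's keys in a stable order, the trick the
// encoders above use to make Go's randomized map iteration deterministic.
func sortedKeys(m map[string]int64) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}
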
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
deleted file mode 100644
index d530b4c..0000000
--- a/vendor/github.com/Shopify/sarama/delete_records_response.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"time"
-)
-
-// response message format is:
-// throttleMs(int32) [topic]
-// where topic is:
-//  name(string) [partition]
-// where partition is:
-//  id(int32) low_watermark(int64) error_code(int16)
-
-type DeleteRecordsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Topics       map[string]*DeleteRecordsResponseTopic
-}
-
-func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(d.Topics)); err != nil {
-		return err
-	}
-	keys := make([]string, 0, len(d.Topics))
-	for topic := range d.Topics {
-		keys = append(keys, topic)
-	}
-	sort.Strings(keys)
-	for _, topic := range keys {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := d.Topics[topic].encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
-	d.Version = version
-
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
-		for i := 0; i < n; i++ {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			details := new(DeleteRecordsResponseTopic)
-			if err = details.decode(pd, version); err != nil {
-				return err
-			}
-			d.Topics[topic] = details
-		}
-	}
-
-	return nil
-}
-
-func (d *DeleteRecordsResponse) key() int16 {
-	return 21
-}
-
-func (d *DeleteRecordsResponse) version() int16 {
-	return 0
-}
-
-func (d *DeleteRecordsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-type DeleteRecordsResponseTopic struct {
-	Partitions map[int32]*DeleteRecordsResponsePartition
-}
-
-func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(t.Partitions)); err != nil {
-		return err
-	}
-	keys := make([]int32, 0, len(t.Partitions))
-	for partition := range t.Partitions {
-		keys = append(keys, partition)
-	}
-	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
-	for _, partition := range keys {
-		pe.putInt32(partition)
-		if err := t.Partitions[partition].encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
-		for i := 0; i < n; i++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			details := new(DeleteRecordsResponsePartition)
-			if err = details.decode(pd, version); err != nil {
-				return err
-			}
-			t.Partitions[partition] = details
-		}
-	}
-
-	return nil
-}
-
-type DeleteRecordsResponsePartition struct {
-	LowWatermark int64
-	Err          KError
-}
-
-func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
-	pe.putInt64(t.LowWatermark)
-	pe.putInt16(int16(t.Err))
-	return nil
-}
-
-func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
-	lowWatermark, err := pd.getInt64()
-	if err != nil {
-		return err
-	}
-	t.LowWatermark = lowWatermark
-
-	kErr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	t.Err = KError(kErr)
-
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
deleted file mode 100644
index ba6780a..0000000
--- a/vendor/github.com/Shopify/sarama/delete_topics_request.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsRequest struct {
-	Version int16
-	Topics  []string
-	Timeout time.Duration
-}
-
-func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
-	if err := pe.putStringArray(d.Topics); err != nil {
-		return err
-	}
-	pe.putInt32(int32(d.Timeout / time.Millisecond))
-
-	return nil
-}
-
-func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
-	if d.Topics, err = pd.getStringArray(); err != nil {
-		return err
-	}
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	d.Timeout = time.Duration(timeout) * time.Millisecond
-	d.Version = version
-	return nil
-}
-
-func (d *DeleteTopicsRequest) key() int16 {
-	return 20
-}
-
-func (d *DeleteTopicsRequest) version() int16 {
-	return d.Version
-}
-
-func (d *DeleteTopicsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
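
The high-level counterpart is ClusterAdmin.DeleteTopic. A sketch with placeholder names:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_10_1_0 // minimum version for DeleteTopics, per requiredVersion above

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	if err := admin.DeleteTopic("example-topic"); err != nil {
		log.Fatal(err)
	}
}
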
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
deleted file mode 100644
index 733961a..0000000
--- a/vendor/github.com/Shopify/sarama/delete_topics_response.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsResponse struct {
-	Version         int16
-	ThrottleTime    time.Duration
-	TopicErrorCodes map[string]KError
-}
-
-func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
-	if d.Version >= 1 {
-		pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-	}
-
-	if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
-		return err
-	}
-	for topic, errorCode := range d.TopicErrorCodes {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		pe.putInt16(int16(errorCode))
-	}
-
-	return nil
-}
-
-func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
-	if version >= 1 {
-		throttleTime, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-		d.Version = version
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	d.TopicErrorCodes = make(map[string]KError, n)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		errorCode, err := pd.getInt16()
-		if err != nil {
-			return err
-		}
-
-		d.TopicErrorCodes[topic] = KError(errorCode)
-	}
-
-	return nil
-}
-
-func (d *DeleteTopicsResponse) key() int16 {
-	return 20
-}
-
-func (d *DeleteTopicsResponse) version() int16 {
-	return d.Version
-}
-
-func (d *DeleteTopicsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
-	switch d.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_10_1_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
deleted file mode 100644
index 4c34880..0000000
--- a/vendor/github.com/Shopify/sarama/describe_configs_request.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package sarama
-
-type DescribeConfigsRequest struct {
-	Version         int16
-	Resources       []*ConfigResource
-	IncludeSynonyms bool
-}
-
-type ConfigResource struct {
-	Type        ConfigResourceType
-	Name        string
-	ConfigNames []string
-}
-
-func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(r.Resources)); err != nil {
-		return err
-	}
-
-	for _, c := range r.Resources {
-		pe.putInt8(int8(c.Type))
-		if err := pe.putString(c.Name); err != nil {
-			return err
-		}
-
-		if len(c.ConfigNames) == 0 {
-			pe.putInt32(-1)
-			continue
-		}
-		if err := pe.putStringArray(c.ConfigNames); err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 1 {
-		pe.putBool(r.IncludeSynonyms)
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Resources = make([]*ConfigResource, n)
-
-	for i := 0; i < n; i++ {
-		r.Resources[i] = &ConfigResource{}
-		t, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Resources[i].Type = ConfigResourceType(t)
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		r.Resources[i].Name = name
-
-		confLength, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		if confLength == -1 {
-			continue
-		}
-
-		cfnames := make([]string, confLength)
-		for i := 0; i < confLength; i++ {
-			s, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			cfnames[i] = s
-		}
-		r.Resources[i].ConfigNames = cfnames
-	}
-	r.Version = version
-	if r.Version >= 1 {
-		b, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-		r.IncludeSynonyms = b
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsRequest) key() int16 {
-	return 32
-}
-
-func (r *DescribeConfigsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeConfigsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V1_1_0_0
-	case 2:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
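
Through ClusterAdmin this becomes DescribeConfig. A sketch with placeholder names; leaving ConfigNames nil asks for all configs (encoded as -1 above):

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // minimum version for DescribeConfigs, per requiredVersion above

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	entries, err := admin.DescribeConfig(sarama.ConfigResource{
		Type: sarama.TopicResource,
		Name: "example-topic",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		log.Printf("%s = %s (source: %s)", e.Name, e.Value, e.Source)
	}
}
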
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
deleted file mode 100644
index 928f5a5..0000000
--- a/vendor/github.com/Shopify/sarama/describe_configs_response.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-type ConfigSource int8
-
-func (s ConfigSource) String() string {
-	switch s {
-	case SourceUnknown:
-		return "Unknown"
-	case SourceTopic:
-		return "Topic"
-	case SourceDynamicBroker:
-		return "DynamicBroker"
-	case SourceDynamicDefaultBroker:
-		return "DynamicDefaultBroker"
-	case SourceStaticBroker:
-		return "StaticBroker"
-	case SourceDefault:
-		return "Default"
-	}
-	return fmt.Sprintf("Source Invalid: %d", int(s))
-}
-
-const (
-	SourceUnknown ConfigSource = iota
-	SourceTopic
-	SourceDynamicBroker
-	SourceDynamicDefaultBroker
-	SourceStaticBroker
-	SourceDefault
-)
-
-type DescribeConfigsResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Resources    []*ResourceResponse
-}
-
-type ResourceResponse struct {
-	ErrorCode int16
-	ErrorMsg  string
-	Type      ConfigResourceType
-	Name      string
-	Configs   []*ConfigEntry
-}
-
-type ConfigEntry struct {
-	Name      string
-	Value     string
-	ReadOnly  bool
-	Default   bool
-	Source    ConfigSource
-	Sensitive bool
-	Synonyms  []*ConfigSynonym
-}
-
-type ConfigSynonym struct {
-	ConfigName  string
-	ConfigValue string
-	Source      ConfigSource
-}
-
-func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	if err = pe.putArrayLength(len(r.Resources)); err != nil {
-		return err
-	}
-
-	for _, c := range r.Resources {
-		if err = c.encode(pe, r.Version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Resources = make([]*ResourceResponse, n)
-	for i := 0; i < n; i++ {
-		rr := &ResourceResponse{}
-		if err := rr.decode(pd, version); err != nil {
-			return err
-		}
-		r.Resources[i] = rr
-	}
-
-	return nil
-}
-
-func (r *DescribeConfigsResponse) key() int16 {
-	return 32
-}
-
-func (r *DescribeConfigsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeConfigsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V1_0_0_0
-	case 2:
-		return V2_0_0_0
-	default:
-		return V0_11_0_0
-	}
-}
-
-func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(r.ErrorCode)
-
-	if err = pe.putString(r.ErrorMsg); err != nil {
-		return err
-	}
-
-	pe.putInt8(int8(r.Type))
-
-	if err = pe.putString(r.Name); err != nil {
-		return err
-	}
-
-	if err = pe.putArrayLength(len(r.Configs)); err != nil {
-		return err
-	}
-
-	for _, c := range r.Configs {
-		if err = c.encode(pe, version); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
-	ec, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.ErrorCode = ec
-
-	em, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.ErrorMsg = em
-
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	r.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Name = name
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Configs = make([]*ConfigEntry, n)
-	for i := 0; i < n; i++ {
-		c := &ConfigEntry{}
-		if err := c.decode(pd, version); err != nil {
-			return err
-		}
-		r.Configs[i] = c
-	}
-	return nil
-}
-
-func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
-	if err = pe.putString(r.Name); err != nil {
-		return err
-	}
-
-	if err = pe.putString(r.Value); err != nil {
-		return err
-	}
-
-	pe.putBool(r.ReadOnly)
-
-	if version <= 0 {
-		pe.putBool(r.Default)
-		pe.putBool(r.Sensitive)
-	} else {
-		pe.putInt8(int8(r.Source))
-		pe.putBool(r.Sensitive)
-
-		if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
-			return err
-		}
-		for _, c := range r.Synonyms {
-			if err = c.encode(pe, version); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
-func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
-	if version == 0 {
-		r.Source = SourceUnknown
-	}
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Name = name
-
-	value, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Value = value
-
-	read, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-	r.ReadOnly = read
-
-	if version == 0 {
-		defaultB, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-		r.Default = defaultB
-		if defaultB {
-			r.Source = SourceDefault
-		}
-	} else {
-		source, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Source = ConfigSource(source)
-		r.Default = r.Source == SourceDefault
-	}
-
-	sensitive, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-	r.Sensitive = sensitive
-
-	if version > 0 {
-		n, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.Synonyms = make([]*ConfigSynonym, n)
-
-		for i := 0; i < n; i++ {
-			s := &ConfigSynonym{}
-			if err := s.decode(pd, version); err != nil {
-				return err
-			}
-			r.Synonyms[i] = s
-		}
-	}
-	return nil
-}
-
-func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
-	err = pe.putString(c.ConfigName)
-	if err != nil {
-		return err
-	}
-
-	err = pe.putString(c.ConfigValue)
-	if err != nil {
-		return err
-	}
-
-	pe.putInt8(int8(c.Source))
-
-	return nil
-}
-
-func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
-	name, err := pd.getString()
-	if err != nil {
-		return nil
-	}
-	c.ConfigName = name
-
-	value, err := pd.getString()
-	if err != nil {
-		return nil
-	}
-	c.ConfigValue = value
-
-	source, err := pd.getInt8()
-	if err != nil {
-		return nil
-	}
-	c.Source = ConfigSource(source)
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
deleted file mode 100644
index f8962da..0000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-type DescribeGroupsRequest struct {
-	Groups []string
-}
-
-func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
-	return pe.putStringArray(r.Groups)
-}
-
-func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Groups, err = pd.getStringArray()
-	return
-}
-
-func (r *DescribeGroupsRequest) key() int16 {
-	return 15
-}
-
-func (r *DescribeGroupsRequest) version() int16 {
-	return 0
-}
-
-func (r *DescribeGroupsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
-
-func (r *DescribeGroupsRequest) AddGroup(group string) {
-	r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
deleted file mode 100644
index bc242e4..0000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_response.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package sarama
-
-type DescribeGroupsResponse struct {
-	Groups []*GroupDescription
-}
-
-func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(r.Groups)); err != nil {
-		return err
-	}
-
-	for _, groupDescription := range r.Groups {
-		if err := groupDescription.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Groups = make([]*GroupDescription, n)
-	for i := 0; i < n; i++ {
-		r.Groups[i] = new(GroupDescription)
-		if err := r.Groups[i].decode(pd); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeGroupsResponse) key() int16 {
-	return 15
-}
-
-func (r *DescribeGroupsResponse) version() int16 {
-	return 0
-}
-
-func (r *DescribeGroupsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
-
-type GroupDescription struct {
-	Err          KError
-	GroupId      string
-	State        string
-	ProtocolType string
-	Protocol     string
-	Members      map[string]*GroupMemberDescription
-}
-
-func (gd *GroupDescription) encode(pe packetEncoder) error {
-	pe.putInt16(int16(gd.Err))
-
-	if err := pe.putString(gd.GroupId); err != nil {
-		return err
-	}
-	if err := pe.putString(gd.State); err != nil {
-		return err
-	}
-	if err := pe.putString(gd.ProtocolType); err != nil {
-		return err
-	}
-	if err := pe.putString(gd.Protocol); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(gd.Members)); err != nil {
-		return err
-	}
-
-	for memberId, groupMemberDescription := range gd.Members {
-		if err := pe.putString(memberId); err != nil {
-			return err
-		}
-		if err := groupMemberDescription.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	gd.Err = KError(kerr)
-
-	if gd.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if gd.State, err = pd.getString(); err != nil {
-		return
-	}
-	if gd.ProtocolType, err = pd.getString(); err != nil {
-		return
-	}
-	if gd.Protocol, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	gd.Members = make(map[string]*GroupMemberDescription)
-	for i := 0; i < n; i++ {
-		memberId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		gd.Members[memberId] = new(GroupMemberDescription)
-		if err := gd.Members[memberId].decode(pd); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type GroupMemberDescription struct {
-	ClientId         string
-	ClientHost       string
-	MemberMetadata   []byte
-	MemberAssignment []byte
-}
-
-func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
-	if err := pe.putString(gmd.ClientId); err != nil {
-		return err
-	}
-	if err := pe.putString(gmd.ClientHost); err != nil {
-		return err
-	}
-	if err := pe.putBytes(gmd.MemberMetadata); err != nil {
-		return err
-	}
-	if err := pe.putBytes(gmd.MemberAssignment); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
-	if gmd.ClientId, err = pd.getString(); err != nil {
-		return
-	}
-	if gmd.ClientHost, err = pd.getString(); err != nil {
-		return
-	}
-	if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
-		return
-	}
-	if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
-	assignment := new(ConsumerGroupMemberAssignment)
-	err := decode(gmd.MemberAssignment, assignment)
-	return assignment, err
-}
-
-func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
-	metadata := new(ConsumerGroupMemberMetadata)
-	err := decode(gmd.MemberMetadata, metadata)
-	return metadata, err
-}
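
The decode helpers above pair with ClusterAdmin.DescribeConsumerGroups. A sketch with a placeholder group name, walking each member's decoded assignment:

package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0 // anything from 0.9.0 up supports DescribeGroups

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	groups, err := admin.DescribeConsumerGroups([]string{"example-group"})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range groups {
		for memberID, m := range g.Members {
			// GetMemberAssignment decodes the raw assignment bytes, as above.
			assignment, err := m.GetMemberAssignment()
			if err != nil {
				log.Fatal(err)
			}
			log.Printf("%s in %s owns %v", memberID, g.GroupId, assignment.Topics)
		}
	}
}
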
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
deleted file mode 100644
index c0bf04e..0000000
--- a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-// DescribeLogDirsRequest is a describe request to get the log size of partitions
-type DescribeLogDirsRequest struct {
-	// Versions 0 and 1 are identical on the wire.
-	// The version number was bumped only to indicate that, on quota violation, brokers send out responses before throttling.
-	Version int16
-
-	// If this is an empty array, all topics will be queried
-	DescribeTopics []DescribeLogDirsRequestTopic
-}
-
-// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
-type DescribeLogDirsRequestTopic struct {
-	Topic        string
-	PartitionIDs []int32
-}
-
-func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
-	length := len(r.DescribeTopics)
-	if length == 0 {
-		// In order to query all topics we must send null
-		length = -1
-	}
-
-	if err := pe.putArrayLength(length); err != nil {
-		return err
-	}
-
-	for _, d := range r.DescribeTopics {
-		if err := pe.putString(d.Topic); err != nil {
-			return err
-		}
-
-		if err := pe.putInt32Array(d.PartitionIDs); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == -1 {
-		n = 0
-	}
-
-	topics := make([]DescribeLogDirsRequestTopic, n)
-	for i := 0; i < n; i++ {
-		topics[i] = DescribeLogDirsRequestTopic{}
-
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		topics[i].Topic = topic
-
-		pIDs, err := pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-		topics[i].PartitionIDs = pIDs
-	}
-	r.DescribeTopics = topics
-
-	return nil
-}
-
-func (r *DescribeLogDirsRequest) key() int16 {
-	return 35
-}
-
-func (r *DescribeLogDirsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeLogDirsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
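As the encoder above shows, an empty DescribeTopics slice is sent as a null array (length -1), which the broker reads as "describe every topic". A sketch of the two query shapes (the helper and topic name are hypothetical):

    package example

    import "github.com/Shopify/sarama"

    // buildLogDirRequests returns one request that covers all topics and one
    // that is restricted to a few partitions of a single topic.
    func buildLogDirRequests() (all, some *sarama.DescribeLogDirsRequest) {
    	all = &sarama.DescribeLogDirsRequest{Version: 0} // empty slice -> null array -> all topics
    	some = &sarama.DescribeLogDirsRequest{
    		Version: 0,
    		DescribeTopics: []sarama.DescribeLogDirsRequestTopic{
    			{Topic: "events", PartitionIDs: []int32{0, 1, 2}},
    		},
    	}
    	return all, some
    }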
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
deleted file mode 100644
index 411da38..0000000
--- a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package sarama
-
-import "time"
-
-type DescribeLogDirsResponse struct {
-	ThrottleTime time.Duration
-
-	// Versions 0 and 1 are identical on the wire.
-	// The version number was bumped only to indicate that, on quota violation, brokers send out responses before throttling.
-	Version int16
-
-	LogDirs []DescribeLogDirsResponseDirMetadata
-}
-
-func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
-		return err
-	}
-
-	for _, dir := range r.LogDirs {
-		if err := dir.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	// Decode array of DescribeLogDirsResponseDirMetadata
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
-	for i := 0; i < n; i++ {
-		dir := DescribeLogDirsResponseDirMetadata{}
-		if err := dir.decode(pd, version); err != nil {
-			return err
-		}
-		r.LogDirs[i] = dir
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponse) key() int16 {
-	return 35
-}
-
-func (r *DescribeLogDirsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeLogDirsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
-
-type DescribeLogDirsResponseDirMetadata struct {
-	ErrorCode KError
-
-	// The absolute log directory path
-	Path   string
-	Topics []DescribeLogDirsResponseTopic
-}
-
-func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.ErrorCode))
-
-	if err := pe.putString(r.Path); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.Topics)); err != nil {
-		return err
-	}
-	for _, topic := range r.Topics {
-		if err := topic.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
-	errCode, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.ErrorCode = KError(errCode)
-
-	path, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Path = path
-
-	// Decode array of DescribeLogDirsResponseTopic
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Topics = make([]DescribeLogDirsResponseTopic, n)
-	for i := 0; i < n; i++ {
-		t := DescribeLogDirsResponseTopic{}
-
-		if err := t.decode(pd, version); err != nil {
-			return err
-		}
-
-		r.Topics[i] = t
-	}
-
-	return nil
-}
-
-// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
-type DescribeLogDirsResponseTopic struct {
-	Topic      string
-	Partitions []DescribeLogDirsResponsePartition
-}
-
-func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
-	if err := pe.putString(r.Topic); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.Partitions)); err != nil {
-		return err
-	}
-	for _, partition := range r.Partitions {
-		if err := partition.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	r.Topic = t
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	r.Partitions = make([]DescribeLogDirsResponsePartition, n)
-	for i := 0; i < n; i++ {
-		p := DescribeLogDirsResponsePartition{}
-		if err := p.decode(pd, version); err != nil {
-			return err
-		}
-		r.Partitions[i] = p
-	}
-
-	return nil
-}
-
-// DescribeLogDirsResponsePartition describes a partition's log directory
-type DescribeLogDirsResponsePartition struct {
-	PartitionID int32
-
-	// The size of the log segments of the partition in bytes.
-	Size int64
-
-	// The lag of the log's LEO (log end offset) w.r.t. the partition's HW (high watermark) if it is the current log
-	// for the partition, or the current replica's LEO if it is the future log for the partition.
-	OffsetLag int64
-
-	// True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
-	// the replica in the future.
-	IsTemporary bool
-}
-
-func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
-	pe.putInt32(r.PartitionID)
-	pe.putInt64(r.Size)
-	pe.putInt64(r.OffsetLag)
-	pe.putBool(r.IsTemporary)
-
-	return nil
-}
-
-func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
-	pID, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.PartitionID = pID
-
-	size, err := pd.getInt64()
-	if err != nil {
-		return err
-	}
-	r.Size = size
-
-	lag, err := pd.getInt64()
-	if err != nil {
-		return err
-	}
-	r.OffsetLag = lag
-
-	isTemp, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-	r.IsTemporary = isTemp
-
-	return nil
-}
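A typical consumer of this response walks log dir -> topic -> partition and aggregates Size. A minimal sketch (the helper name is hypothetical):

    package example

    import (
    	"fmt"

    	"github.com/Shopify/sarama"
    )

    // totalBytesPerDir sums the reported log segment sizes for each log directory,
    // skipping directories that carry a per-dir error code.
    func totalBytesPerDir(resp *sarama.DescribeLogDirsResponse) map[string]int64 {
    	totals := make(map[string]int64)
    	for _, dir := range resp.LogDirs {
    		if dir.ErrorCode != sarama.ErrNoError {
    			fmt.Printf("log dir %s: %v\n", dir.Path, dir.ErrorCode)
    			continue
    		}
    		for _, topic := range dir.Topics {
    			for _, p := range topic.Partitions {
    				totals[dir.Path] += p.Size
    			}
    		}
    	}
    	return totals
    }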
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
deleted file mode 100644
index b5b5940..0000000
--- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package sarama
-
-// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
-type DescribeUserScramCredentialsRequest struct {
-	// Only version 0 is currently supported
-	Version int16
-
-	// If this is an empty array, all users will be queried
-	DescribeUsers []DescribeUserScramCredentialsRequestUser
-}
-
-// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
-type DescribeUserScramCredentialsRequestUser struct {
-	Name string
-}
-
-func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
-	pe.putCompactArrayLength(len(r.DescribeUsers))
-	for _, d := range r.DescribeUsers {
-		if err := pe.putCompactString(d.Name); err != nil {
-			return err
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
-	n, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == -1 {
-		n = 0
-	}
-
-	r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
-	for i := 0; i < n; i++ {
-		r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
-		if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil {
-			return err
-		}
-		if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsRequest) key() int16 {
-	return 50
-}
-
-func (r *DescribeUserScramCredentialsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
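The compact strings, compact array lengths, and tagged-field markers (together with header version 2) mean this request uses Kafka's flexible protocol encoding throughout. Building the two query shapes might look like this (helper and user name are hypothetical):

    package example

    import "github.com/Shopify/sarama"

    // buildScramQueries returns one request that lists credentials for every
    // user (empty DescribeUsers) and one narrowed to a single user.
    func buildScramQueries() (all, one *sarama.DescribeUserScramCredentialsRequest) {
    	all = &sarama.DescribeUserScramCredentialsRequest{}
    	one = &sarama.DescribeUserScramCredentialsRequest{
    		DescribeUsers: []sarama.DescribeUserScramCredentialsRequestUser{
    			{Name: "alice"},
    		},
    	}
    	return all, one
    }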
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
deleted file mode 100644
index 2656c2f..0000000
--- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package sarama
-
-import "time"
-
-type ScramMechanismType int8
-
-const (
-	SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
-	SCRAM_MECHANISM_SHA_256                           // 1
-	SCRAM_MECHANISM_SHA_512                           // 2
-)
-
-func (s ScramMechanismType) String() string {
-	switch s {
-	case SCRAM_MECHANISM_SHA_256:
-		return SASLTypeSCRAMSHA256
-	case SCRAM_MECHANISM_SHA_512:
-		return SASLTypeSCRAMSHA512
-	default:
-		return "Unknown"
-	}
-}
-
-type DescribeUserScramCredentialsResponse struct {
-	// Only version 0 is currently supported
-	Version int16
-
-	ThrottleTime time.Duration
-
-	ErrorCode    KError
-	ErrorMessage *string
-
-	Results []*DescribeUserScramCredentialsResult
-}
-
-type DescribeUserScramCredentialsResult struct {
-	User string
-
-	ErrorCode    KError
-	ErrorMessage *string
-
-	CredentialInfos []*UserScramCredentialsResponseInfo
-}
-
-type UserScramCredentialsResponseInfo struct {
-	Mechanism  ScramMechanismType
-	Iterations int32
-}
-
-func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
-	pe.putInt16(int16(r.ErrorCode))
-	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
-		return err
-	}
-
-	pe.putCompactArrayLength(len(r.Results))
-	for _, u := range r.Results {
-		if err := pe.putCompactString(u.User); err != nil {
-			return err
-		}
-		pe.putInt16(int16(u.ErrorCode))
-		if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
-			return err
-		}
-
-		pe.putCompactArrayLength(len(u.CredentialInfos))
-		for _, c := range u.CredentialInfos {
-			pe.putInt8(int8(c.Mechanism))
-			pe.putInt32(c.Iterations)
-			pe.putEmptyTaggedFieldArray()
-		}
-
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.ErrorCode = KError(kerr)
-	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-		return err
-	}
-
-	numUsers, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if numUsers > 0 {
-		r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
-		for i := 0; i < numUsers; i++ {
-			r.Results[i] = &DescribeUserScramCredentialsResult{}
-			if r.Results[i].User, err = pd.getCompactString(); err != nil {
-				return err
-			}
-
-			errorCode, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-			r.Results[i].ErrorCode = KError(errorCode)
-			if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-				return err
-			}
-
-			numCredentialInfos, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-
-			r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
-			for j := 0; j < numCredentialInfos; j++ {
-				r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
-				scramMechanism, err := pd.getInt8()
-				if err != nil {
-					return err
-				}
-				r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
-				if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
-					return err
-				}
-				if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-					return err
-				}
-			}
-
-			if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (r *DescribeUserScramCredentialsResponse) key() int16 {
-	return 50
-}
-
-func (r *DescribeUserScramCredentialsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
-	return 2
-}
-
-func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
-	return V2_7_0_0
-}
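Both the top-level response and each per-user result carry their own error code, so callers should check both before trusting CredentialInfos. A sketch (hypothetical helper):

    package example

    import (
    	"fmt"

    	"github.com/Shopify/sarama"
    )

    // printScramCredentials prints each user's SCRAM mechanism and iteration count.
    func printScramCredentials(resp *sarama.DescribeUserScramCredentialsResponse) {
    	if resp.ErrorCode != sarama.ErrNoError {
    		fmt.Printf("request failed: %v\n", resp.ErrorCode)
    		return
    	}
    	for _, res := range resp.Results {
    		if res.ErrorCode != sarama.ErrNoError {
    			fmt.Printf("%s: %v\n", res.User, res.ErrorCode)
    			continue
    		}
    		for _, info := range res.CredentialInfos {
    			// ScramMechanismType's String() maps 1/2 to the SASL mechanism names.
    			fmt.Printf("%s: %s (%d iterations)\n", res.User, info.Mechanism, info.Iterations)
    		}
    	}
    }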
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
deleted file mode 100644
index 7bf9ff9..0000000
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: sarama
-
-up:
-  - go:
-      version: '1.16'
-
-commands:
-  test:
-    run: make test
-    desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml
deleted file mode 100644
index 8e9c24e..0000000
--- a/vendor/github.com/Shopify/sarama/docker-compose.yml
+++ /dev/null
@@ -1,134 +0,0 @@
-version: '3.7'
-services:
-  zookeeper-1:
-    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      ZOOKEEPER_SERVER_ID: '1'
-      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
-      ZOOKEEPER_CLIENT_PORT: '2181'
-      ZOOKEEPER_PEER_PORT: '2888'
-      ZOOKEEPER_LEADER_PORT: '3888'
-      ZOOKEEPER_INIT_LIMIT: '10'
-      ZOOKEEPER_SYNC_LIMIT: '5'
-      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
-  zookeeper-2:
-    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      ZOOKEEPER_SERVER_ID: '2'
-      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
-      ZOOKEEPER_CLIENT_PORT: '2181'
-      ZOOKEEPER_PEER_PORT: '2888'
-      ZOOKEEPER_LEADER_PORT: '3888'
-      ZOOKEEPER_INIT_LIMIT: '10'
-      ZOOKEEPER_SYNC_LIMIT: '5'
-      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
-  zookeeper-3:
-    image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      ZOOKEEPER_SERVER_ID: '3'
-      ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
-      ZOOKEEPER_CLIENT_PORT: '2181'
-      ZOOKEEPER_PEER_PORT: '2888'
-      ZOOKEEPER_LEADER_PORT: '3888'
-      ZOOKEEPER_INIT_LIMIT: '10'
-      ZOOKEEPER_SYNC_LIMIT: '5'
-      ZOOKEEPER_MAX_CLIENT_CONNS: '0'
-  kafka-1:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '1'
-      KAFKA_BROKER_RACK: '1'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-2:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '2'
-      KAFKA_BROKER_RACK: '2'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-3:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '3'
-      KAFKA_BROKER_RACK: '3'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-4:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '4'
-      KAFKA_BROKER_RACK: '4'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  kafka-5:
-    image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
-    restart: always
-    environment:
-      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
-      KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095'
-      KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
-      KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
-      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
-      KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
-      KAFKA_BROKER_ID: '5'
-      KAFKA_BROKER_RACK: '5'
-      KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
-      KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
-      KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
-      KAFKA_DELETE_TOPIC_ENABLE: 'true'
-      KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
-  toxiproxy:
-    image: 'shopify/toxiproxy:2.1.4'
-    ports:
-      # The tests themselves actually start the proxies on these ports
-      - '29091:29091'
-      - '29092:29092'
-      - '29093:29093'
-      - '29094:29094'
-      - '29095:29095'
-      # This is the toxiproxy API port
-      - '8474:8474'
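Each broker advertises a LISTENER_LOCAL endpoint on localhost:29091-29095, and the functional tests start toxiproxy proxies on those same ports so they can inject faults. Connecting a client directly might look like this (a sketch; the version pin is an assumption based on Confluent 6.1.x shipping Kafka 2.7-era brokers):

    package example

    import "github.com/Shopify/sarama"

    // newLocalClient dials the compose cluster through the locally advertised listeners.
    func newLocalClient() (sarama.Client, error) {
    	cfg := sarama.NewConfig()
    	cfg.Version = sarama.V2_7_0_0
    	return sarama.NewClient([]string{
    		"localhost:29091",
    		"localhost:29092",
    		"localhost:29093",
    	}, cfg)
    }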
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
deleted file mode 100644
index dab54f8..0000000
--- a/vendor/github.com/Shopify/sarama/encoder_decoder.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// encoder is the interface that wraps the basic encode method.
-// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
-type encoder interface {
-	encode(pe packetEncoder) error
-}
-
-type encoderWithHeader interface {
-	encoder
-	headerVersion() int16
-}
-
-// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
-func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
-	if e == nil {
-		return nil, nil
-	}
-
-	var prepEnc prepEncoder
-	var realEnc realEncoder
-
-	err := e.encode(&prepEnc)
-	if err != nil {
-		return nil, err
-	}
-
-	if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
-		return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
-	}
-
-	realEnc.raw = make([]byte, prepEnc.length)
-	realEnc.registry = metricRegistry
-	err = e.encode(&realEnc)
-	if err != nil {
-		return nil, err
-	}
-
-	return realEnc.raw, nil
-}
-
-// decoder is the interface that wraps the basic decode method.
-// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
-type decoder interface {
-	decode(pd packetDecoder) error
-}
-
-type versionedDecoder interface {
-	decode(pd packetDecoder, version int16) error
-}
-
-// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
-// interpreted using Kafka's encoding rules.
-func decode(buf []byte, in decoder) error {
-	if buf == nil {
-		return nil
-	}
-
-	helper := realDecoder{raw: buf}
-	err := in.decode(&helper)
-	if err != nil {
-		return err
-	}
-
-	if helper.off != len(buf) {
-		return PacketDecodingError{"invalid length"}
-	}
-
-	return nil
-}
-
-func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
-	if buf == nil {
-		return nil
-	}
-
-	helper := realDecoder{raw: buf}
-	err := in.decode(&helper, version)
-	if err != nil {
-		return err
-	}
-
-	if helper.off != len(buf) {
-		return PacketDecodingError{"invalid length"}
-	}
-
-	return nil
-}
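The two-pass design (prepEncoder to measure, then realEncoder to fill a buffer of exactly that size) is easiest to see in a round trip. An in-package test sketch, assuming it lives inside the sarama package so it can reach these unexported helpers (the test itself is hypothetical):

    package sarama

    import (
    	"reflect"
    	"testing"

    	metrics "github.com/rcrowley/go-metrics"
    )

    func TestDescribeLogDirsRoundTrip(t *testing.T) {
    	in := &DescribeLogDirsRequest{
    		DescribeTopics: []DescribeLogDirsRequestTopic{
    			{Topic: "events", PartitionIDs: []int32{0, 1}},
    		},
    	}

    	// Pass 1 sizes the message, pass 2 writes it into realEncoder.raw.
    	buf, err := encode(in, metrics.NewRegistry())
    	if err != nil {
    		t.Fatal(err)
    	}

    	out := &DescribeLogDirsRequest{}
    	if err := versionedDecode(buf, out, 0); err != nil {
    		t.Fatal(err)
    	}
    	if !reflect.DeepEqual(in.DescribeTopics, out.DescribeTopics) {
    		t.Fatalf("round trip mismatch: %#v != %#v", in, out)
    	}
    }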
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
deleted file mode 100644
index 6635425..0000000
--- a/vendor/github.com/Shopify/sarama/end_txn_request.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package sarama
-
-type EndTxnRequest struct {
-	TransactionalID   string
-	ProducerID        int64
-	ProducerEpoch     int16
-	TransactionResult bool
-}
-
-func (a *EndTxnRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(a.TransactionalID); err != nil {
-		return err
-	}
-
-	pe.putInt64(a.ProducerID)
-
-	pe.putInt16(a.ProducerEpoch)
-
-	pe.putBool(a.TransactionResult)
-
-	return nil
-}
-
-func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
-	if a.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if a.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if a.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-	if a.TransactionResult, err = pd.getBool(); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (a *EndTxnRequest) key() int16 {
-	return 26
-}
-
-func (a *EndTxnRequest) version() int16 {
-	return 0
-}
-
-func (a *EndTxnRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *EndTxnRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
deleted file mode 100644
index 7639767..0000000
--- a/vendor/github.com/Shopify/sarama/end_txn_response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type EndTxnResponse struct {
-	ThrottleTime time.Duration
-	Err          KError
-}
-
-func (e *EndTxnResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(e.Err))
-	return nil
-}
-
-func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	e.Err = KError(kerr)
-
-	return nil
-}
-
-func (e *EndTxnResponse) key() int16 {
-	return 26 // EndTxn; must match the request's API key above
-}
-
-func (e *EndTxnResponse) version() int16 {
-	return 0
-}
-
-func (e *EndTxnResponse) headerVersion() int16 {
-	return 0
-}
-
-func (e *EndTxnResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
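In practice these two types are exchanged through Broker.EndTxn. A minimal sketch of committing or aborting a transaction (the helper is hypothetical; real transactional producers drive this through their transaction bookkeeping rather than by hand):

    package example

    import (
    	"fmt"

    	"github.com/Shopify/sarama"
    )

    // endTransaction asks the transaction coordinator to commit or abort.
    func endTransaction(b *sarama.Broker, txnID string, pid int64, epoch int16, commit bool) error {
    	resp, err := b.EndTxn(&sarama.EndTxnRequest{
    		TransactionalID:   txnID,
    		ProducerID:        pid,
    		ProducerEpoch:     epoch,
    		TransactionResult: commit, // true = commit, false = abort
    	})
    	if err != nil {
    		return err
    	}
    	if resp.Err != sarama.ErrNoError {
    		return fmt.Errorf("EndTxn failed: %w", resp.Err)
    	}
    	return nil
    }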
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
deleted file mode 100644
index 0fca0a3..0000000
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ /dev/null
@@ -1,409 +0,0 @@
-package sarama
-
-import (
-	"errors"
-	"fmt"
-)
-
-// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
-
-// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
-var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
-
-// ErrClosedClient is the error returned when a method is called on a client that has been closed.
-var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
-
-// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
-
-// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
-
-// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
-var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
-
-// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
-var ErrNotConnected = errors.New("kafka: broker not connected")
-
-// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
-// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
-// of the message set.
-var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
-
-// ErrShuttingDown is returned when a producer receives a message during shutdown.
-var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
-
-// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
-var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
-
-// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
-// a RecordBatch.
-var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
-
-// ErrControllerNotAvailable is returned when the server doesn't return a valid controller ID. This may happen when
-// the kafka server's version is lower than 0.10.0.0.
-var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
-
-// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
-// the metadata.
-var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
-
-// ErrUnknownScramMechanism is returned when a user tries to AlterUserScramCredentials with an unknown SCRAM mechanism
-var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
-
-// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
-// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
-type PacketEncodingError struct {
-	Info string
-}
-
-func (err PacketEncodingError) Error() string {
-	return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
-}
-
-// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
-// This can be a bad CRC or length field, or any other invalid value.
-type PacketDecodingError struct {
-	Info string
-}
-
-func (err PacketDecodingError) Error() string {
-	return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
-}
-
-// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
-// when the specified configuration is invalid.
-type ConfigurationError string
-
-func (err ConfigurationError) Error() string {
-	return "kafka: invalid configuration (" + string(err) + ")"
-}
-
-// KError is the type of error that can be returned directly by the Kafka broker.
-// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
-type KError int16
-
-// MultiError is used to contain multiple errors.
-type MultiError struct {
-	Errors *[]error
-}
-
-func (mErr MultiError) Error() string {
-	errString := ""
-	for _, err := range *mErr.Errors {
-		errString += err.Error() + ","
-	}
-	return errString
-}
-
-func (mErr MultiError) PrettyError() string {
-	errString := ""
-	for _, err := range *mErr.Errors {
-		errString += err.Error() + "\n"
-	}
-	return errString
-}
-
-// ErrDeleteRecords is the type of error returned when deleting the required records fails
-type ErrDeleteRecords struct {
-	MultiError
-}
-
-func (err ErrDeleteRecords) Error() string {
-	return "kafka server: failed to delete records " + err.MultiError.Error()
-}
-
-type ErrReassignPartitions struct {
-	MultiError
-}
-
-func (err ErrReassignPartitions) Error() string {
-	return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
-}
-
-// Numeric error codes returned by the Kafka server.
-const (
-	ErrNoError                            KError = 0
-	ErrUnknown                            KError = -1
-	ErrOffsetOutOfRange                   KError = 1
-	ErrInvalidMessage                     KError = 2
-	ErrUnknownTopicOrPartition            KError = 3
-	ErrInvalidMessageSize                 KError = 4
-	ErrLeaderNotAvailable                 KError = 5
-	ErrNotLeaderForPartition              KError = 6
-	ErrRequestTimedOut                    KError = 7
-	ErrBrokerNotAvailable                 KError = 8
-	ErrReplicaNotAvailable                KError = 9
-	ErrMessageSizeTooLarge                KError = 10
-	ErrStaleControllerEpochCode           KError = 11
-	ErrOffsetMetadataTooLarge             KError = 12
-	ErrNetworkException                   KError = 13
-	ErrOffsetsLoadInProgress              KError = 14
-	ErrConsumerCoordinatorNotAvailable    KError = 15
-	ErrNotCoordinatorForConsumer          KError = 16
-	ErrInvalidTopic                       KError = 17
-	ErrMessageSetSizeTooLarge             KError = 18
-	ErrNotEnoughReplicas                  KError = 19
-	ErrNotEnoughReplicasAfterAppend       KError = 20
-	ErrInvalidRequiredAcks                KError = 21
-	ErrIllegalGeneration                  KError = 22
-	ErrInconsistentGroupProtocol          KError = 23
-	ErrInvalidGroupId                     KError = 24
-	ErrUnknownMemberId                    KError = 25
-	ErrInvalidSessionTimeout              KError = 26
-	ErrRebalanceInProgress                KError = 27
-	ErrInvalidCommitOffsetSize            KError = 28
-	ErrTopicAuthorizationFailed           KError = 29
-	ErrGroupAuthorizationFailed           KError = 30
-	ErrClusterAuthorizationFailed         KError = 31
-	ErrInvalidTimestamp                   KError = 32
-	ErrUnsupportedSASLMechanism           KError = 33
-	ErrIllegalSASLState                   KError = 34
-	ErrUnsupportedVersion                 KError = 35
-	ErrTopicAlreadyExists                 KError = 36
-	ErrInvalidPartitions                  KError = 37
-	ErrInvalidReplicationFactor           KError = 38
-	ErrInvalidReplicaAssignment           KError = 39
-	ErrInvalidConfig                      KError = 40
-	ErrNotController                      KError = 41
-	ErrInvalidRequest                     KError = 42
-	ErrUnsupportedForMessageFormat        KError = 43
-	ErrPolicyViolation                    KError = 44
-	ErrOutOfOrderSequenceNumber           KError = 45
-	ErrDuplicateSequenceNumber            KError = 46
-	ErrInvalidProducerEpoch               KError = 47
-	ErrInvalidTxnState                    KError = 48
-	ErrInvalidProducerIDMapping           KError = 49
-	ErrInvalidTransactionTimeout          KError = 50
-	ErrConcurrentTransactions             KError = 51
-	ErrTransactionCoordinatorFenced       KError = 52
-	ErrTransactionalIDAuthorizationFailed KError = 53
-	ErrSecurityDisabled                   KError = 54
-	ErrOperationNotAttempted              KError = 55
-	ErrKafkaStorageError                  KError = 56
-	ErrLogDirNotFound                     KError = 57
-	ErrSASLAuthenticationFailed           KError = 58
-	ErrUnknownProducerID                  KError = 59
-	ErrReassignmentInProgress             KError = 60
-	ErrDelegationTokenAuthDisabled        KError = 61
-	ErrDelegationTokenNotFound            KError = 62
-	ErrDelegationTokenOwnerMismatch       KError = 63
-	ErrDelegationTokenRequestNotAllowed   KError = 64
-	ErrDelegationTokenAuthorizationFailed KError = 65
-	ErrDelegationTokenExpired             KError = 66
-	ErrInvalidPrincipalType               KError = 67
-	ErrNonEmptyGroup                      KError = 68
-	ErrGroupIDNotFound                    KError = 69
-	ErrFetchSessionIDNotFound             KError = 70
-	ErrInvalidFetchSessionEpoch           KError = 71
-	ErrListenerNotFound                   KError = 72
-	ErrTopicDeletionDisabled              KError = 73
-	ErrFencedLeaderEpoch                  KError = 74
-	ErrUnknownLeaderEpoch                 KError = 75
-	ErrUnsupportedCompressionType         KError = 76
-	ErrStaleBrokerEpoch                   KError = 77
-	ErrOffsetNotAvailable                 KError = 78
-	ErrMemberIdRequired                   KError = 79
-	ErrPreferredLeaderNotAvailable        KError = 80
-	ErrGroupMaxSizeReached                KError = 81
-	ErrFencedInstancedId                  KError = 82
-	ErrEligibleLeadersNotAvailable        KError = 83
-	ErrElectionNotNeeded                  KError = 84
-	ErrNoReassignmentInProgress           KError = 85
-	ErrGroupSubscribedToTopic             KError = 86
-	ErrInvalidRecord                      KError = 87
-	ErrUnstableOffsetCommit               KError = 88
-)
-
-func (err KError) Error() string {
-	// Error messages stolen/adapted from
-	// https://kafka.apache.org/protocol#protocol_error_codes
-	switch err {
-	case ErrNoError:
-		return "kafka server: Not an error, why are you printing me?"
-	case ErrUnknown:
-		return "kafka server: Unexpected (unknown?) server error."
-	case ErrOffsetOutOfRange:
-		return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
-	case ErrInvalidMessage:
-		return "kafka server: Message contents does not match its CRC."
-	case ErrUnknownTopicOrPartition:
-		return "kafka server: Request was for a topic or partition that does not exist on this broker."
-	case ErrInvalidMessageSize:
-		return "kafka server: The message has a negative size."
-	case ErrLeaderNotAvailable:
-		return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
-	case ErrNotLeaderForPartition:
-		return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
-	case ErrRequestTimedOut:
-		return "kafka server: Request exceeded the user-specified time limit in the request."
-	case ErrBrokerNotAvailable:
-		return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
-	case ErrReplicaNotAvailable:
-		return "kafka server: Replica information not available, one or more brokers are down."
-	case ErrMessageSizeTooLarge:
-		return "kafka server: Message was too large, server rejected it to avoid allocation error."
-	case ErrStaleControllerEpochCode:
-		return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
-	case ErrOffsetMetadataTooLarge:
-		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
-	case ErrNetworkException:
-		return "kafka server: The server disconnected before a response was received."
-	case ErrOffsetsLoadInProgress:
-		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
-	case ErrConsumerCoordinatorNotAvailable:
-		return "kafka server: Offset's topic has not yet been created."
-	case ErrNotCoordinatorForConsumer:
-		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
-	case ErrInvalidTopic:
-		return "kafka server: The request attempted to perform an operation on an invalid topic."
-	case ErrMessageSetSizeTooLarge:
-		return "kafka server: The request included message batch larger than the configured segment size on the server."
-	case ErrNotEnoughReplicas:
-		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
-	case ErrNotEnoughReplicasAfterAppend:
-		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
-	case ErrInvalidRequiredAcks:
-		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
-	case ErrIllegalGeneration:
-		return "kafka server: The provided generation id is not the current generation."
-	case ErrInconsistentGroupProtocol:
-		return "kafka server: The provider group protocol type is incompatible with the other members."
-	case ErrInvalidGroupId:
-		return "kafka server: The provided group id was empty."
-	case ErrUnknownMemberId:
-		return "kafka server: The provided member is not known in the current generation."
-	case ErrInvalidSessionTimeout:
-		return "kafka server: The provided session timeout is outside the allowed range."
-	case ErrRebalanceInProgress:
-		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
-	case ErrInvalidCommitOffsetSize:
-		return "kafka server: The provided commit metadata was too large."
-	case ErrTopicAuthorizationFailed:
-		return "kafka server: The client is not authorized to access this topic."
-	case ErrGroupAuthorizationFailed:
-		return "kafka server: The client is not authorized to access this group."
-	case ErrClusterAuthorizationFailed:
-		return "kafka server: The client is not authorized to send this request type."
-	case ErrInvalidTimestamp:
-		return "kafka server: The timestamp of the message is out of acceptable range."
-	case ErrUnsupportedSASLMechanism:
-		return "kafka server: The broker does not support the requested SASL mechanism."
-	case ErrIllegalSASLState:
-		return "kafka server: Request is not valid given the current SASL state."
-	case ErrUnsupportedVersion:
-		return "kafka server: The version of API is not supported."
-	case ErrTopicAlreadyExists:
-		return "kafka server: Topic with this name already exists."
-	case ErrInvalidPartitions:
-		return "kafka server: Number of partitions is invalid."
-	case ErrInvalidReplicationFactor:
-		return "kafka server: Replication-factor is invalid."
-	case ErrInvalidReplicaAssignment:
-		return "kafka server: Replica assignment is invalid."
-	case ErrInvalidConfig:
-		return "kafka server: Configuration is invalid."
-	case ErrNotController:
-		return "kafka server: This is not the correct controller for this cluster."
-	case ErrInvalidRequest:
-		return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
-	case ErrUnsupportedForMessageFormat:
-		return "kafka server: The requested operation is not supported by the message format version."
-	case ErrPolicyViolation:
-		return "kafka server: Request parameters do not satisfy the configured policy."
-	case ErrOutOfOrderSequenceNumber:
-		return "kafka server: The broker received an out of order sequence number."
-	case ErrDuplicateSequenceNumber:
-		return "kafka server: The broker received a duplicate sequence number."
-	case ErrInvalidProducerEpoch:
-		return "kafka server: Producer attempted an operation with an old epoch."
-	case ErrInvalidTxnState:
-		return "kafka server: The producer attempted a transactional operation in an invalid state."
-	case ErrInvalidProducerIDMapping:
-		return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
-	case ErrInvalidTransactionTimeout:
-		return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
-	case ErrConcurrentTransactions:
-		return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
-	case ErrTransactionCoordinatorFenced:
-		return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
-	case ErrTransactionalIDAuthorizationFailed:
-		return "kafka server: Transactional ID authorization failed."
-	case ErrSecurityDisabled:
-		return "kafka server: Security features are disabled."
-	case ErrOperationNotAttempted:
-		return "kafka server: The broker did not attempt to execute this operation."
-	case ErrKafkaStorageError:
-		return "kafka server: Disk error when trying to access log file on the disk."
-	case ErrLogDirNotFound:
-		return "kafka server: The specified log directory is not found in the broker config."
-	case ErrSASLAuthenticationFailed:
-		return "kafka server: SASL Authentication failed."
-	case ErrUnknownProducerID:
-		return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
-	case ErrReassignmentInProgress:
-		return "kafka server: A partition reassignment is in progress."
-	case ErrDelegationTokenAuthDisabled:
-		return "kafka server: Delegation Token feature is not enabled."
-	case ErrDelegationTokenNotFound:
-		return "kafka server: Delegation Token is not found on server."
-	case ErrDelegationTokenOwnerMismatch:
-		return "kafka server: Specified Principal is not valid Owner/Renewer."
-	case ErrDelegationTokenRequestNotAllowed:
-		return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
-	case ErrDelegationTokenAuthorizationFailed:
-		return "kafka server: Delegation Token authorization failed."
-	case ErrDelegationTokenExpired:
-		return "kafka server: Delegation Token is expired."
-	case ErrInvalidPrincipalType:
-		return "kafka server: Supplied principalType is not supported."
-	case ErrNonEmptyGroup:
-		return "kafka server: The group is not empty."
-	case ErrGroupIDNotFound:
-		return "kafka server: The group id does not exist."
-	case ErrFetchSessionIDNotFound:
-		return "kafka server: The fetch session ID was not found."
-	case ErrInvalidFetchSessionEpoch:
-		return "kafka server: The fetch session epoch is invalid."
-	case ErrListenerNotFound:
-		return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
-	case ErrTopicDeletionDisabled:
-		return "kafka server: Topic deletion is disabled."
-	case ErrFencedLeaderEpoch:
-		return "kafka server: The leader epoch in the request is older than the epoch on the broker."
-	case ErrUnknownLeaderEpoch:
-		return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
-	case ErrUnsupportedCompressionType:
-		return "kafka server: The requesting client does not support the compression type of given partition."
-	case ErrStaleBrokerEpoch:
-		return "kafka server: Broker epoch has changed"
-	case ErrOffsetNotAvailable:
-		return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
-	case ErrMemberIdRequired:
-		return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
-	case ErrPreferredLeaderNotAvailable:
-		return "kafka server: The preferred leader was not available"
-	case ErrGroupMaxSizeReached:
-		return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
-	case ErrFencedInstancedId:
-		return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
-	case ErrEligibleLeadersNotAvailable:
-		return "kafka server: Eligible topic partition leaders are not available."
-	case ErrElectionNotNeeded:
-		return "kafka server: Leader election not needed for topic partition."
-	case ErrNoReassignmentInProgress:
-		return "kafka server: No partition reassignment is in progress."
-	case ErrGroupSubscribedToTopic:
-		return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."
-	case ErrInvalidRecord:
-		return "kafka server: This record has failed the validation on broker and hence will be rejected."
-	case ErrUnstableOffsetCommit:
-		return "kafka server: There are unstable offsets that need to be cleared."
-	}
-
-	return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
-}
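Two error families coexist here: client-side sentinel errors are plain error values, while broker-side KError codes also satisfy the error interface, so both compare cleanly with errors.Is. A hedged sketch of triaging them (the classify helper and its retry advice are illustrative only):

    package example

    import (
    	"errors"
    	"fmt"

    	"github.com/Shopify/sarama"
    )

    func classify(err error) string {
    	switch {
    	case errors.Is(err, sarama.ErrOutOfBrokers):
    		return "client: no reachable brokers"
    	case errors.Is(err, sarama.ErrTopicAlreadyExists):
    		return "broker: topic exists; often safe to ignore on create"
    	case errors.Is(err, sarama.ErrNotLeaderForPartition):
    		return "broker: stale metadata; refresh and retry"
    	default:
    		return fmt.Sprintf("unhandled: %v", err)
    	}
    }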
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
deleted file mode 100644
index f893aef..0000000
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package sarama
-
-type fetchRequestBlock struct {
-	Version            int16
-	currentLeaderEpoch int32
-	fetchOffset        int64
-	logStartOffset     int64
-	maxBytes           int32
-}
-
-func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
-	b.Version = version
-	if b.Version >= 9 {
-		pe.putInt32(b.currentLeaderEpoch)
-	}
-	pe.putInt64(b.fetchOffset)
-	if b.Version >= 5 {
-		pe.putInt64(b.logStartOffset)
-	}
-	pe.putInt32(b.maxBytes)
-	return nil
-}
-
-func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	b.Version = version
-	if b.Version >= 9 {
-		if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-	if b.fetchOffset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if b.Version >= 5 {
-		if b.logStartOffset, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-	if b.maxBytes, err = pd.getInt32(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
-// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
-type FetchRequest struct {
-	MaxWaitTime  int32
-	MinBytes     int32
-	MaxBytes     int32
-	Version      int16
-	Isolation    IsolationLevel
-	SessionID    int32
-	SessionEpoch int32
-	blocks       map[string]map[int32]*fetchRequestBlock
-	forgotten    map[string][]int32
-	RackID       string
-}
-
-type IsolationLevel int8
-
-const (
-	ReadUncommitted IsolationLevel = iota
-	ReadCommitted
-)
-
-func (r *FetchRequest) encode(pe packetEncoder) (err error) {
-	pe.putInt32(-1) // replica ID is always -1 for clients
-	pe.putInt32(r.MaxWaitTime)
-	pe.putInt32(r.MinBytes)
-	if r.Version >= 3 {
-		pe.putInt32(r.MaxBytes)
-	}
-	if r.Version >= 4 {
-		pe.putInt8(int8(r.Isolation))
-	}
-	if r.Version >= 7 {
-		pe.putInt32(r.SessionID)
-		pe.putInt32(r.SessionEpoch)
-	}
-	err = pe.putArrayLength(len(r.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, blocks := range r.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(blocks))
-		if err != nil {
-			return err
-		}
-		for partition, block := range blocks {
-			pe.putInt32(partition)
-			err = block.encode(pe, r.Version)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	if r.Version >= 7 {
-		err = pe.putArrayLength(len(r.forgotten))
-		if err != nil {
-			return err
-		}
-		for topic, partitions := range r.forgotten {
-			err = pe.putString(topic)
-			if err != nil {
-				return err
-			}
-			err = pe.putArrayLength(len(partitions))
-			if err != nil {
-				return err
-			}
-			for _, partition := range partitions {
-				pe.putInt32(partition)
-			}
-		}
-	}
-	if r.Version >= 11 {
-		err = pe.putString(r.RackID)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if _, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if r.MaxWaitTime, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if r.MinBytes, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if r.Version >= 3 {
-		if r.MaxBytes, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-	if r.Version >= 4 {
-		isolation, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-		r.Isolation = IsolationLevel(isolation)
-	}
-	if r.Version >= 7 {
-		r.SessionID, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-		r.SessionEpoch, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*fetchRequestBlock)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			fetchBlock := &fetchRequestBlock{}
-			if err = fetchBlock.decode(pd, r.Version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = fetchBlock
-		}
-	}
-
-	if r.Version >= 7 {
-		forgottenCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.forgotten = make(map[string][]int32)
-		for i := 0; i < forgottenCount; i++ {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			partitionCount, err := pd.getArrayLength()
-			if err != nil {
-				return err
-			}
-			r.forgotten[topic] = make([]int32, partitionCount)
-
-			for j := 0; j < partitionCount; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				r.forgotten[topic][j] = partition
-			}
-		}
-	}
-
-	if r.Version >= 11 {
-		r.RackID, err = pd.getString()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *FetchRequest) key() int16 {
-	return 1
-}
-
-func (r *FetchRequest) version() int16 {
-	return r.Version
-}
-
-func (r *FetchRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *FetchRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 0:
-		return MinVersion
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_10_1_0
-	case 4, 5:
-		return V0_11_0_0
-	case 6:
-		return V1_0_0_0
-	case 7:
-		return V1_1_0_0
-	case 8:
-		return V2_0_0_0
-	case 9, 10:
-		return V2_1_0_0
-	case 11:
-		return V2_3_0_0
-	default:
-		return MaxVersion
-	}
-}
-
-func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*fetchRequestBlock)
-	}
-
-	if r.Version >= 7 && r.forgotten == nil {
-		r.forgotten = make(map[string][]int32)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*fetchRequestBlock)
-	}
-
-	tmp := new(fetchRequestBlock)
-	tmp.Version = r.Version
-	tmp.maxBytes = maxBytes
-	tmp.fetchOffset = fetchOffset
-	if r.Version >= 9 {
-		tmp.currentLeaderEpoch = int32(-1)
-	}
-
-	r.blocks[topic][partitionID] = tmp
-}
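Putting the version gates together: v3 adds MaxBytes, v4 the isolation level, v7 the incremental fetch-session fields, v11 the rack ID. Hand-building a v11 request might look like this (topic, rack, and sizes are hypothetical; normal consumers never construct this directly):

    package example

    import "github.com/Shopify/sarama"

    func buildFetch() *sarama.FetchRequest {
    	req := &sarama.FetchRequest{
    		Version:      11,
    		MaxWaitTime:  500, // milliseconds
    		MinBytes:     1,
    		MaxBytes:     1 << 20,
    		Isolation:    sarama.ReadCommitted,
    		SessionID:    0, // 0 with epoch -1 opts out of fetch sessions
    		SessionEpoch: -1,
    		RackID:       "rack-1",
    	}
    	// Read partition 0 of "events" from offset 0, up to 1 MiB.
    	req.AddBlock("events", 0, 0, 1<<20)
    	return req
    }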
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
deleted file mode 100644
index 54b8828..0000000
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package sarama
-
-import (
-	"sort"
-	"time"
-)
-
-type AbortedTransaction struct {
-	ProducerID  int64
-	FirstOffset int64
-}
-
-func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
-	if t.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	if t.FirstOffset, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
-	pe.putInt64(t.ProducerID)
-	pe.putInt64(t.FirstOffset)
-
-	return nil
-}
-
-type FetchResponseBlock struct {
-	Err                  KError
-	HighWaterMarkOffset  int64
-	LastStableOffset     int64
-	LogStartOffset       int64
-	AbortedTransactions  []*AbortedTransaction
-	PreferredReadReplica int32
-	Records              *Records // Deprecated: use FetchResponseBlock.RecordsSet
-	RecordsSet           []*Records
-	Partial              bool
-}
-
-func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	b.HighWaterMarkOffset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	if version >= 4 {
-		b.LastStableOffset, err = pd.getInt64()
-		if err != nil {
-			return err
-		}
-
-		if version >= 5 {
-			b.LogStartOffset, err = pd.getInt64()
-			if err != nil {
-				return err
-			}
-		}
-
-		numTransact, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		if numTransact >= 0 {
-			b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
-		}
-
-		for i := 0; i < numTransact; i++ {
-			transact := new(AbortedTransaction)
-			if err = transact.decode(pd); err != nil {
-				return err
-			}
-			b.AbortedTransactions[i] = transact
-		}
-	}
-
-	if version >= 11 {
-		b.PreferredReadReplica, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	} else {
-		b.PreferredReadReplica = -1
-	}
-
-	recordsSize, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	recordsDecoder, err := pd.getSubset(int(recordsSize))
-	if err != nil {
-		return err
-	}
-
-	b.RecordsSet = []*Records{}
-
-	for recordsDecoder.remaining() > 0 {
-		records := &Records{}
-		if err := records.decode(recordsDecoder); err != nil {
-			// If we have at least one decoded record, this is not an error
-			if err == ErrInsufficientData {
-				if len(b.RecordsSet) == 0 {
-					b.Partial = true
-				}
-				break
-			}
-			return err
-		}
-
-		partial, err := records.isPartial()
-		if err != nil {
-			return err
-		}
-
-		n, err := records.numRecords()
-		if err != nil {
-			return err
-		}
-
-		if n > 0 || (partial && len(b.RecordsSet) == 0) {
-			b.RecordsSet = append(b.RecordsSet, records)
-
-			if b.Records == nil {
-				b.Records = records
-			}
-		}
-
-		overflow, err := records.isOverflow()
-		if err != nil {
-			return err
-		}
-
-		if partial || overflow {
-			break
-		}
-	}
-
-	return nil
-}
-
-func (b *FetchResponseBlock) numRecords() (int, error) {
-	sum := 0
-
-	for _, records := range b.RecordsSet {
-		count, err := records.numRecords()
-		if err != nil {
-			return 0, err
-		}
-
-		sum += count
-	}
-
-	return sum, nil
-}
-
-func (b *FetchResponseBlock) isPartial() (bool, error) {
-	if b.Partial {
-		return true, nil
-	}
-
-	if len(b.RecordsSet) == 1 {
-		return b.RecordsSet[0].isPartial()
-	}
-
-	return false, nil
-}
-
-func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(b.Err))
-
-	pe.putInt64(b.HighWaterMarkOffset)
-
-	if version >= 4 {
-		pe.putInt64(b.LastStableOffset)
-
-		if version >= 5 {
-			pe.putInt64(b.LogStartOffset)
-		}
-
-		if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
-			return err
-		}
-		for _, transact := range b.AbortedTransactions {
-			if err = transact.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	if version >= 11 {
-		pe.putInt32(b.PreferredReadReplica)
-	}
-
-	pe.push(&lengthField{})
-	for _, records := range b.RecordsSet {
-		err = records.encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-	return pe.pop()
-}
-
-func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
-	// I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered,
-	// and the Java implementation uses a PriorityQueue based on `FirstOffset`, so we have to order it ourselves.
-	at := b.AbortedTransactions
-	sort.Slice(
-		at,
-		func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
-	)
-	return at
-}
-
-type FetchResponse struct {
-	Blocks        map[string]map[int32]*FetchResponseBlock
-	ThrottleTime  time.Duration
-	ErrorCode     int16
-	SessionID     int32
-	Version       int16
-	LogAppendTime bool
-	Timestamp     time.Time
-}
-
-func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.Version >= 1 {
-		throttle, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		r.ThrottleTime = time.Duration(throttle) * time.Millisecond
-	}
-
-	if r.Version >= 7 {
-		r.ErrorCode, err = pd.getInt16()
-		if err != nil {
-			return err
-		}
-		r.SessionID, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(FetchResponseBlock)
-			err = block.decode(pd, version)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (r *FetchResponse) encode(pe packetEncoder) (err error) {
-	if r.Version >= 1 {
-		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	}
-
-	if r.Version >= 7 {
-		pe.putInt16(r.ErrorCode)
-		pe.putInt32(r.SessionID)
-	}
-
-	err = pe.putArrayLength(len(r.Blocks))
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-
-		for id, block := range partitions {
-			pe.putInt32(id)
-			err = block.encode(pe, r.Version)
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *FetchResponse) key() int16 {
-	return 1
-}
-
-func (r *FetchResponse) version() int16 {
-	return r.Version
-}
-
-func (r *FetchResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *FetchResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 0:
-		return MinVersion
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_10_1_0
-	case 4, 5:
-		return V0_11_0_0
-	case 6:
-		return V1_0_0_0
-	case 7:
-		return V1_1_0_0
-	case 8:
-		return V2_0_0_0
-	case 9, 10:
-		return V2_1_0_0
-	case 11:
-		return V2_3_0_0
-	default:
-		return MaxVersion
-	}
-}
-
-func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
-	}
-	partitions, ok := r.Blocks[topic]
-	if !ok {
-		partitions = make(map[int32]*FetchResponseBlock)
-		r.Blocks[topic] = partitions
-	}
-	frb, ok := partitions[partition]
-	if !ok {
-		frb = new(FetchResponseBlock)
-		partitions[partition] = frb
-	}
-	frb.Err = err
-}
-
-func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
-	}
-	partitions, ok := r.Blocks[topic]
-	if !ok {
-		partitions = make(map[int32]*FetchResponseBlock)
-		r.Blocks[topic] = partitions
-	}
-	frb, ok := partitions[partition]
-	if !ok {
-		frb = new(FetchResponseBlock)
-		partitions[partition] = frb
-	}
-
-	return frb
-}
-
-func encodeKV(key, value Encoder) ([]byte, []byte) {
-	var kb []byte
-	var vb []byte
-	if key != nil {
-		kb, _ = key.Encode()
-	}
-	if value != nil {
-		vb, _ = value.Encode()
-	}
-
-	return kb, vb
-}
-
-func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
-	frb := r.getOrCreateBlock(topic, partition)
-	kb, vb := encodeKV(key, value)
-	if r.LogAppendTime {
-		timestamp = r.Timestamp
-	}
-	msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
-	msgBlock := &MessageBlock{Msg: msg, Offset: offset}
-	if len(frb.RecordsSet) == 0 {
-		records := newLegacyRecords(&MessageSet{})
-		frb.RecordsSet = []*Records{&records}
-	}
-	set := frb.RecordsSet[0].MsgSet
-	set.Messages = append(set.Messages, msgBlock)
-}
-
-func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
-	frb := r.getOrCreateBlock(topic, partition)
-	kb, vb := encodeKV(key, value)
-	if len(frb.RecordsSet) == 0 {
-		records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
-		frb.RecordsSet = []*Records{&records}
-	}
-	batch := frb.RecordsSet[0].RecordBatch
-	rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
-	batch.addRecord(rec)
-}
-
-// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but
-// instead of appending one record to an existing batch, it appends a new batch
-// containing one record to the fetchResponse. Since transactions are handled
-// at the batch level (the whole batch is either committed or aborted), use
-// this to test transactions.
-func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
-	frb := r.getOrCreateBlock(topic, partition)
-	kb, vb := encodeKV(key, value)
-
-	records := newDefaultRecords(nil) // the batch is attached below
-	batch := &RecordBatch{
-		Version:         2,
-		LogAppendTime:   r.LogAppendTime,
-		FirstTimestamp:  timestamp,
-		MaxTimestamp:    r.Timestamp,
-		FirstOffset:     offset,
-		LastOffsetDelta: 0,
-		ProducerID:      producerID,
-		IsTransactional: isTransactional,
-	}
-	rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
-	batch.addRecord(rec)
-	records.RecordBatch = batch
-
-	frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
-	frb := r.getOrCreateBlock(topic, partition)
-
-	// batch
-	batch := &RecordBatch{
-		Version:         2,
-		LogAppendTime:   r.LogAppendTime,
-		FirstTimestamp:  timestamp,
-		MaxTimestamp:    r.Timestamp,
-		FirstOffset:     offset,
-		LastOffsetDelta: 0,
-		ProducerID:      producerID,
-		IsTransactional: true,
-		Control:         true,
-	}
-
-	// records
-	records := newDefaultRecords(nil)
-	records.RecordBatch = batch
-
-	// record
-	crAbort := ControlRecord{
-		Version: 0,
-		Type:    recordType,
-	}
-	crKey := &realEncoder{raw: make([]byte, 4)}
-	crValue := &realEncoder{raw: make([]byte, 6)}
-	crAbort.encode(crKey, crValue)
-	rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
-	batch.addRecord(rec)
-
-	frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
-	r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
-}
-
-func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
-	r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
-}
-
-func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
-	r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
-}
-
-func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
-	// the control record key and value are encoded by AddControlRecordWithTimestamp
-	r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
-}
-
-func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
-	frb := r.getOrCreateBlock(topic, partition)
-	if len(frb.RecordsSet) == 0 {
-		records := newDefaultRecords(&RecordBatch{Version: 2})
-		frb.RecordsSet = []*Records{&records}
-	}
-	batch := frb.RecordsSet[0].RecordBatch
-	batch.LastOffsetDelta = offset
-}
-
-func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
-	frb := r.getOrCreateBlock(topic, partition)
-	frb.LastStableOffset = offset
-}
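
These Add*/Set* helpers exist so unit tests can hand-craft fetch responses without a real broker. A minimal sketch of faking an aborted transaction (topic name, producer ID, and offsets are illustrative; MockBroker wiring is assumed but not shown):

package main

import "github.com/Shopify/sarama"

func main() {
	res := &sarama.FetchResponse{Version: 4}
	// A transactional batch holding one record from producer 1000 ...
	res.AddRecordBatch("my-topic", 0, nil, sarama.StringEncoder("v1"), 0, 1000, true)
	// ... followed by an ABORT control marker for the same producer, so a
	// READ_COMMITTED consumer must discard the record above.
	res.AddControlRecord("my-topic", 0, 1, 1000, sarama.ControlRecordAbort)
	res.SetLastStableOffset("my-topic", 0, 2)
	_ = res // typically handed to a MockBroker via mockBroker.Returns(res)
}
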
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
deleted file mode 100644
index 597bcbf..0000000
--- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sarama
-
-type CoordinatorType int8
-
-const (
-	CoordinatorGroup CoordinatorType = iota
-	CoordinatorTransaction
-)
-
-type FindCoordinatorRequest struct {
-	Version         int16
-	CoordinatorKey  string
-	CoordinatorType CoordinatorType
-}
-
-func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(f.CoordinatorKey); err != nil {
-		return err
-	}
-
-	if f.Version >= 1 {
-		pe.putInt8(int8(f.CoordinatorType))
-	}
-
-	return nil
-}
-
-func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
-	if f.CoordinatorKey, err = pd.getString(); err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		f.Version = version
-		coordinatorType, err := pd.getInt8()
-		if err != nil {
-			return err
-		}
-
-		f.CoordinatorType = CoordinatorType(coordinatorType)
-	}
-
-	return nil
-}
-
-func (f *FindCoordinatorRequest) key() int16 {
-	return 10
-}
-
-func (f *FindCoordinatorRequest) version() int16 {
-	return f.Version
-}
-
-func (f *FindCoordinatorRequest) headerVersion() int16 {
-	return 1
-}
-
-func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
-	switch f.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_8_2_0
-	}
-}
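
A sketch of sending this request directly over an open broker connection (broker address and group ID are illustrative; version 1 needs the config pinned to Kafka 0.11 or newer, matching requiredVersion above):

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // version 1 of the request requires >= 0.11

	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(cfg); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	resp, err := broker.FindCoordinator(&sarama.FindCoordinatorRequest{
		Version:         1,
		CoordinatorKey:  "my-group",
		CoordinatorType: sarama.CoordinatorGroup,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Coordinator.Addr())
}
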
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
deleted file mode 100644
index 83a648a..0000000
--- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-var NoNode = &Broker{id: -1, addr: ":-1"}
-
-type FindCoordinatorResponse struct {
-	Version      int16
-	ThrottleTime time.Duration
-	Err          KError
-	ErrMsg       *string
-	Coordinator  *Broker
-}
-
-func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
-	if version >= 1 {
-		f.Version = version
-
-		throttleTime, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-		f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-	}
-
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	f.Err = KError(tmp)
-
-	if version >= 1 {
-		if f.ErrMsg, err = pd.getNullableString(); err != nil {
-			return err
-		}
-	}
-
-	coordinator := new(Broker)
-	// The version is hardcoded to 0, as version 1 of the Broker decode
-	// contains the rack field, which is not present in the FindCoordinatorResponse.
-	if err := coordinator.decode(pd, 0); err != nil {
-		return err
-	}
-	if coordinator.addr == ":0" {
-		return nil
-	}
-	f.Coordinator = coordinator
-
-	return nil
-}
-
-func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
-	if f.Version >= 1 {
-		pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
-	}
-
-	pe.putInt16(int16(f.Err))
-
-	if f.Version >= 1 {
-		if err := pe.putNullableString(f.ErrMsg); err != nil {
-			return err
-		}
-	}
-
-	coordinator := f.Coordinator
-	if coordinator == nil {
-		coordinator = NoNode
-	}
-	if err := coordinator.encode(pe, 0); err != nil {
-		return err
-	}
-	return nil
-}
-
-func (f *FindCoordinatorResponse) key() int16 {
-	return 10
-}
-
-func (f *FindCoordinatorResponse) version() int16 {
-	return f.Version
-}
-
-func (f *FindCoordinatorResponse) headerVersion() int16 {
-	return 0
-}
-
-func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
-	switch f.Version {
-	case 1:
-		return V0_11_0_0
-	default:
-		return V0_8_2_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
deleted file mode 100644
index ab8b701..0000000
--- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"math"
-	"strings"
-	"time"
-
-	"github.com/jcmturner/gofork/encoding/asn1"
-	"github.com/jcmturner/gokrb5/v8/asn1tools"
-	"github.com/jcmturner/gokrb5/v8/gssapi"
-	"github.com/jcmturner/gokrb5/v8/iana/chksumtype"
-	"github.com/jcmturner/gokrb5/v8/iana/keyusage"
-	"github.com/jcmturner/gokrb5/v8/messages"
-	"github.com/jcmturner/gokrb5/v8/types"
-)
-
-const (
-	TOK_ID_KRB_AP_REQ   = 256
-	GSS_API_GENERIC_TAG = 0x60
-	KRB5_USER_AUTH      = 1
-	KRB5_KEYTAB_AUTH    = 2
-	GSS_API_INITIAL     = 1
-	GSS_API_VERIFY      = 2
-	GSS_API_FINISH      = 3
-)
-
-type GSSAPIConfig struct {
-	AuthType           int
-	KeyTabPath         string
-	KerberosConfigPath string
-	ServiceName        string
-	Username           string
-	Password           string
-	Realm              string
-	DisablePAFXFAST    bool
-}
-
-type GSSAPIKerberosAuth struct {
-	Config                *GSSAPIConfig
-	ticket                messages.Ticket
-	encKey                types.EncryptionKey
-	NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
-	step                  int
-}
-
-type KerberosClient interface {
-	Login() error
-	GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
-	Domain() string
-	CName() types.PrincipalName
-	Destroy()
-}
-
-// writePackage prepends the length in big-endian before the payload, and sends it to Kafka
-func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
-	length := uint64(len(payload))
-	size := length + 4 // 4 byte length header + payload
-	if size > math.MaxInt32 {
-		return 0, errors.New("payload too large, will overflow int32")
-	}
-	finalPackage := make([]byte, size)
-	copy(finalPackage[4:], payload)
-	binary.BigEndian.PutUint32(finalPackage, uint32(length))
-	bytes, err := broker.conn.Write(finalPackage)
-	if err != nil {
-		return bytes, err
-	}
-	return bytes, nil
-}
-
-// readPackage reads payload length (4 bytes) and then reads the payload into []byte
-func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
-	bytesRead := 0
-	lengthInBytes := make([]byte, 4)
-	bytes, err := io.ReadFull(broker.conn, lengthInBytes)
-	if err != nil {
-		return nil, bytesRead, err
-	}
-	bytesRead += bytes
-	payloadLength := binary.BigEndian.Uint32(lengthInBytes)
-	payloadBytes := make([]byte, payloadLength)         // buffer for the payload
-	bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload
-	if err != nil {
-		return payloadBytes, bytesRead, err
-	}
-	bytesRead += bytes
-	return payloadBytes, bytesRead, nil
-}
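
The wire format these two functions agree on is simply a 4-byte big-endian length followed by the payload. A self-contained illustration (the frame helper is ours, and it omits the int32 overflow guard writePackage performs):

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the 4-byte big-endian length header.
func frame(payload []byte) []byte {
	out := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(out, uint32(len(payload)))
	copy(out[4:], payload)
	return out
}

func main() {
	msg := frame([]byte("token"))
	fmt.Printf("% x\n", msg) // 00 00 00 05 74 6f 6b 65 6e
}
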
-
-func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
-	a := make([]byte, 24)
-	flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
-	binary.LittleEndian.PutUint32(a[:4], 16)
-	for _, i := range flags {
-		f := binary.LittleEndian.Uint32(a[20:24])
-		f |= uint32(i)
-		binary.LittleEndian.PutUint32(a[20:24], f)
-	}
-	return a
-}
-
-// createKrb5Token constructs a Kerberos AP_REQ package, conforming to RFC 4120:
-// https://tools.ietf.org/html/rfc4120#page-84
-func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
-	domain string, cname types.PrincipalName,
-	ticket messages.Ticket,
-	sessionKey types.EncryptionKey) ([]byte, error) {
-	auth, err := types.NewAuthenticator(domain, cname)
-	if err != nil {
-		return nil, err
-	}
-	auth.Cksum = types.Checksum{
-		CksumType: chksumtype.GSSAPI,
-		Checksum:  krbAuth.newAuthenticatorChecksum(),
-	}
-	APReq, err := messages.NewAPReq(
-		ticket,
-		sessionKey,
-		auth,
-	)
-	if err != nil {
-		return nil, err
-	}
-	aprBytes := make([]byte, 2)
-	binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
-	tb, err := APReq.Marshal()
-	if err != nil {
-		return nil, err
-	}
-	aprBytes = append(aprBytes, tb...)
-	return aprBytes, nil
-}
-
-// appendGSSAPIHeader prepends the GSS-API header to the payload, conforming to
-// RFC 2743 Section 3.1, Mechanism-Independent Token Format:
-// https://tools.ietf.org/html/rfc2743#page-81
-//
-// GSSAPIHeader + <specific mechanism payload>
-func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
-	oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
-	if err != nil {
-		return nil, err
-	}
-	tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
-	GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
-	GSSHeader = append(GSSHeader, oidBytes...)
-	GSSPackage := append(GSSHeader, payload...)
-	return GSSPackage, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
-	switch krbAuth.step {
-	case GSS_API_INITIAL:
-		aprBytes, err := krbAuth.createKrb5Token(
-			kerberosClient.Domain(),
-			kerberosClient.CName(),
-			krbAuth.ticket,
-			krbAuth.encKey)
-		if err != nil {
-			return nil, err
-		}
-		krbAuth.step = GSS_API_VERIFY
-		return krbAuth.appendGSSAPIHeader(aprBytes)
-	case GSS_API_VERIFY:
-		wrapTokenReq := gssapi.WrapToken{}
-		if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
-			return nil, err
-		}
-		// Validate response.
-		isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
-		if !isValid {
-			return nil, err
-		}
-
-		wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
-		if err != nil {
-			return nil, err
-		}
-		krbAuth.step = GSS_API_FINISH
-		return wrapTokenResponse.Marshal()
-	}
-	return nil, nil
-}
-
-// Authorize performs the GSSAPI handshake to authenticate with the broker
-func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
-	kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
-	if err != nil {
-		Logger.Printf("Kerberos client error: %s", err)
-		return err
-	}
-
-	err = kerberosClient.Login()
-	if err != nil {
-		Logger.Printf("Kerberos client error: %s", err)
-		return err
-	}
-	// Construct SPN using serviceName and host
-	// SPN format: <SERVICE>/<FQDN>
-
-	host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
-	spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
-
-	ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
-	if err != nil {
-		Logger.Printf("Error getting Kerberos service ticket : %s", err)
-		return err
-	}
-	krbAuth.ticket = ticket
-	krbAuth.encKey = encKey
-	krbAuth.step = GSS_API_INITIAL
-	var receivedBytes []byte
-	defer kerberosClient.Destroy()
-	for {
-		packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
-		if err != nil {
-			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
-			return err
-		}
-		requestTime := time.Now()
-		bytesWritten, err := krbAuth.writePackage(broker, packBytes)
-		if err != nil {
-			Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
-			return err
-		}
-		broker.updateOutgoingCommunicationMetrics(bytesWritten)
-		if krbAuth.step == GSS_API_VERIFY {
-			bytesRead := 0
-			receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
-			requestLatency := time.Since(requestTime)
-			broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
-			if err != nil {
-				Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
-				return err
-			}
-		} else if krbAuth.step == GSS_API_FINISH {
-			return nil
-		}
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
deleted file mode 100644
index e9d9af1..0000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_request.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sarama
-
-type HeartbeatRequest struct {
-	GroupId      string
-	GenerationId int32
-	MemberId     string
-}
-
-func (r *HeartbeatRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-
-	pe.putInt32(r.GenerationId)
-
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if r.GenerationId, err = pd.getInt32(); err != nil {
-		return
-	}
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-func (r *HeartbeatRequest) key() int16 {
-	return 12
-}
-
-func (r *HeartbeatRequest) version() int16 {
-	return 0
-}
-
-func (r *HeartbeatRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
deleted file mode 100644
index 577ab72..0000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_response.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-type HeartbeatResponse struct {
-	Err KError
-}
-
-func (r *HeartbeatResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return nil
-}
-
-func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(kerr)
-
-	return nil
-}
-
-func (r *HeartbeatResponse) key() int16 {
-	return 12
-}
-
-func (r *HeartbeatResponse) version() int16 {
-	return 0
-}
-
-func (r *HeartbeatResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
deleted file mode 100644
index c4d05a9..0000000
--- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package sarama
-
-type IncrementalAlterConfigsOperation int8
-
-const (
-	IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
-	IncrementalAlterConfigsOperationDelete
-	IncrementalAlterConfigsOperationAppend
-	IncrementalAlterConfigsOperationSubtract
-)
-
-// IncrementalAlterConfigsRequest is an incremental alter config request type
-type IncrementalAlterConfigsRequest struct {
-	Resources    []*IncrementalAlterConfigsResource
-	ValidateOnly bool
-}
-
-type IncrementalAlterConfigsResource struct {
-	Type          ConfigResourceType
-	Name          string
-	ConfigEntries map[string]IncrementalAlterConfigsEntry
-}
-
-type IncrementalAlterConfigsEntry struct {
-	Operation IncrementalAlterConfigsOperation
-	Value     *string
-}
-
-func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, r := range a.Resources {
-		if err := r.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	pe.putBool(a.ValidateOnly)
-	return nil
-}
-
-func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
-	resourceCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
-	for i := range a.Resources {
-		r := &IncrementalAlterConfigsResource{}
-		err = r.decode(pd, version)
-		if err != nil {
-			return err
-		}
-		a.Resources[i] = r
-	}
-
-	validateOnly, err := pd.getBool()
-	if err != nil {
-		return err
-	}
-
-	a.ValidateOnly = validateOnly
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
-	pe.putInt8(int8(a.Type))
-
-	if err := pe.putString(a.Name); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
-		return err
-	}
-
-	for name, e := range a.ConfigEntries {
-		if err := pe.putString(name); err != nil {
-			return err
-		}
-
-		if err := e.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Type = ConfigResourceType(t)
-
-	name, err := pd.getString()
-	if err != nil {
-		return err
-	}
-	a.Name = name
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	if n > 0 {
-		a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
-		for i := 0; i < n; i++ {
-			name, err := pd.getString()
-			if err != nil {
-				return err
-			}
-
-			var v IncrementalAlterConfigsEntry
-
-			if err := v.decode(pd, version); err != nil {
-				return err
-			}
-
-			a.ConfigEntries[name] = v
-		}
-	}
-	return err
-}
-
-func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
-	pe.putInt8(int8(a.Operation))
-
-	if err := pe.putNullableString(a.Value); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
-	t, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	a.Operation = IncrementalAlterConfigsOperation(t)
-
-	s, err := pd.getNullableString()
-	if err != nil {
-		return err
-	}
-
-	a.Value = s
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsRequest) key() int16 {
-	return 44
-}
-
-func (a *IncrementalAlterConfigsRequest) version() int16 {
-	return 0
-}
-
-func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
-	return V2_3_0_0
-}
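
Unlike the older AlterConfigs API, which replaces a resource's entire configuration, each entry here mutates a single key. A sketch of building a dry-run request that sets one topic config (topic name and value are illustrative; such a request is normally sent via (*Broker).IncrementalAlterConfigs or the ClusterAdmin wrapper):

package main

import "github.com/Shopify/sarama"

func main() {
	retention := "86400000" // retention.ms value, in milliseconds
	req := &sarama.IncrementalAlterConfigsRequest{
		ValidateOnly: true, // dry run: the broker validates but does not apply
		Resources: []*sarama.IncrementalAlterConfigsResource{{
			Type: sarama.TopicResource,
			Name: "my-topic",
			ConfigEntries: map[string]sarama.IncrementalAlterConfigsEntry{
				"retention.ms": {
					Operation: sarama.IncrementalAlterConfigsOperationSet,
					Value:     &retention,
				},
			},
		}},
	}
	_ = req
}
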
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
deleted file mode 100644
index 3e8c450..0000000
--- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package sarama
-
-import "time"
-
-// IncrementalAlterConfigsResponse is a response type for incremental alter config
-type IncrementalAlterConfigsResponse struct {
-	ThrottleTime time.Duration
-	Resources    []*AlterConfigsResourceResponse
-}
-
-func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
-	if err := pe.putArrayLength(len(a.Resources)); err != nil {
-		return err
-	}
-
-	for _, v := range a.Resources {
-		if err := v.encode(pe); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	responseCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
-	for i := range a.Resources {
-		a.Resources[i] = new(AlterConfigsResourceResponse)
-
-		if err := a.Resources[i].decode(pd, version); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (a *IncrementalAlterConfigsResponse) key() int16 {
-	return 44
-}
-
-func (a *IncrementalAlterConfigsResponse) version() int16 {
-	return 0
-}
-
-func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
-	return V2_3_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
deleted file mode 100644
index 6894443..0000000
--- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDRequest struct {
-	TransactionalID    *string
-	TransactionTimeout time.Duration
-}
-
-func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
-	if err := pe.putNullableString(i.TransactionalID); err != nil {
-		return err
-	}
-	pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
-
-	return nil
-}
-
-func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
-	if i.TransactionalID, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	timeout, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
-
-	return nil
-}
-
-func (i *InitProducerIDRequest) key() int16 {
-	return 22
-}
-
-func (i *InitProducerIDRequest) version() int16 {
-	return 0
-}
-
-func (i *InitProducerIDRequest) headerVersion() int16 {
-	return 1
-}
-
-func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
deleted file mode 100644
index 3e1242b..0000000
--- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDResponse struct {
-	ThrottleTime  time.Duration
-	Err           KError
-	ProducerID    int64
-	ProducerEpoch int16
-}
-
-func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
-	pe.putInt16(int16(i.Err))
-	pe.putInt64(i.ProducerID)
-	pe.putInt16(i.ProducerEpoch)
-
-	return nil
-}
-
-func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	i.Err = KError(kerr)
-
-	if i.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	if i.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (i *InitProducerIDResponse) key() int16 {
-	return 22
-}
-
-func (i *InitProducerIDResponse) version() int16 {
-	return 0
-}
-
-func (i *InitProducerIDResponse) headerVersion() int16 {
-	return 0
-}
-
-func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
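
InitProducerID is the first request a transactional producer issues to obtain its producer ID and epoch. A direct-broker sketch (address and transactional ID are illustrative):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0

	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(cfg); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	txnID := "my-txn-id"
	resp, err := broker.InitProducerID(&sarama.InitProducerIDRequest{
		TransactionalID:    &txnID,
		TransactionTimeout: 10 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.ProducerID, resp.ProducerEpoch)
}
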
diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go
deleted file mode 100644
index d0d33e5..0000000
--- a/vendor/github.com/Shopify/sarama/interceptors.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-// ProducerInterceptor allows you to intercept (and possibly mutate) the records
-// received by the producer before they are published to the Kafka cluster.
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
-type ProducerInterceptor interface {
-
-	// OnSend is called when the producer message is intercepted. Please avoid
-	// modifying the message until it's safe to do so, as this is _not_ a copy
-	// of the message.
-	OnSend(*ProducerMessage)
-}
-
-// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
-// received by the consumer before they are sent to the messages channel.
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
-type ConsumerInterceptor interface {
-
-	// OnConsume is called when the consumed message is intercepted. Please
-	// avoid modifying the message until it's safe to do so, as this is _not_ a
-	// copy of the message.
-	OnConsume(*ConsumerMessage)
-}
-
-func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
-	defer func() {
-		if r := recover(); r != nil {
-			Logger.Printf("Error when calling producer interceptor: %v, %v\n", interceptor, r)
-		}
-	}()
-
-	interceptor.OnSend(msg)
-}
-
-func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
-	defer func() {
-		if r := recover(); r != nil {
-			Logger.Printf("Error when calling consumer interceptor: %v, %v\n", interceptor, r)
-		}
-	}()
-
-	interceptor.OnConsume(msg)
-}
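
A minimal producer interceptor under this contract might stamp a header onto every outgoing message; the header key, value, and config wiring here are illustrative:

package main

import "github.com/Shopify/sarama"

// originStamper adds a static header to each intercepted message.
type originStamper struct{}

func (originStamper) OnSend(msg *sarama.ProducerMessage) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("origin"),
		Value: []byte("service-a"),
	})
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{originStamper{}}
	_ = cfg // pass to sarama.NewAsyncProducer([]string{"localhost:9092"}, cfg)
}
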
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
deleted file mode 100644
index 3734e82..0000000
--- a/vendor/github.com/Shopify/sarama/join_group_request.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package sarama
-
-type GroupProtocol struct {
-	Name     string
-	Metadata []byte
-}
-
-func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
-	p.Name, err = pd.getString()
-	if err != nil {
-		return err
-	}
-	p.Metadata, err = pd.getBytes()
-	return err
-}
-
-func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
-	if err := pe.putString(p.Name); err != nil {
-		return err
-	}
-	if err := pe.putBytes(p.Metadata); err != nil {
-		return err
-	}
-	return nil
-}
-
-type JoinGroupRequest struct {
-	Version               int16
-	GroupId               string
-	SessionTimeout        int32
-	RebalanceTimeout      int32
-	MemberId              string
-	ProtocolType          string
-	GroupProtocols        map[string][]byte // deprecated; use OrderedGroupProtocols
-	OrderedGroupProtocols []*GroupProtocol
-}
-
-func (r *JoinGroupRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-	pe.putInt32(r.SessionTimeout)
-	if r.Version >= 1 {
-		pe.putInt32(r.RebalanceTimeout)
-	}
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-	if err := pe.putString(r.ProtocolType); err != nil {
-		return err
-	}
-
-	if len(r.GroupProtocols) > 0 {
-		if len(r.OrderedGroupProtocols) > 0 {
-			return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
-		}
-
-		if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
-			return err
-		}
-		for name, metadata := range r.GroupProtocols {
-			if err := pe.putString(name); err != nil {
-				return err
-			}
-			if err := pe.putBytes(metadata); err != nil {
-				return err
-			}
-		}
-	} else {
-		if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
-			return err
-		}
-		for _, protocol := range r.OrderedGroupProtocols {
-			if err := protocol.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.SessionTimeout, err = pd.getInt32(); err != nil {
-		return
-	}
-
-	if version >= 1 {
-		if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.ProtocolType, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.GroupProtocols = make(map[string][]byte)
-	for i := 0; i < n; i++ {
-		protocol := &GroupProtocol{}
-		if err := protocol.decode(pd); err != nil {
-			return err
-		}
-		r.GroupProtocols[protocol.Name] = protocol.Metadata
-		r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
-	}
-
-	return nil
-}
-
-func (r *JoinGroupRequest) key() int16 {
-	return 11
-}
-
-func (r *JoinGroupRequest) version() int16 {
-	return r.Version
-}
-
-func (r *JoinGroupRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 2:
-		return V0_11_0_0
-	case 1:
-		return V0_10_1_0
-	default:
-		return V0_9_0_0
-	}
-}
-
-func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
-	r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
-		Name:     name,
-		Metadata: metadata,
-	})
-}
-
-func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
-	bin, err := encode(metadata, nil)
-	if err != nil {
-		return err
-	}
-
-	r.AddGroupProtocol(name, bin)
-	return nil
-}
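
AddGroupProtocolMetadata is the usual way to fill the ordered protocol list; a sketch (group, member, and topic names are illustrative):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		Version:        1,
		GroupId:        "my-group",
		SessionTimeout: 30000, // milliseconds
		MemberId:       "",    // empty on first join; the broker assigns one
		ProtocolType:   "consumer",
	}
	// Preserves protocol preference order, unlike the deprecated GroupProtocols map.
	err := req.AddGroupProtocolMetadata("range", &sarama.ConsumerGroupMemberMetadata{
		Topics: []string{"my-topic"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = req
}
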
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
deleted file mode 100644
index 54b0a45..0000000
--- a/vendor/github.com/Shopify/sarama/join_group_response.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package sarama
-
-type JoinGroupResponse struct {
-	Version       int16
-	ThrottleTime  int32
-	Err           KError
-	GenerationId  int32
-	GroupProtocol string
-	LeaderId      string
-	MemberId      string
-	Members       map[string][]byte
-}
-
-func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
-	members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
-	for id, bin := range r.Members {
-		meta := new(ConsumerGroupMemberMetadata)
-		if err := decode(bin, meta); err != nil {
-			return nil, err
-		}
-		members[id] = *meta
-	}
-	return members, nil
-}
-
-func (r *JoinGroupResponse) encode(pe packetEncoder) error {
-	if r.Version >= 2 {
-		pe.putInt32(r.ThrottleTime)
-	}
-	pe.putInt16(int16(r.Err))
-	pe.putInt32(r.GenerationId)
-
-	if err := pe.putString(r.GroupProtocol); err != nil {
-		return err
-	}
-	if err := pe.putString(r.LeaderId); err != nil {
-		return err
-	}
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.Members)); err != nil {
-		return err
-	}
-
-	for memberId, memberMetadata := range r.Members {
-		if err := pe.putString(memberId); err != nil {
-			return err
-		}
-
-		if err := pe.putBytes(memberMetadata); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if version >= 2 {
-		if r.ThrottleTime, err = pd.getInt32(); err != nil {
-			return
-		}
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	if r.GenerationId, err = pd.getInt32(); err != nil {
-		return
-	}
-
-	if r.GroupProtocol, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.LeaderId, err = pd.getString(); err != nil {
-		return
-	}
-
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.Members = make(map[string][]byte)
-	for i := 0; i < n; i++ {
-		memberId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		memberMetadata, err := pd.getBytes()
-		if err != nil {
-			return err
-		}
-
-		r.Members[memberId] = memberMetadata
-	}
-
-	return nil
-}
-
-func (r *JoinGroupResponse) key() int16 {
-	return 11
-}
-
-func (r *JoinGroupResponse) version() int16 {
-	return r.Version
-}
-
-func (r *JoinGroupResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 2:
-		return V0_11_0_0
-	case 1:
-		return V0_10_1_0
-	default:
-		return V0_9_0_0
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
deleted file mode 100644
index 01a5319..0000000
--- a/vendor/github.com/Shopify/sarama/kerberos_client.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package sarama
-
-import (
-	krb5client "github.com/jcmturner/gokrb5/v8/client"
-	krb5config "github.com/jcmturner/gokrb5/v8/config"
-	"github.com/jcmturner/gokrb5/v8/keytab"
-	"github.com/jcmturner/gokrb5/v8/types"
-)
-
-type KerberosGoKrb5Client struct {
-	krb5client.Client
-}
-
-func (c *KerberosGoKrb5Client) Domain() string {
-	return c.Credentials.Domain()
-}
-
-func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
-	return c.Credentials.CName()
-}
-
-// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
-// It uses a pure Go Kerberos 5 implementation (RFC 4120 and RFC 4121), built on
-// the gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities.
-func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
-	cfg, err := krb5config.Load(config.KerberosConfigPath)
-	if err != nil {
-		return nil, err
-	}
-	return createClient(config, cfg)
-}
-
-func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
-	var client *krb5client.Client
-	if config.AuthType == KRB5_KEYTAB_AUTH {
-		kt, err := keytab.Load(config.KeyTabPath)
-		if err != nil {
-			return nil, err
-		}
-		client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
-	} else {
-		client = krb5client.NewWithPassword(config.Username,
-			config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
-	}
-	return &KerberosGoKrb5Client{*client}, nil
-}
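
Wiring the Kerberos client into a consumer or producer config looks roughly like this (paths, principal, and realm are illustrative; KRB5_KEYTAB_AUTH comes from gssapi_kerberos.go above):

package main

import "github.com/Shopify/sarama"

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH,
		KeyTabPath:         "/etc/security/kafka.keytab",
		KerberosConfigPath: "/etc/krb5.conf",
		ServiceName:        "kafka",
		Username:           "client",
		Realm:              "EXAMPLE.COM",
	}
	_ = cfg // NewKerberosClient is then invoked internally during SASL auth
}
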
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
deleted file mode 100644
index d7789b6..0000000
--- a/vendor/github.com/Shopify/sarama/leave_group_request.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package sarama
-
-type LeaveGroupRequest struct {
-	GroupId  string
-	MemberId string
-}
-
-func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	return nil
-}
-
-func (r *LeaveGroupRequest) key() int16 {
-	return 13
-}
-
-func (r *LeaveGroupRequest) version() int16 {
-	return 0
-}
-
-func (r *LeaveGroupRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
deleted file mode 100644
index 25f8d5e..0000000
--- a/vendor/github.com/Shopify/sarama/leave_group_response.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-type LeaveGroupResponse struct {
-	Err KError
-}
-
-func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return nil
-}
-
-func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(kerr)
-
-	return nil
-}
-
-func (r *LeaveGroupResponse) key() int16 {
-	return 13
-}
-
-func (r *LeaveGroupResponse) version() int16 {
-	return 0
-}
-
-func (r *LeaveGroupResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
deleted file mode 100644
index 4553b2d..0000000
--- a/vendor/github.com/Shopify/sarama/list_groups_request.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sarama
-
-type ListGroupsRequest struct{}
-
-func (r *ListGroupsRequest) encode(pe packetEncoder) error {
-	return nil
-}
-
-func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
-	return nil
-}
-
-func (r *ListGroupsRequest) key() int16 {
-	return 16
-}
-
-func (r *ListGroupsRequest) version() int16 {
-	return 0
-}
-
-func (r *ListGroupsRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
deleted file mode 100644
index 777bae7..0000000
--- a/vendor/github.com/Shopify/sarama/list_groups_response.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package sarama
-
-type ListGroupsResponse struct {
-	Err    KError
-	Groups map[string]string
-}
-
-func (r *ListGroupsResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-
-	if err := pe.putArrayLength(len(r.Groups)); err != nil {
-		return err
-	}
-	for groupId, protocolType := range r.Groups {
-		if err := pe.putString(groupId); err != nil {
-			return err
-		}
-		if err := pe.putString(protocolType); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.Groups = make(map[string]string)
-	for i := 0; i < n; i++ {
-		groupId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		protocolType, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		r.Groups[groupId] = protocolType
-	}
-
-	return nil
-}
-
-func (r *ListGroupsResponse) key() int16 {
-	return 16
-}
-
-func (r *ListGroupsResponse) version() int16 {
-	return 0
-}
-
-func (r *ListGroupsResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
deleted file mode 100644
index c1ffa9b..0000000
--- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package sarama
-
-type ListPartitionReassignmentsRequest struct {
-	TimeoutMs int32
-	blocks    map[string][]int32
-	Version   int16
-}
-
-func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
-	pe.putInt32(r.TimeoutMs)
-
-	pe.putCompactArrayLength(len(r.blocks))
-
-	for topic, partitions := range r.blocks {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-
-		if err := pe.putCompactInt32Array(partitions); err != nil {
-			return err
-		}
-
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.TimeoutMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	topicCount, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount > 0 {
-		r.blocks = make(map[string][]int32)
-		for i := 0; i < topicCount; i++ {
-			topic, err := pd.getCompactString()
-			if err != nil {
-				return err
-			}
-			partitionCount, err := pd.getCompactArrayLength()
-			if err != nil {
-				return err
-			}
-			r.blocks[topic] = make([]int32, partitionCount)
-			for j := 0; j < partitionCount; j++ {
-				partition, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-				r.blocks[topic][j] = partition
-			}
-			if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-				return err
-			}
-		}
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return
-}
-
-func (r *ListPartitionReassignmentsRequest) key() int16 {
-	return 46
-}
-
-func (r *ListPartitionReassignmentsRequest) version() int16 {
-	return r.Version
-}
-
-func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
-	return 2
-}
-
-func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
-
-func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string][]int32)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = partitionIDs
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
deleted file mode 100644
index 4baa6a0..0000000
--- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package sarama
-
-type PartitionReplicaReassignmentsStatus struct {
-	Replicas         []int32
-	AddingReplicas   []int32
-	RemovingReplicas []int32
-}
-
-func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
-	if err := pe.putCompactInt32Array(b.Replicas); err != nil {
-		return err
-	}
-	if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
-		return err
-	}
-	if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
-		return err
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
-	if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-
-	if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-
-	if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
-		return err
-	}
-
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return err
-}
-
-type ListPartitionReassignmentsResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	ErrorCode      KError
-	ErrorMessage   *string
-	TopicStatus    map[string]map[int32]*PartitionReplicaReassignmentsStatus
-}
-
-func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
-	if r.TopicStatus == nil {
-		r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
-	}
-	partitions := r.TopicStatus[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
-		r.TopicStatus[topic] = partitions
-	}
-
-	partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
-}
-
-func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
-	pe.putInt32(r.ThrottleTimeMs)
-	pe.putInt16(int16(r.ErrorCode))
-	if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
-		return err
-	}
-
-	pe.putCompactArrayLength(len(r.TopicStatus))
-	for topic, partitions := range r.TopicStatus {
-		if err := pe.putCompactString(topic); err != nil {
-			return err
-		}
-		pe.putCompactArrayLength(len(partitions))
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-
-			if err := block.encode(pe); err != nil {
-				return err
-			}
-		}
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	pe.putEmptyTaggedFieldArray()
-
-	return nil
-}
-
-func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.ErrorCode = KError(kerr)
-
-	if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
-		return err
-	}
-
-	numTopics, err := pd.getCompactArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
-	for i := 0; i < numTopics; i++ {
-		topic, err := pd.getCompactString()
-		if err != nil {
-			return err
-		}
-
-		ongoingPartitionReassignments, err := pd.getCompactArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
-
-		for j := 0; j < ongoingPartitionReassignments; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := &PartitionReplicaReassignmentsStatus{}
-			if err := block.decode(pd); err != nil {
-				return err
-			}
-			r.TopicStatus[topic][partition] = block
-		}
-
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-	if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *ListPartitionReassignmentsResponse) key() int16 {
-	return 46
-}
-
-func (r *ListPartitionReassignmentsResponse) version() int16 {
-	return r.Version
-}
-
-func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
-	return 1
-}
-
-func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
-	return V2_4_0_0
-}
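
Callers usually reach this API through the cluster admin client rather than assembling the request by hand; a sketch (broker address, topic, and partitions are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_4_0_0 // minimum version for this API

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	// Returns map[topic]map[partition]*PartitionReplicaReassignmentsStatus
	// for reassignments still in flight.
	status, err := admin.ListPartitionReassignments("my-topic", []int32{0, 1})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(status)
}
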
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
deleted file mode 100644
index fd0d1d9..0000000
--- a/vendor/github.com/Shopify/sarama/message.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-const (
-	// CompressionNone no compression
-	CompressionNone CompressionCodec = iota
-	// CompressionGZIP compression using GZIP
-	CompressionGZIP
-	// CompressionSnappy compression using snappy
-	CompressionSnappy
-	// CompressionLZ4 compression using LZ4
-	CompressionLZ4
-	// CompressionZSTD compression using ZSTD
-	CompressionZSTD
-
-	// The lowest 3 bits contain the compression codec used for the message
-	compressionCodecMask int8 = 0x07
-
-	// Bit 3 set for "LogAppend" timestamps
-	timestampTypeMask = 0x08
-
-	// CompressionLevelDefault is the constant to use in CompressionLevel
-	// to have the default compression level for any codec. The value is picked
-	// that we don't use any existing compression levels.
-	CompressionLevelDefault = -1000
-)
-
-// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
-type CompressionCodec int8
-
-func (cc CompressionCodec) String() string {
-	return []string{
-		"none",
-		"gzip",
-		"snappy",
-		"lz4",
-		"zstd",
-	}[int(cc)]
-}
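
The attribute byte that Message.encode builds below packs the codec into the low three bits and the timestamp type into bit 3. A self-contained illustration of the packing (constants copied from the block above):

package main

import "fmt"

const (
	compressionCodecMask int8 = 0x07
	timestampTypeMask    int8 = 0x08
)

func main() {
	// Pack: snappy (codec 2) with a LogAppend timestamp.
	attributes := int8(2)&compressionCodecMask | timestampTypeMask

	// Unpack the same byte, as Message.decode does.
	fmt.Println(attributes & compressionCodecMask)                 // 2
	fmt.Println(attributes&timestampTypeMask == timestampTypeMask) // true
}
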
-
-// Message is a kafka message type
-type Message struct {
-	Codec            CompressionCodec // codec used to compress the message contents
-	CompressionLevel int              // compression level
-	LogAppendTime    bool             // whether the timestamp type used is LogAppendTime
-	Key              []byte           // the message key, may be nil
-	Value            []byte           // the message contents
-	Set              *MessageSet      // the message set a message might wrap
-	Version          int8             // v1 requires Kafka 0.10
-	Timestamp        time.Time        // the timestamp of the message (version 1+ only)
-
-	compressedCache []byte
-	compressedSize  int // used for computing the compression ratio metrics
-}
-
-func (m *Message) encode(pe packetEncoder) error {
-	pe.push(newCRC32Field(crcIEEE))
-
-	pe.putInt8(m.Version)
-
-	attributes := int8(m.Codec) & compressionCodecMask
-	if m.LogAppendTime {
-		attributes |= timestampTypeMask
-	}
-	pe.putInt8(attributes)
-
-	if m.Version >= 1 {
-		if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
-			return err
-		}
-	}
-
-	err := pe.putBytes(m.Key)
-	if err != nil {
-		return err
-	}
-
-	var payload []byte
-
-	if m.compressedCache != nil {
-		payload = m.compressedCache
-		m.compressedCache = nil
-	} else if m.Value != nil {
-		payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
-		if err != nil {
-			return err
-		}
-		m.compressedCache = payload
-		// Keep in mind the compressed payload size for metric gathering
-		m.compressedSize = len(payload)
-	}
-
-	if err = pe.putBytes(payload); err != nil {
-		return err
-	}
-
-	return pe.pop()
-}
-
-func (m *Message) decode(pd packetDecoder) (err error) {
-	crc32Decoder := acquireCrc32Field(crcIEEE)
-	defer releaseCrc32Field(crc32Decoder)
-
-	err = pd.push(crc32Decoder)
-	if err != nil {
-		return err
-	}
-
-	m.Version, err = pd.getInt8()
-	if err != nil {
-		return err
-	}
-
-	if m.Version > 1 {
-		return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
-	}
-
-	attribute, err := pd.getInt8()
-	if err != nil {
-		return err
-	}
-	m.Codec = CompressionCodec(attribute & compressionCodecMask)
-	m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
-
-	if m.Version == 1 {
-		if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
-			return err
-		}
-	}
-
-	m.Key, err = pd.getBytes()
-	if err != nil {
-		return err
-	}
-
-	m.Value, err = pd.getBytes()
-	if err != nil {
-		return err
-	}
-
-	// Required for deep equal assertion during tests but might be useful
-	// for future metrics about the compression ratio in fetch requests
-	m.compressedSize = len(m.Value)
-
-	if m.Value != nil && m.Codec != CompressionNone {
-		m.Value, err = decompress(m.Codec, m.Value)
-		if err != nil {
-			return err
-		}
-
-		if err := m.decodeSet(); err != nil {
-			return err
-		}
-	}
-
-	return pd.pop()
-}
-
-// decodes a message set from a previously encoded bulk-message
-func (m *Message) decodeSet() (err error) {
-	pd := realDecoder{raw: m.Value}
-	m.Set = &MessageSet{}
-	return m.Set.decode(&pd)
-}
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
deleted file mode 100644
index 6523ec2..0000000
--- a/vendor/github.com/Shopify/sarama/message_set.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package sarama
-
-type MessageBlock struct {
-	Offset int64
-	Msg    *Message
-}
-
-// Messages is a convenience helper which returns either all the messages
-// wrapped in this block's nested set, or the block itself.
-func (msb *MessageBlock) Messages() []*MessageBlock {
-	if msb.Msg.Set != nil {
-		return msb.Msg.Set.Messages
-	}
-	return []*MessageBlock{msb}
-}
-
-func (msb *MessageBlock) encode(pe packetEncoder) error {
-	pe.putInt64(msb.Offset)
-	pe.push(&lengthField{})
-	err := msb.Msg.encode(pe)
-	if err != nil {
-		return err
-	}
-	return pe.pop()
-}
-
-func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
-	if msb.Offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	lengthDecoder := acquireLengthField()
-	defer releaseLengthField(lengthDecoder)
-
-	if err = pd.push(lengthDecoder); err != nil {
-		return err
-	}
-
-	msb.Msg = new(Message)
-	if err = msb.Msg.decode(pd); err != nil {
-		return err
-	}
-
-	if err = pd.pop(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-type MessageSet struct {
-	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
-	OverflowMessage        bool // whether the set on the wire contained an overflow message
-	Messages               []*MessageBlock
-}
-
-func (ms *MessageSet) encode(pe packetEncoder) error {
-	for i := range ms.Messages {
-		err := ms.Messages[i].encode(pe)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (ms *MessageSet) decode(pd packetDecoder) (err error) {
-	ms.Messages = nil
-
-	for pd.remaining() > 0 {
-		magic, err := magicValue(pd)
-		if err != nil {
-			if err == ErrInsufficientData {
-				ms.PartialTrailingMessage = true
-				return nil
-			}
-			return err
-		}
-
-		if magic > 1 {
-			return nil
-		}
-
-		msb := new(MessageBlock)
-		err = msb.decode(pd)
-		switch err {
-		case nil:
-			ms.Messages = append(ms.Messages, msb)
-		case ErrInsufficientData:
-			// As an optimization the server is allowed to return a partial message at the
-			// end of the message set. Clients should handle this case, so we flag it and stop decoding.
-			if msb.Offset == -1 {
-				// This is an overflow message caused by chunked down conversion
-				ms.OverflowMessage = true
-			} else {
-				ms.PartialTrailingMessage = true
-			}
-			return nil
-		default:
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (ms *MessageSet) addMessage(msg *Message) {
-	block := new(MessageBlock)
-	block.Msg = msg
-	ms.Messages = append(ms.Messages, block)
-}
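The decode loop above tolerates a truncated final block rather than failing, because brokers may legitimately cut off the last message of a fetch. A toy, self-contained model of that logic: the 8-byte offset plus 4-byte length framing matches MessageBlock.encode, but the function itself is purely illustrative.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeBlocks walks offset(8)+length(4)+payload records and flags a
// truncated trailer instead of returning an error, mirroring the
// PartialTrailingMessage handling in MessageSet.decode.
func decodeBlocks(raw []byte) (complete int, partialTrailing bool) {
	for len(raw) > 0 {
		if len(raw) < 12 { // not even a full offset+length header
			return complete, true
		}
		length := binary.BigEndian.Uint32(raw[8:12])
		if uint32(len(raw)-12) < length { // payload cut off mid-message
			return complete, true
		}
		raw = raw[12+length:]
		complete++
	}
	return complete, false
}

func main() {
	block := make([]byte, 12+5) // one complete 5-byte message
	binary.BigEndian.PutUint32(block[8:12], 5)
	data := append(block, block[:7]...) // plus a truncated trailer
	n, partial := decodeBlocks(data)
	fmt.Println(n, partial) // 1 true
}
```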
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
deleted file mode 100644
index e835f5a..0000000
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-type MetadataRequest struct {
-	Version                int16
-	Topics                 []string
-	AllowAutoTopicCreation bool
-}
-
-func (r *MetadataRequest) encode(pe packetEncoder) error {
-	if r.Version < 0 || r.Version > 5 {
-		return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
-	}
-	if r.Version == 0 || len(r.Topics) > 0 {
-		err := pe.putArrayLength(len(r.Topics))
-		if err != nil {
-			return err
-		}
-
-		for i := range r.Topics {
-			err = pe.putString(r.Topics[i])
-			if err != nil {
-				return err
-			}
-		}
-	} else {
-		pe.putInt32(-1)
-	}
-	if r.Version > 3 {
-		pe.putBool(r.AllowAutoTopicCreation)
-	}
-	return nil
-}
-
-func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
-	r.Version = version
-	size, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if size > 0 {
-		r.Topics = make([]string, size)
-		for i := range r.Topics {
-			topic, err := pd.getString()
-			if err != nil {
-				return err
-			}
-			r.Topics[i] = topic
-		}
-	}
-	if r.Version > 3 {
-		autoCreation, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-		r.AllowAutoTopicCreation = autoCreation
-	}
-	return nil
-}
-
-func (r *MetadataRequest) key() int16 {
-	return 3
-}
-
-func (r *MetadataRequest) version() int16 {
-	return r.Version
-}
-
-func (r *MetadataRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *MetadataRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_0_0
-	case 2:
-		return V0_10_1_0
-	case 3, 4:
-		return V0_11_0_0
-	case 5:
-		return V1_0_0_0
-	default:
-		return MinVersion
-	}
-}
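For reference, a minimal usage sketch of this request type, written against the Shopify import path that this change removes (the IBM/sarama fork keeps a largely unchanged API). Per the `Version > 3` checks above, a v4+ request is required before AllowAutoTopicCreation is encoded on the wire; the topic name is made up.

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// Version 4 maps to V0_11_0_0 in requiredVersion() and is the first
	// version where the AllowAutoTopicCreation flag is put on the wire.
	req := &sarama.MetadataRequest{
		Version:                4,
		Topics:                 []string{"my-topic"},
		AllowAutoTopicCreation: true,
	}
	fmt.Println(req.Topics, req.AllowAutoTopicCreation)
}
```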
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
deleted file mode 100644
index 0bb8702..0000000
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package sarama
-
-type PartitionMetadata struct {
-	Err             KError
-	ID              int32
-	Leader          int32
-	Replicas        []int32
-	Isr             []int32
-	OfflineReplicas []int32
-}
-
-func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	pm.Err = KError(tmp)
-
-	pm.ID, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	pm.Leader, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	pm.Replicas, err = pd.getInt32Array()
-	if err != nil {
-		return err
-	}
-
-	pm.Isr, err = pd.getInt32Array()
-	if err != nil {
-		return err
-	}
-
-	if version >= 5 {
-		pm.OfflineReplicas, err = pd.getInt32Array()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(pm.Err))
-	pe.putInt32(pm.ID)
-	pe.putInt32(pm.Leader)
-
-	err = pe.putInt32Array(pm.Replicas)
-	if err != nil {
-		return err
-	}
-
-	err = pe.putInt32Array(pm.Isr)
-	if err != nil {
-		return err
-	}
-
-	if version >= 5 {
-		err = pe.putInt32Array(pm.OfflineReplicas)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type TopicMetadata struct {
-	Err        KError
-	Name       string
-	IsInternal bool // Only valid for Version >= 1
-	Partitions []*PartitionMetadata
-}
-
-func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	tm.Err = KError(tmp)
-
-	tm.Name, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		tm.IsInternal, err = pd.getBool()
-		if err != nil {
-			return err
-		}
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	tm.Partitions = make([]*PartitionMetadata, n)
-	for i := 0; i < n; i++ {
-		tm.Partitions[i] = new(PartitionMetadata)
-		err = tm.Partitions[i].decode(pd, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(tm.Err))
-
-	err = pe.putString(tm.Name)
-	if err != nil {
-		return err
-	}
-
-	if version >= 1 {
-		pe.putBool(tm.IsInternal)
-	}
-
-	err = pe.putArrayLength(len(tm.Partitions))
-	if err != nil {
-		return err
-	}
-
-	for _, pm := range tm.Partitions {
-		err = pm.encode(pe, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-type MetadataResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Brokers        []*Broker
-	ClusterID      *string
-	ControllerID   int32
-	Topics         []*TopicMetadata
-}
-
-func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if version >= 3 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Brokers = make([]*Broker, n)
-	for i := 0; i < n; i++ {
-		r.Brokers[i] = new(Broker)
-		err = r.Brokers[i].decode(pd, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	if version >= 2 {
-		r.ClusterID, err = pd.getNullableString()
-		if err != nil {
-			return err
-		}
-	}
-
-	if version >= 1 {
-		r.ControllerID, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	} else {
-		r.ControllerID = -1
-	}
-
-	n, err = pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Topics = make([]*TopicMetadata, n)
-	for i := 0; i < n; i++ {
-		r.Topics[i] = new(TopicMetadata)
-		err = r.Topics[i].decode(pd, version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *MetadataResponse) encode(pe packetEncoder) error {
-	if r.Version >= 3 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-
-	err := pe.putArrayLength(len(r.Brokers))
-	if err != nil {
-		return err
-	}
-	for _, broker := range r.Brokers {
-		err = broker.encode(pe, r.Version)
-		if err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 2 {
-		err := pe.putNullableString(r.ClusterID)
-		if err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(r.ControllerID)
-	}
-
-	err = pe.putArrayLength(len(r.Topics))
-	if err != nil {
-		return err
-	}
-	for _, tm := range r.Topics {
-		err = tm.encode(pe, r.Version)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *MetadataResponse) key() int16 {
-	return 3
-}
-
-func (r *MetadataResponse) version() int16 {
-	return r.Version
-}
-
-func (r *MetadataResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *MetadataResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_0_0
-	case 2:
-		return V0_10_1_0
-	case 3, 4:
-		return V0_11_0_0
-	case 5:
-		return V1_0_0_0
-	default:
-		return MinVersion
-	}
-}
-
-// testing API
-
-func (r *MetadataResponse) AddBroker(addr string, id int32) {
-	r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
-}
-
-func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
-	var tmatch *TopicMetadata
-
-	for _, tm := range r.Topics {
-		if tm.Name == topic {
-			tmatch = tm
-			goto foundTopic
-		}
-	}
-
-	tmatch = new(TopicMetadata)
-	tmatch.Name = topic
-	r.Topics = append(r.Topics, tmatch)
-
-foundTopic:
-
-	tmatch.Err = err
-	return tmatch
-}
-
-func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
-	tmatch := r.AddTopic(topic, ErrNoError)
-	var pmatch *PartitionMetadata
-
-	for _, pm := range tmatch.Partitions {
-		if pm.ID == partition {
-			pmatch = pm
-			goto foundPartition
-		}
-	}
-
-	pmatch = new(PartitionMetadata)
-	pmatch.ID = partition
-	tmatch.Partitions = append(tmatch.Partitions, pmatch)
-
-foundPartition:
-
-	pmatch.Leader = brokerID
-	pmatch.Replicas = replicas
-	pmatch.Isr = isr
-	pmatch.OfflineReplicas = offline
-	pmatch.Err = err
-}
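The AddBroker/AddTopic/AddTopicPartition helpers above exist purely for tests. A hedged sketch of how they compose (the broker address, topic, and IDs are illustrative):

```go
package example

import "github.com/Shopify/sarama"

// buildMetadata assembles a canned MetadataResponse the way a test would:
// register a broker, then declare it leader for partition 0 of my-topic.
func buildMetadata() *sarama.MetadataResponse {
	res := &sarama.MetadataResponse{Version: 1, ControllerID: 1}
	res.AddBroker("localhost:9092", 1)
	res.AddTopicPartition("my-topic", 0, 1,
		[]int32{1}, []int32{1}, nil, sarama.ErrNoError)
	return res
}
```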
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
deleted file mode 100644
index 90e5a87..0000000
--- a/vendor/github.com/Shopify/sarama/metrics.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
-// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
-// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
-// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
-const (
-	metricsReservoirSize = 1028
-	metricsAlphaFactor   = 0.015
-)
-
-func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
-	return r.GetOrRegister(name, func() metrics.Histogram {
-		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
-	}).(metrics.Histogram)
-}
-
-func getMetricNameForBroker(name string, broker *Broker) string {
-	// Use broker id like the Java client as it does not contain '.' or ':' characters that
-	// can be interpreted as special characters by monitoring tools (e.g. Graphite)
-	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
-}
-
-func getMetricNameForTopic(name string, topic string) string {
-	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
-	// cf. KAFKA-1902 and KAFKA-2337
-	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
-}
-
-func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
-	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
-}
-
-func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
-	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
-}
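A standalone version of the two naming helpers above, showing why the sanitization matters for dot-delimited reporters; the function names here are local stand-ins for the unexported originals.

```go
package main

import (
	"fmt"
	"strings"
)

// Broker IDs avoid '.' and ':' characters entirely, and topic dots become
// underscores so Graphite-style reporters don't misread them as
// hierarchy separators.
func metricNameForBroker(name string, brokerID int32) string {
	return fmt.Sprintf("%s-for-broker-%d", name, brokerID)
}

func metricNameForTopic(name, topic string) string {
	return fmt.Sprintf("%s-for-topic-%s", name, strings.ReplaceAll(topic, ".", "_"))
}

func main() {
	fmt.Println(metricNameForBroker("request-rate", 1))  // request-rate-for-broker-1
	fmt.Println(metricNameForTopic("record-send-rate", "my.topic")) // ..._topic
}
```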
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
deleted file mode 100644
index c2654d1..0000000
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package sarama
-
-import (
-	"bytes"
-	"encoding/binary"
-	"fmt"
-	"io"
-	"net"
-	"reflect"
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/davecgh/go-spew/spew"
-)
-
-const (
-	expectationTimeout = 500 * time.Millisecond
-)
-
-type GSSApiHandlerFunc func([]byte) []byte
-
-type requestHandlerFunc func(req *request) (res encoderWithHeader)
-
-// RequestNotifierFunc is invoked when a mock broker processes a request successfully
-// and provides the number of bytes read and written.
-type RequestNotifierFunc func(bytesRead, bytesWritten int)
-
-// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
-// to facilitate testing of higher level or specialized consumers and producers
-// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
-// but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshalling, and response marshalling, and leaves it to
-// the test writer to program MockBroker behaviour that is correct according
-// to the Kafka API protocol.
-//
-// MockBroker is implemented as a TCP server listening on a kernel-selected
-// localhost port that can accept many connections. It reads Kafka requests
-// from that connection and returns responses programmed by the SetHandlerByMap
-// function. If a MockBroker receives a request that it has no programmed
-// response for, then it returns nothing and the request times out.
-//
-// A set of MockRequest builders to define mappings used by MockBroker is
-// provided by Sarama. But users can develop MockRequests of their own and use
-// them along with or instead of the standard ones.
-//
-// When running tests with MockBroker it is strongly recommended to specify
-// a timeout to `go test` so that if the broker hangs waiting for a response,
-// the test panics.
-//
-// It is not necessary to prefix message length or correlation ID to your
-// response bytes; the server does that automatically as a convenience.
-type MockBroker struct {
-	brokerID      int32
-	port          int32
-	closing       chan none
-	stopper       chan none
-	expectations  chan encoderWithHeader
-	listener      net.Listener
-	t             TestReporter
-	latency       time.Duration
-	handler       requestHandlerFunc
-	notifier      RequestNotifierFunc
-	history       []RequestResponse
-	lock          sync.Mutex
-	gssApiHandler GSSApiHandlerFunc
-}
-
-// RequestResponse represents a Request/Response pair processed by MockBroker.
-type RequestResponse struct {
-	Request  protocolBody
-	Response encoder
-}
-
-// SetLatency makes broker pause for the specified period every time before
-// replying.
-func (b *MockBroker) SetLatency(latency time.Duration) {
-	b.latency = latency
-}
-
-// SetHandlerByMap defines mapping of Request types to MockResponses. When a
-// request is received by the broker, it looks up the request type in the map
-// and uses the found MockResponse instance to generate an appropriate reply.
-// If the request type is not found in the map then nothing is sent.
-func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
-	b.setHandler(func(req *request) (res encoderWithHeader) {
-		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
-		mockResponse := handlerMap[reqTypeName]
-		if mockResponse == nil {
-			return nil
-		}
-		return mockResponse.For(req.body)
-	})
-}
-
-// SetNotifier sets a function that is invoked whenever a request has been
-// processed successfully, providing the number of bytes read and written.
-func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
-	b.lock.Lock()
-	b.notifier = notifier
-	b.lock.Unlock()
-}
-
-// BrokerID returns broker ID assigned to the broker.
-func (b *MockBroker) BrokerID() int32 {
-	return b.brokerID
-}
-
-// History returns a slice of RequestResponse pairs in the order they were
-// processed by the broker. Note that in case of multiple connections to the
-// broker the order expected by a test can be different from the order recorded
-// in the history, unless some synchronization is implemented in the test.
-func (b *MockBroker) History() []RequestResponse {
-	b.lock.Lock()
-	history := make([]RequestResponse, len(b.history))
-	copy(history, b.history)
-	b.lock.Unlock()
-	return history
-}
-
-// Port returns the TCP port number the broker is listening for requests on.
-func (b *MockBroker) Port() int32 {
-	return b.port
-}
-
-// Addr returns the broker connection string in the form "<address>:<port>".
-func (b *MockBroker) Addr() string {
-	return b.listener.Addr().String()
-}
-
-// Close terminates the broker blocking until it stops internal goroutines and
-// releases all resources.
-func (b *MockBroker) Close() {
-	close(b.expectations)
-	if len(b.expectations) > 0 {
-		buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
-		for e := range b.expectations {
-			_, _ = buf.WriteString(spew.Sdump(e))
-		}
-		b.t.Error(buf.String())
-	}
-	close(b.closing)
-	<-b.stopper
-}
-
-// setHandler sets the specified function as the request handler. Whenever
-// a mock broker reads a request from the wire it passes the request to the
-// function and sends back whatever the handler function returns.
-func (b *MockBroker) setHandler(handler requestHandlerFunc) {
-	b.lock.Lock()
-	b.handler = handler
-	b.lock.Unlock()
-}
-
-func (b *MockBroker) serverLoop() {
-	defer close(b.stopper)
-	var err error
-	var conn net.Conn
-
-	go func() {
-		<-b.closing
-		err := b.listener.Close()
-		if err != nil {
-			b.t.Error(err)
-		}
-	}()
-
-	wg := &sync.WaitGroup{}
-	i := 0
-	for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
-		wg.Add(1)
-		go b.handleRequests(conn, i, wg)
-		i++
-	}
-	wg.Wait()
-	Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
-}
-
-func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
-	b.gssApiHandler = handler
-}
-
-func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
-	var (
-		bytesRead   int
-		lengthBytes = make([]byte, 4)
-	)
-
-	if _, err := io.ReadFull(r, lengthBytes); err != nil {
-		return nil, err
-	}
-
-	bytesRead += len(lengthBytes)
-	length := int32(binary.BigEndian.Uint32(lengthBytes))
-
-	if length <= 4 || length > MaxRequestSize {
-		return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
-	}
-
-	encodedReq := make([]byte, length)
-	if _, err := io.ReadFull(r, encodedReq); err != nil {
-		return nil, err
-	}
-
-	bytesRead += len(encodedReq)
-
-	fullBytes := append(lengthBytes, encodedReq...)
-
-	return fullBytes, nil
-}
-
-func (b *MockBroker) isGSSAPI(buffer []byte) bool {
-	return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
-}
-
-func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
-	defer wg.Done()
-	defer func() {
-		_ = conn.Close()
-	}()
-	s := spew.NewDefaultConfig()
-	s.MaxDepth = 1
-	Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
-	var err error
-
-	abort := make(chan none)
-	defer close(abort)
-	go func() {
-		select {
-		case <-b.closing:
-			_ = conn.Close()
-		case <-abort:
-		}
-	}()
-
-	var bytesWritten int
-	var bytesRead int
-	for {
-		buffer, err := b.readToBytes(conn)
-		if err != nil {
-			Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
-			b.serverError(err)
-			break
-		}
-
-		bytesWritten = 0
-		if !b.isGSSAPI(buffer) {
-			req, br, err := decodeRequest(bytes.NewReader(buffer))
-			bytesRead = br
-			if err != nil {
-				Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
-				b.serverError(err)
-				break
-			}
-
-			if b.latency > 0 {
-				time.Sleep(b.latency)
-			}
-
-			b.lock.Lock()
-			res := b.handler(req)
-			b.history = append(b.history, RequestResponse{req.body, res})
-			b.lock.Unlock()
-
-			if res == nil {
-				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
-				continue
-			}
-			Logger.Printf(
-				"*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
-				b.brokerID, idx, req.body, res,
-				s.Sprintf("%#v", req.body),
-				s.Sprintf("%#v", res),
-			)
-
-			encodedRes, err := encode(res, nil)
-			if err != nil {
-				b.serverError(err)
-				break
-			}
-			if len(encodedRes) == 0 {
-				b.lock.Lock()
-				if b.notifier != nil {
-					b.notifier(bytesRead, 0)
-				}
-				b.lock.Unlock()
-				continue
-			}
-
-			resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
-			if _, err = conn.Write(resHeader); err != nil {
-				b.serverError(err)
-				break
-			}
-			if _, err = conn.Write(encodedRes); err != nil {
-				b.serverError(err)
-				break
-			}
-			bytesWritten = len(resHeader) + len(encodedRes)
-		} else {
-			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
-			// History is not recorded for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
-			b.lock.Lock()
-			res := b.gssApiHandler(buffer)
-			b.lock.Unlock()
-			if res == nil {
-				Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
-				continue
-			}
-			if _, err = conn.Write(res); err != nil {
-				b.serverError(err)
-				break
-			}
-			bytesWritten = len(res)
-		}
-
-		b.lock.Lock()
-		if b.notifier != nil {
-			b.notifier(bytesRead, bytesWritten)
-		}
-		b.lock.Unlock()
-	}
-	Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
-}
-
-func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
-	headerLength := uint32(8)
-
-	if headerVersion >= 1 {
-		headerLength = 9
-	}
-
-	resHeader := make([]byte, headerLength)
-	binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
-	binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
-
-	if headerVersion >= 1 {
-		binary.PutUvarint(resHeader[8:], 0)
-	}
-
-	return resHeader
-}
-
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
-	select {
-	case res, ok := <-b.expectations:
-		if !ok {
-			return nil
-		}
-		return res
-	case <-time.After(expectationTimeout):
-		return nil
-	}
-}
-
-func (b *MockBroker) serverError(err error) {
-	isConnectionClosedError := false
-	if _, ok := err.(*net.OpError); ok {
-		isConnectionClosedError = true
-	} else if err == io.EOF {
-		isConnectionClosedError = true
-	} else if err.Error() == "use of closed network connection" {
-		isConnectionClosedError = true
-	}
-
-	if isConnectionClosedError {
-		return
-	}
-
-	b.t.Errorf(err.Error())
-}
-
-// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
-// test framework and a broker ID to use. If an error occurs it is
-// simply logged to the TestReporter and the broker exits.
-func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
-	return NewMockBrokerAddr(t, brokerID, "localhost:0")
-}
-
-// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
-// it rather than just some ephemeral port.
-func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
-	listener, err := net.Listen("tcp", addr)
-	if err != nil {
-		t.Fatal(err)
-	}
-	return NewMockBrokerListener(t, brokerID, listener)
-}
-
-// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
-func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
-	var err error
-
-	broker := &MockBroker{
-		closing:      make(chan none),
-		stopper:      make(chan none),
-		t:            t,
-		brokerID:     brokerID,
-		expectations: make(chan encoderWithHeader, 512),
-		listener:     listener,
-	}
-	broker.handler = broker.defaultRequestHandler
-
-	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
-	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
-	if err != nil {
-		t.Fatal(err)
-	}
-	tmp, err := strconv.ParseInt(portStr, 10, 32)
-	if err != nil {
-		t.Fatal(err)
-	}
-	broker.port = int32(tmp)
-
-	go broker.serverLoop()
-
-	return broker
-}
-
-func (b *MockBroker) Returns(e encoderWithHeader) {
-	b.expectations <- e
-}
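Every constructor and builder in the sketch below appears in this diff (here or in mockresponses.go, deleted next), so only the topic name and the wiring are assumptions: start a broker, program it via SetHandlerByMap, and always Close it so unmet expectations are reported to the TestReporter.

```go
package example

import "github.com/Shopify/sarama"

// runMockBroker shows the minimal MockBroker lifecycle used in tests.
func runMockBroker(t sarama.TestReporter) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close() // reports any unmet expectations

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my-topic", 0, broker.BrokerID()),
	})
	// ... point a client at broker.Addr() and exercise it here.
}
```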
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
deleted file mode 100644
index 6654ed0..0000000
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ /dev/null
@@ -1,1273 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"strings"
-)
-
-// TestReporter has methods matching go's testing.T to avoid importing
-// `testing` in the main part of the library.
-type TestReporter interface {
-	Error(...interface{})
-	Errorf(string, ...interface{})
-	Fatal(...interface{})
-	Fatalf(string, ...interface{})
-}
-
-// MockResponse is a response builder interface; it defines one method that
-// allows generating a response based on a request body. MockResponses are used
-// to program behavior of MockBroker in tests.
-type MockResponse interface {
-	For(reqBody versionedDecoder) (res encoderWithHeader)
-}
-
-// MockWrapper is a mock response builder that returns a particular concrete
-// response regardless of the actual request passed to the `For` method.
-type MockWrapper struct {
-	res encoderWithHeader
-}
-
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
-	return mw.res
-}
-
-func NewMockWrapper(res encoderWithHeader) *MockWrapper {
-	return &MockWrapper{res: res}
-}
-
-// MockSequence is a mock response builder that is created from a sequence of
-// concrete responses. Each time a `MockBroker` calls its `For` method,
-// the next response from the sequence is returned. When the end of the
-// sequence is reached the last element from the sequence is returned.
-type MockSequence struct {
-	responses []MockResponse
-}
-
-func NewMockSequence(responses ...interface{}) *MockSequence {
-	ms := &MockSequence{}
-	ms.responses = make([]MockResponse, len(responses))
-	for i, res := range responses {
-		switch res := res.(type) {
-		case MockResponse:
-			ms.responses[i] = res
-		case encoderWithHeader:
-			ms.responses[i] = NewMockWrapper(res)
-		default:
-			panic(fmt.Sprintf("Unexpected response type: %T", res))
-		}
-	}
-	return ms
-}
-
-func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
-	res = mc.responses[0].For(reqBody)
-	if len(mc.responses) > 1 {
-		mc.responses = mc.responses[1:]
-	}
-	return res
-}
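In practice a MockSequence is handed to SetHandlerByMap to script flaky behaviour: fail the first call, succeed afterwards. A hedged example using builders from this file (the error constant and topic are illustrative):

```go
package example

import "github.com/Shopify/sarama"

// flakyProduce returns an error on the first produce and ErrNoError on
// every subsequent one, since MockSequence repeats its last element.
func flakyProduce(t sarama.TestReporter) sarama.MockResponse {
	return sarama.NewMockSequence(
		sarama.NewMockProduceResponse(t).
			SetError("my-topic", 0, sarama.ErrNotLeaderForPartition),
		sarama.NewMockProduceResponse(t), // subsequent calls succeed
	)
}
```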
-
-type MockListGroupsResponse struct {
-	groups map[string]string
-	t      TestReporter
-}
-
-func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
-	return &MockListGroupsResponse{
-		groups: make(map[string]string),
-		t:      t,
-	}
-}
-
-func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	request := reqBody.(*ListGroupsRequest)
-	_ = request
-	response := &ListGroupsResponse{
-		Groups: m.groups,
-	}
-	return response
-}
-
-func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
-	m.groups[groupID] = protocolType
-	return m
-}
-
-type MockDescribeGroupsResponse struct {
-	groups map[string]*GroupDescription
-	t      TestReporter
-}
-
-func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
-	return &MockDescribeGroupsResponse{
-		t:      t,
-		groups: make(map[string]*GroupDescription),
-	}
-}
-
-func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
-	m.groups[groupID] = description
-	return m
-}
-
-func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	request := reqBody.(*DescribeGroupsRequest)
-
-	response := &DescribeGroupsResponse{}
-	for _, requestedGroup := range request.Groups {
-		if group, ok := m.groups[requestedGroup]; ok {
-			response.Groups = append(response.Groups, group)
-		} else {
-			// Mimic real Kafka - if a group doesn't exist, return
-			// an entry with state "Dead"
-			response.Groups = append(response.Groups, &GroupDescription{
-				GroupId: requestedGroup,
-				State:   "Dead",
-			})
-		}
-	}
-
-	return response
-}
-
-// MockMetadataResponse is a `MetadataResponse` builder.
-type MockMetadataResponse struct {
-	controllerID int32
-	leaders      map[string]map[int32]int32
-	brokers      map[string]int32
-	t            TestReporter
-}
-
-func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
-	return &MockMetadataResponse{
-		leaders: make(map[string]map[int32]int32),
-		brokers: make(map[string]int32),
-		t:       t,
-	}
-}
-
-func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
-	partitions := mmr.leaders[topic]
-	if partitions == nil {
-		partitions = make(map[int32]int32)
-		mmr.leaders[topic] = partitions
-	}
-	partitions[partition] = brokerID
-	return mmr
-}
-
-func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
-	mmr.brokers[addr] = brokerID
-	return mmr
-}
-
-func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
-	mmr.controllerID = brokerID
-	return mmr
-}
-
-func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	metadataRequest := reqBody.(*MetadataRequest)
-	metadataResponse := &MetadataResponse{
-		Version:      metadataRequest.version(),
-		ControllerID: mmr.controllerID,
-	}
-	for addr, brokerID := range mmr.brokers {
-		metadataResponse.AddBroker(addr, brokerID)
-	}
-
-	// Generate set of replicas
-	var replicas []int32
-	var offlineReplicas []int32
-	for _, brokerID := range mmr.brokers {
-		replicas = append(replicas, brokerID)
-	}
-
-	if len(metadataRequest.Topics) == 0 {
-		for topic, partitions := range mmr.leaders {
-			for partition, brokerID := range partitions {
-				metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
-			}
-		}
-		return metadataResponse
-	}
-	for _, topic := range metadataRequest.Topics {
-		for partition, brokerID := range mmr.leaders[topic] {
-			metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
-		}
-	}
-	return metadataResponse
-}
-
-// MockOffsetResponse is an `OffsetResponse` builder.
-type MockOffsetResponse struct {
-	offsets map[string]map[int32]map[int64]int64
-	t       TestReporter
-	version int16
-}
-
-func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
-	return &MockOffsetResponse{
-		offsets: make(map[string]map[int32]map[int64]int64),
-		t:       t,
-	}
-}
-
-func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
-	mor.version = version
-	return mor
-}
-
-func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
-	partitions := mor.offsets[topic]
-	if partitions == nil {
-		partitions = make(map[int32]map[int64]int64)
-		mor.offsets[topic] = partitions
-	}
-	times := partitions[partition]
-	if times == nil {
-		times = make(map[int64]int64)
-		partitions[partition] = times
-	}
-	times[time] = offset
-	return mor
-}
-
-func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	offsetRequest := reqBody.(*OffsetRequest)
-	offsetResponse := &OffsetResponse{Version: mor.version}
-	for topic, partitions := range offsetRequest.blocks {
-		for partition, block := range partitions {
-			offset := mor.getOffset(topic, partition, block.time)
-			offsetResponse.AddTopicPartition(topic, partition, offset)
-		}
-	}
-	return offsetResponse
-}
-
-func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
-	partitions := mor.offsets[topic]
-	if partitions == nil {
-		mor.t.Errorf("missing topic: %s", topic)
-	}
-	times := partitions[partition]
-	if times == nil {
-		mor.t.Errorf("missing partition: %d", partition)
-	}
-	offset, ok := times[time]
-	if !ok {
-		mor.t.Errorf("missing time: %d", time)
-	}
-	return offset
-}
-
-// MockFetchResponse is a `FetchResponse` builder.
-type MockFetchResponse struct {
-	messages       map[string]map[int32]map[int64]Encoder
-	highWaterMarks map[string]map[int32]int64
-	t              TestReporter
-	batchSize      int
-	version        int16
-}
-
-func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
-	return &MockFetchResponse{
-		messages:       make(map[string]map[int32]map[int64]Encoder),
-		highWaterMarks: make(map[string]map[int32]int64),
-		t:              t,
-		batchSize:      batchSize,
-	}
-}
-
-func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
-	mfr.version = version
-	return mfr
-}
-
-func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
-	partitions := mfr.messages[topic]
-	if partitions == nil {
-		partitions = make(map[int32]map[int64]Encoder)
-		mfr.messages[topic] = partitions
-	}
-	messages := partitions[partition]
-	if messages == nil {
-		messages = make(map[int64]Encoder)
-		partitions[partition] = messages
-	}
-	messages[offset] = msg
-	return mfr
-}
-
-func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
-	partitions := mfr.highWaterMarks[topic]
-	if partitions == nil {
-		partitions = make(map[int32]int64)
-		mfr.highWaterMarks[topic] = partitions
-	}
-	partitions[partition] = offset
-	return mfr
-}
-
-func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	fetchRequest := reqBody.(*FetchRequest)
-	res := &FetchResponse{
-		Version: mfr.version,
-	}
-	for topic, partitions := range fetchRequest.blocks {
-		for partition, block := range partitions {
-			initialOffset := block.fetchOffset
-			offset := initialOffset
-			maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
-			for i := 0; i < mfr.batchSize && offset < maxOffset; {
-				msg := mfr.getMessage(topic, partition, offset)
-				if msg != nil {
-					res.AddMessage(topic, partition, nil, msg, offset)
-					i++
-				}
-				offset++
-			}
-			fb := res.GetBlock(topic, partition)
-			if fb == nil {
-				res.AddError(topic, partition, ErrNoError)
-				fb = res.GetBlock(topic, partition)
-			}
-			fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
-		}
-	}
-	return res
-}
-
-func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
-	partitions := mfr.messages[topic]
-	if partitions == nil {
-		return nil
-	}
-	messages := partitions[partition]
-	if messages == nil {
-		return nil
-	}
-	return messages[offset]
-}
-
-func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
-	partitions := mfr.messages[topic]
-	if partitions == nil {
-		return 0
-	}
-	messages := partitions[partition]
-	if messages == nil {
-		return 0
-	}
-	return len(messages)
-}
-
-func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
-	partitions := mfr.highWaterMarks[topic]
-	if partitions == nil {
-		return 0
-	}
-	return partitions[partition]
-}
-
-// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
-type MockConsumerMetadataResponse struct {
-	coordinators map[string]interface{}
-	t            TestReporter
-}
-
-func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
-	return &MockConsumerMetadataResponse{
-		coordinators: make(map[string]interface{}),
-		t:            t,
-	}
-}
-
-func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
-	mr.coordinators[group] = broker
-	return mr
-}
-
-func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
-	mr.coordinators[group] = kerror
-	return mr
-}
-
-func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*ConsumerMetadataRequest)
-	group := req.ConsumerGroup
-	res := &ConsumerMetadataResponse{}
-	v := mr.coordinators[group]
-	switch v := v.(type) {
-	case *MockBroker:
-		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
-	case KError:
-		res.Err = v
-	}
-	return res
-}
-
-// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
-type MockFindCoordinatorResponse struct {
-	groupCoordinators map[string]interface{}
-	transCoordinators map[string]interface{}
-	t                 TestReporter
-}
-
-func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
-	return &MockFindCoordinatorResponse{
-		groupCoordinators: make(map[string]interface{}),
-		transCoordinators: make(map[string]interface{}),
-		t:                 t,
-	}
-}
-
-func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
-	switch coordinatorType {
-	case CoordinatorGroup:
-		mr.groupCoordinators[group] = broker
-	case CoordinatorTransaction:
-		mr.transCoordinators[group] = broker
-	}
-	return mr
-}
-
-func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
-	switch coordinatorType {
-	case CoordinatorGroup:
-		mr.groupCoordinators[group] = kerror
-	case CoordinatorTransaction:
-		mr.transCoordinators[group] = kerror
-	}
-	return mr
-}
-
-func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*FindCoordinatorRequest)
-	res := &FindCoordinatorResponse{}
-	var v interface{}
-	switch req.CoordinatorType {
-	case CoordinatorGroup:
-		v = mr.groupCoordinators[req.CoordinatorKey]
-	case CoordinatorTransaction:
-		v = mr.transCoordinators[req.CoordinatorKey]
-	}
-	switch v := v.(type) {
-	case *MockBroker:
-		res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
-	case KError:
-		res.Err = v
-	}
-	return res
-}
-
-// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
-type MockOffsetCommitResponse struct {
-	errors map[string]map[string]map[int32]KError
-	t      TestReporter
-}
-
-func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
-	return &MockOffsetCommitResponse{t: t}
-}
-
-func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
-	if mr.errors == nil {
-		mr.errors = make(map[string]map[string]map[int32]KError)
-	}
-	topics := mr.errors[group]
-	if topics == nil {
-		topics = make(map[string]map[int32]KError)
-		mr.errors[group] = topics
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		topics[topic] = partitions
-	}
-	partitions[partition] = kerror
-	return mr
-}
-
-func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*OffsetCommitRequest)
-	group := req.ConsumerGroup
-	res := &OffsetCommitResponse{}
-	for topic, partitions := range req.blocks {
-		for partition := range partitions {
-			res.AddError(topic, partition, mr.getError(group, topic, partition))
-		}
-	}
-	return res
-}
-
-func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
-	topics := mr.errors[group]
-	if topics == nil {
-		return ErrNoError
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		return ErrNoError
-	}
-	kerror, ok := partitions[partition]
-	if !ok {
-		return ErrNoError
-	}
-	return kerror
-}
-
-// MockProduceResponse is a `ProduceResponse` builder.
-type MockProduceResponse struct {
-	version int16
-	errors  map[string]map[int32]KError
-	t       TestReporter
-}
-
-func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
-	return &MockProduceResponse{t: t}
-}
-
-func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
-	mr.version = version
-	return mr
-}
-
-func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
-	if mr.errors == nil {
-		mr.errors = make(map[string]map[int32]KError)
-	}
-	partitions := mr.errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		mr.errors[topic] = partitions
-	}
-	partitions[partition] = kerror
-	return mr
-}
-
-func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*ProduceRequest)
-	res := &ProduceResponse{
-		Version: mr.version,
-	}
-	for topic, partitions := range req.records {
-		for partition := range partitions {
-			res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
-		}
-	}
-	return res
-}
-
-func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
-	partitions := mr.errors[topic]
-	if partitions == nil {
-		return ErrNoError
-	}
-	kerror, ok := partitions[partition]
-	if !ok {
-		return ErrNoError
-	}
-	return kerror
-}
-
-// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
-type MockOffsetFetchResponse struct {
-	offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
-	error   KError
-	t       TestReporter
-}
-
-func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
-	return &MockOffsetFetchResponse{t: t}
-}
-
-func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
-	if mr.offsets == nil {
-		mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
-	}
-	topics := mr.offsets[group]
-	if topics == nil {
-		topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
-		mr.offsets[group] = topics
-	}
-	partitions := topics[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*OffsetFetchResponseBlock)
-		topics[topic] = partitions
-	}
-	partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
-	return mr
-}
-
-func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
-	mr.error = kerror
-	return mr
-}
-
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*OffsetFetchRequest)
-	group := req.ConsumerGroup
-	res := &OffsetFetchResponse{Version: req.Version}
-
-	for topic, partitions := range mr.offsets[group] {
-		for partition, block := range partitions {
-			res.AddBlock(topic, partition, block)
-		}
-	}
-
-	if res.Version >= 2 {
-		res.Err = mr.error
-	}
-	return res
-}
-
-type MockCreateTopicsResponse struct {
-	t TestReporter
-}
-
-func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
-	return &MockCreateTopicsResponse{t: t}
-}
-
-func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*CreateTopicsRequest)
-	res := &CreateTopicsResponse{
-		Version: req.Version,
-	}
-	res.TopicErrors = make(map[string]*TopicError)
-
-	for topic := range req.TopicDetails {
-		if res.Version >= 1 && strings.HasPrefix(topic, "_") {
-			msg := "insufficient permissions to create topic with reserved prefix"
-			res.TopicErrors[topic] = &TopicError{
-				Err:    ErrTopicAuthorizationFailed,
-				ErrMsg: &msg,
-			}
-			continue
-		}
-		res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
-	}
-	return res
-}
-
-type MockDeleteTopicsResponse struct {
-	t TestReporter
-}
-
-func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
-	return &MockDeleteTopicsResponse{t: t}
-}
-
-func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DeleteTopicsRequest)
-	res := &DeleteTopicsResponse{}
-	res.TopicErrorCodes = make(map[string]KError)
-
-	for _, topic := range req.Topics {
-		res.TopicErrorCodes[topic] = ErrNoError
-	}
-	res.Version = req.Version
-	return res
-}
-
-type MockCreatePartitionsResponse struct {
-	t TestReporter
-}
-
-func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
-	return &MockCreatePartitionsResponse{t: t}
-}
-
-func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*CreatePartitionsRequest)
-	res := &CreatePartitionsResponse{}
-	res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
-
-	for topic := range req.TopicPartitions {
-		if strings.HasPrefix(topic, "_") {
-			msg := "insufficient permissions to create partition on topic with reserved prefix"
-			res.TopicPartitionErrors[topic] = &TopicPartitionError{
-				Err:    ErrTopicAuthorizationFailed,
-				ErrMsg: &msg,
-			}
-			continue
-		}
-		res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
-	}
-	return res
-}
-
-type MockAlterPartitionReassignmentsResponse struct {
-	t TestReporter
-}
-
-func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
-	return &MockAlterPartitionReassignmentsResponse{t: t}
-}
-
-func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*AlterPartitionReassignmentsRequest)
-	_ = req
-	res := &AlterPartitionReassignmentsResponse{}
-	return res
-}
-
-type MockListPartitionReassignmentsResponse struct {
-	t TestReporter
-}
-
-func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
-	return &MockListPartitionReassignmentsResponse{t: t}
-}
-
-func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*ListPartitionReassignmentsRequest)
-	_ = req
-	res := &ListPartitionReassignmentsResponse{}
-
-	for topic, partitions := range req.blocks {
-		for _, partition := range partitions {
-			res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
-		}
-	}
-
-	return res
-}
-
-type MockDeleteRecordsResponse struct {
-	t TestReporter
-}
-
-func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
-	return &MockDeleteRecordsResponse{t: t}
-}
-
-func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DeleteRecordsRequest)
-	res := &DeleteRecordsResponse{}
-	res.Topics = make(map[string]*DeleteRecordsResponseTopic)
-
-	for topic, deleteRecordRequestTopic := range req.Topics {
-		partitions := make(map[int32]*DeleteRecordsResponsePartition)
-		for partition := range deleteRecordRequestTopic.PartitionOffsets {
-			partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
-		}
-		res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
-	}
-	return res
-}
-
-type MockDescribeConfigsResponse struct {
-	t TestReporter
-}
-
-func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
-	return &MockDescribeConfigsResponse{t: t}
-}
-
-func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DescribeConfigsRequest)
-	res := &DescribeConfigsResponse{
-		Version: req.Version,
-	}
-
-	includeSynonyms := req.Version > 0
-	includeSource := req.Version > 0
-
-	for _, r := range req.Resources {
-		var configEntries []*ConfigEntry
-		switch r.Type {
-		case BrokerResource:
-			configEntries = append(configEntries,
-				&ConfigEntry{
-					Name:     "min.insync.replicas",
-					Value:    "2",
-					ReadOnly: false,
-					Default:  false,
-				},
-			)
-			res.Resources = append(res.Resources, &ResourceResponse{
-				Name:    r.Name,
-				Configs: configEntries,
-			})
-		case BrokerLoggerResource:
-			configEntries = append(configEntries,
-				&ConfigEntry{
-					Name:     "kafka.controller.KafkaController",
-					Value:    "DEBUG",
-					ReadOnly: false,
-					Default:  false,
-				},
-			)
-			res.Resources = append(res.Resources, &ResourceResponse{
-				Name:    r.Name,
-				Configs: configEntries,
-			})
-		case TopicResource:
-			maxMessageBytes := &ConfigEntry{
-				Name:      "max.message.bytes",
-				Value:     "1000000",
-				ReadOnly:  false,
-				Default:   !includeSource,
-				Sensitive: false,
-			}
-			if includeSource {
-				maxMessageBytes.Source = SourceDefault
-			}
-			if includeSynonyms {
-				maxMessageBytes.Synonyms = []*ConfigSynonym{
-					{
-						ConfigName:  "max.message.bytes",
-						ConfigValue: "500000",
-					},
-				}
-			}
-			retentionMs := &ConfigEntry{
-				Name:      "retention.ms",
-				Value:     "5000",
-				ReadOnly:  false,
-				Default:   false,
-				Sensitive: false,
-			}
-			if includeSynonyms {
-				retentionMs.Synonyms = []*ConfigSynonym{
-					{
-						ConfigName:  "log.retention.ms",
-						ConfigValue: "2500",
-					},
-				}
-			}
-			password := &ConfigEntry{
-				Name:      "password",
-				Value:     "12345",
-				ReadOnly:  false,
-				Default:   false,
-				Sensitive: true,
-			}
-			configEntries = append(
-				configEntries, maxMessageBytes, retentionMs, password)
-			res.Resources = append(res.Resources, &ResourceResponse{
-				Name:    r.Name,
-				Configs: configEntries,
-			})
-		}
-	}
-	return res
-}
-
-type MockDescribeConfigsResponseWithErrorCode struct {
-	t TestReporter
-}
-
-func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
-	return &MockDescribeConfigsResponseWithErrorCode{t: t}
-}
-
-func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DescribeConfigsRequest)
-	res := &DescribeConfigsResponse{
-		Version: req.Version,
-	}
-
-	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &ResourceResponse{
-			Name:      r.Name,
-			Type:      r.Type,
-			ErrorCode: 83,
-			ErrorMsg:  "",
-		})
-	}
-	return res
-}
-
-type MockAlterConfigsResponse struct {
-	t TestReporter
-}
-
-func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
-	return &MockAlterConfigsResponse{t: t}
-}
-
-func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*AlterConfigsRequest)
-	res := &AlterConfigsResponse{}
-
-	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
-			Name:     r.Name,
-			Type:     r.Type,
-			ErrorMsg: "",
-		})
-	}
-	return res
-}
-
-type MockAlterConfigsResponseWithErrorCode struct {
-	t TestReporter
-}
-
-func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
-	return &MockAlterConfigsResponseWithErrorCode{t: t}
-}
-
-func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*AlterConfigsRequest)
-	res := &AlterConfigsResponse{}
-
-	for _, r := range req.Resources {
-		res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
-			Name:      r.Name,
-			Type:      r.Type,
-			ErrorCode: 83,
-			ErrorMsg:  "",
-		})
-	}
-	return res
-}
-
-type MockCreateAclsResponse struct {
-	t TestReporter
-}
-
-func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
-	return &MockCreateAclsResponse{t: t}
-}
-
-func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*CreateAclsRequest)
-	res := &CreateAclsResponse{}
-
-	for range req.AclCreations {
-		res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
-	}
-	return res
-}
-
-type MockListAclsResponse struct {
-	t TestReporter
-}
-
-func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
-	return &MockListAclsResponse{t: t}
-}
-
-func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DescribeAclsRequest)
-	res := &DescribeAclsResponse{}
-	res.Err = ErrNoError
-	acl := &ResourceAcls{}
-	if req.ResourceName != nil {
-		acl.Resource.ResourceName = *req.ResourceName
-	}
-	acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
-	acl.Resource.ResourceType = req.ResourceType
-
-	host := "*"
-	if req.Host != nil {
-		host = *req.Host
-	}
-
-	principal := "User:test"
-	if req.Principal != nil {
-		principal = *req.Principal
-	}
-
-	permissionType := req.PermissionType
-	if permissionType == AclPermissionAny {
-		permissionType = AclPermissionAllow
-	}
-
-	acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
-	res.ResourceAcls = append(res.ResourceAcls, acl)
-	res.Version = int16(req.Version)
-	return res
-}
-
-type MockSaslAuthenticateResponse struct {
-	t             TestReporter
-	kerror        KError
-	saslAuthBytes []byte
-}
-
-func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
-	return &MockSaslAuthenticateResponse{t: t}
-}
-
-func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	res := &SaslAuthenticateResponse{}
-	res.Err = msar.kerror
-	res.SaslAuthBytes = msar.saslAuthBytes
-	return res
-}
-
-func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
-	msar.kerror = kerror
-	return msar
-}
-
-func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
-	msar.saslAuthBytes = saslAuthBytes
-	return msar
-}
-
-type MockDeleteAclsResponse struct {
-	t TestReporter
-}
-
-type MockSaslHandshakeResponse struct {
-	enabledMechanisms []string
-	kerror            KError
-	t                 TestReporter
-}
-
-func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
-	return &MockSaslHandshakeResponse{t: t}
-}
-
-func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	res := &SaslHandshakeResponse{}
-	res.Err = mshr.kerror
-	res.EnabledMechanisms = mshr.enabledMechanisms
-	return res
-}
-
-func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
-	mshr.kerror = kerror
-	return mshr
-}
-
-func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
-	mshr.enabledMechanisms = enabledMechanisms
-	return mshr
-}
-
-func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
-	return &MockDeleteAclsResponse{t: t}
-}
-
-func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*DeleteAclsRequest)
-	res := &DeleteAclsResponse{}
-
-	for range req.Filters {
-		response := &FilterResponse{Err: ErrNoError}
-		response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
-		res.FilterResponses = append(res.FilterResponses, response)
-	}
-	res.Version = int16(req.Version)
-	return res
-}
-
-type MockDeleteGroupsResponse struct {
-	deletedGroups []string
-}
-
-func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
-	return &MockDeleteGroupsResponse{}
-}
-
-func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
-	m.deletedGroups = groups
-	return m
-}
-
-func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &DeleteGroupsResponse{
-		GroupErrorCodes: map[string]KError{},
-	}
-	for _, group := range m.deletedGroups {
-		resp.GroupErrorCodes[group] = ErrNoError
-	}
-	return resp
-}
-
-type MockJoinGroupResponse struct {
-	t TestReporter
-
-	ThrottleTime  int32
-	Err           KError
-	GenerationId  int32
-	GroupProtocol string
-	LeaderId      string
-	MemberId      string
-	Members       map[string][]byte
-}
-
-func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
-	return &MockJoinGroupResponse{
-		t:       t,
-		Members: make(map[string][]byte),
-	}
-}
-
-func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	req := reqBody.(*JoinGroupRequest)
-	resp := &JoinGroupResponse{
-		Version:       req.Version,
-		ThrottleTime:  m.ThrottleTime,
-		Err:           m.Err,
-		GenerationId:  m.GenerationId,
-		GroupProtocol: m.GroupProtocol,
-		LeaderId:      m.LeaderId,
-		MemberId:      m.MemberId,
-		Members:       m.Members,
-	}
-	return resp
-}
-
-func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
-	m.ThrottleTime = t
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
-	m.Err = kerr
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
-	m.GenerationId = id
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
-	m.GroupProtocol = proto
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
-	m.LeaderId = id
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
-	m.MemberId = id
-	return m
-}
-
-func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
-	bin, err := encode(meta, nil)
-	if err != nil {
-		panic(fmt.Sprintf("error encoding member metadata: %v", err))
-	}
-	m.Members[id] = bin
-	return m
-}
-
-type MockLeaveGroupResponse struct {
-	t TestReporter
-
-	Err KError
-}
-
-func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
-	return &MockLeaveGroupResponse{t: t}
-}
-
-func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &LeaveGroupResponse{
-		Err: m.Err,
-	}
-	return resp
-}
-
-func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
-	m.Err = kerr
-	return m
-}
-
-type MockSyncGroupResponse struct {
-	t TestReporter
-
-	Err              KError
-	MemberAssignment []byte
-}
-
-func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
-	return &MockSyncGroupResponse{t: t}
-}
-
-func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &SyncGroupResponse{
-		Err:              m.Err,
-		MemberAssignment: m.MemberAssignment,
-	}
-	return resp
-}
-
-func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
-	m.Err = kerr
-	return m
-}
-
-func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
-	bin, err := encode(assignment, nil)
-	if err != nil {
-		panic(fmt.Sprintf("error encoding member assignment: %v", err))
-	}
-	m.MemberAssignment = bin
-	return m
-}
-
-type MockHeartbeatResponse struct {
-	t TestReporter
-
-	Err KError
-}
-
-func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
-	return &MockHeartbeatResponse{t: t}
-}
-
-func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &HeartbeatResponse{Err: m.Err} // propagate any error set via SetError
-	return resp
-}
-
-func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
-	m.Err = kerr
-	return m
-}
-
-type MockDescribeLogDirsResponse struct {
-	t       TestReporter
-	logDirs []DescribeLogDirsResponseDirMetadata
-}
-
-func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
-	return &MockDescribeLogDirsResponse{t: t}
-}
-
-func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
-	var topics []DescribeLogDirsResponseTopic
-	for topic := range topicPartitions {
-		var partitions []DescribeLogDirsResponsePartition
-		for i := 0; i < topicPartitions[topic]; i++ {
-			partitions = append(partitions, DescribeLogDirsResponsePartition{
-				PartitionID: int32(i),
-				IsTemporary: false,
-				OffsetLag:   int64(0),
-				Size:        int64(1234),
-			})
-		}
-		topics = append(topics, DescribeLogDirsResponseTopic{
-			Topic:      topic,
-			Partitions: partitions,
-		})
-	}
-	logDir := DescribeLogDirsResponseDirMetadata{
-		ErrorCode: ErrNoError,
-		Path:      logDirPath,
-		Topics:    topics,
-	}
-	m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
-	return m
-}
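
Note that `SetLogDirs` replaces the whole slice with a single directory entry, so the last call wins. A minimal usage sketch; the path and partition counts are illustrative:

```go
// One log dir reporting three partitions of topic-a and one of topic-b.
res := sarama.NewMockDescribeLogDirsResponse(t).
	SetLogDirs("/var/lib/kafka/data", map[string]int{"topic-a": 3, "topic-b": 1})
```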
-
-func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
-	resp := &DescribeLogDirsResponse{
-		LogDirs: m.logDirs,
-	}
-	return resp
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
deleted file mode 100644
index 9931cad..0000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package sarama
-
-import "errors"
-
-// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
-// tells the broker to set the timestamp to the time at which the request was received.
-// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
-const ReceiveTime int64 = -1
-
-// GroupGenerationUndefined is a special value for the group generation field of
-// Offset Commit Requests that should be used when a consumer group does not rely
-// on Kafka for partition management.
-const GroupGenerationUndefined = -1
-
-type offsetCommitRequestBlock struct {
-	offset    int64
-	timestamp int64
-	metadata  string
-}
-
-func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(b.offset)
-	if version == 1 {
-		pe.putInt64(b.timestamp)
-	} else if b.timestamp != 0 {
-		Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
-	}
-
-	return pe.putString(b.metadata)
-}
-
-func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	if b.offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if version == 1 {
-		if b.timestamp, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-	b.metadata, err = pd.getString()
-	return err
-}
-
-type OffsetCommitRequest struct {
-	ConsumerGroup           string
-	ConsumerGroupGeneration int32  // v1 or later
-	ConsumerID              string // v1 or later
-	RetentionTime           int64  // v2 or later
-
-	// Version can be:
-	// - 0 (kafka 0.8.1 and later)
-	// - 1 (kafka 0.8.2 and later)
-	// - 2 (kafka 0.9.0 and later)
-	// - 3 (kafka 0.11.0 and later)
-	// - 4 (kafka 2.0.0 and later)
-	Version int16
-	blocks  map[string]map[int32]*offsetCommitRequestBlock
-}
-
-func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
-	if r.Version < 0 || r.Version > 4 {
-		return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
-	}
-
-	if err := pe.putString(r.ConsumerGroup); err != nil {
-		return err
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(r.ConsumerGroupGeneration)
-		if err := pe.putString(r.ConsumerID); err != nil {
-			return err
-		}
-	} else {
-		if r.ConsumerGroupGeneration != 0 {
-			Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
-		}
-		if r.ConsumerID != "" {
-			Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
-		}
-	}
-
-	if r.Version >= 2 {
-		pe.putInt64(r.RetentionTime)
-	} else if r.RetentionTime != 0 {
-		Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
-	}
-
-	if err := pe.putArrayLength(len(r.blocks)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if r.ConsumerGroup, err = pd.getString(); err != nil {
-		return err
-	}
-
-	if r.Version >= 1 {
-		if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
-			return err
-		}
-		if r.ConsumerID, err = pd.getString(); err != nil {
-			return err
-		}
-	}
-
-	if r.Version >= 2 {
-		if r.RetentionTime, err = pd.getInt64(); err != nil {
-			return err
-		}
-	}
-
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			block := &offsetCommitRequestBlock{}
-			if err := block.decode(pd, r.Version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = block
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitRequest) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetCommitRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_9_0_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
-	}
-
-	r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
-}
-
-func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
-	partitions := r.blocks[topic]
-	if partitions == nil {
-		return 0, "", errors.New("no such offset")
-	}
-	block := partitions[partitionID]
-	if block == nil {
-		return 0, "", errors.New("no such offset")
-	}
-	return block.offset, block.metadata, nil
-}
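
The `AddBlock`/`Offset` pair gives a round-trip view of the request's internal block map. A minimal sketch using the two special constants defined above; the group and topic names are illustrative:

```go
// Build a v1 commit whose timestamp the broker assigns on receipt.
req := &sarama.OffsetCommitRequest{
	Version:                 1,
	ConsumerGroup:           "example-group",
	ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
}
req.AddBlock("example-topic", 0, 42, sarama.ReceiveTime, "meta")

if offset, metadata, err := req.Offset("example-topic", 0); err == nil {
	fmt.Println(offset, metadata) // 42 meta
}
```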
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
deleted file mode 100644
index 342260e..0000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-type OffsetCommitResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Errors         map[string]map[int32]KError
-}
-
-func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
-	if r.Errors == nil {
-		r.Errors = make(map[string]map[int32]KError)
-	}
-	partitions := r.Errors[topic]
-	if partitions == nil {
-		partitions = make(map[int32]KError)
-		r.Errors[topic] = partitions
-	}
-	partitions[partition] = kerror
-}
-
-func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
-	if r.Version >= 3 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-	if err := pe.putArrayLength(len(r.Errors)); err != nil {
-		return err
-	}
-	for topic, partitions := range r.Errors {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, kerror := range partitions {
-			pe.putInt32(partition)
-			pe.putInt16(int16(kerror))
-		}
-	}
-	return nil
-}
-
-func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	if version >= 3 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil || numTopics == 0 {
-		return err
-	}
-
-	r.Errors = make(map[string]map[int32]KError, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numErrors, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Errors[name] = make(map[int32]KError, numErrors)
-
-		for j := 0; j < numErrors; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			tmp, err := pd.getInt16()
-			if err != nil {
-				return err
-			}
-			r.Errors[name][id] = KError(tmp)
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetCommitResponse) key() int16 {
-	return 8
-}
-
-func (r *OffsetCommitResponse) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetCommitResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_9_0_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	default:
-		return MinVersion
-	}
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
deleted file mode 100644
index 7e147eb..0000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sarama
-
-type OffsetFetchRequest struct {
-	Version       int16
-	ConsumerGroup string
-	RequireStable bool // requires v7+
-	partitions    map[string][]int32
-}
-
-func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
-	if r.Version < 0 || r.Version > 7 {
-		return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
-	}
-
-	isFlexible := r.Version >= 6
-
-	if isFlexible {
-		err = pe.putCompactString(r.ConsumerGroup)
-	} else {
-		err = pe.putString(r.ConsumerGroup)
-	}
-	if err != nil {
-		return err
-	}
-
-	if isFlexible {
-		if r.partitions == nil {
-			pe.putUVarint(0)
-		} else {
-			pe.putCompactArrayLength(len(r.partitions))
-		}
-	} else {
-		if r.partitions == nil && r.Version >= 2 {
-			pe.putInt32(-1)
-		} else {
-			if err = pe.putArrayLength(len(r.partitions)); err != nil {
-				return err
-			}
-		}
-	}
-
-	for topic, partitions := range r.partitions {
-		if isFlexible {
-			err = pe.putCompactString(topic)
-		} else {
-			err = pe.putString(topic)
-		}
-		if err != nil {
-			return err
-		}
-
-
-			err = pe.putCompactInt32Array(partitions)
-		} else {
-			err = pe.putInt32Array(partitions)
-		}
-		if err != nil {
-			return err
-		}
-
-		if isFlexible {
-			pe.putEmptyTaggedFieldArray()
-		}
-	}
-
-	if r.RequireStable && r.Version < 7 {
-		return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
-	}
-
-	if r.Version >= 7 {
-		pe.putBool(r.RequireStable)
-	}
-
-	if isFlexible {
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	isFlexible := r.Version >= 6
-	if isFlexible {
-		r.ConsumerGroup, err = pd.getCompactString()
-	} else {
-		r.ConsumerGroup, err = pd.getString()
-	}
-	if err != nil {
-		return err
-	}
-
-	var partitionCount int
-
-	if isFlexible {
-		partitionCount, err = pd.getCompactArrayLength()
-	} else {
-		partitionCount, err = pd.getArrayLength()
-	}
-	if err != nil {
-		return err
-	}
-
-	if (partitionCount == 0 && version < 2) || partitionCount < 0 {
-		return nil
-	}
-
-	r.partitions = make(map[string][]int32, partitionCount)
-	for i := 0; i < partitionCount; i++ {
-		var topic string
-		if isFlexible {
-			topic, err = pd.getCompactString()
-		} else {
-			topic, err = pd.getString()
-		}
-		if err != nil {
-			return err
-		}
-
-		var partitions []int32
-		if isFlexible {
-			partitions, err = pd.getCompactInt32Array()
-		} else {
-			partitions, err = pd.getInt32Array()
-		}
-		if err != nil {
-			return err
-		}
-		if isFlexible {
-			_, err = pd.getEmptyTaggedFieldArray()
-			if err != nil {
-				return err
-			}
-		}
-
-		r.partitions[topic] = partitions
-	}
-
-	if r.Version >= 7 {
-		r.RequireStable, err = pd.getBool()
-		if err != nil {
-			return err
-		}
-	}
-
-	if isFlexible {
-		_, err = pd.getEmptyTaggedFieldArray()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchRequest) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetFetchRequest) headerVersion() int16 {
-	if r.Version >= 6 {
-		return 2
-	}
-
-	return 1
-}
-
-func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_10_2_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	case 5:
-		return V2_1_0_0
-	case 6:
-		return V2_4_0_0
-	case 7:
-		return V2_5_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetFetchRequest) ZeroPartitions() {
-	if r.partitions == nil && r.Version >= 2 {
-		r.partitions = make(map[string][]int32)
-	}
-}
-
-func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
-	if r.partitions == nil {
-		r.partitions = make(map[string][]int32)
-	}
-
-	r.partitions[topic] = append(r.partitions[topic], partitionID)
-}
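
The nil-vs-empty distinction in `encode` above matters on v2+: a nil partition map is sent as -1 ("all partitions"), while `ZeroPartitions` forces an explicit empty array ("no partitions"). A minimal sketch; the group and topic names are illustrative:

```go
// Fetch the committed offset of one partition...
one := &sarama.OffsetFetchRequest{Version: 2, ConsumerGroup: "example-group"}
one.AddPartition("example-topic", 0)

// ...or leave partitions nil to ask for every partition of the group.
all := &sarama.OffsetFetchRequest{Version: 2, ConsumerGroup: "example-group"}
_ = all
```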
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
deleted file mode 100644
index 1944922..0000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package sarama
-
-type OffsetFetchResponseBlock struct {
-	Offset      int64
-	LeaderEpoch int32
-	Metadata    string
-	Err         KError
-}
-
-func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	isFlexible := version >= 6
-
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	if version >= 5 {
-		b.LeaderEpoch, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	if isFlexible {
-		b.Metadata, err = pd.getCompactString()
-	} else {
-		b.Metadata, err = pd.getString()
-	}
-	if err != nil {
-		return err
-	}
-
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	if isFlexible {
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	isFlexible := version >= 6
-	pe.putInt64(b.Offset)
-
-	if version >= 5 {
-		pe.putInt32(b.LeaderEpoch)
-	}
-	if isFlexible {
-		err = pe.putCompactString(b.Metadata)
-	} else {
-		err = pe.putString(b.Metadata)
-	}
-	if err != nil {
-		return err
-	}
-
-	pe.putInt16(int16(b.Err))
-
-	if isFlexible {
-		pe.putEmptyTaggedFieldArray()
-	}
-
-	return nil
-}
-
-type OffsetFetchResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Blocks         map[string]map[int32]*OffsetFetchResponseBlock
-	Err            KError
-}
-
-func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
-	isFlexible := r.Version >= 6
-
-	if r.Version >= 3 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-	if isFlexible {
-		pe.putCompactArrayLength(len(r.Blocks))
-	} else {
-		err = pe.putArrayLength(len(r.Blocks))
-	}
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		if isFlexible {
-			err = pe.putCompactString(topic)
-		} else {
-			err = pe.putString(topic)
-		}
-		if err != nil {
-			return err
-		}
-
-		if isFlexible {
-			pe.putCompactArrayLength(len(partitions))
-		} else {
-			err = pe.putArrayLength(len(partitions))
-		}
-		if err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err := block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-		if isFlexible {
-			pe.putEmptyTaggedFieldArray()
-		}
-	}
-	if r.Version >= 2 {
-		pe.putInt16(int16(r.Err))
-	}
-	if isFlexible {
-		pe.putEmptyTaggedFieldArray()
-	}
-	return nil
-}
-
-func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-	isFlexible := version >= 6
-
-	if version >= 3 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	var numTopics int
-	if isFlexible {
-		numTopics, err = pd.getCompactArrayLength()
-	} else {
-		numTopics, err = pd.getArrayLength()
-	}
-	if err != nil {
-		return err
-	}
-
-	if numTopics > 0 {
-		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
-		for i := 0; i < numTopics; i++ {
-			var name string
-			if isFlexible {
-				name, err = pd.getCompactString()
-			} else {
-				name, err = pd.getString()
-			}
-			if err != nil {
-				return err
-			}
-
-			var numBlocks int
-			if isFlexible {
-				numBlocks, err = pd.getCompactArrayLength()
-			} else {
-				numBlocks, err = pd.getArrayLength()
-			}
-			if err != nil {
-				return err
-			}
-
-			r.Blocks[name] = nil
-			if numBlocks > 0 {
-				r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-			}
-			for j := 0; j < numBlocks; j++ {
-				id, err := pd.getInt32()
-				if err != nil {
-					return err
-				}
-
-				block := new(OffsetFetchResponseBlock)
-				err = block.decode(pd, version)
-				if err != nil {
-					return err
-				}
-
-				r.Blocks[name][id] = block
-			}
-
-			if isFlexible {
-				if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-					return err
-				}
-			}
-		}
-	}
-
-	if version >= 2 {
-		kerr, err := pd.getInt16()
-		if err != nil {
-			return err
-		}
-		r.Err = KError(kerr)
-	}
-
-	if isFlexible {
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetFetchResponse) key() int16 {
-	return 9
-}
-
-func (r *OffsetFetchResponse) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetFetchResponse) headerVersion() int16 {
-	if r.Version >= 6 {
-		return 1
-	}
-
-	return 0
-}
-
-func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_8_2_0
-	case 2:
-		return V0_10_2_0
-	case 3:
-		return V0_11_0_0
-	case 4:
-		return V2_0_0_0
-	case 5:
-		return V2_1_0_0
-	case 6:
-		return V2_4_0_0
-	case 7:
-		return V2_5_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
-	}
-	partitions := r.Blocks[topic]
-	if partitions == nil {
-		partitions = make(map[int32]*OffsetFetchResponseBlock)
-		r.Blocks[topic] = partitions
-	}
-	partitions[partition] = block
-}
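
`AddBlock` and `GetBlock` mirror each other, which makes hand-built responses easy to assert against in tests. A minimal sketch with illustrative names:

```go
// Construct a response by hand and read it back.
resp := new(sarama.OffsetFetchResponse)
resp.AddBlock("example-topic", 0, &sarama.OffsetFetchResponseBlock{Offset: 42, Metadata: "meta"})

if b := resp.GetBlock("example-topic", 0); b != nil && b.Err == sarama.ErrNoError {
	fmt.Println(b.Offset, b.Metadata) // 42 meta
}
```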
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
deleted file mode 100644
index 4f480a0..0000000
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ /dev/null
@@ -1,593 +0,0 @@
-package sarama
-
-import (
-	"sync"
-	"time"
-)
-
-// Offset Manager
-
-// OffsetManager uses Kafka to store and fetch consumed partition offsets.
-type OffsetManager interface {
-	// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
-	// It will return an error if this OffsetManager is already managing the given
-	// topic/partition.
-	ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
-
-	// Close stops the OffsetManager from managing offsets. It is required to call
-	// this function before an OffsetManager object passes out of scope, as it
-	// will otherwise leak memory. You must call this after all the
-	// PartitionOffsetManagers are closed.
-	Close() error
-
-	// Commit commits the offsets. This method can be used if AutoCommit.Enable is
-	// set to false.
-	Commit()
-}
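
The interface documentation implies a strict shutdown order: partition offset managers first, then the offset manager, then the client. A minimal sketch, assuming a reachable broker at an illustrative address and the standard `log` import:

```go
client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
if err != nil {
	log.Fatal(err)
}
defer client.Close()

om, err := sarama.NewOffsetManagerFromClient("example-group", client)
if err != nil {
	log.Fatal(err)
}

pom, err := om.ManagePartition("example-topic", 0)
if err != nil {
	log.Fatal(err)
}

pom.MarkOffset(43, "") // offset of the next message to read
pom.Close()            // close every POM first...
om.Close()             // ...then the manager; the client closes via defer
```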
-
-type offsetManager struct {
-	client Client
-	conf   *Config
-	group  string
-	ticker *time.Ticker
-
-	memberID   string
-	generation int32
-
-	broker     *Broker
-	brokerLock sync.RWMutex
-
-	poms     map[string]map[int32]*partitionOffsetManager
-	pomsLock sync.RWMutex
-
-	closeOnce sync.Once
-	closing   chan none
-	closed    chan none
-}
-
-// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
-// It is still necessary to call Close() on the underlying client when finished with the offset manager.
-func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
-	return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
-}
-
-func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
-	// Check that we are not dealing with a closed Client before processing any other arguments
-	if client.Closed() {
-		return nil, ErrClosedClient
-	}
-
-	conf := client.Config()
-	om := &offsetManager{
-		client: client,
-		conf:   conf,
-		group:  group,
-		poms:   make(map[string]map[int32]*partitionOffsetManager),
-
-		memberID:   memberID,
-		generation: generation,
-
-		closing: make(chan none),
-		closed:  make(chan none),
-	}
-	if conf.Consumer.Offsets.AutoCommit.Enable {
-		om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
-		go withRecover(om.mainLoop)
-	}
-
-	return om, nil
-}
-
-func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
-	pom, err := om.newPartitionOffsetManager(topic, partition)
-	if err != nil {
-		return nil, err
-	}
-
-	om.pomsLock.Lock()
-	defer om.pomsLock.Unlock()
-
-	topicManagers := om.poms[topic]
-	if topicManagers == nil {
-		topicManagers = make(map[int32]*partitionOffsetManager)
-		om.poms[topic] = topicManagers
-	}
-
-	if topicManagers[partition] != nil {
-		return nil, ConfigurationError("That topic/partition is already being managed")
-	}
-
-	topicManagers[partition] = pom
-	return pom, nil
-}
-
-func (om *offsetManager) Close() error {
-	om.closeOnce.Do(func() {
-		// exit the mainLoop
-		close(om.closing)
-		if om.conf.Consumer.Offsets.AutoCommit.Enable {
-			<-om.closed
-		}
-
-		// mark all POMs as closed
-		om.asyncClosePOMs()
-
-		// flush one last time
-		if om.conf.Consumer.Offsets.AutoCommit.Enable {
-			for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
-				om.flushToBroker()
-				if om.releasePOMs(false) == 0 {
-					break
-				}
-			}
-		}
-
-		om.releasePOMs(true)
-		om.brokerLock.Lock()
-		om.broker = nil
-		om.brokerLock.Unlock()
-	})
-	return nil
-}
-
-func (om *offsetManager) computeBackoff(retries int) time.Duration {
-	if om.conf.Metadata.Retry.BackoffFunc != nil {
-		return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
-	} else {
-		return om.conf.Metadata.Retry.Backoff
-	}
-}
-
-func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
-	broker, err := om.coordinator()
-	if err != nil {
-		if retries <= 0 {
-			return 0, "", err
-		}
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	}
-
-	req := new(OffsetFetchRequest)
-	req.Version = 1
-	req.ConsumerGroup = om.group
-	req.AddPartition(topic, partition)
-
-	resp, err := broker.FetchOffset(req)
-	if err != nil {
-		if retries <= 0 {
-			return 0, "", err
-		}
-		om.releaseCoordinator(broker)
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	}
-
-	block := resp.GetBlock(topic, partition)
-	if block == nil {
-		return 0, "", ErrIncompleteResponse
-	}
-
-	switch block.Err {
-	case ErrNoError:
-		return block.Offset, block.Metadata, nil
-	case ErrNotCoordinatorForConsumer:
-		if retries <= 0 {
-			return 0, "", block.Err
-		}
-		om.releaseCoordinator(broker)
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	case ErrOffsetsLoadInProgress:
-		if retries <= 0 {
-			return 0, "", block.Err
-		}
-		backoff := om.computeBackoff(retries)
-		select {
-		case <-om.closing:
-			return 0, "", block.Err
-		case <-time.After(backoff):
-		}
-		return om.fetchInitialOffset(topic, partition, retries-1)
-	default:
-		return 0, "", block.Err
-	}
-}
-
-func (om *offsetManager) coordinator() (*Broker, error) {
-	om.brokerLock.RLock()
-	broker := om.broker
-	om.brokerLock.RUnlock()
-
-	if broker != nil {
-		return broker, nil
-	}
-
-	om.brokerLock.Lock()
-	defer om.brokerLock.Unlock()
-
-	if broker := om.broker; broker != nil {
-		return broker, nil
-	}
-
-	if err := om.client.RefreshCoordinator(om.group); err != nil {
-		return nil, err
-	}
-
-	broker, err := om.client.Coordinator(om.group)
-	if err != nil {
-		return nil, err
-	}
-
-	om.broker = broker
-	return broker, nil
-}
-
-func (om *offsetManager) releaseCoordinator(b *Broker) {
-	om.brokerLock.Lock()
-	if om.broker == b {
-		om.broker = nil
-	}
-	om.brokerLock.Unlock()
-}
-
-func (om *offsetManager) mainLoop() {
-	defer om.ticker.Stop()
-	defer close(om.closed)
-
-	for {
-		select {
-		case <-om.ticker.C:
-			om.Commit()
-		case <-om.closing:
-			return
-		}
-	}
-}
-
-func (om *offsetManager) Commit() {
-	om.flushToBroker()
-	om.releasePOMs(false)
-}
-
-func (om *offsetManager) flushToBroker() {
-	req := om.constructRequest()
-	if req == nil {
-		return
-	}
-
-	broker, err := om.coordinator()
-	if err != nil {
-		om.handleError(err)
-		return
-	}
-
-	resp, err := broker.CommitOffset(req)
-	if err != nil {
-		om.handleError(err)
-		om.releaseCoordinator(broker)
-		_ = broker.Close()
-		return
-	}
-
-	om.handleResponse(broker, req, resp)
-}
-
-func (om *offsetManager) constructRequest() *OffsetCommitRequest {
-	var r *OffsetCommitRequest
-	var perPartitionTimestamp int64
-	if om.conf.Consumer.Offsets.Retention == 0 {
-		perPartitionTimestamp = ReceiveTime
-		r = &OffsetCommitRequest{
-			Version:                 1,
-			ConsumerGroup:           om.group,
-			ConsumerID:              om.memberID,
-			ConsumerGroupGeneration: om.generation,
-		}
-	} else {
-		r = &OffsetCommitRequest{
-			Version:                 2,
-			RetentionTime:           int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
-			ConsumerGroup:           om.group,
-			ConsumerID:              om.memberID,
-			ConsumerGroupGeneration: om.generation,
-		}
-	}
-
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			pom.lock.Lock()
-			if pom.dirty {
-				r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
-			}
-			pom.lock.Unlock()
-		}
-	}
-
-	if len(r.blocks) > 0 {
-		return r
-	}
-
-	return nil
-}
-
-func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
-				continue
-			}
-
-			var err KError
-			var ok bool
-
-			if resp.Errors[pom.topic] == nil {
-				pom.handleError(ErrIncompleteResponse)
-				continue
-			}
-			if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
-				pom.handleError(ErrIncompleteResponse)
-				continue
-			}
-
-			switch err {
-			case ErrNoError:
-				block := req.blocks[pom.topic][pom.partition]
-				pom.updateCommitted(block.offset, block.metadata)
-			case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
-				ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
-				// not a critical error, we just need to redispatch
-				om.releaseCoordinator(broker)
-			case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
-				// nothing we can do about this, just tell the user and carry on
-				pom.handleError(err)
-			case ErrOffsetsLoadInProgress:
-				// nothing wrong but we didn't commit, we'll get it next time round
-			case ErrUnknownTopicOrPartition:
-				// let the user know *and* try redispatching - if topic-auto-create is
-				// enabled, redispatching should trigger a metadata req and create the
-			// topic; if not then redispatching won't help, but we've let the user
-				// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
-				fallthrough
-			default:
-				// dunno, tell the user and try redispatching
-				pom.handleError(err)
-				om.releaseCoordinator(broker)
-			}
-		}
-	}
-}
-
-func (om *offsetManager) handleError(err error) {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			pom.handleError(err)
-		}
-	}
-}
-
-func (om *offsetManager) asyncClosePOMs() {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	for _, topicManagers := range om.poms {
-		for _, pom := range topicManagers {
-			pom.AsyncClose()
-		}
-	}
-}
-
-// Releases/removes closed POMs once they are clean (or when forced)
-func (om *offsetManager) releasePOMs(force bool) (remaining int) {
-	om.pomsLock.Lock()
-	defer om.pomsLock.Unlock()
-
-	for topic, topicManagers := range om.poms {
-		for partition, pom := range topicManagers {
-			pom.lock.Lock()
-			releaseDue := pom.done && (force || !pom.dirty)
-			pom.lock.Unlock()
-
-			if releaseDue {
-				pom.release()
-
-				delete(om.poms[topic], partition)
-				if len(om.poms[topic]) == 0 {
-					delete(om.poms, topic)
-				}
-			}
-		}
-		remaining += len(om.poms[topic])
-	}
-	return
-}
-
-func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
-	om.pomsLock.RLock()
-	defer om.pomsLock.RUnlock()
-
-	if partitions, ok := om.poms[topic]; ok {
-		if pom, ok := partitions[partition]; ok {
-			return pom
-		}
-	}
-	return nil
-}
-
-// Partition Offset Manager
-
-// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
-// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
-// out of scope.
-type PartitionOffsetManager interface {
-	// NextOffset returns the next offset that should be consumed for the managed
-	// partition, accompanied by metadata which can be used to reconstruct the state
-	// of the partition consumer when it resumes. NextOffset() will return
-	// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
-	// was committed for this partition yet.
-	NextOffset() (int64, string)
-
-	// MarkOffset marks the provided offset, alongside a metadata string
-	// that represents the state of the partition consumer at that point in time. The
-	// metadata string can be used by another consumer to restore that state, so it
-	// can resume consumption.
-	//
-	// To follow upstream conventions, you are expected to mark the offset of the
-	// next message to read, not the last message read. Thus, when calling `MarkOffset`
-	// you should typically add one to the offset of the last consumed message.
-	//
-	// Note: calling MarkOffset does not necessarily commit the offset to the backend
-	// store immediately for efficiency reasons, and it may never be committed if
-	// your application crashes. This means that you may end up processing the same
-	// message twice, and your processing should ideally be idempotent.
-	MarkOffset(offset int64, metadata string)
-
-	// ResetOffset resets to the provided offset, alongside a metadata string that
-	// represents the state of the partition consumer at that point in time. Reset
-	// acts as a counterpart to MarkOffset, the difference being that it allows
-	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
-	// allows incrementing the offset. See MarkOffset for more details.
-	ResetOffset(offset int64, metadata string)
-
-	// Errors returns a read channel of errors that occur during offset management, if
-	// enabled. By default, errors are logged and not returned over this channel. If
-	// you want to implement any custom error handling, set your config's
-	// Consumer.Return.Errors setting to true, and read from this channel.
-	Errors() <-chan *ConsumerError
-
-	// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
-	// return immediately, after which you should wait until the 'errors' channel has
-	// been drained and closed. It is required to call this function, or Close, before
-	// a PartitionOffsetManager passes out of scope, as it will otherwise leak memory. You
-	// must call this before calling Close on the underlying client.
-	AsyncClose()
-
-	// Close stops the PartitionOffsetManager from managing offsets. It is required to
-	// call this function (or AsyncClose) before a PartitionOffsetManager object
-	// passes out of scope, as it will otherwise leak memory. You must call this
-	// before calling Close on the underlying client.
-	Close() error
-}
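
Following the MarkOffset convention above, a consume loop marks `msg.Offset+1`, the offset of the next message to read. A minimal sketch where `pc` is an assumed `sarama.PartitionConsumer` and `process` is application code:

```go
for msg := range pc.Messages() {
	process(msg)
	pom.MarkOffset(msg.Offset+1, "") // next offset, not the one just read
}
```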
-
-type partitionOffsetManager struct {
-	parent    *offsetManager
-	topic     string
-	partition int32
-
-	lock     sync.Mutex
-	offset   int64
-	metadata string
-	dirty    bool
-	done     bool
-
-	releaseOnce sync.Once
-	errors      chan *ConsumerError
-}
-
-func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
-	offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
-	if err != nil {
-		return nil, err
-	}
-
-	return &partitionOffsetManager{
-		parent:    om,
-		topic:     topic,
-		partition: partition,
-		errors:    make(chan *ConsumerError, om.conf.ChannelBufferSize),
-		offset:    offset,
-		metadata:  metadata,
-	}, nil
-}
-
-func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
-	return pom.errors
-}
-
-func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if offset > pom.offset {
-		pom.offset = offset
-		pom.metadata = metadata
-		pom.dirty = true
-	}
-}
-
-func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if offset <= pom.offset {
-		pom.offset = offset
-		pom.metadata = metadata
-		pom.dirty = true
-	}
-}
-
-func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if pom.offset == offset && pom.metadata == metadata {
-		pom.dirty = false
-	}
-}
-
-func (pom *partitionOffsetManager) NextOffset() (int64, string) {
-	pom.lock.Lock()
-	defer pom.lock.Unlock()
-
-	if pom.offset >= 0 {
-		return pom.offset, pom.metadata
-	}
-
-	return pom.parent.conf.Consumer.Offsets.Initial, ""
-}
-
-func (pom *partitionOffsetManager) AsyncClose() {
-	pom.lock.Lock()
-	pom.done = true
-	pom.lock.Unlock()
-}
-
-func (pom *partitionOffsetManager) Close() error {
-	pom.AsyncClose()
-
-	var errors ConsumerErrors
-	for err := range pom.errors {
-		errors = append(errors, err)
-	}
-
-	if len(errors) > 0 {
-		return errors
-	}
-	return nil
-}
-
-func (pom *partitionOffsetManager) handleError(err error) {
-	cErr := &ConsumerError{
-		Topic:     pom.topic,
-		Partition: pom.partition,
-		Err:       err,
-	}
-
-	if pom.parent.conf.Consumer.Return.Errors {
-		pom.errors <- cErr
-	} else {
-		Logger.Println(cErr)
-	}
-}
-
-func (pom *partitionOffsetManager) release() {
-	pom.releaseOnce.Do(func() {
-		close(pom.errors)
-	})
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
deleted file mode 100644
index 4c9ce4d..0000000
--- a/vendor/github.com/Shopify/sarama/offset_request.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package sarama
-
-type offsetRequestBlock struct {
-	time       int64
-	maxOffsets int32 // Only used in version 0
-}
-
-func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
-	pe.putInt64(b.time)
-	if version == 0 {
-		pe.putInt32(b.maxOffsets)
-	}
-
-	return nil
-}
-
-func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
-	if b.time, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if version == 0 {
-		if b.maxOffsets, err = pd.getInt32(); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-type OffsetRequest struct {
-	Version        int16
-	IsolationLevel IsolationLevel
-	replicaID      int32
-	isReplicaIDSet bool
-	blocks         map[string]map[int32]*offsetRequestBlock
-}
-
-func (r *OffsetRequest) encode(pe packetEncoder) error {
-	if r.isReplicaIDSet {
-		pe.putInt32(r.replicaID)
-	} else {
-		// default replica ID is always -1 for clients
-		pe.putInt32(-1)
-	}
-
-	if r.Version >= 2 {
-		pe.putBool(r.IsolationLevel == ReadCommitted)
-	}
-
-	err := pe.putArrayLength(len(r.blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err = block.encode(pe, r.Version); err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
-	r.Version = version
-
-	replicaID, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if replicaID >= 0 {
-		r.SetReplicaID(replicaID)
-	}
-
-	if r.Version >= 2 {
-		tmp, err := pd.getBool()
-		if err != nil {
-			return err
-		}
-
-		r.IsolationLevel = ReadUncommitted
-		if tmp {
-			r.IsolationLevel = ReadCommitted
-		}
-	}
-
-	blockCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if blockCount == 0 {
-		return nil
-	}
-	r.blocks = make(map[string]map[int32]*offsetRequestBlock)
-	for i := 0; i < blockCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			block := &offsetRequestBlock{}
-			if err := block.decode(pd, version); err != nil {
-				return err
-			}
-			r.blocks[topic][partition] = block
-		}
-	}
-	return nil
-}
-
-func (r *OffsetRequest) key() int16 {
-	return 2
-}
-
-func (r *OffsetRequest) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *OffsetRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_1_0
-	case 2:
-		return V0_11_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *OffsetRequest) SetReplicaID(id int32) {
-	r.replicaID = id
-	r.isReplicaIDSet = true
-}
-
-func (r *OffsetRequest) ReplicaID() int32 {
-	if r.isReplicaIDSet {
-		return r.replicaID
-	}
-	return -1
-}
-
-func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
-	if r.blocks == nil {
-		r.blocks = make(map[string]map[int32]*offsetRequestBlock)
-	}
-
-	if r.blocks[topic] == nil {
-		r.blocks[topic] = make(map[int32]*offsetRequestBlock)
-	}
-
-	tmp := new(offsetRequestBlock)
-	tmp.time = time
-	if r.Version == 0 {
-		tmp.maxOffsets = maxOffsets
-	}
-
-	r.blocks[topic][partitionID] = tmp
-}
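
A typical client use of this request asks for the newest (or oldest) offset of a partition; on v1+ the `maxOffsets` argument is ignored, as `AddBlock` shows. A minimal sketch with illustrative names:

```go
req := &sarama.OffsetRequest{Version: 1}
req.AddBlock("example-topic", 0, sarama.OffsetNewest, 1)
```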
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
deleted file mode 100644
index 69349ef..0000000
--- a/vendor/github.com/Shopify/sarama/offset_response.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package sarama
-
-type OffsetResponseBlock struct {
-	Err       KError
-	Offsets   []int64 // Version 0
-	Offset    int64   // Version 1
-	Timestamp int64   // Version 1
-}
-
-func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	if version == 0 {
-		b.Offsets, err = pd.getInt64Array()
-
-		return err
-	}
-
-	b.Timestamp, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	// For backwards compatibility put the offset in the offsets array too
-	b.Offsets = []int64{b.Offset}
-
-	return nil
-}
-
-func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(b.Err))
-
-	if version == 0 {
-		return pe.putInt64Array(b.Offsets)
-	}
-
-	pe.putInt64(b.Timestamp)
-	pe.putInt64(b.Offset)
-
-	return nil
-}
-
-type OffsetResponse struct {
-	Version        int16
-	ThrottleTimeMs int32
-	Blocks         map[string]map[int32]*OffsetResponseBlock
-}
-
-func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
-	if version >= 2 {
-		r.ThrottleTimeMs, err = pd.getInt32()
-		if err != nil {
-			return err
-		}
-	}
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(OffsetResponseBlock)
-			err = block.decode(pd, version)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
-	if r.Version >= 2 {
-		pe.putInt32(r.ThrottleTimeMs)
-	}
-
-	if err = pe.putArrayLength(len(r.Blocks)); err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.Blocks {
-		if err = pe.putString(topic); err != nil {
-			return err
-		}
-		if err = pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for partition, block := range partitions {
-			pe.putInt32(partition)
-			if err = block.encode(pe, r.version()); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (r *OffsetResponse) key() int16 {
-	return 2
-}
-
-func (r *OffsetResponse) version() int16 {
-	return r.Version
-}
-
-func (r *OffsetResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *OffsetResponse) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_10_1_0
-	case 2:
-		return V0_11_0_0
-	default:
-		return MinVersion
-	}
-}
-
-// testing API
-
-func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
-	}
-	byTopic, ok := r.Blocks[topic]
-	if !ok {
-		byTopic = make(map[int32]*OffsetResponseBlock)
-		r.Blocks[topic] = byTopic
-	}
-	byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
-}
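
`AddTopicPartition` fills both the v0 `Offsets` slice and the v1 `Offset` field, matching the backwards-compatibility behaviour in `decode` above. A minimal sketch with illustrative names:

```go
resp := new(sarama.OffsetResponse)
resp.AddTopicPartition("example-topic", 0, 42)

if b := resp.GetBlock("example-topic", 0); b != nil {
	fmt.Println(b.Offset, b.Offsets) // 42 [42]
}
```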
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
deleted file mode 100644
index 184bc26..0000000
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package sarama
-
-// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
-// Types implementing Decoder only need to worry about calling methods like GetString,
-// not about how a string is represented in Kafka.
-type packetDecoder interface {
-	// Primitives
-	getInt8() (int8, error)
-	getInt16() (int16, error)
-	getInt32() (int32, error)
-	getInt64() (int64, error)
-	getVarint() (int64, error)
-	getUVarint() (uint64, error)
-	getArrayLength() (int, error)
-	getCompactArrayLength() (int, error)
-	getBool() (bool, error)
-	getEmptyTaggedFieldArray() (int, error)
-
-	// Collections
-	getBytes() ([]byte, error)
-	getVarintBytes() ([]byte, error)
-	getCompactBytes() ([]byte, error)
-	getRawBytes(length int) ([]byte, error)
-	getString() (string, error)
-	getNullableString() (*string, error)
-	getCompactString() (string, error)
-	getCompactNullableString() (*string, error)
-	getCompactInt32Array() ([]int32, error)
-	getInt32Array() ([]int32, error)
-	getInt64Array() ([]int64, error)
-	getStringArray() ([]string, error)
-
-	// Subsets
-	remaining() int
-	getSubset(length int) (packetDecoder, error)
-	peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
-	peekInt8(offset int) (int8, error)              // similar to peek, but just one byte
-
-	// Stacks, see PushDecoder
-	push(in pushDecoder) error
-	pop() error
-}
-
-// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
-// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
-// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
-// depend upon have been decoded.
-type pushDecoder interface {
-	// Saves the offset into the input buffer as the location to actually read the calculated value when able.
-	saveOffset(in int)
-
-	// Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
-	reserveLength() int
-
-	// Indicates that all required data is now available to calculate and check the field.
-	// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
-	// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
-	check(curOffset int, buf []byte) error
-}
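
To make the save/reserve/check contract concrete, here is a hypothetical in-package sketch (not an actual type in this package, and assuming the `encoding/binary` import) that validates a 4-byte big-endian length prefix against the bytes decoded after it:

```go
type lengthCheck struct{ startOffset int }

// saveOffset records where the 4-byte length field lives in the buffer.
func (l *lengthCheck) saveOffset(in int) { l.startOffset = in }

// reserveLength reserves the 4 bytes occupied by the length field itself.
func (l *lengthCheck) reserveLength() int { return 4 }

// check compares the declared length with what was actually decoded
// between the saved offset and curOffset.
func (l *lengthCheck) check(curOffset int, buf []byte) error {
	declared := int(binary.BigEndian.Uint32(buf[l.startOffset:]))
	if decoded := curOffset - l.startOffset - 4; decoded != declared {
		return PacketDecodingError{"length field did not match the decoded payload"}
	}
	return nil
}
```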
-
-// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
-// field itself is unknown until its value has been decoded (for instance varint-encoded length
-// fields).
-// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength()
-type dynamicPushDecoder interface {
-	pushDecoder
-	decoder
-}
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
deleted file mode 100644
index aea53ca..0000000
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
-// Types implementing Encoder only need to worry about calling methods like PutString,
-// not about how a string is represented in Kafka.
-type packetEncoder interface {
-	// Primitives
-	putInt8(in int8)
-	putInt16(in int16)
-	putInt32(in int32)
-	putInt64(in int64)
-	putVarint(in int64)
-	putUVarint(in uint64)
-	putCompactArrayLength(in int)
-	putArrayLength(in int) error
-	putBool(in bool)
-
-	// Collections
-	putBytes(in []byte) error
-	putVarintBytes(in []byte) error
-	putCompactBytes(in []byte) error
-	putRawBytes(in []byte) error
-	putCompactString(in string) error
-	putNullableCompactString(in *string) error
-	putString(in string) error
-	putNullableString(in *string) error
-	putStringArray(in []string) error
-	putCompactInt32Array(in []int32) error
-	putNullableCompactInt32Array(in []int32) error
-	putInt32Array(in []int32) error
-	putInt64Array(in []int64) error
-	putEmptyTaggedFieldArray()
-
-	// Provide the current offset to record the batch size metric
-	offset() int
-
-	// Stacks, see PushEncoder
-	push(in pushEncoder)
-	pop() error
-
-	// To record metrics when provided
-	metricRegistry() metrics.Registry
-}
-
-// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
-// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
-// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
-// depend upon have been written.
-type pushEncoder interface {
-	// Saves the offset into the input buffer as the location to actually write the calculated value when able.
-	saveOffset(in int)
-
-	// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
-	reserveLength() int
-
-	// Indicates that all required data is now available to calculate and write the field.
-	// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
-	// of data to the saved offset, based on the data between the saved offset and curOffset.
-	run(curOffset int, buf []byte) error
-}
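
The encoding side works the same way, but writes instead of verifies. A hypothetical in-package sketch (not an actual type in this package; assumes `encoding/binary` and `hash/crc32`) that back-fills a CRC32 over everything written after its reserved bytes:

```go
type crcField struct{ startOffset int }

// saveOffset records where the 4 reserved CRC bytes live in the buffer.
func (c *crcField) saveOffset(in int) { c.startOffset = in }

// reserveLength reserves room for the 4-byte CRC32.
func (c *crcField) reserveLength() int { return 4 }

// run checksums everything between the reserved bytes and curOffset,
// then writes the result into the reserved slot.
func (c *crcField) run(curOffset int, buf []byte) error {
	crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
	binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
	return nil
}
```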
-
-// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
-// field itself is unknown until its value has been computed (for instance varint-encoded length
-// fields).
-type dynamicPushEncoder interface {
-	pushEncoder
-
-	// Called during pop() to adjust the length of the field.
-	// It should return the difference in bytes between the last computed length and current length.
-	adjustLength(currOffset int) int
-}
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
deleted file mode 100644
index a66e11e..0000000
--- a/vendor/github.com/Shopify/sarama/partitioner.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package sarama
-
-import (
-	"hash"
-	"hash/fnv"
-	"math/rand"
-	"time"
-)
-
-// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
-// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
-// as simple default implementations.
-type Partitioner interface {
-	// Partition takes a message and partition count and chooses a partition
-	Partition(message *ProducerMessage, numPartitions int32) (int32, error)
-
-	// RequiresConsistency indicates to the user of the partitioner whether the
-	// mapping of key->partition is consistent or not. Specifically, if a
-	// partitioner requires consistency then it must be allowed to choose from all
-	// partitions (even ones known to be unavailable), and its choice must be
-	// respected by the caller. The obvious example is the HashPartitioner.
-	RequiresConsistency() bool
-}
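
A custom Partitioner only has to honour this contract. A purely illustrative sketch (not part of the package) that pins keyless messages to partition 0 and hashes keyed ones:

```go
type pinnedPartitioner struct{ hash sarama.Partitioner }

func NewPinnedPartitioner(topic string) sarama.Partitioner {
	return &pinnedPartitioner{hash: sarama.NewHashPartitioner(topic)}
}

func (p *pinnedPartitioner) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
	if msg.Key == nil {
		return 0, nil // keyless messages always go to partition 0
	}
	return p.hash.Partition(msg, numPartitions)
}

// The pinning must be respected even for unavailable partitions.
func (p *pinnedPartitioner) RequiresConsistency() bool { return true }
```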
-
-// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
-// in order to allow more flexibility than is originally allowed by the
-// RequiresConsistency method in the Partitioner interface. This allows
-// partitioners to require consistency some of the time, but not always. It's useful
-// for, e.g., the HashPartitioner, which does not require consistency if the
-// message key is nil.
-type DynamicConsistencyPartitioner interface {
-	Partitioner
-
-	// MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
-	// but takes in the message being partitioned so that the partitioner can
-	// make a per-message determination.
-	MessageRequiresConsistency(message *ProducerMessage) bool
-}
-
-// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
-type PartitionerConstructor func(topic string) Partitioner
-
-type manualPartitioner struct{}
-
-// HashPartitionerOption lets you modify default values of the partitioner
-type HashPartitionerOption func(*hashPartitioner)
-
-// WithAbsFirst means that the partitioner handles absolute values
-// in the same way as the reference Java implementation
-func WithAbsFirst() HashPartitionerOption {
-	return func(hp *hashPartitioner) {
-		hp.referenceAbs = true
-	}
-}
-
-// WithCustomHashFunction lets you specify what hash function to use for the partitioning
-func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
-	return func(hp *hashPartitioner) {
-		hp.hasher = hasher()
-	}
-}
-
-// WithCustomFallbackPartitioner lets you specify which fallback Partitioner should be used
-// when the message key is nil
-func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
-	return func(hp *hashPartitioner) {
-		hp.random = randomHP
-	}
-}
-
-// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
-// ProducerMessage's Partition field as the partition to produce to.
-func NewManualPartitioner(topic string) Partitioner {
-	return new(manualPartitioner)
-}
-
-func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	return message.Partition, nil
-}
-
-func (p *manualPartitioner) RequiresConsistency() bool {
-	return true
-}
-
-type randomPartitioner struct {
-	generator *rand.Rand
-}
-
-// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
-func NewRandomPartitioner(topic string) Partitioner {
-	p := new(randomPartitioner)
-	p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
-	return p
-}
-
-func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	return int32(p.generator.Intn(int(numPartitions))), nil
-}
-
-func (p *randomPartitioner) RequiresConsistency() bool {
-	return false
-}
-
-type roundRobinPartitioner struct {
-	partition int32
-}
-
-// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
-func NewRoundRobinPartitioner(topic string) Partitioner {
-	return &roundRobinPartitioner{}
-}
-
-func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	if p.partition >= numPartitions {
-		p.partition = 0
-	}
-	ret := p.partition
-	p.partition++
-	return ret, nil
-}
-
-func (p *roundRobinPartitioner) RequiresConsistency() bool {
-	return false
-}
-
-type hashPartitioner struct {
-	random       Partitioner
-	hasher       hash.Hash32
-	referenceAbs bool
-}
-
-// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
-// The argument is a function providing a new instance implementing the hash.Hash32 interface. This ensures
-// that each partition dispatcher gets its own hasher, avoiding the concurrency issues of sharing an instance.
-func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
-	return func(topic string) Partitioner {
-		p := new(hashPartitioner)
-		p.random = NewRandomPartitioner(topic)
-		p.hasher = hasher()
-		p.referenceAbs = false
-		return p
-	}
-}
-
-// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
-func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
-	return func(topic string) Partitioner {
-		p := new(hashPartitioner)
-		p.random = NewRandomPartitioner(topic)
-		p.hasher = fnv.New32a()
-		p.referenceAbs = false
-		for _, option := range options {
-			option(p)
-		}
-		return p
-	}
-}
-
-// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
-// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
-// modulo the number of partitions. This ensures that messages with the same key always end up on the
-// same partition.
-func NewHashPartitioner(topic string) Partitioner {
-	p := new(hashPartitioner)
-	p.random = NewRandomPartitioner(topic)
-	p.hasher = fnv.New32a()
-	p.referenceAbs = false
-	return p
-}
-
-// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
-// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
-// that but it had a mistake and now there are people depending on both behaviours. This will
-// all go away on the next major version bump.
-func NewReferenceHashPartitioner(topic string) Partitioner {
-	p := new(hashPartitioner)
-	p.random = NewRandomPartitioner(topic)
-	p.hasher = fnv.New32a()
-	p.referenceAbs = true
-	return p
-}
-
-func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
-	if message.Key == nil {
-		return p.random.Partition(message, numPartitions)
-	}
-	bytes, err := message.Key.Encode()
-	if err != nil {
-		return -1, err
-	}
-	p.hasher.Reset()
-	_, err = p.hasher.Write(bytes)
-	if err != nil {
-		return -1, err
-	}
-	var partition int32
-	// Turns out we were doing our absolute value in a subtly different way from the upstream
-	// implementation, but now we need to maintain backwards compatibility for people who started
-	// using the old version; if referenceAbs is set we are compatible with the reference Java
-	// client but not with past Sarama versions
-	if p.referenceAbs {
-		partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
-	} else {
-		partition = int32(p.hasher.Sum32()) % numPartitions
-		if partition < 0 {
-			partition = -partition
-		}
-	}
-	return partition, nil
-}
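-
-// Worked example of the two behaviours (editor's note, not part of the original
-// file): for a key hashing to 0x90000000 (int32 -1879048192) and 7 partitions,
-// the reference (Java-compatible) mode yields
-//
-//	(0x90000000 & 0x7fffffff) % 7 = 268435456 % 7 = 2
-//
-// while the legacy mode yields abs(-1879048192 % 7) = abs(0) = 0, so the same
-// key can land on different partitions depending on referenceAbs.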
-
-func (p *hashPartitioner) RequiresConsistency() bool {
-	return true
-}
-
-func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
-	return message.Key != nil
-}
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
deleted file mode 100644
index 0d01374..0000000
--- a/vendor/github.com/Shopify/sarama/prep_encoder.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"math"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-type prepEncoder struct {
-	stack  []pushEncoder
-	length int
-}
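-
-// prepEncoder is the first half of the package's two-pass encoding: a prep pass
-// walks the request and only accumulates lengths, then a buffer of exactly that
-// size is allocated and a realEncoder writes into it. A rough sketch of the
-// driver (editor's illustration of the package-private encode() helper):
-//
-//	var pe prepEncoder
-//	if err := req.encode(&pe); err != nil {
-//		return nil, err
-//	}
-//	re := realEncoder{raw: make([]byte, pe.length), registry: metricRegistry}
-//	if err := req.encode(&re); err != nil {
-//		return nil, err
-//	}
-//	return re.raw, nil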
-
-// primitives
-
-func (pe *prepEncoder) putInt8(in int8) {
-	pe.length++
-}
-
-func (pe *prepEncoder) putInt16(in int16) {
-	pe.length += 2
-}
-
-func (pe *prepEncoder) putInt32(in int32) {
-	pe.length += 4
-}
-
-func (pe *prepEncoder) putInt64(in int64) {
-	pe.length += 8
-}
-
-func (pe *prepEncoder) putVarint(in int64) {
-	var buf [binary.MaxVarintLen64]byte
-	pe.length += binary.PutVarint(buf[:], in)
-}
-
-func (pe *prepEncoder) putUVarint(in uint64) {
-	var buf [binary.MaxVarintLen64]byte
-	pe.length += binary.PutUvarint(buf[:], in)
-}
-
-func (pe *prepEncoder) putArrayLength(in int) error {
-	if in > math.MaxInt32 {
-		return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
-	}
-	pe.length += 4
-	return nil
-}
-
-func (pe *prepEncoder) putCompactArrayLength(in int) {
-	pe.putUVarint(uint64(in + 1))
-}
-
-func (pe *prepEncoder) putBool(in bool) {
-	pe.length++
-}
-
-// arrays
-
-func (pe *prepEncoder) putBytes(in []byte) error {
-	pe.length += 4
-	if in == nil {
-		return nil
-	}
-	return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putVarintBytes(in []byte) error {
-	if in == nil {
-		pe.putVarint(-1)
-		return nil
-	}
-	pe.putVarint(int64(len(in)))
-	return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putCompactBytes(in []byte) error {
-	pe.putUVarint(uint64(len(in) + 1))
-	return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putCompactString(in string) error {
-	pe.putCompactArrayLength(len(in))
-	return pe.putRawBytes([]byte(in))
-}
-
-func (pe *prepEncoder) putNullableCompactString(in *string) error {
-	if in == nil {
-		pe.putUVarint(0)
-		return nil
-	} else {
-		return pe.putCompactString(*in)
-	}
-}
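-
-// Note on the "compact" encodings above (editor's note, not part of the original
-// file): in Kafka's flexible-versions wire format (KIP-482) lengths are written
-// as unsigned varints holding length+1, so 0 encodes null and 1 encodes an
-// empty value. For example, a 3-character string is prefixed with the single
-// byte 0x04, whereas the classic encoding would use the two bytes 0x00 0x03.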
-
-func (pe *prepEncoder) putRawBytes(in []byte) error {
-	if len(in) > math.MaxInt32 {
-		return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putNullableString(in *string) error {
-	if in == nil {
-		pe.length += 2
-		return nil
-	}
-	return pe.putString(*in)
-}
-
-func (pe *prepEncoder) putString(in string) error {
-	pe.length += 2
-	if len(in) > math.MaxInt16 {
-		return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
-	}
-	pe.length += len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putStringArray(in []string) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-
-	for _, str := range in {
-		if err := pe.putString(str); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (pe *prepEncoder) putCompactInt32Array(in []int32) error {
-	if in == nil {
-		return errors.New("expected int32 array to be non null")
-	}
-
-	pe.putUVarint(uint64(len(in)) + 1)
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error {
-	if in == nil {
-		pe.putUVarint(0)
-		return nil
-	}
-
-	pe.putUVarint(uint64(len(in)) + 1)
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putInt32Array(in []int32) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	pe.length += 4 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putInt64Array(in []int64) error {
-	err := pe.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	pe.length += 8 * len(in)
-	return nil
-}
-
-func (pe *prepEncoder) putEmptyTaggedFieldArray() {
-	pe.putUVarint(0)
-}
-
-func (pe *prepEncoder) offset() int {
-	return pe.length
-}
-
-// stackable
-
-func (pe *prepEncoder) push(in pushEncoder) {
-	in.saveOffset(pe.length)
-	pe.length += in.reserveLength()
-	pe.stack = append(pe.stack, in)
-}
-
-func (pe *prepEncoder) pop() error {
-	in := pe.stack[len(pe.stack)-1]
-	pe.stack = pe.stack[:len(pe.stack)-1]
-	if dpe, ok := in.(dynamicPushEncoder); ok {
-		pe.length += dpe.adjustLength(pe.length)
-	}
-
-	return nil
-}
-
-// we do not record metrics during the prep encoder pass
-func (pe *prepEncoder) metricRegistry() metrics.Registry {
-	return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
deleted file mode 100644
index 0034651..0000000
--- a/vendor/github.com/Shopify/sarama/produce_request.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
-// it must see before responding. Any of the constants defined here are valid. On broker versions
-// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
-// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
-// by setting the `min.insync.replicas` value in the broker's configuration).
-type RequiredAcks int16
-
-const (
-	// NoResponse doesn't send any response, the TCP ACK is all you get.
-	NoResponse RequiredAcks = 0
-	// WaitForLocal waits for only the local commit to succeed before responding.
-	WaitForLocal RequiredAcks = 1
-	// WaitForAll waits for all in-sync replicas to commit before responding.
-	// The minimum number of in-sync replicas is configured on the broker via
-	// the `min.insync.replicas` configuration key.
-	WaitForAll RequiredAcks = -1
-)
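-
-// Typical usage (editor's sketch, not part of the original file):
-//
-//	cfg := NewConfig()
-//	cfg.Producer.RequiredAcks = WaitForAll // strongest durability guarantee
-//	cfg.Producer.Return.Successes = true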
-
-type ProduceRequest struct {
-	TransactionalID *string
-	RequiredAcks    RequiredAcks
-	Timeout         int32
-	Version         int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
-	records         map[string]map[int32]Records
-}
-
-func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
-	topicCompressionRatioMetric metrics.Histogram) int64 {
-	var topicRecordCount int64
-	for _, messageBlock := range msgSet.Messages {
-		// Is this a fake "message" wrapping real messages?
-		if messageBlock.Msg.Set != nil {
-			topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
-		} else {
-			// A single uncompressed message
-			topicRecordCount++
-		}
-		// Better safe than sorry when computing the compression ratio
-		if messageBlock.Msg.compressedSize != 0 {
-			compressionRatio := float64(len(messageBlock.Msg.Value)) /
-				float64(messageBlock.Msg.compressedSize)
-			// Histograms do not support decimal values, so multiply by 100 for better precision
-			intCompressionRatio := int64(100 * compressionRatio)
-			compressionRatioMetric.Update(intCompressionRatio)
-			topicCompressionRatioMetric.Update(intCompressionRatio)
-		}
-	}
-	return topicRecordCount
-}
-
-func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
-	topicCompressionRatioMetric metrics.Histogram) int64 {
-	if recordBatch.compressedRecords != nil {
-		compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
-		compressionRatioMetric.Update(compressionRatio)
-		topicCompressionRatioMetric.Update(compressionRatio)
-	}
-
-	return int64(len(recordBatch.Records))
-}
-
-func (r *ProduceRequest) encode(pe packetEncoder) error {
-	if r.Version >= 3 {
-		if err := pe.putNullableString(r.TransactionalID); err != nil {
-			return err
-		}
-	}
-	pe.putInt16(int16(r.RequiredAcks))
-	pe.putInt32(r.Timeout)
-	metricRegistry := pe.metricRegistry()
-	var batchSizeMetric metrics.Histogram
-	var compressionRatioMetric metrics.Histogram
-	if metricRegistry != nil {
-		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
-		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
-	}
-	totalRecordCount := int64(0)
-
-	err := pe.putArrayLength(len(r.records))
-	if err != nil {
-		return err
-	}
-
-	for topic, partitions := range r.records {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		topicRecordCount := int64(0)
-		var topicCompressionRatioMetric metrics.Histogram
-		if metricRegistry != nil {
-			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
-		}
-		for id, records := range partitions {
-			startOffset := pe.offset()
-			pe.putInt32(id)
-			pe.push(&lengthField{})
-			err = records.encode(pe)
-			if err != nil {
-				return err
-			}
-			err = pe.pop()
-			if err != nil {
-				return err
-			}
-			if metricRegistry != nil {
-				if r.Version >= 3 {
-					topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
-				} else {
-					topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
-				}
-				batchSize := int64(pe.offset() - startOffset)
-				batchSizeMetric.Update(batchSize)
-				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
-			}
-		}
-		if topicRecordCount > 0 {
-			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
-			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
-			totalRecordCount += topicRecordCount
-		}
-	}
-	if totalRecordCount > 0 {
-		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
-		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
-	}
-
-	return nil
-}
-
-func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
-	r.Version = version
-
-	if version >= 3 {
-		id, err := pd.getNullableString()
-		if err != nil {
-			return err
-		}
-		r.TransactionalID = id
-	}
-	requiredAcks, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.RequiredAcks = RequiredAcks(requiredAcks)
-	if r.Timeout, err = pd.getInt32(); err != nil {
-		return err
-	}
-	topicCount, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if topicCount == 0 {
-		return nil
-	}
-
-	r.records = make(map[string]map[int32]Records)
-	for i := 0; i < topicCount; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		partitionCount, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-		r.records[topic] = make(map[int32]Records)
-
-		for j := 0; j < partitionCount; j++ {
-			partition, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			size, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-			recordsDecoder, err := pd.getSubset(int(size))
-			if err != nil {
-				return err
-			}
-			var records Records
-			if err := records.decode(recordsDecoder); err != nil {
-				return err
-			}
-			r.records[topic][partition] = records
-		}
-	}
-
-	return nil
-}
-
-func (r *ProduceRequest) key() int16 {
-	return 0
-}
-
-func (r *ProduceRequest) version() int16 {
-	return r.Version
-}
-
-func (r *ProduceRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *ProduceRequest) requiredVersion() KafkaVersion {
-	switch r.Version {
-	case 1:
-		return V0_9_0_0
-	case 2:
-		return V0_10_0_0
-	case 3:
-		return V0_11_0_0
-	case 7:
-		return V2_1_0_0
-	default:
-		return MinVersion
-	}
-}
-
-func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
-	if r.records == nil {
-		r.records = make(map[string]map[int32]Records)
-	}
-
-	if r.records[topic] == nil {
-		r.records[topic] = make(map[int32]Records)
-	}
-}
-
-func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
-	r.ensureRecords(topic, partition)
-	set := r.records[topic][partition].MsgSet
-
-	if set == nil {
-		set = new(MessageSet)
-		r.records[topic][partition] = newLegacyRecords(set)
-	}
-
-	set.addMessage(msg)
-}
-
-func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
-	r.ensureRecords(topic, partition)
-	r.records[topic][partition] = newLegacyRecords(set)
-}
-
-func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
-	r.ensureRecords(topic, partition)
-	r.records[topic][partition] = newDefaultRecords(batch)
-}
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
deleted file mode 100644
index edf9787..0000000
--- a/vendor/github.com/Shopify/sarama/produce_response.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-// Protocol, http://kafka.apache.org/protocol.html
-// v1
-// v2 = v3 = v4
-// v5 = v6 = v7
-// Produce Response (Version: 7) => [responses] throttle_time_ms
-//   responses => topic [partition_responses]
-//     topic => STRING
-//     partition_responses => partition error_code base_offset log_append_time log_start_offset
-//       partition => INT32
-//       error_code => INT16
-//       base_offset => INT64
-//       log_append_time => INT64
-//       log_start_offset => INT64
-//   throttle_time_ms => INT32
-
-// partition_responses in protocol
-type ProduceResponseBlock struct {
-	Err         KError    // v0, error_code
-	Offset      int64     // v0, base_offset
-	Timestamp   time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
-	StartOffset int64     // v5, log_start_offset
-}
-
-func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
-	tmp, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Err = KError(tmp)
-
-	b.Offset, err = pd.getInt64()
-	if err != nil {
-		return err
-	}
-
-	if version >= 2 {
-		if millis, err := pd.getInt64(); err != nil {
-			return err
-		} else if millis != -1 {
-			b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
-		}
-	}
-
-	if version >= 5 {
-		b.StartOffset, err = pd.getInt64()
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
-	pe.putInt16(int16(b.Err))
-	pe.putInt64(b.Offset)
-
-	if version >= 2 {
-		timestamp := int64(-1)
-		if !b.Timestamp.Before(time.Unix(0, 0)) {
-			timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
-		} else if !b.Timestamp.IsZero() {
-			return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
-		}
-		pe.putInt64(timestamp)
-	}
-
-	if version >= 5 {
-		pe.putInt64(b.StartOffset)
-	}
-
-	return nil
-}
-
-type ProduceResponse struct {
-	Blocks       map[string]map[int32]*ProduceResponseBlock // v0, responses
-	Version      int16
-	ThrottleTime time.Duration // v1, throttle_time_ms
-}
-
-func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
-	r.Version = version
-
-	numTopics, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
-	for i := 0; i < numTopics; i++ {
-		name, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		numBlocks, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
-
-		for j := 0; j < numBlocks; j++ {
-			id, err := pd.getInt32()
-			if err != nil {
-				return err
-			}
-
-			block := new(ProduceResponseBlock)
-			err = block.decode(pd, version)
-			if err != nil {
-				return err
-			}
-			r.Blocks[name][id] = block
-		}
-	}
-
-	if r.Version >= 1 {
-		millis, err := pd.getInt32()
-		if err != nil {
-			return err
-		}
-
-		r.ThrottleTime = time.Duration(millis) * time.Millisecond
-	}
-
-	return nil
-}
-
-func (r *ProduceResponse) encode(pe packetEncoder) error {
-	err := pe.putArrayLength(len(r.Blocks))
-	if err != nil {
-		return err
-	}
-	for topic, partitions := range r.Blocks {
-		err = pe.putString(topic)
-		if err != nil {
-			return err
-		}
-		err = pe.putArrayLength(len(partitions))
-		if err != nil {
-			return err
-		}
-		for id, prb := range partitions {
-			pe.putInt32(id)
-			err = prb.encode(pe, r.Version)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	if r.Version >= 1 {
-		pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-	}
-	return nil
-}
-
-func (r *ProduceResponse) key() int16 {
-	return 0
-}
-
-func (r *ProduceResponse) version() int16 {
-	return r.Version
-}
-
-func (r *ProduceResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *ProduceResponse) requiredVersion() KafkaVersion {
-	return MinVersion
-}
-
-func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
-	if r.Blocks == nil {
-		return nil
-	}
-
-	if r.Blocks[topic] == nil {
-		return nil
-	}
-
-	return r.Blocks[topic][partition]
-}
-
-// Testing API
-
-func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
-	if r.Blocks == nil {
-		r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
-	}
-	byTopic, ok := r.Blocks[topic]
-	if !ok {
-		byTopic = make(map[int32]*ProduceResponseBlock)
-		r.Blocks[topic] = byTopic
-	}
-	block := &ProduceResponseBlock{
-		Err: err,
-	}
-	if r.Version >= 2 {
-		block.Timestamp = time.Now()
-	}
-	byTopic[partition] = block
-}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
deleted file mode 100644
index 9c70f81..0000000
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-	"time"
-)
-
-type partitionSet struct {
-	msgs          []*ProducerMessage
-	recordsToSend Records
-	bufferBytes   int
-}
-
-type produceSet struct {
-	parent        *asyncProducer
-	msgs          map[string]map[int32]*partitionSet
-	producerID    int64
-	producerEpoch int16
-
-	bufferBytes int
-	bufferCount int
-}
-
-func newProduceSet(parent *asyncProducer) *produceSet {
-	pid, epoch := parent.txnmgr.getProducerID()
-	return &produceSet{
-		msgs:          make(map[string]map[int32]*partitionSet),
-		parent:        parent,
-		producerID:    pid,
-		producerEpoch: epoch,
-	}
-}
-
-func (ps *produceSet) add(msg *ProducerMessage) error {
-	var err error
-	var key, val []byte
-
-	if msg.Key != nil {
-		if key, err = msg.Key.Encode(); err != nil {
-			return err
-		}
-	}
-
-	if msg.Value != nil {
-		if val, err = msg.Value.Encode(); err != nil {
-			return err
-		}
-	}
-
-	timestamp := msg.Timestamp
-	if timestamp.IsZero() {
-		timestamp = time.Now()
-	}
-	timestamp = timestamp.Truncate(time.Millisecond)
-
-	partitions := ps.msgs[msg.Topic]
-	if partitions == nil {
-		partitions = make(map[int32]*partitionSet)
-		ps.msgs[msg.Topic] = partitions
-	}
-
-	var size int
-
-	set := partitions[msg.Partition]
-	if set == nil {
-		if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-			batch := &RecordBatch{
-				FirstTimestamp:   timestamp,
-				Version:          2,
-				Codec:            ps.parent.conf.Producer.Compression,
-				CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-				ProducerID:       ps.producerID,
-				ProducerEpoch:    ps.producerEpoch,
-			}
-			if ps.parent.conf.Producer.Idempotent {
-				batch.FirstSequence = msg.sequenceNumber
-			}
-			set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
-			size = recordBatchOverhead
-		} else {
-			set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
-		}
-		partitions[msg.Partition] = set
-	}
-
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
-			return errors.New("assertion failed: message out of sequence added to a batch")
-		}
-	}
-
-	// Past this point we can't return an error, because we've already added the message to the set.
-	set.msgs = append(set.msgs, msg)
-
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		// We are being conservative here to avoid having to prep encode the record
-		size += maximumRecordOverhead
-		rec := &Record{
-			Key:            key,
-			Value:          val,
-			TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
-		}
-		size += len(key) + len(val)
-		if len(msg.Headers) > 0 {
-			rec.Headers = make([]*RecordHeader, len(msg.Headers))
-			for i := range msg.Headers {
-				rec.Headers[i] = &msg.Headers[i]
-				size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
-			}
-		}
-		set.recordsToSend.RecordBatch.addRecord(rec)
-	} else {
-		msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
-		if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-			msgToSend.Timestamp = timestamp
-			msgToSend.Version = 1
-		}
-		set.recordsToSend.MsgSet.addMessage(msgToSend)
-		size = producerMessageOverhead + len(key) + len(val)
-	}
-
-	set.bufferBytes += size
-	ps.bufferBytes += size
-	ps.bufferCount++
-
-	return nil
-}
-
-func (ps *produceSet) buildRequest() *ProduceRequest {
-	req := &ProduceRequest{
-		RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
-		Timeout:      int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
-	}
-	if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-		req.Version = 2
-	}
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		req.Version = 3
-	}
-
-	if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
-		req.Version = 7
-	}
-
-	for topic, partitionSets := range ps.msgs {
-		for partition, set := range partitionSets {
-			if req.Version >= 3 {
-				// If the API version we're hitting is 3 or greater, we need to calculate
-				// offsets for each record in the batch relative to FirstOffset.
-				// Additionally, we must set LastOffsetDelta to the value of the last offset
-				// in the batch. Since the OffsetDelta of the first record is 0, we know that the
-				// final record of any batch will have an offset of (# of records in batch) - 1.
-				// (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
-				//  under the RecordBatch section for details.)
-				rb := set.recordsToSend.RecordBatch
-				if len(rb.Records) > 0 {
-					rb.LastOffsetDelta = int32(len(rb.Records) - 1)
-					for i, record := range rb.Records {
-						record.OffsetDelta = int64(i)
-					}
-				}
-				req.AddBatch(topic, partition, rb)
-				continue
-			}
-			if ps.parent.conf.Producer.Compression == CompressionNone {
-				req.AddSet(topic, partition, set.recordsToSend.MsgSet)
-			} else {
-				// When compression is enabled, the entire set for each partition is compressed
-				// and sent as the payload of a single fake "message" with the appropriate codec
-				// set and no key. When the server sees a message with a compression codec, it
-				// decompresses the payload and treats the result as its message set.
-
-				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-					// If our version is 0.10 or later, assign relative offsets
-					// to the inner messages. This lets the broker avoid
-					// recompressing the message set.
-					// (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
-					// for details on relative offsets.)
-					for i, msg := range set.recordsToSend.MsgSet.Messages {
-						msg.Offset = int64(i)
-					}
-				}
-				payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
-				if err != nil {
-					Logger.Println(err) // if this happens, it's basically our fault.
-					panic(err)
-				}
-				compMsg := &Message{
-					Codec:            ps.parent.conf.Producer.Compression,
-					CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
-					Key:              nil,
-					Value:            payload,
-					Set:              set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
-				}
-				if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
-					compMsg.Version = 1
-					compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
-				}
-				req.AddMessage(topic, partition, compMsg)
-			}
-		}
-	}
-
-	return req
-}
-
-func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
-	for topic, partitionSet := range ps.msgs {
-		for partition, set := range partitionSet {
-			cb(topic, partition, set)
-		}
-	}
-}
-
-func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
-	if ps.msgs[topic] == nil {
-		return nil
-	}
-	set := ps.msgs[topic][partition]
-	if set == nil {
-		return nil
-	}
-	ps.bufferBytes -= set.bufferBytes
-	ps.bufferCount -= len(set.msgs)
-	delete(ps.msgs[topic], partition)
-	return set.msgs
-}
-
-func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
-	version := 1
-	if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
-		version = 2
-	}
-
-	switch {
-	// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
-	case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
-		return true
-	// Would we overflow the size-limit of a message-batch for this partition?
-	case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
-		ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
-		return true
-	// Would we overflow simply in number of messages?
-	case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
-		return true
-	default:
-		return false
-	}
-}
-
-func (ps *produceSet) readyToFlush() bool {
-	switch {
-	// If we don't have any messages, nothing else matters
-	case ps.empty():
-		return false
-	// If all three config values are 0, we always flush as-fast-as-possible
-	case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
-		return true
-	// If we've passed the message trigger-point
-	case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
-		return true
-	// If we've passed the byte trigger-point
-	case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
-		return true
-	default:
-		return false
-	}
-}
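-
-// The three trigger points above map directly onto the producer configuration
-// (editor's sketch, not part of the original file):
-//
-//	cfg := NewConfig()
-//	cfg.Producer.Flush.Messages = 100                     // flush once 100 messages are buffered
-//	cfg.Producer.Flush.Bytes = 1 << 20                    // ...or once 1 MiB is buffered
-//	cfg.Producer.Flush.Frequency = 500 * time.Millisecond // ...or every 500ms, whichever comes first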
-
-func (ps *produceSet) empty() bool {
-	return ps.bufferCount == 0
-}
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
deleted file mode 100644
index 2482c63..0000000
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"math"
-)
-
-var (
-	errInvalidArrayLength      = PacketDecodingError{"invalid array length"}
-	errInvalidByteSliceLength  = PacketDecodingError{"invalid byteslice length"}
-	errInvalidStringLength     = PacketDecodingError{"invalid string length"}
-	errVarintOverflow          = PacketDecodingError{"varint overflow"}
-	errUVarintOverflow         = PacketDecodingError{"uvarint overflow"}
-	errInvalidBool             = PacketDecodingError{"invalid bool"}
-	errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"}
-)
-
-type realDecoder struct {
-	raw   []byte
-	off   int
-	stack []pushDecoder
-}
-
-// primitives
-
-func (rd *realDecoder) getInt8() (int8, error) {
-	if rd.remaining() < 1 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int8(rd.raw[rd.off])
-	rd.off++
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt16() (int16, error) {
-	if rd.remaining() < 2 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
-	rd.off += 2
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt32() (int32, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-	return tmp, nil
-}
-
-func (rd *realDecoder) getInt64() (int64, error) {
-	if rd.remaining() < 8 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-	rd.off += 8
-	return tmp, nil
-}
-
-func (rd *realDecoder) getVarint() (int64, error) {
-	tmp, n := binary.Varint(rd.raw[rd.off:])
-	if n == 0 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	if n < 0 {
-		rd.off -= n
-		return -1, errVarintOverflow
-	}
-	rd.off += n
-	return tmp, nil
-}
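-
-// The n == 0 and n < 0 cases above follow encoding/binary's contract (editor's
-// note): binary.Varint returns n == 0 when the buffer is too short and n < 0
-// (with -n bytes consumed) when the value overflows 64 bits, which the decoder
-// maps to ErrInsufficientData and errVarintOverflow respectively.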
-
-func (rd *realDecoder) getUVarint() (uint64, error) {
-	tmp, n := binary.Uvarint(rd.raw[rd.off:])
-	if n == 0 {
-		rd.off = len(rd.raw)
-		return 0, ErrInsufficientData
-	}
-
-	if n < 0 {
-		rd.off -= n
-		return 0, errUVarintOverflow
-	}
-
-	rd.off += n
-	return tmp, nil
-}
-
-func (rd *realDecoder) getArrayLength() (int, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	}
-	tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
-	rd.off += 4
-	if tmp > rd.remaining() {
-		rd.off = len(rd.raw)
-		return -1, ErrInsufficientData
-	} else if tmp > 2*math.MaxUint16 {
-		return -1, errInvalidArrayLength
-	}
-	return tmp, nil
-}
-
-func (rd *realDecoder) getCompactArrayLength() (int, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return 0, err
-	}
-
-	if n == 0 {
-		return 0, nil
-	}
-
-	return int(n) - 1, nil
-}
-
-func (rd *realDecoder) getBool() (bool, error) {
-	b, err := rd.getInt8()
-	if err != nil || b == 0 {
-		return false, err
-	}
-	if b != 1 {
-		return false, errInvalidBool
-	}
-	return true, nil
-}
-
-func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
-	tagCount, err := rd.getUVarint()
-	if err != nil {
-		return 0, err
-	}
-
-	if tagCount != 0 {
-		return 0, errUnsupportedTaggedFields
-	}
-
-	return 0, nil
-}
-
-// collections
-
-func (rd *realDecoder) getBytes() ([]byte, error) {
-	tmp, err := rd.getInt32()
-	if err != nil {
-		return nil, err
-	}
-	if tmp == -1 {
-		return nil, nil
-	}
-
-	return rd.getRawBytes(int(tmp))
-}
-
-func (rd *realDecoder) getVarintBytes() ([]byte, error) {
-	tmp, err := rd.getVarint()
-	if err != nil {
-		return nil, err
-	}
-	if tmp == -1 {
-		return nil, nil
-	}
-
-	return rd.getRawBytes(int(tmp))
-}
-
-func (rd *realDecoder) getCompactBytes() ([]byte, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	length := int(n - 1)
-	return rd.getRawBytes(length)
-}
-
-func (rd *realDecoder) getStringLength() (int, error) {
-	length, err := rd.getInt16()
-	if err != nil {
-		return 0, err
-	}
-
-	n := int(length)
-
-	switch {
-	case n < -1:
-		return 0, errInvalidStringLength
-	case n > rd.remaining():
-		rd.off = len(rd.raw)
-		return 0, ErrInsufficientData
-	}
-
-	return n, nil
-}
-
-func (rd *realDecoder) getString() (string, error) {
-	n, err := rd.getStringLength()
-	if err != nil || n == -1 {
-		return "", err
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+n])
-	rd.off += n
-	return tmpStr, nil
-}
-
-func (rd *realDecoder) getNullableString() (*string, error) {
-	n, err := rd.getStringLength()
-	if err != nil || n == -1 {
-		return nil, err
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+n])
-	rd.off += n
-	return &tmpStr, err
-}
-
-func (rd *realDecoder) getCompactString() (string, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return "", err
-	}
-
-	length := int(n - 1)
-	if length < 0 {
-		return "", errInvalidByteSliceLength
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+length])
-	rd.off += length
-	return tmpStr, nil
-}
-
-func (rd *realDecoder) getCompactNullableString() (*string, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	length := int(n - 1)
-
-	if length < 0 {
-		return nil, err
-	}
-
-	tmpStr := string(rd.raw[rd.off : rd.off+length])
-	rd.off += length
-	return &tmpStr, err
-}
-
-func (rd *realDecoder) getCompactInt32Array() ([]int32, error) {
-	n, err := rd.getUVarint()
-	if err != nil {
-		return nil, err
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	arrayLength := int(n) - 1
-
-	ret := make([]int32, arrayLength)
-
-	for i := range ret {
-		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-		rd.off += 4
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getInt32Array() ([]int32, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if rd.remaining() < 4*n {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, errInvalidArrayLength
-	}
-
-	ret := make([]int32, n)
-	for i := range ret {
-		ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-		rd.off += 4
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getInt64Array() ([]int64, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if rd.remaining() < 8*n {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, errInvalidArrayLength
-	}
-
-	ret := make([]int64, n)
-	for i := range ret {
-		ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
-		rd.off += 8
-	}
-	return ret, nil
-}
-
-func (rd *realDecoder) getStringArray() ([]string, error) {
-	if rd.remaining() < 4 {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-	n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
-	rd.off += 4
-
-	if n == 0 {
-		return nil, nil
-	}
-
-	if n < 0 {
-		return nil, errInvalidArrayLength
-	}
-
-	ret := make([]string, n)
-	for i := range ret {
-		str, err := rd.getString()
-		if err != nil {
-			return nil, err
-		}
-
-		ret[i] = str
-	}
-	return ret, nil
-}
-
-// subsets
-
-func (rd *realDecoder) remaining() int {
-	return len(rd.raw) - rd.off
-}
-
-func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
-	buf, err := rd.getRawBytes(length)
-	if err != nil {
-		return nil, err
-	}
-	return &realDecoder{raw: buf}, nil
-}
-
-func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
-	if length < 0 {
-		return nil, errInvalidByteSliceLength
-	} else if length > rd.remaining() {
-		rd.off = len(rd.raw)
-		return nil, ErrInsufficientData
-	}
-
-	start := rd.off
-	rd.off += length
-	return rd.raw[start:rd.off], nil
-}
-
-func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
-	if rd.remaining() < offset+length {
-		return nil, ErrInsufficientData
-	}
-	off := rd.off + offset
-	return &realDecoder{raw: rd.raw[off : off+length]}, nil
-}
-
-func (rd *realDecoder) peekInt8(offset int) (int8, error) {
-	const byteLen = 1
-	if rd.remaining() < offset+byteLen {
-		return -1, ErrInsufficientData
-	}
-	return int8(rd.raw[rd.off+offset]), nil
-}
-
-// stacks
-
-func (rd *realDecoder) push(in pushDecoder) error {
-	in.saveOffset(rd.off)
-
-	var reserve int
-	if dpd, ok := in.(dynamicPushDecoder); ok {
-		if err := dpd.decode(rd); err != nil {
-			return err
-		}
-	} else {
-		reserve = in.reserveLength()
-		if rd.remaining() < reserve {
-			rd.off = len(rd.raw)
-			return ErrInsufficientData
-		}
-	}
-
-	rd.stack = append(rd.stack, in)
-
-	rd.off += reserve
-
-	return nil
-}
-
-func (rd *realDecoder) pop() error {
-	// this is Go's ugly pop pattern (the inverse of append)
-	in := rd.stack[len(rd.stack)-1]
-	rd.stack = rd.stack[:len(rd.stack)-1]
-
-	return in.check(rd.off, rd.raw)
-}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
deleted file mode 100644
index c07204c..0000000
--- a/vendor/github.com/Shopify/sarama/real_encoder.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"errors"
-
-	"github.com/rcrowley/go-metrics"
-)
-
-type realEncoder struct {
-	raw      []byte
-	off      int
-	stack    []pushEncoder
-	registry metrics.Registry
-}
-
-// primitives
-
-func (re *realEncoder) putInt8(in int8) {
-	re.raw[re.off] = byte(in)
-	re.off++
-}
-
-func (re *realEncoder) putInt16(in int16) {
-	binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
-	re.off += 2
-}
-
-func (re *realEncoder) putInt32(in int32) {
-	binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
-	re.off += 4
-}
-
-func (re *realEncoder) putInt64(in int64) {
-	binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
-	re.off += 8
-}
-
-func (re *realEncoder) putVarint(in int64) {
-	re.off += binary.PutVarint(re.raw[re.off:], in)
-}
-
-func (re *realEncoder) putUVarint(in uint64) {
-	re.off += binary.PutUvarint(re.raw[re.off:], in)
-}
-
-func (re *realEncoder) putArrayLength(in int) error {
-	re.putInt32(int32(in))
-	return nil
-}
-
-func (re *realEncoder) putCompactArrayLength(in int) {
-	// 0 represents a null array, so +1 has to be added
-	re.putUVarint(uint64(in + 1))
-}
-
-func (re *realEncoder) putBool(in bool) {
-	if in {
-		re.putInt8(1)
-		return
-	}
-	re.putInt8(0)
-}
-
-// collection
-
-func (re *realEncoder) putRawBytes(in []byte) error {
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putBytes(in []byte) error {
-	if in == nil {
-		re.putInt32(-1)
-		return nil
-	}
-	re.putInt32(int32(len(in)))
-	return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putVarintBytes(in []byte) error {
-	if in == nil {
-		re.putVarint(-1)
-		return nil
-	}
-	re.putVarint(int64(len(in)))
-	return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putCompactBytes(in []byte) error {
-	re.putUVarint(uint64(len(in) + 1))
-	return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putCompactString(in string) error {
-	re.putCompactArrayLength(len(in))
-	return re.putRawBytes([]byte(in))
-}
-
-func (re *realEncoder) putNullableCompactString(in *string) error {
-	if in == nil {
-		re.putInt8(0)
-		return nil
-	}
-	return re.putCompactString(*in)
-}
-
-func (re *realEncoder) putString(in string) error {
-	re.putInt16(int16(len(in)))
-	copy(re.raw[re.off:], in)
-	re.off += len(in)
-	return nil
-}
-
-func (re *realEncoder) putNullableString(in *string) error {
-	if in == nil {
-		re.putInt16(-1)
-		return nil
-	}
-	return re.putString(*in)
-}
-
-func (re *realEncoder) putStringArray(in []string) error {
-	err := re.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-
-	for _, val := range in {
-		if err := re.putString(val); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (re *realEncoder) putCompactInt32Array(in []int32) error {
-	if in == nil {
-		return errors.New("expected int32 array to be non null")
-	}
-	// 0 represents a null array, so +1 has to be added
-	re.putUVarint(uint64(len(in)) + 1)
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putNullableCompactInt32Array(in []int32) error {
-	if in == nil {
-		re.putUVarint(0)
-		return nil
-	}
-	// 0 represents a null array, so +1 has to be added
-	re.putUVarint(uint64(len(in)) + 1)
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putInt32Array(in []int32) error {
-	err := re.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	for _, val := range in {
-		re.putInt32(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putInt64Array(in []int64) error {
-	err := re.putArrayLength(len(in))
-	if err != nil {
-		return err
-	}
-	for _, val := range in {
-		re.putInt64(val)
-	}
-	return nil
-}
-
-func (re *realEncoder) putEmptyTaggedFieldArray() {
-	re.putUVarint(0)
-}
-
-func (re *realEncoder) offset() int {
-	return re.off
-}
-
-// stacks
-
-func (re *realEncoder) push(in pushEncoder) {
-	in.saveOffset(re.off)
-	re.off += in.reserveLength()
-	re.stack = append(re.stack, in)
-}
-
-func (re *realEncoder) pop() error {
-	// this is Go's ugly pop pattern (the inverse of append)
-	in := re.stack[len(re.stack)-1]
-	re.stack = re.stack[:len(re.stack)-1]
-
-	return in.run(re.off, re.raw)
-}
-
-// we do record metrics during the real encoder pass
-func (re *realEncoder) metricRegistry() metrics.Registry {
-	return re.registry
-}
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
deleted file mode 100644
index c653763..0000000
--- a/vendor/github.com/Shopify/sarama/record_batch.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package sarama
-
-import (
-	"fmt"
-	"time"
-)
-
-const recordBatchOverhead = 49
-
-type recordsArray []*Record
-
-func (e recordsArray) encode(pe packetEncoder) error {
-	for _, r := range e {
-		if err := r.encode(pe); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (e recordsArray) decode(pd packetDecoder) error {
-	for i := range e {
-		rec := &Record{}
-		if err := rec.decode(pd); err != nil {
-			return err
-		}
-		e[i] = rec
-	}
-	return nil
-}
-
-type RecordBatch struct {
-	FirstOffset           int64
-	PartitionLeaderEpoch  int32
-	Version               int8
-	Codec                 CompressionCodec
-	CompressionLevel      int
-	Control               bool
-	LogAppendTime         bool
-	LastOffsetDelta       int32
-	FirstTimestamp        time.Time
-	MaxTimestamp          time.Time
-	ProducerID            int64
-	ProducerEpoch         int16
-	FirstSequence         int32
-	Records               []*Record
-	PartialTrailingRecord bool
-	IsTransactional       bool
-
-	compressedRecords []byte
-	recordsLen        int // uncompressed records size
-}
-
-func (b *RecordBatch) LastOffset() int64 {
-	return b.FirstOffset + int64(b.LastOffsetDelta)
-}
-
-func (b *RecordBatch) encode(pe packetEncoder) error {
-	if b.Version != 2 {
-		return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
-	}
-	pe.putInt64(b.FirstOffset)
-	pe.push(&lengthField{})
-	pe.putInt32(b.PartitionLeaderEpoch)
-	pe.putInt8(b.Version)
-	pe.push(newCRC32Field(crcCastagnoli))
-	pe.putInt16(b.computeAttributes())
-	pe.putInt32(b.LastOffsetDelta)
-
-	if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
-		return err
-	}
-
-	if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
-		return err
-	}
-
-	pe.putInt64(b.ProducerID)
-	pe.putInt16(b.ProducerEpoch)
-	pe.putInt32(b.FirstSequence)
-
-	if err := pe.putArrayLength(len(b.Records)); err != nil {
-		return err
-	}
-
-	if b.compressedRecords == nil {
-		if err := b.encodeRecords(pe); err != nil {
-			return err
-		}
-	}
-	if err := pe.putRawBytes(b.compressedRecords); err != nil {
-		return err
-	}
-
-	if err := pe.pop(); err != nil {
-		return err
-	}
-	return pe.pop()
-}
-
-func (b *RecordBatch) decode(pd packetDecoder) (err error) {
-	if b.FirstOffset, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	batchLen, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	if b.Version, err = pd.getInt8(); err != nil {
-		return err
-	}
-
-	crc32Decoder := acquireCrc32Field(crcCastagnoli)
-	defer releaseCrc32Field(crc32Decoder)
-
-	if err = pd.push(crc32Decoder); err != nil {
-		return err
-	}
-
-	attributes, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
-	b.Control = attributes&controlMask == controlMask
-	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
-	b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
-
-	if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
-		return err
-	}
-
-	if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
-		return err
-	}
-
-	if b.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-
-	if b.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	if b.FirstSequence, err = pd.getInt32(); err != nil {
-		return err
-	}
-
-	numRecs, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if numRecs >= 0 {
-		b.Records = make([]*Record, numRecs)
-	}
-
-	bufSize := int(batchLen) - recordBatchOverhead
-	recBuffer, err := pd.getRawBytes(bufSize)
-	if err != nil {
-		if err == ErrInsufficientData {
-			b.PartialTrailingRecord = true
-			b.Records = nil
-			return nil
-		}
-		return err
-	}
-
-	if err = pd.pop(); err != nil {
-		return err
-	}
-
-	recBuffer, err = decompress(b.Codec, recBuffer)
-	if err != nil {
-		return err
-	}
-
-	b.recordsLen = len(recBuffer)
-	err = decode(recBuffer, recordsArray(b.Records))
-	if err == ErrInsufficientData {
-		b.PartialTrailingRecord = true
-		b.Records = nil
-		return nil
-	}
-	return err
-}
-
-func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
-	var raw []byte
-	var err error
-	if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
-		return err
-	}
-	b.recordsLen = len(raw)
-
-	b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
-	return err
-}
-
-func (b *RecordBatch) computeAttributes() int16 {
-	attr := int16(b.Codec) & int16(compressionCodecMask)
-	if b.Control {
-		attr |= controlMask
-	}
-	if b.LogAppendTime {
-		attr |= timestampTypeMask
-	}
-	if b.IsTransactional {
-		attr |= isTransactionalMask
-	}
-	return attr
-}
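-
-// Attribute bit layout, per the Kafka RecordBatch spec (editor's note, not part
-// of the original file): bits 0-2 carry the compression codec, bit 3 the
-// timestamp type (log-append vs. create time), bit 4 the transactional flag and
-// bit 5 the control flag. For example, a transactional gzip batch has
-// attributes 0x11 (codec 1 | isTransactionalMask).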
-
-func (b *RecordBatch) addRecord(r *Record) {
-	b.Records = append(b.Records, r)
-}
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
deleted file mode 100644
index f4c5e95..0000000
--- a/vendor/github.com/Shopify/sarama/records.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package sarama
-
-import "fmt"
-
-const (
-	unknownRecords = iota
-	legacyRecords
-	defaultRecords
-
-	magicOffset = 16
-)
-
-// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
-type Records struct {
-	recordsType int
-	MsgSet      *MessageSet
-	RecordBatch *RecordBatch
-}
-
-func newLegacyRecords(msgSet *MessageSet) Records {
-	return Records{recordsType: legacyRecords, MsgSet: msgSet}
-}
-
-func newDefaultRecords(batch *RecordBatch) Records {
-	return Records{recordsType: defaultRecords, RecordBatch: batch}
-}
-
-// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil.
-// The first return value indicates whether both fields are nil (and the type is not set).
-// If both fields are not nil, it returns an error.
-func (r *Records) setTypeFromFields() (bool, error) {
-	if r.MsgSet == nil && r.RecordBatch == nil {
-		return true, nil
-	}
-	if r.MsgSet != nil && r.RecordBatch != nil {
-		return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
-	}
-	r.recordsType = defaultRecords
-	if r.MsgSet != nil {
-		r.recordsType = legacyRecords
-	}
-	return false, nil
-}
-
-func (r *Records) encode(pe packetEncoder) error {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return nil
-		}
-		return r.MsgSet.encode(pe)
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return nil
-		}
-		return r.RecordBatch.encode(pe)
-	}
-
-	return fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) setTypeFromMagic(pd packetDecoder) error {
-	magic, err := magicValue(pd)
-	if err != nil {
-		return err
-	}
-
-	r.recordsType = defaultRecords
-	if magic < 2 {
-		r.recordsType = legacyRecords
-	}
-
-	return nil
-}
-
-func (r *Records) decode(pd packetDecoder) error {
-	if r.recordsType == unknownRecords {
-		if err := r.setTypeFromMagic(pd); err != nil {
-			return err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		r.MsgSet = &MessageSet{}
-		return r.MsgSet.decode(pd)
-	case defaultRecords:
-		r.RecordBatch = &RecordBatch{}
-		return r.RecordBatch.decode(pd)
-	}
-	return fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) numRecords() (int, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return 0, err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return 0, nil
-		}
-		return len(r.MsgSet.Messages), nil
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return 0, nil
-		}
-		return len(r.RecordBatch.Records), nil
-	}
-	return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isPartial() (bool, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return false, err
-		}
-	}
-
-	switch r.recordsType {
-	case unknownRecords:
-		return false, nil
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return false, nil
-		}
-		return r.MsgSet.PartialTrailingMessage, nil
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return false, nil
-		}
-		return r.RecordBatch.PartialTrailingRecord, nil
-	}
-	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isControl() (bool, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return false, err
-		}
-	}
-
-	switch r.recordsType {
-	case legacyRecords:
-		return false, nil
-	case defaultRecords:
-		if r.RecordBatch == nil {
-			return false, nil
-		}
-		return r.RecordBatch.Control, nil
-	}
-	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isOverflow() (bool, error) {
-	if r.recordsType == unknownRecords {
-		if empty, err := r.setTypeFromFields(); err != nil || empty {
-			return false, err
-		}
-	}
-
-	switch r.recordsType {
-	case unknownRecords:
-		return false, nil
-	case legacyRecords:
-		if r.MsgSet == nil {
-			return false, nil
-		}
-		return r.MsgSet.OverflowMessage, nil
-	case defaultRecords:
-		return false, nil
-	}
-	return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func magicValue(pd packetDecoder) (int8, error) {
-	return pd.peekInt8(magicOffset)
-}
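-
-// The constant magicOffset (16) works for both layouts (editor's note, not part
-// of the original file): a RecordBatch starts with baseOffset (8 bytes),
-// batchLength (4) and partitionLeaderEpoch (4), while a legacy MessageSet entry
-// starts with offset (8), message size (4) and CRC (4); in either case the next
-// byte, at offset 16, is the magic/version byte that peekInt8 reads here.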
-
-func (r *Records) getControlRecord() (ControlRecord, error) {
-	if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 {
-		return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
-	}
-
-	firstRecord := r.RecordBatch.Records[0]
-	controlRecord := ControlRecord{}
-	err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
-	if err != nil {
-		return ControlRecord{}, err
-	}
-
-	return controlRecord, nil
-}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
deleted file mode 100644
index d899df5..0000000
--- a/vendor/github.com/Shopify/sarama/request.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package sarama
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-type protocolBody interface {
-	encoder
-	versionedDecoder
-	key() int16
-	version() int16
-	headerVersion() int16
-	requiredVersion() KafkaVersion
-}
-
-type request struct {
-	correlationID int32
-	clientID      string
-	body          protocolBody
-}
-
-func (r *request) encode(pe packetEncoder) error {
-	pe.push(&lengthField{})
-	pe.putInt16(r.body.key())
-	pe.putInt16(r.body.version())
-	pe.putInt32(r.correlationID)
-
-	if r.body.headerVersion() >= 1 {
-		err := pe.putString(r.clientID)
-		if err != nil {
-			return err
-		}
-	}
-
-	if r.body.headerVersion() >= 2 {
-	// we don't use tagged fields at the moment, so we just put an array length of 0
-		pe.putUVarint(0)
-	}
-
-	err := r.body.encode(pe)
-	if err != nil {
-		return err
-	}
-
-	return pe.pop()
-}
-
-func (r *request) decode(pd packetDecoder) (err error) {
-	key, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	version, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.correlationID, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-
-	r.clientID, err = pd.getString()
-	if err != nil {
-		return err
-	}
-
-	r.body = allocateBody(key, version)
-	if r.body == nil {
-		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
-	}
-
-	if r.body.headerVersion() >= 2 {
-		// tagged field
-		_, err = pd.getUVarint()
-		if err != nil {
-			return err
-		}
-	}
-
-	return r.body.decode(pd, version)
-}
-
-func decodeRequest(r io.Reader) (*request, int, error) {
-	var (
-		bytesRead   int
-		lengthBytes = make([]byte, 4)
-	)
-
-	if _, err := io.ReadFull(r, lengthBytes); err != nil {
-		return nil, bytesRead, err
-	}
-
-	bytesRead += len(lengthBytes)
-	length := int32(binary.BigEndian.Uint32(lengthBytes))
-
-	if length <= 4 || length > MaxRequestSize {
-		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
-	}
-
-	encodedReq := make([]byte, length)
-	if _, err := io.ReadFull(r, encodedReq); err != nil {
-		return nil, bytesRead, err
-	}
-
-	bytesRead += len(encodedReq)
-
-	req := &request{}
-	if err := decode(encodedReq, req); err != nil {
-		return nil, bytesRead, err
-	}
-
-	return req, bytesRead, nil
-}
-
-func allocateBody(key, version int16) protocolBody {
-	switch key {
-	case 0:
-		return &ProduceRequest{}
-	case 1:
-		return &FetchRequest{Version: version}
-	case 2:
-		return &OffsetRequest{Version: version}
-	case 3:
-		return &MetadataRequest{}
-	case 8:
-		return &OffsetCommitRequest{Version: version}
-	case 9:
-		return &OffsetFetchRequest{Version: version}
-	case 10:
-		return &FindCoordinatorRequest{}
-	case 11:
-		return &JoinGroupRequest{}
-	case 12:
-		return &HeartbeatRequest{}
-	case 13:
-		return &LeaveGroupRequest{}
-	case 14:
-		return &SyncGroupRequest{}
-	case 15:
-		return &DescribeGroupsRequest{}
-	case 16:
-		return &ListGroupsRequest{}
-	case 17:
-		return &SaslHandshakeRequest{}
-	case 18:
-		return &ApiVersionsRequest{}
-	case 19:
-		return &CreateTopicsRequest{}
-	case 20:
-		return &DeleteTopicsRequest{}
-	case 21:
-		return &DeleteRecordsRequest{}
-	case 22:
-		return &InitProducerIDRequest{}
-	case 24:
-		return &AddPartitionsToTxnRequest{}
-	case 25:
-		return &AddOffsetsToTxnRequest{}
-	case 26:
-		return &EndTxnRequest{}
-	case 28:
-		return &TxnOffsetCommitRequest{}
-	case 29:
-		return &DescribeAclsRequest{}
-	case 30:
-		return &CreateAclsRequest{}
-	case 31:
-		return &DeleteAclsRequest{}
-	case 32:
-		return &DescribeConfigsRequest{}
-	case 33:
-		return &AlterConfigsRequest{}
-	case 35:
-		return &DescribeLogDirsRequest{}
-	case 36:
-		return &SaslAuthenticateRequest{}
-	case 37:
-		return &CreatePartitionsRequest{}
-	case 42:
-		return &DeleteGroupsRequest{}
-	case 44:
-		return &IncrementalAlterConfigsRequest{}
-	case 45:
-		return &AlterPartitionReassignmentsRequest{}
-	case 46:
-		return &ListPartitionReassignmentsRequest{}
-	case 50:
-		return &DescribeUserScramCredentialsRequest{}
-	case 51:
-		return &AlterUserScramCredentialsRequest{}
-	}
-	return nil
-}
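
For readers following the encode path above, here is a minimal sketch of the v1 request-header framing it produces: a 4-byte big-endian length prefix covering the rest of the frame, then the int16 API key, the int16 API version, the int32 correlation ID, and the client ID as a length-prefixed string. The helper names are hypothetical; sarama itself goes through packetEncoder and lengthField rather than encoding/binary directly.

	package main

	import (
		"bytes"
		"encoding/binary"
		"fmt"
	)

	// encodeRequestHeader frames a header-v1 request the way Kafka expects:
	// [length][apiKey][apiVersion][correlationID][clientID].
	func encodeRequestHeader(apiKey, apiVersion int16, correlationID int32, clientID string) []byte {
		var body bytes.Buffer
		binary.Write(&body, binary.BigEndian, apiKey)
		binary.Write(&body, binary.BigEndian, apiVersion)
		binary.Write(&body, binary.BigEndian, correlationID)
		binary.Write(&body, binary.BigEndian, int16(len(clientID)))
		body.WriteString(clientID)

		framed := make([]byte, 4, 4+body.Len())
		binary.BigEndian.PutUint32(framed, uint32(body.Len()))
		return append(framed, body.Bytes()...)
	}

	func main() {
		frame := encodeRequestHeader(18, 0, 1, "sarama") // 18 = ApiVersions
		fmt.Printf("% x\n", frame)
	}
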
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
deleted file mode 100644
index fbcef0b..0000000
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-import "fmt"
-
-const (
-	responseLengthSize = 4
-	correlationIDSize  = 4
-)
-
-type responseHeader struct {
-	length        int32
-	correlationID int32
-}
-
-func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
-	r.length, err = pd.getInt32()
-	if err != nil {
-		return err
-	}
-	if r.length <= 4 || r.length > MaxResponseSize {
-		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
-	}
-
-	r.correlationID, err = pd.getInt32()
-
-	if version >= 1 {
-		if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
-			return err
-		}
-	}
-
-	return err
-}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
deleted file mode 100644
index 48f362d..0000000
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
-API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
-API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
-
-To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
-and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
-The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
-useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
-depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
-SyncProducer can still sometimes be lost.
-
-To consume messages, use the Consumer or the ConsumerGroup API.
-
-For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
-and message sent on the wire; the Client provides higher-level metadata management that is shared between
-the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
-exactly with the protocol fields documented by Kafka at
-https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-
-Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.
-
-Broker related metrics:
-
-	+----------------------------------------------+------------+---------------------------------------------------------------+
-	| Name                                         | Type       | Description                                                   |
-	+----------------------------------------------+------------+---------------------------------------------------------------+
-	| incoming-byte-rate                           | meter      | Bytes/second read off all brokers                             |
-	| incoming-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second read off a given broker                          |
-	| outgoing-byte-rate                           | meter      | Bytes/second written off all brokers                          |
-	| outgoing-byte-rate-for-broker-<broker-id>    | meter      | Bytes/second written off a given broker                       |
-	| request-rate                                 | meter      | Requests/second sent to all brokers                           |
-	| request-rate-for-broker-<broker-id>          | meter      | Requests/second sent to a given broker                        |
-	| request-size                                 | histogram  | Distribution of the request size in bytes for all brokers     |
-	| request-size-for-broker-<broker-id>          | histogram  | Distribution of the request size in bytes for a given broker  |
-	| request-latency-in-ms                        | histogram  | Distribution of the request latency in ms for all brokers     |
-	| request-latency-in-ms-for-broker-<broker-id> | histogram  | Distribution of the request latency in ms for a given broker  |
-	| response-rate                                | meter      | Responses/second received from all brokers                    |
-	| response-rate-for-broker-<broker-id>         | meter      | Responses/second received from a given broker                 |
-	| response-size                                | histogram  | Distribution of the response size in bytes for all brokers    |
-	| response-size-for-broker-<broker-id>         | histogram  | Distribution of the response size in bytes for a given broker |
-	| requests-in-flight                           | counter    | The current number of in-flight requests awaiting a response  |
-	|                                              |            | for all brokers                                               |
-	| requests-in-flight-for-broker-<broker-id>    | counter    | The current number of in-flight requests awaiting a response  |
-	|                                              |            | for a given broker                                            |
-	+----------------------------------------------+------------+---------------------------------------------------------------+
-
-Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
-
-Producer related metrics:
-
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| Name                                      | Type       | Description                                                                          |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| batch-size                                | histogram  | Distribution of the number of bytes sent per partition per request for all topics    |
-	| batch-size-for-topic-<topic>              | histogram  | Distribution of the number of bytes sent per partition per request for a given topic |
-	| record-send-rate                          | meter      | Records/second sent to all topics                                                    |
-	| record-send-rate-for-topic-<topic>        | meter      | Records/second sent to a given topic                                                 |
-	| records-per-request                       | histogram  | Distribution of the number of records sent per request for all topics                |
-	| records-per-request-for-topic-<topic>     | histogram  | Distribution of the number of records sent per request for a given topic             |
-	| compression-ratio                         | histogram  | Distribution of the compression ratio times 100 of record batches for all topics     |
-	| compression-ratio-for-topic-<topic>       | histogram  | Distribution of the compression ratio times 100 of record batches for a given topic  |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-Consumer related metrics:
-
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| Name                                      | Type       | Description                                                                          |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-	| consumer-batch-size                       | histogram  | Distribution of the number of messages in a batch                                    |
-	+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-*/
-package sarama
-
-import (
-	"io/ioutil"
-	"log"
-)
-
-var (
-	// Logger is the instance of a StdLogger interface that Sarama writes connection
-	// management events to. By default it is set to discard all log messages via ioutil.Discard,
-	// but you can set it to redirect wherever you want.
-	Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
-
-	// PanicHandler is called for recovering from panics spawned internally to the library (and thus
-	// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
-	PanicHandler func(interface{})
-
-	// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
-	// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
-	// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
-	// to process.
-	MaxRequestSize int32 = 100 * 1024 * 1024
-
-	// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
-	// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
-	// protect the client from running out of memory. Please note that brokers do not have any natural limit on
-	// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
-	// (see https://issues.apache.org/jira/browse/KAFKA-2063).
-	MaxResponseSize int32 = 100 * 1024 * 1024
-)
-
-// StdLogger is used to log error messages.
-type StdLogger interface {
-	Print(v ...interface{})
-	Printf(format string, v ...interface{})
-	Println(v ...interface{})
-}
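
A quick usage sketch for the Logger variable declared above, assuming the old github.com/Shopify/sarama import path this file lived at. Note that ioutil.Discard has been deprecated in favor of io.Discard since Go 1.16, and newer releases of this file use io.Discard:

	package main

	import (
		"log"
		"os"

		"github.com/Shopify/sarama"
	)

	func main() {
		// Redirect Sarama's connection-management logs to stderr instead of
		// the default discard writer.
		sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)
	}
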
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
deleted file mode 100644
index 90504df..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-type SaslAuthenticateRequest struct {
-	SaslAuthBytes []byte
-}
-
-// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API
-const APIKeySASLAuth = 36
-
-func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
-	return pe.putBytes(r.SaslAuthBytes)
-}
-
-func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
-	r.SaslAuthBytes, err = pd.getBytes()
-	return err
-}
-
-func (r *SaslAuthenticateRequest) key() int16 {
-	return APIKeySASLAuth
-}
-
-func (r *SaslAuthenticateRequest) version() int16 {
-	return 0
-}
-
-func (r *SaslAuthenticateRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
deleted file mode 100644
index 3ef57b5..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-type SaslAuthenticateResponse struct {
-	Err           KError
-	ErrorMessage  *string
-	SaslAuthBytes []byte
-}
-
-func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	if err := pe.putNullableString(r.ErrorMessage); err != nil {
-		return err
-	}
-	return pe.putBytes(r.SaslAuthBytes)
-}
-
-func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	if r.ErrorMessage, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	r.SaslAuthBytes, err = pd.getBytes()
-
-	return err
-}
-
-func (r *SaslAuthenticateResponse) key() int16 {
-	return APIKeySASLAuth
-}
-
-func (r *SaslAuthenticateResponse) version() int16 {
-	return 0
-}
-
-func (r *SaslAuthenticateResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
-	return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
deleted file mode 100644
index 74dc307..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-type SaslHandshakeRequest struct {
-	Mechanism string
-	Version   int16
-}
-
-func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.Mechanism); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.Mechanism, err = pd.getString(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *SaslHandshakeRequest) key() int16 {
-	return 17
-}
-
-func (r *SaslHandshakeRequest) version() int16 {
-	return r.Version
-}
-
-func (r *SaslHandshakeRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
deleted file mode 100644
index 69dfc31..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package sarama
-
-type SaslHandshakeResponse struct {
-	Err               KError
-	EnabledMechanisms []string
-}
-
-func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return pe.putStringArray(r.EnabledMechanisms)
-}
-
-func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (r *SaslHandshakeResponse) key() int16 {
-	return 17
-}
-
-func (r *SaslHandshakeResponse) version() int16 {
-	return 0
-}
-
-func (r *SaslHandshakeResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
-	return V0_10_0_0
-}
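
The handshake and authenticate types above form a two-step SASL flow: the client first proposes a mechanism, and only then ships the opaque SASL payload. A hedged sketch of the two request bodies for SASL/PLAIN, whose payload is a NUL-separated authorization ID, username, and password; in practice the client performs this exchange itself when Config.Net.SASL is enabled, so the literals below are illustrative only:

	package main

	import (
		"fmt"

		"github.com/Shopify/sarama"
	)

	func main() {
		// Step 1: ask the broker whether the mechanism is enabled.
		handshake := &sarama.SaslHandshakeRequest{Mechanism: "PLAIN"}

		// Step 2: ship the raw SASL/PLAIN payload: \x00user\x00password.
		auth := &sarama.SaslAuthenticateRequest{
			SaslAuthBytes: []byte("\x00myuser\x00mypassword"),
		}
		fmt.Println(handshake.Mechanism, len(auth.SaslAuthBytes))
	}
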
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
deleted file mode 100644
index ac6ecb1..0000000
--- a/vendor/github.com/Shopify/sarama/sync_group_request.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package sarama
-
-type SyncGroupRequest struct {
-	GroupId          string
-	GenerationId     int32
-	MemberId         string
-	GroupAssignments map[string][]byte
-}
-
-func (r *SyncGroupRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(r.GroupId); err != nil {
-		return err
-	}
-
-	pe.putInt32(r.GenerationId)
-
-	if err := pe.putString(r.MemberId); err != nil {
-		return err
-	}
-
-	if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
-		return err
-	}
-	for memberId, memberAssignment := range r.GroupAssignments {
-		if err := pe.putString(memberId); err != nil {
-			return err
-		}
-		if err := pe.putBytes(memberAssignment); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
-	if r.GroupId, err = pd.getString(); err != nil {
-		return
-	}
-	if r.GenerationId, err = pd.getInt32(); err != nil {
-		return
-	}
-	if r.MemberId, err = pd.getString(); err != nil {
-		return
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-	if n == 0 {
-		return nil
-	}
-
-	r.GroupAssignments = make(map[string][]byte)
-	for i := 0; i < n; i++ {
-		memberId, err := pd.getString()
-		if err != nil {
-			return err
-		}
-		memberAssignment, err := pd.getBytes()
-		if err != nil {
-			return err
-		}
-
-		r.GroupAssignments[memberId] = memberAssignment
-	}
-
-	return nil
-}
-
-func (r *SyncGroupRequest) key() int16 {
-	return 14
-}
-
-func (r *SyncGroupRequest) version() int16 {
-	return 0
-}
-
-func (r *SyncGroupRequest) headerVersion() int16 {
-	return 1
-}
-
-func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
-
-func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
-	if r.GroupAssignments == nil {
-		r.GroupAssignments = make(map[string][]byte)
-	}
-
-	r.GroupAssignments[memberId] = memberAssignment
-}
-
-func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
-	bin, err := encode(memberAssignment, nil)
-	if err != nil {
-		return err
-	}
-
-	r.AddGroupAssignment(memberId, bin)
-	return nil
-}
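
A short sketch of the AddGroupAssignmentMember helper above, which a group leader would use to serialize per-member assignments into the request; the group, member, and topic names are placeholders:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		req := &sarama.SyncGroupRequest{
			GroupId:      "my-group",
			GenerationId: 3,
			MemberId:     "member-1",
		}
		// Serialize a ConsumerGroupMemberAssignment into the member's slot.
		err := req.AddGroupAssignmentMember("member-1", &sarama.ConsumerGroupMemberAssignment{
			Version: 1,
			Topics:  map[string][]int32{"my-topic": {0, 1, 2}},
		})
		if err != nil {
			log.Fatal(err)
		}
	}
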
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
deleted file mode 100644
index af019c4..0000000
--- a/vendor/github.com/Shopify/sarama/sync_group_response.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package sarama
-
-type SyncGroupResponse struct {
-	Err              KError
-	MemberAssignment []byte
-}
-
-func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
-	assignment := new(ConsumerGroupMemberAssignment)
-	err := decode(r.MemberAssignment, assignment)
-	return assignment, err
-}
-
-func (r *SyncGroupResponse) encode(pe packetEncoder) error {
-	pe.putInt16(int16(r.Err))
-	return pe.putBytes(r.MemberAssignment)
-}
-
-func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-
-	r.Err = KError(kerr)
-
-	r.MemberAssignment, err = pd.getBytes()
-	return
-}
-
-func (r *SyncGroupResponse) key() int16 {
-	return 14
-}
-
-func (r *SyncGroupResponse) version() int16 {
-	return 0
-}
-
-func (r *SyncGroupResponse) headerVersion() int16 {
-	return 0
-}
-
-func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
-	return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
deleted file mode 100644
index 021c5a0..0000000
--- a/vendor/github.com/Shopify/sarama/sync_producer.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package sarama
-
-import "sync"
-
-// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
-// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
-// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
-//
-// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
-// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
-// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
-//
-// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
-// be set to true in its configuration.
-type SyncProducer interface {
-
-	// SendMessage produces a given message, and returns only when it either has
-	// succeeded or failed to produce. It will return the partition and the offset
-	// of the produced message, or an error if the message failed to produce.
-	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
-
-	// SendMessages produces a given set of messages, and returns only when all
-	// messages in the set have either succeeded or failed. Note that messages
-	// can succeed and fail individually; if some succeed and some fail,
-	// SendMessages will return an error.
-	SendMessages(msgs []*ProducerMessage) error
-
-	// Close shuts down the producer and waits for any buffered messages to be
-	// flushed. You must call this function before a producer object passes out of
-	// scope, as it may otherwise leak memory. You must call this before calling
-	// Close on the underlying client.
-	Close() error
-}
-
-type syncProducer struct {
-	producer *asyncProducer
-	wg       sync.WaitGroup
-}
-
-// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
-func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
-	if config == nil {
-		config = NewConfig()
-		config.Producer.Return.Successes = true
-	}
-
-	if err := verifyProducerConfig(config); err != nil {
-		return nil, err
-	}
-
-	p, err := NewAsyncProducer(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
-	if err := verifyProducerConfig(client.Config()); err != nil {
-		return nil, err
-	}
-
-	p, err := NewAsyncProducerFromClient(client)
-	if err != nil {
-		return nil, err
-	}
-	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
-	sp := &syncProducer{producer: p}
-
-	sp.wg.Add(2)
-	go withRecover(sp.handleSuccesses)
-	go withRecover(sp.handleErrors)
-
-	return sp
-}
-
-func verifyProducerConfig(config *Config) error {
-	if !config.Producer.Return.Errors {
-		return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
-	}
-	if !config.Producer.Return.Successes {
-		return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
-	}
-	return nil
-}
-
-func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
-	expectation := make(chan *ProducerError, 1)
-	msg.expectation = expectation
-	sp.producer.Input() <- msg
-
-	if err := <-expectation; err != nil {
-		return -1, -1, err.Err
-	}
-
-	return msg.Partition, msg.Offset, nil
-}
-
-func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
-	expectations := make(chan chan *ProducerError, len(msgs))
-	go func() {
-		for _, msg := range msgs {
-			expectation := make(chan *ProducerError, 1)
-			msg.expectation = expectation
-			sp.producer.Input() <- msg
-			expectations <- expectation
-		}
-		close(expectations)
-	}()
-
-	var errors ProducerErrors
-	for expectation := range expectations {
-		if err := <-expectation; err != nil {
-			errors = append(errors, err)
-		}
-	}
-
-	if len(errors) > 0 {
-		return errors
-	}
-	return nil
-}
-
-func (sp *syncProducer) handleSuccesses() {
-	defer sp.wg.Done()
-	for msg := range sp.producer.Successes() {
-		expectation := msg.expectation
-		expectation <- nil
-	}
-}
-
-func (sp *syncProducer) handleErrors() {
-	defer sp.wg.Done()
-	for err := range sp.producer.Errors() {
-		expectation := err.Msg.expectation
-		expectation <- err
-	}
-}
-
-func (sp *syncProducer) Close() error {
-	sp.producer.AsyncClose()
-	sp.wg.Wait()
-	return nil
-}
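
A minimal end-to-end sketch of the SyncProducer interface defined above; the broker address and topic are placeholders, and Producer.Return.Errors already defaults to true so only Successes needs flipping:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		cfg := sarama.NewConfig()
		cfg.Producer.Return.Successes = true // required by verifyProducerConfig

		producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
		if err != nil {
			log.Fatal(err)
		}
		defer producer.Close()

		partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
			Topic: "my-topic",
			Value: sarama.StringEncoder("hello"),
		})
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("stored at partition=%d offset=%d", partition, offset)
	}
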
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
deleted file mode 100644
index c4043a3..0000000
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-type TxnOffsetCommitRequest struct {
-	TransactionalID string
-	GroupID         string
-	ProducerID      int64
-	ProducerEpoch   int16
-	Topics          map[string][]*PartitionOffsetMetadata
-}
-
-func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
-	if err := pe.putString(t.TransactionalID); err != nil {
-		return err
-	}
-	if err := pe.putString(t.GroupID); err != nil {
-		return err
-	}
-	pe.putInt64(t.ProducerID)
-	pe.putInt16(t.ProducerEpoch)
-
-	if err := pe.putArrayLength(len(t.Topics)); err != nil {
-		return err
-	}
-	for topic, partitions := range t.Topics {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(partitions)); err != nil {
-			return err
-		}
-		for _, partition := range partitions {
-			if err := partition.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
-	if t.TransactionalID, err = pd.getString(); err != nil {
-		return err
-	}
-	if t.GroupID, err = pd.getString(); err != nil {
-		return err
-	}
-	if t.ProducerID, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if t.ProducerEpoch, err = pd.getInt16(); err != nil {
-		return err
-	}
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	t.Topics = make(map[string][]*PartitionOffsetMetadata)
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		m, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
-
-		for j := 0; j < m; j++ {
-			partitionOffsetMetadata := new(PartitionOffsetMetadata)
-			if err := partitionOffsetMetadata.decode(pd, version); err != nil {
-				return err
-			}
-			t.Topics[topic][j] = partitionOffsetMetadata
-		}
-	}
-
-	return nil
-}
-
-func (a *TxnOffsetCommitRequest) key() int16 {
-	return 28
-}
-
-func (a *TxnOffsetCommitRequest) version() int16 {
-	return 0
-}
-
-func (a *TxnOffsetCommitRequest) headerVersion() int16 {
-	return 1
-}
-
-func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
-
-type PartitionOffsetMetadata struct {
-	Partition int32
-	Offset    int64
-	Metadata  *string
-}
-
-func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
-	pe.putInt32(p.Partition)
-	pe.putInt64(p.Offset)
-	if err := pe.putNullableString(p.Metadata); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
-	if p.Partition, err = pd.getInt32(); err != nil {
-		return err
-	}
-	if p.Offset, err = pd.getInt64(); err != nil {
-		return err
-	}
-	if p.Metadata, err = pd.getNullableString(); err != nil {
-		return err
-	}
-
-	return nil
-}
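
A sketch of populating the request type above for a single topic-partition; all IDs and the offset are placeholder values:

	package main

	import (
		"fmt"

		"github.com/Shopify/sarama"
	)

	func main() {
		meta := "checkpoint-7" // optional metadata stored alongside the offset
		req := &sarama.TxnOffsetCommitRequest{
			TransactionalID: "my-txn",
			GroupID:         "my-group",
			ProducerID:      4000,
			ProducerEpoch:   1,
			Topics: map[string][]*sarama.PartitionOffsetMetadata{
				"my-topic": {{Partition: 0, Offset: 42, Metadata: &meta}},
			},
		}
		fmt.Println(len(req.Topics))
	}
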
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
deleted file mode 100644
index 94d8029..0000000
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-import (
-	"time"
-)
-
-type TxnOffsetCommitResponse struct {
-	ThrottleTime time.Duration
-	Topics       map[string][]*PartitionError
-}
-
-func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
-	pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
-	if err := pe.putArrayLength(len(t.Topics)); err != nil {
-		return err
-	}
-
-	for topic, e := range t.Topics {
-		if err := pe.putString(topic); err != nil {
-			return err
-		}
-		if err := pe.putArrayLength(len(e)); err != nil {
-			return err
-		}
-		for _, partitionError := range e {
-			if err := partitionError.encode(pe); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
-	throttleTime, err := pd.getInt32()
-	if err != nil {
-		return err
-	}
-	t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
-	n, err := pd.getArrayLength()
-	if err != nil {
-		return err
-	}
-
-	t.Topics = make(map[string][]*PartitionError)
-
-	for i := 0; i < n; i++ {
-		topic, err := pd.getString()
-		if err != nil {
-			return err
-		}
-
-		m, err := pd.getArrayLength()
-		if err != nil {
-			return err
-		}
-
-		t.Topics[topic] = make([]*PartitionError, m)
-
-		for j := 0; j < m; j++ {
-			t.Topics[topic][j] = new(PartitionError)
-			if err := t.Topics[topic][j].decode(pd, version); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func (a *TxnOffsetCommitResponse) key() int16 {
-	return 28
-}
-
-func (a *TxnOffsetCommitResponse) version() int16 {
-	return 0
-}
-
-func (a *TxnOffsetCommitResponse) headerVersion() int16 {
-	return 0
-}
-
-func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
-	return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
deleted file mode 100644
index 1859d29..0000000
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package sarama
-
-import (
-	"bufio"
-	"fmt"
-	"net"
-	"regexp"
-)
-
-type none struct{}
-
-// make []int32 sortable so we can sort partition numbers
-type int32Slice []int32
-
-func (slice int32Slice) Len() int {
-	return len(slice)
-}
-
-func (slice int32Slice) Less(i, j int) bool {
-	return slice[i] < slice[j]
-}
-
-func (slice int32Slice) Swap(i, j int) {
-	slice[i], slice[j] = slice[j], slice[i]
-}
-
-func dupInt32Slice(input []int32) []int32 {
-	ret := make([]int32, 0, len(input))
-	ret = append(ret, input...)
-	return ret
-}
-
-func withRecover(fn func()) {
-	defer func() {
-		handler := PanicHandler
-		if handler != nil {
-			if err := recover(); err != nil {
-				handler(err)
-			}
-		}
-	}()
-
-	fn()
-}
-
-func safeAsyncClose(b *Broker) {
-	tmp := b // local var prevents clobbering in goroutine
-	go withRecover(func() {
-		if connected, _ := tmp.Connected(); connected {
-			if err := tmp.Close(); err != nil {
-				Logger.Println("Error closing broker", tmp.ID(), ":", err)
-			}
-		}
-	})
-}
-
-// Encoder is a simple interface for any type that can be encoded as an array of bytes
-// in order to be sent as the key or value of a Kafka message. Length() is provided as an
-// optimization, and must return the same as len() on the result of Encode().
-type Encoder interface {
-	Encode() ([]byte, error)
-	Length() int
-}
-
-// make strings and byte slices encodable for convenience so they can be used as keys
-// and/or values in Kafka messages
-
-// StringEncoder implements the Encoder interface for Go strings so that they can be used
-// as the Key or Value in a ProducerMessage.
-type StringEncoder string
-
-func (s StringEncoder) Encode() ([]byte, error) {
-	return []byte(s), nil
-}
-
-func (s StringEncoder) Length() int {
-	return len(s)
-}
-
-// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
-// as the Key or Value in a ProducerMessage.
-type ByteEncoder []byte
-
-func (b ByteEncoder) Encode() ([]byte, error) {
-	return b, nil
-}
-
-func (b ByteEncoder) Length() int {
-	return len(b)
-}
-
-// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
-// reads that trigger syscalls.
-type bufConn struct {
-	net.Conn
-	buf *bufio.Reader
-}
-
-func newBufConn(conn net.Conn) *bufConn {
-	return &bufConn{
-		Conn: conn,
-		buf:  bufio.NewReader(conn),
-	}
-}
-
-func (bc *bufConn) Read(b []byte) (n int, err error) {
-	return bc.buf.Read(b)
-}
-
-// KafkaVersion instances represent versions of the upstream Kafka broker.
-type KafkaVersion struct {
-	// it's a struct rather than a bare array type so that it stays opaque and stops people
-	// from generating their own arbitrary versions
-	version [4]uint
-}
-
-func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
-	return KafkaVersion{
-		version: [4]uint{major, minor, veryMinor, patch},
-	}
-}
-
-// IsAtLeast returns true if and only if the version it is called on is
-// greater than or equal to the version passed in:
-//    V1.IsAtLeast(V2) // false
-//    V2.IsAtLeast(V1) // true
-func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
-	for i := range v.version {
-		if v.version[i] > other.version[i] {
-			return true
-		} else if v.version[i] < other.version[i] {
-			return false
-		}
-	}
-	return true
-}
-
-// Effectively constant values defining the supported Kafka versions.
-var (
-	V0_8_2_0  = newKafkaVersion(0, 8, 2, 0)
-	V0_8_2_1  = newKafkaVersion(0, 8, 2, 1)
-	V0_8_2_2  = newKafkaVersion(0, 8, 2, 2)
-	V0_9_0_0  = newKafkaVersion(0, 9, 0, 0)
-	V0_9_0_1  = newKafkaVersion(0, 9, 0, 1)
-	V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
-	V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
-	V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
-	V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
-	V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
-	V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
-	V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
-	V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
-	V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
-	V1_0_0_0  = newKafkaVersion(1, 0, 0, 0)
-	V1_1_0_0  = newKafkaVersion(1, 1, 0, 0)
-	V1_1_1_0  = newKafkaVersion(1, 1, 1, 0)
-	V2_0_0_0  = newKafkaVersion(2, 0, 0, 0)
-	V2_0_1_0  = newKafkaVersion(2, 0, 1, 0)
-	V2_1_0_0  = newKafkaVersion(2, 1, 0, 0)
-	V2_2_0_0  = newKafkaVersion(2, 2, 0, 0)
-	V2_3_0_0  = newKafkaVersion(2, 3, 0, 0)
-	V2_4_0_0  = newKafkaVersion(2, 4, 0, 0)
-	V2_5_0_0  = newKafkaVersion(2, 5, 0, 0)
-	V2_6_0_0  = newKafkaVersion(2, 6, 0, 0)
-	V2_7_0_0  = newKafkaVersion(2, 7, 0, 0)
-	V2_8_0_0  = newKafkaVersion(2, 8, 0, 0)
-
-	SupportedVersions = []KafkaVersion{
-		V0_8_2_0,
-		V0_8_2_1,
-		V0_8_2_2,
-		V0_9_0_0,
-		V0_9_0_1,
-		V0_10_0_0,
-		V0_10_0_1,
-		V0_10_1_0,
-		V0_10_1_1,
-		V0_10_2_0,
-		V0_10_2_1,
-		V0_11_0_0,
-		V0_11_0_1,
-		V0_11_0_2,
-		V1_0_0_0,
-		V1_1_0_0,
-		V1_1_1_0,
-		V2_0_0_0,
-		V2_0_1_0,
-		V2_1_0_0,
-		V2_2_0_0,
-		V2_3_0_0,
-		V2_4_0_0,
-		V2_5_0_0,
-		V2_6_0_0,
-		V2_7_0_0,
-		V2_8_0_0,
-	}
-	MinVersion     = V0_8_2_0
-	MaxVersion     = V2_8_0_0
-	DefaultVersion = V1_0_0_0
-)
-
-// ParseKafkaVersion parses a Kafka version from a string, returning the parsed version or an error.
-func ParseKafkaVersion(s string) (KafkaVersion, error) {
-	if len(s) < 5 {
-		return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
-	}
-	var major, minor, veryMinor, patch uint
-	var err error
-	if s[0] == '0' {
-		err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
-	} else {
-		err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
-	}
-	if err != nil {
-		return DefaultVersion, err
-	}
-	return newKafkaVersion(major, minor, veryMinor, patch), nil
-}
-
-func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
-	if !regexp.MustCompile(pattern).MatchString(s) {
-		return fmt.Errorf("invalid version `%s`", s)
-	}
-	_, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
-	return err
-}
-
-func (v KafkaVersion) String() string {
-	if v.version[0] == 0 {
-		return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
-	}
-
-	return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
-}
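
A short sketch of ParseKafkaVersion and IsAtLeast from above, wired into a client config; the version string is a placeholder:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		v, err := sarama.ParseKafkaVersion("2.6.0")
		if err != nil {
			log.Fatal(err)
		}
		cfg := sarama.NewConfig()
		cfg.Version = v // otherwise DefaultVersion (1.0.0) is assumed
		if v.IsAtLeast(sarama.V2_4_0_0) {
			log.Println("broker supports Kafka >= 2.4 features")
		}
	}
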
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
deleted file mode 100644
index e23bfc4..0000000
--- a/vendor/github.com/Shopify/sarama/zstd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package sarama
-
-import (
-	"github.com/klauspost/compress/zstd"
-)
-
-var (
-	zstdDec, _ = zstd.NewReader(nil)
-	zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
-)
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
-	return zstdDec.DecodeAll(src, dst)
-}
-
-func zstdCompress(dst, src []byte) ([]byte, error) {
-	return zstdEnc.EncodeAll(src, dst), nil
-}
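
The two package-level coders above are created once and reused because EncodeAll and DecodeAll are safe for concurrent use in klauspost/compress. A standalone sketch of the same calls:

	package main

	import (
		"fmt"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		enc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true))
		dec, _ := zstd.NewReader(nil)

		compressed := enc.EncodeAll([]byte("hello zstd"), nil)
		plain, err := dec.DecodeAll(compressed, nil)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(plain)) // hello zstd
	}
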
diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE
new file mode 100644
index 0000000..339177b
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/LICENSE
@@ -0,0 +1,20 @@
+Copyright (C) 2013 Blake Mizerany
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
new file mode 100644
index 0000000..1602287
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt
@@ -0,0 +1,2388 @@
+8
+5
+26
+12
+5
+235
+13
+6
+28
+30
+3
+3
+3
+3
+5
+2
+33
+7
+2
+4
+7
+12
+14
+5
+8
+3
+10
+4
+5
+3
+6
+6
+209
+20
+3
+10
+14
+3
+4
+6
+8
+5
+11
+7
+3
+2
+3
+3
+212
+5
+222
+4
+10
+10
+5
+6
+3
+8
+3
+10
+254
+220
+2
+3
+5
+24
+5
+4
+222
+7
+3
+3
+223
+8
+15
+12
+14
+14
+3
+2
+2
+3
+13
+3
+11
+4
+4
+6
+5
+7
+13
+5
+3
+5
+2
+5
+3
+5
+2
+7
+15
+17
+14
+3
+6
+6
+3
+17
+5
+4
+7
+6
+4
+4
+8
+6
+8
+3
+9
+3
+6
+3
+4
+5
+3
+3
+660
+4
+6
+10
+3
+6
+3
+2
+5
+13
+2
+4
+4
+10
+4
+8
+4
+3
+7
+9
+9
+3
+10
+37
+3
+13
+4
+12
+3
+6
+10
+8
+5
+21
+2
+3
+8
+3
+2
+3
+3
+4
+12
+2
+4
+8
+8
+4
+3
+2
+20
+1
+6
+32
+2
+11
+6
+18
+3
+8
+11
+3
+212
+3
+4
+2
+6
+7
+12
+11
+3
+2
+16
+10
+6
+4
+6
+3
+2
+7
+3
+2
+2
+2
+2
+5
+6
+4
+3
+10
+3
+4
+6
+5
+3
+4
+4
+5
+6
+4
+3
+4
+4
+5
+7
+5
+5
+3
+2
+7
+2
+4
+12
+4
+5
+6
+2
+4
+4
+8
+4
+15
+13
+7
+16
+5
+3
+23
+5
+5
+7
+3
+2
+9
+8
+7
+5
+8
+11
+4
+10
+76
+4
+47
+4
+3
+2
+7
+4
+2
+3
+37
+10
+4
+2
+20
+5
+4
+4
+10
+10
+4
+3
+7
+23
+240
+7
+13
+5
+5
+3
+3
+2
+5
+4
+2
+8
+7
+19
+2
+23
+8
+7
+2
+5
+3
+8
+3
+8
+13
+5
+5
+5
+2
+3
+23
+4
+9
+8
+4
+3
+3
+5
+220
+2
+3
+4
+6
+14
+3
+53
+6
+2
+5
+18
+6
+3
+219
+6
+5
+2
+5
+3
+6
+5
+15
+4
+3
+17
+3
+2
+4
+7
+2
+3
+3
+4
+4
+3
+2
+664
+6
+3
+23
+5
+5
+16
+5
+8
+2
+4
+2
+24
+12
+3
+2
+3
+5
+8
+3
+5
+4
+3
+14
+3
+5
+8
+2
+3
+7
+9
+4
+2
+3
+6
+8
+4
+3
+4
+6
+5
+3
+3
+6
+3
+19
+4
+4
+6
+3
+6
+3
+5
+22
+5
+4
+4
+3
+8
+11
+4
+9
+7
+6
+13
+4
+4
+4
+6
+17
+9
+3
+3
+3
+4
+3
+221
+5
+11
+3
+4
+2
+12
+6
+3
+5
+7
+5
+7
+4
+9
+7
+14
+37
+19
+217
+16
+3
+5
+2
+2
+7
+19
+7
+6
+7
+4
+24
+5
+11
+4
+7
+7
+9
+13
+3
+4
+3
+6
+28
+4
+4
+5
+5
+2
+5
+6
+4
+4
+6
+10
+5
+4
+3
+2
+3
+3
+6
+5
+5
+4
+3
+2
+3
+7
+4
+6
+18
+16
+8
+16
+4
+5
+8
+6
+9
+13
+1545
+6
+215
+6
+5
+6
+3
+45
+31
+5
+2
+2
+4
+3
+3
+2
+5
+4
+3
+5
+7
+7
+4
+5
+8
+5
+4
+749
+2
+31
+9
+11
+2
+11
+5
+4
+4
+7
+9
+11
+4
+5
+4
+7
+3
+4
+6
+2
+15
+3
+4
+3
+4
+3
+5
+2
+13
+5
+5
+3
+3
+23
+4
+4
+5
+7
+4
+13
+2
+4
+3
+4
+2
+6
+2
+7
+3
+5
+5
+3
+29
+5
+4
+4
+3
+10
+2
+3
+79
+16
+6
+6
+7
+7
+3
+5
+5
+7
+4
+3
+7
+9
+5
+6
+5
+9
+6
+3
+6
+4
+17
+2
+10
+9
+3
+6
+2
+3
+21
+22
+5
+11
+4
+2
+17
+2
+224
+2
+14
+3
+4
+4
+2
+4
+4
+4
+4
+5
+3
+4
+4
+10
+2
+6
+3
+3
+5
+7
+2
+7
+5
+6
+3
+218
+2
+2
+5
+2
+6
+3
+5
+222
+14
+6
+33
+3
+2
+5
+3
+3
+3
+9
+5
+3
+3
+2
+7
+4
+3
+4
+3
+5
+6
+5
+26
+4
+13
+9
+7
+3
+221
+3
+3
+4
+4
+4
+4
+2
+18
+5
+3
+7
+9
+6
+8
+3
+10
+3
+11
+9
+5
+4
+17
+5
+5
+6
+6
+3
+2
+4
+12
+17
+6
+7
+218
+4
+2
+4
+10
+3
+5
+15
+3
+9
+4
+3
+3
+6
+29
+3
+3
+4
+5
+5
+3
+8
+5
+6
+6
+7
+5
+3
+5
+3
+29
+2
+31
+5
+15
+24
+16
+5
+207
+4
+3
+3
+2
+15
+4
+4
+13
+5
+5
+4
+6
+10
+2
+7
+8
+4
+6
+20
+5
+3
+4
+3
+12
+12
+5
+17
+7
+3
+3
+3
+6
+10
+3
+5
+25
+80
+4
+9
+3
+2
+11
+3
+3
+2
+3
+8
+7
+5
+5
+19
+5
+3
+3
+12
+11
+2
+6
+5
+5
+5
+3
+3
+3
+4
+209
+14
+3
+2
+5
+19
+4
+4
+3
+4
+14
+5
+6
+4
+13
+9
+7
+4
+7
+10
+2
+9
+5
+7
+2
+8
+4
+6
+5
+5
+222
+8
+7
+12
+5
+216
+3
+4
+4
+6
+3
+14
+8
+7
+13
+4
+3
+3
+3
+3
+17
+5
+4
+3
+33
+6
+6
+33
+7
+5
+3
+8
+7
+5
+2
+9
+4
+2
+233
+24
+7
+4
+8
+10
+3
+4
+15
+2
+16
+3
+3
+13
+12
+7
+5
+4
+207
+4
+2
+4
+27
+15
+2
+5
+2
+25
+6
+5
+5
+6
+13
+6
+18
+6
+4
+12
+225
+10
+7
+5
+2
+2
+11
+4
+14
+21
+8
+10
+3
+5
+4
+232
+2
+5
+5
+3
+7
+17
+11
+6
+6
+23
+4
+6
+3
+5
+4
+2
+17
+3
+6
+5
+8
+3
+2
+2
+14
+9
+4
+4
+2
+5
+5
+3
+7
+6
+12
+6
+10
+3
+6
+2
+2
+19
+5
+4
+4
+9
+2
+4
+13
+3
+5
+6
+3
+6
+5
+4
+9
+6
+3
+5
+7
+3
+6
+6
+4
+3
+10
+6
+3
+221
+3
+5
+3
+6
+4
+8
+5
+3
+6
+4
+4
+2
+54
+5
+6
+11
+3
+3
+4
+4
+4
+3
+7
+3
+11
+11
+7
+10
+6
+13
+223
+213
+15
+231
+7
+3
+7
+228
+2
+3
+4
+4
+5
+6
+7
+4
+13
+3
+4
+5
+3
+6
+4
+6
+7
+2
+4
+3
+4
+3
+3
+6
+3
+7
+3
+5
+18
+5
+6
+8
+10
+3
+3
+3
+2
+4
+2
+4
+4
+5
+6
+6
+4
+10
+13
+3
+12
+5
+12
+16
+8
+4
+19
+11
+2
+4
+5
+6
+8
+5
+6
+4
+18
+10
+4
+2
+216
+6
+6
+6
+2
+4
+12
+8
+3
+11
+5
+6
+14
+5
+3
+13
+4
+5
+4
+5
+3
+28
+6
+3
+7
+219
+3
+9
+7
+3
+10
+6
+3
+4
+19
+5
+7
+11
+6
+15
+19
+4
+13
+11
+3
+7
+5
+10
+2
+8
+11
+2
+6
+4
+6
+24
+6
+3
+3
+3
+3
+6
+18
+4
+11
+4
+2
+5
+10
+8
+3
+9
+5
+3
+4
+5
+6
+2
+5
+7
+4
+4
+14
+6
+4
+4
+5
+5
+7
+2
+4
+3
+7
+3
+3
+6
+4
+5
+4
+4
+4
+3
+3
+3
+3
+8
+14
+2
+3
+5
+3
+2
+4
+5
+3
+7
+3
+3
+18
+3
+4
+4
+5
+7
+3
+3
+3
+13
+5
+4
+8
+211
+5
+5
+3
+5
+2
+5
+4
+2
+655
+6
+3
+5
+11
+2
+5
+3
+12
+9
+15
+11
+5
+12
+217
+2
+6
+17
+3
+3
+207
+5
+5
+4
+5
+9
+3
+2
+8
+5
+4
+3
+2
+5
+12
+4
+14
+5
+4
+2
+13
+5
+8
+4
+225
+4
+3
+4
+5
+4
+3
+3
+6
+23
+9
+2
+6
+7
+233
+4
+4
+6
+18
+3
+4
+6
+3
+4
+4
+2
+3
+7
+4
+13
+227
+4
+3
+5
+4
+2
+12
+9
+17
+3
+7
+14
+6
+4
+5
+21
+4
+8
+9
+2
+9
+25
+16
+3
+6
+4
+7
+8
+5
+2
+3
+5
+4
+3
+3
+5
+3
+3
+3
+2
+3
+19
+2
+4
+3
+4
+2
+3
+4
+4
+2
+4
+3
+3
+3
+2
+6
+3
+17
+5
+6
+4
+3
+13
+5
+3
+3
+3
+4
+9
+4
+2
+14
+12
+4
+5
+24
+4
+3
+37
+12
+11
+21
+3
+4
+3
+13
+4
+2
+3
+15
+4
+11
+4
+4
+3
+8
+3
+4
+4
+12
+8
+5
+3
+3
+4
+2
+220
+3
+5
+223
+3
+3
+3
+10
+3
+15
+4
+241
+9
+7
+3
+6
+6
+23
+4
+13
+7
+3
+4
+7
+4
+9
+3
+3
+4
+10
+5
+5
+1
+5
+24
+2
+4
+5
+5
+6
+14
+3
+8
+2
+3
+5
+13
+13
+3
+5
+2
+3
+15
+3
+4
+2
+10
+4
+4
+4
+5
+5
+3
+5
+3
+4
+7
+4
+27
+3
+6
+4
+15
+3
+5
+6
+6
+5
+4
+8
+3
+9
+2
+6
+3
+4
+3
+7
+4
+18
+3
+11
+3
+3
+8
+9
+7
+24
+3
+219
+7
+10
+4
+5
+9
+12
+2
+5
+4
+4
+4
+3
+3
+19
+5
+8
+16
+8
+6
+22
+3
+23
+3
+242
+9
+4
+3
+3
+5
+7
+3
+3
+5
+8
+3
+7
+5
+14
+8
+10
+3
+4
+3
+7
+4
+6
+7
+4
+10
+4
+3
+11
+3
+7
+10
+3
+13
+6
+8
+12
+10
+5
+7
+9
+3
+4
+7
+7
+10
+8
+30
+9
+19
+4
+3
+19
+15
+4
+13
+3
+215
+223
+4
+7
+4
+8
+17
+16
+3
+7
+6
+5
+5
+4
+12
+3
+7
+4
+4
+13
+4
+5
+2
+5
+6
+5
+6
+6
+7
+10
+18
+23
+9
+3
+3
+6
+5
+2
+4
+2
+7
+3
+3
+2
+5
+5
+14
+10
+224
+6
+3
+4
+3
+7
+5
+9
+3
+6
+4
+2
+5
+11
+4
+3
+3
+2
+8
+4
+7
+4
+10
+7
+3
+3
+18
+18
+17
+3
+3
+3
+4
+5
+3
+3
+4
+12
+7
+3
+11
+13
+5
+4
+7
+13
+5
+4
+11
+3
+12
+3
+6
+4
+4
+21
+4
+6
+9
+5
+3
+10
+8
+4
+6
+4
+4
+6
+5
+4
+8
+6
+4
+6
+4
+4
+5
+9
+6
+3
+4
+2
+9
+3
+18
+2
+4
+3
+13
+3
+6
+6
+8
+7
+9
+3
+2
+16
+3
+4
+6
+3
+2
+33
+22
+14
+4
+9
+12
+4
+5
+6
+3
+23
+9
+4
+3
+5
+5
+3
+4
+5
+3
+5
+3
+10
+4
+5
+5
+8
+4
+4
+6
+8
+5
+4
+3
+4
+6
+3
+3
+3
+5
+9
+12
+6
+5
+9
+3
+5
+3
+2
+2
+2
+18
+3
+2
+21
+2
+5
+4
+6
+4
+5
+10
+3
+9
+3
+2
+10
+7
+3
+6
+6
+4
+4
+8
+12
+7
+3
+7
+3
+3
+9
+3
+4
+5
+4
+4
+5
+5
+10
+15
+4
+4
+14
+6
+227
+3
+14
+5
+216
+22
+5
+4
+2
+2
+6
+3
+4
+2
+9
+9
+4
+3
+28
+13
+11
+4
+5
+3
+3
+2
+3
+3
+5
+3
+4
+3
+5
+23
+26
+3
+4
+5
+6
+4
+6
+3
+5
+5
+3
+4
+3
+2
+2
+2
+7
+14
+3
+6
+7
+17
+2
+2
+15
+14
+16
+4
+6
+7
+13
+6
+4
+5
+6
+16
+3
+3
+28
+3
+6
+15
+3
+9
+2
+4
+6
+3
+3
+22
+4
+12
+6
+7
+2
+5
+4
+10
+3
+16
+6
+9
+2
+5
+12
+7
+5
+5
+5
+5
+2
+11
+9
+17
+4
+3
+11
+7
+3
+5
+15
+4
+3
+4
+211
+8
+7
+5
+4
+7
+6
+7
+6
+3
+6
+5
+6
+5
+3
+4
+4
+26
+4
+6
+10
+4
+4
+3
+2
+3
+3
+4
+5
+9
+3
+9
+4
+4
+5
+5
+8
+2
+4
+2
+3
+8
+4
+11
+19
+5
+8
+6
+3
+5
+6
+12
+3
+2
+4
+16
+12
+3
+4
+4
+8
+6
+5
+6
+6
+219
+8
+222
+6
+16
+3
+13
+19
+5
+4
+3
+11
+6
+10
+4
+7
+7
+12
+5
+3
+3
+5
+6
+10
+3
+8
+2
+5
+4
+7
+2
+4
+4
+2
+12
+9
+6
+4
+2
+40
+2
+4
+10
+4
+223
+4
+2
+20
+6
+7
+24
+5
+4
+5
+2
+20
+16
+6
+5
+13
+2
+3
+3
+19
+3
+2
+4
+5
+6
+7
+11
+12
+5
+6
+7
+7
+3
+5
+3
+5
+3
+14
+3
+4
+4
+2
+11
+1
+7
+3
+9
+6
+11
+12
+5
+8
+6
+221
+4
+2
+12
+4
+3
+15
+4
+5
+226
+7
+218
+7
+5
+4
+5
+18
+4
+5
+9
+4
+4
+2
+9
+18
+18
+9
+5
+6
+6
+3
+3
+7
+3
+5
+4
+4
+4
+12
+3
+6
+31
+5
+4
+7
+3
+6
+5
+6
+5
+11
+2
+2
+11
+11
+6
+7
+5
+8
+7
+10
+5
+23
+7
+4
+3
+5
+34
+2
+5
+23
+7
+3
+6
+8
+4
+4
+4
+2
+5
+3
+8
+5
+4
+8
+25
+2
+3
+17
+8
+3
+4
+8
+7
+3
+15
+6
+5
+7
+21
+9
+5
+6
+6
+5
+3
+2
+3
+10
+3
+6
+3
+14
+7
+4
+4
+8
+7
+8
+2
+6
+12
+4
+213
+6
+5
+21
+8
+2
+5
+23
+3
+11
+2
+3
+6
+25
+2
+3
+6
+7
+6
+6
+4
+4
+6
+3
+17
+9
+7
+6
+4
+3
+10
+7
+2
+3
+3
+3
+11
+8
+3
+7
+6
+4
+14
+36
+3
+4
+3
+3
+22
+13
+21
+4
+2
+7
+4
+4
+17
+15
+3
+7
+11
+2
+4
+7
+6
+209
+6
+3
+2
+2
+24
+4
+9
+4
+3
+3
+3
+29
+2
+2
+4
+3
+3
+5
+4
+6
+3
+3
+2
+4
diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go
new file mode 100644
index 0000000..d7d14f8
--- /dev/null
+++ b/vendor/github.com/beorn7/perks/quantile/stream.go
@@ -0,0 +1,316 @@
+// Package quantile computes approximate quantiles over an unbounded data
+// stream within low memory and CPU bounds.
+//
+// A small amount of accuracy is traded to achieve the above properties.
+//
+// Multiple streams can be merged before calling Query to generate a single set
+// of results. This is meaningful when the streams represent the same type of
+// data. See Merge and Samples.
+//
+// For more detailed information about the algorithm used, see:
+//
+// Effective Computation of Biased Quantiles over Data Streams
+//
+// http://www.cs.rutgers.edu/~muthu/bquant.pdf
+package quantile
+
+import (
+	"math"
+	"sort"
+)
+
+// Sample holds an observed value and meta information for compression. JSON
+// tags have been added for convenience.
+type Sample struct {
+	Value float64 `json:",string"`
+	Width float64 `json:",string"`
+	Delta float64 `json:",string"`
+}
+
+// Samples represents a slice of samples. It implements sort.Interface.
+type Samples []Sample
+
+func (a Samples) Len() int           { return len(a) }
+func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
+func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+type invariant func(s *stream, r float64) float64
+
+// NewLowBiased returns an initialized Stream for low-biased quantiles
+// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the lower ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewLowBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * r
+	}
+	return newStream(ƒ)
+}
+
+// NewHighBiased returns an initialized Stream for high-biased quantiles
+// (e.g. 0.95, 0.99, 0.999) where the needed quantiles are not known a priori, but
+// error guarantees can still be given even for the higher ranks of the data
+// distribution.
+//
+// The provided epsilon is a relative error, i.e. the true quantile of a value
+// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
+// properties.
+func NewHighBiased(epsilon float64) *Stream {
+	ƒ := func(s *stream, r float64) float64 {
+		return 2 * epsilon * (s.n - r)
+	}
+	return newStream(ƒ)
+}
+
+// NewTargeted returns an initialized Stream concerned with a particular set of
+// quantile values that are supplied a priori. Knowing these a priori reduces
+// space and computation time. The targets map maps the desired quantiles to
+// their absolute errors, i.e. the true quantile of a value returned by a query
+// is guaranteed to be within (Quantile±Epsilon).
+//
+// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
+func NewTargeted(targetMap map[float64]float64) *Stream {
+	// Convert map to slice to avoid slow iterations on a map.
+	// ƒ is called on the hot path, so converting the map to a slice
+	// beforehand results in significant CPU savings.
+	targets := targetMapToSlice(targetMap)
+
+	ƒ := func(s *stream, r float64) float64 {
+		var m = math.MaxFloat64
+		var f float64
+		for _, t := range targets {
+			if t.quantile*s.n <= r {
+				f = (2 * t.epsilon * r) / t.quantile
+			} else {
+				f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile)
+			}
+			if f < m {
+				m = f
+			}
+		}
+		return m
+	}
+	return newStream(ƒ)
+}
+
+type target struct {
+	quantile float64
+	epsilon  float64
+}
+
+func targetMapToSlice(targetMap map[float64]float64) []target {
+	targets := make([]target, 0, len(targetMap))
+
+	for quantile, epsilon := range targetMap {
+		t := target{
+			quantile: quantile,
+			epsilon:  epsilon,
+		}
+		targets = append(targets, t)
+	}
+
+	return targets
+}
+
+// Stream computes quantiles for a stream of float64s. It is not thread-safe by
+// design. Take care when using across multiple goroutines.
+type Stream struct {
+	*stream
+	b      Samples
+	sorted bool
+}
+
+func newStream(ƒ invariant) *Stream {
+	x := &stream{ƒ: ƒ}
+	return &Stream{x, make(Samples, 0, 500), true}
+}
+
+// Insert inserts v into the stream.
+func (s *Stream) Insert(v float64) {
+	s.insert(Sample{Value: v, Width: 1})
+}
+
+func (s *Stream) insert(sample Sample) {
+	s.b = append(s.b, sample)
+	s.sorted = false
+	if len(s.b) == cap(s.b) {
+		s.flush()
+	}
+}
+
+// Query returns the computed qth percentile value. If s was created with
+// NewTargeted, and q is not in the set of quantiles provided a priori, Query
+// will return an unspecified result.
+func (s *Stream) Query(q float64) float64 {
+	if !s.flushed() {
+		// Fast path when there hasn't been enough data for a flush;
+		// this also yields better accuracy for small sets of data.
+		l := len(s.b)
+		if l == 0 {
+			return 0
+		}
+		i := int(math.Ceil(float64(l) * q))
+		if i > 0 {
+			i -= 1
+		}
+		s.maybeSort()
+		return s.b[i].Value
+	}
+	s.flush()
+	return s.stream.query(q)
+}
+
+// Merge merges samples into the underlying stream's samples. This is handy when
+// merging multiple streams from separate threads, database shards, etc.
+//
+// ATTENTION: This method is broken and does not yield correct results. The
+// underlying algorithm is not capable of merging streams correctly.
+func (s *Stream) Merge(samples Samples) {
+	sort.Sort(samples)
+	s.stream.merge(samples)
+}
+
+// Reset reinitializes and clears the list, reusing the samples buffer memory.
+func (s *Stream) Reset() {
+	s.stream.reset()
+	s.b = s.b[:0]
+}
+
+// Samples returns stream samples held by s.
+func (s *Stream) Samples() Samples {
+	if !s.flushed() {
+		return s.b
+	}
+	s.flush()
+	return s.stream.samples()
+}
+
+// Count returns the total number of samples observed in the stream
+// since initialization.
+func (s *Stream) Count() int {
+	return len(s.b) + s.stream.count()
+}
+
+func (s *Stream) flush() {
+	s.maybeSort()
+	s.stream.merge(s.b)
+	s.b = s.b[:0]
+}
+
+func (s *Stream) maybeSort() {
+	if !s.sorted {
+		s.sorted = true
+		sort.Sort(s.b)
+	}
+}
+
+func (s *Stream) flushed() bool {
+	return len(s.stream.l) > 0
+}
+
+type stream struct {
+	n float64
+	l []Sample
+	ƒ invariant
+}
+
+func (s *stream) reset() {
+	s.l = s.l[:0]
+	s.n = 0
+}
+
+func (s *stream) insert(v float64) {
+	s.merge(Samples{{v, 1, 0}})
+}
+
+func (s *stream) merge(samples Samples) {
+	// TODO(beorn7): This tries to merge not only individual samples, but
+	// whole summaries. The paper doesn't mention merging summaries at
+	// all. Unittests show that the merging is inaccurate. Find out how to
+	// do merges properly.
+	var r float64
+	i := 0
+	for _, sample := range samples {
+		for ; i < len(s.l); i++ {
+			c := s.l[i]
+			if c.Value > sample.Value {
+				// Insert at position i.
+				s.l = append(s.l, Sample{})
+				copy(s.l[i+1:], s.l[i:])
+				s.l[i] = Sample{
+					sample.Value,
+					sample.Width,
+					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
+					// TODO(beorn7): How to calculate delta correctly?
+				}
+				i++
+				goto inserted
+			}
+			r += c.Width
+		}
+		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
+		i++
+	inserted:
+		s.n += sample.Width
+		r += sample.Width
+	}
+	s.compress()
+}
+
+func (s *stream) count() int {
+	return int(s.n)
+}
+
+func (s *stream) query(q float64) float64 {
+	t := math.Ceil(q * s.n)
+	t += math.Ceil(s.ƒ(s, t) / 2)
+	p := s.l[0]
+	var r float64
+	for _, c := range s.l[1:] {
+		r += p.Width
+		if r+c.Width+c.Delta > t {
+			return p.Value
+		}
+		p = c
+	}
+	return p.Value
+}
+
+func (s *stream) compress() {
+	if len(s.l) < 2 {
+		return
+	}
+	x := s.l[len(s.l)-1]
+	xi := len(s.l) - 1
+	r := s.n - 1 - x.Width
+
+	for i := len(s.l) - 2; i >= 0; i-- {
+		c := s.l[i]
+		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
+			x.Width += c.Width
+			s.l[xi] = x
+			// Remove element at i.
+			copy(s.l[i:], s.l[i+1:])
+			s.l = s.l[:len(s.l)-1]
+			xi -= 1
+		} else {
+			x = c
+			xi = i
+		}
+		r -= c.Width
+	}
+}
+
+func (s *stream) samples() Samples {
+	samples := make(Samples, len(s.l))
+	copy(samples, s.l)
+	return samples
+}
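For reference, a minimal sketch of how this quantile stream is typically driven (assuming the conventional `github.com/beorn7/perks/quantile` import path for the vendored package; the target quantiles and epsilons are illustrative):

```go
package main

import (
	"fmt"

	"github.com/beorn7/perks/quantile" // assumed import path for the package above
)

func main() {
	// Register the quantiles of interest a priori; Query is only
	// well-defined for quantiles provided here.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.05,  // median, within 5% rank error
		0.99: 0.001, // p99, within 0.1% rank error
	})

	for i := 1; i <= 1000; i++ {
		s.Insert(float64(i)) // buffered in s.b and flushed lazily into the summary
	}

	fmt.Println("p50:", s.Query(0.50))
	fmt.Println("p99:", s.Query(0.99))
	fmt.Println("n:", s.Count())
}
```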
diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore b/vendor/github.com/bsm/sarama-cluster/.gitignore
deleted file mode 100644
index 88113c5..0000000
--- a/vendor/github.com/bsm/sarama-cluster/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.log
-*.pid
-kafka*/
-vendor/
diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml
deleted file mode 100644
index 07c7c10..0000000
--- a/vendor/github.com/bsm/sarama-cluster/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.10.x
-  - 1.9.x
-install:
-  - go get -u github.com/golang/dep/cmd/dep
-  - dep ensure
-env:
-  - SCALA_VERSION=2.12 KAFKA_VERSION=0.11.0.1
-  - SCALA_VERSION=2.12 KAFKA_VERSION=1.0.1
-  - SCALA_VERSION=2.12 KAFKA_VERSION=1.1.0
-script:
-  - make default test-race
-addons:
-  apt:
-    packages:
-      - oracle-java8-set-default
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
deleted file mode 100644
index e1bc110..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
+++ /dev/null
@@ -1,151 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
-  name = "github.com/Shopify/sarama"
-  packages = ["."]
-  revision = "35324cf48e33d8260e1c7c18854465a904ade249"
-  version = "v1.17.0"
-
-[[projects]]
-  name = "github.com/davecgh/go-spew"
-  packages = ["spew"]
-  revision = "346938d642f2ec3594ed81d874461961cd0faa76"
-  version = "v1.1.0"
-
-[[projects]]
-  name = "github.com/eapache/go-resiliency"
-  packages = ["breaker"]
-  revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
-  version = "v1.1.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/eapache/go-xerial-snappy"
-  packages = ["."]
-  revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
-
-[[projects]]
-  name = "github.com/eapache/queue"
-  packages = ["."]
-  revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
-  version = "v1.1.0"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/golang/snappy"
-  packages = ["."]
-  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
-
-[[projects]]
-  name = "github.com/onsi/ginkgo"
-  packages = [
-    ".",
-    "config",
-    "extensions/table",
-    "internal/codelocation",
-    "internal/containernode",
-    "internal/failer",
-    "internal/leafnodes",
-    "internal/remote",
-    "internal/spec",
-    "internal/spec_iterator",
-    "internal/specrunner",
-    "internal/suite",
-    "internal/testingtproxy",
-    "internal/writer",
-    "reporters",
-    "reporters/stenographer",
-    "reporters/stenographer/support/go-colorable",
-    "reporters/stenographer/support/go-isatty",
-    "types"
-  ]
-  revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf"
-  version = "v1.5.0"
-
-[[projects]]
-  name = "github.com/onsi/gomega"
-  packages = [
-    ".",
-    "format",
-    "internal/assertion",
-    "internal/asyncassertion",
-    "internal/oraclematcher",
-    "internal/testingtsupport",
-    "matchers",
-    "matchers/support/goraph/bipartitegraph",
-    "matchers/support/goraph/edge",
-    "matchers/support/goraph/node",
-    "matchers/support/goraph/util",
-    "types"
-  ]
-  revision = "62bff4df71bdbc266561a0caee19f0594b17c240"
-  version = "v1.4.0"
-
-[[projects]]
-  name = "github.com/pierrec/lz4"
-  packages = [
-    ".",
-    "internal/xxh32"
-  ]
-  revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
-  version = "v2.0.2"
-
-[[projects]]
-  branch = "master"
-  name = "github.com/rcrowley/go-metrics"
-  packages = ["."]
-  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/net"
-  packages = [
-    "html",
-    "html/atom",
-    "html/charset"
-  ]
-  revision = "afe8f62b1d6bbd81f31868121a50b06d8188e1f9"
-
-[[projects]]
-  branch = "master"
-  name = "golang.org/x/sys"
-  packages = ["unix"]
-  revision = "63fc586f45fe72d95d5240a5d5eb95e6503907d3"
-
-[[projects]]
-  name = "golang.org/x/text"
-  packages = [
-    "encoding",
-    "encoding/charmap",
-    "encoding/htmlindex",
-    "encoding/internal",
-    "encoding/internal/identifier",
-    "encoding/japanese",
-    "encoding/korean",
-    "encoding/simplifiedchinese",
-    "encoding/traditionalchinese",
-    "encoding/unicode",
-    "internal/gen",
-    "internal/tag",
-    "internal/utf8internal",
-    "language",
-    "runes",
-    "transform",
-    "unicode/cldr"
-  ]
-  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
-  version = "v0.3.0"
-
-[[projects]]
-  name = "gopkg.in/yaml.v2"
-  packages = ["."]
-  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
-  version = "v2.2.1"
-
-[solve-meta]
-  analyzer-name = "dep"
-  analyzer-version = 1
-  inputs-digest = "2fa33a2d1ae87e0905ef09332bb4b3fda29179f6bcd48fd3b94070774b9e458b"
-  solver-name = "gps-cdcl"
-  solver-version = 1
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
deleted file mode 100644
index 1eecfef..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-#   name = "github.com/user/project"
-#   version = "1.0.0"
-#
-# [[constraint]]
-#   name = "github.com/user/project2"
-#   branch = "dev"
-#   source = "github.com/myfork/project2"
-#
-# [[override]]
-#  name = "github.com/x/y"
-#  version = "2.4.0"
-
-
-[[constraint]]
-  name = "github.com/Shopify/sarama"
-  version = "^1.14.0"
diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE
deleted file mode 100644
index 127751c..0000000
--- a/vendor/github.com/bsm/sarama-cluster/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2017 Black Square Media Ltd
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile
deleted file mode 100644
index 25c5bc2..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-SCALA_VERSION?= 2.12
-KAFKA_VERSION?= 1.1.0
-KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION)
-KAFKA_SRC= https://archive.apache.org/dist/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz
-KAFKA_ROOT= testdata/$(KAFKA_DIR)
-PKG=$(shell go list ./... | grep -v vendor)
-
-default: vet test
-
-vet:
-	go vet $(PKG)
-
-test: testdeps
-	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60
-
-test-verbose: testdeps
-	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v
-
-test-race: testdeps
-	KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race
-
-testdeps: $(KAFKA_ROOT)
-
-doc: README.md
-
-.PHONY: test testdeps vet doc
-
-# ---------------------------------------------------------------------
-
-$(KAFKA_ROOT):
-	@mkdir -p $(dir $@)
-	cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz
-
-README.md: README.md.tpl $(wildcard *.go)
-	becca -package $(subst $(GOPATH)/src/,,$(PWD))
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md
deleted file mode 100644
index ebcd755..0000000
--- a/vendor/github.com/bsm/sarama-cluster/README.md
+++ /dev/null
@@ -1,151 +0,0 @@
-# Sarama Cluster
-
-[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
-[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
-[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
-[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
-
-Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
-
-## Documentation
-
-Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
-
-## Examples
-
-Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
-topics and partitions are all passed to a single channel:
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"os/signal"
-
-	cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {
-
-	// init (custom) config, enable errors and notifications
-	config := cluster.NewConfig()
-	config.Consumer.Return.Errors = true
-	config.Group.Return.Notifications = true
-
-	// init consumer
-	brokers := []string{"127.0.0.1:9092"}
-	topics := []string{"my_topic", "other_topic"}
-	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
-	if err != nil {
-		panic(err)
-	}
-	defer consumer.Close()
-
-	// trap SIGINT to trigger a shutdown.
-	signals := make(chan os.Signal, 1)
-	signal.Notify(signals, os.Interrupt)
-
-	// consume errors
-	go func() {
-		for err := range consumer.Errors() {
-			log.Printf("Error: %s\n", err.Error())
-		}
-	}()
-
-	// consume notifications
-	go func() {
-		for ntf := range consumer.Notifications() {
-			log.Printf("Rebalanced: %+v\n", ntf)
-		}
-	}()
-
-	// consume messages, watch signals
-	for {
-		select {
-		case msg, ok := <-consumer.Messages():
-			if ok {
-				fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
-				consumer.MarkOffset(msg, "")	// mark message as processed
-			}
-		case <-signals:
-			return
-		}
-	}
-}
-```
-
-Users who require access to individual partitions can use the partitioned mode, which exposes partition-level
-consumers:
-
-```go
-package main
-
-import (
-  "fmt"
-  "log"
-  "os"
-  "os/signal"
-
-  cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {
-
-	// init (custom) config, set mode to ConsumerModePartitions
-	config := cluster.NewConfig()
-	config.Group.Mode = cluster.ConsumerModePartitions
-
-	// init consumer
-	brokers := []string{"127.0.0.1:9092"}
-	topics := []string{"my_topic", "other_topic"}
-	consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
-	if err != nil {
-		panic(err)
-	}
-	defer consumer.Close()
-
-	// trap SIGINT to trigger a shutdown.
-	signals := make(chan os.Signal, 1)
-	signal.Notify(signals, os.Interrupt)
-
-	// consume partitions
-	for {
-		select {
-		case part, ok := <-consumer.Partitions():
-			if !ok {
-				return
-			}
-
-			// start a separate goroutine to consume messages
-			go func(pc cluster.PartitionConsumer) {
-				for msg := range pc.Messages() {
-					fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
-					consumer.MarkOffset(msg, "")	// mark message as processed
-				}
-			}(part)
-		case <-signals:
-			return
-		}
-	}
-}
-```
-
-## Running tests
-
-You need to install Ginkgo & Gomega to run tests. Please see
-http://onsi.github.io/ginkgo for more details.
-
-To run tests, call:
-
-	$ make test
-
-## Troubleshooting
-
-### Consumer not receiving any messages?
-
-By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that a brand-new consumer that has never committed any offsets to Kafka will only receive messages produced after it starts.
-
-If you wish to receive all messages (from the beginning of the topic) when a consumer has no offsets committed to Kafka, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
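A minimal sketch of that fix (broker address, group, and topic are placeholders):

```go
package main

import (
	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	// Fall back to the earliest available offset when the group has no
	// committed offsets yet, instead of the OffsetNewest default.
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, config)
	if err != nil {
		panic(err)
	}
	defer consumer.Close()
}
```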
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
deleted file mode 100644
index 5f63a69..0000000
--- a/vendor/github.com/bsm/sarama-cluster/README.md.tpl
+++ /dev/null
@@ -1,67 +0,0 @@
-# Sarama Cluster
-
-[![GoDoc](https://godoc.org/github.com/bsm/sarama-cluster?status.svg)](https://godoc.org/github.com/bsm/sarama-cluster)
-[![Build Status](https://travis-ci.org/bsm/sarama-cluster.svg?branch=master)](https://travis-ci.org/bsm/sarama-cluster)
-[![Go Report Card](https://goreportcard.com/badge/github.com/bsm/sarama-cluster)](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
-[![License](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
-
-Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
-
-## Documentation
-
-Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
-
-## Examples
-
-Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
-topics and partitions are all passed to a single channel:
-
-```go
-package main
-
-import (
-	"fmt"
-	"log"
-	"os"
-	"os/signal"
-
-	cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {{ "ExampleConsumer" | code }}
-```
-
-Users who require access to individual partitions can use the partitioned mode, which exposes partition-level
-consumers:
-
-```go
-package main
-
-import (
-  "fmt"
-  "log"
-  "os"
-  "os/signal"
-
-  cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {{ "ExampleConsumer_Partitions" | code }}
-```
-
-## Running tests
-
-You need to install Ginkgo & Gomega to run tests. Please see
-http://onsi.github.io/ginkgo for more details.
-
-To run tests, call:
-
-	$ make test
-
-## Troubleshooting
-
-### Consumer not receiving any messages?
-
-By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that a brand-new consumer that has never committed any offsets to Kafka will only receive messages produced after it starts.
-
-If you wish to receive all messages (from the beginning of the topic) when a consumer has no offsets committed to Kafka, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go
deleted file mode 100644
index 3aeaece..0000000
--- a/vendor/github.com/bsm/sarama-cluster/balancer.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package cluster
-
-import (
-	"math"
-	"sort"
-
-	"github.com/Shopify/sarama"
-)
-
-// NotificationType defines the type of notification
-type NotificationType uint8
-
-// String describes the notification type
-func (t NotificationType) String() string {
-	switch t {
-	case RebalanceStart:
-		return "rebalance start"
-	case RebalanceOK:
-		return "rebalance OK"
-	case RebalanceError:
-		return "rebalance error"
-	}
-	return "unknown"
-}
-
-const (
-	UnknownNotification NotificationType = iota
-	RebalanceStart
-	RebalanceOK
-	RebalanceError
-)
-
-// Notification is a state event emitted by the consumers on rebalance
-type Notification struct {
-	// Type exposes the notification type
-	Type NotificationType
-
-	// Claimed contains topic/partitions that were claimed by this rebalance cycle
-	Claimed map[string][]int32
-
-	// Released contains topic/partitions that were released as part of this rebalance cycle
-	Released map[string][]int32
-
-	// Current contains topic/partitions that are currently claimed by the consumer
-	Current map[string][]int32
-}
-
-func newNotification(current map[string][]int32) *Notification {
-	return &Notification{
-		Type:    RebalanceStart,
-		Current: current,
-	}
-}
-
-func (n *Notification) success(current map[string][]int32) *Notification {
-	o := &Notification{
-		Type:     RebalanceOK,
-		Claimed:  make(map[string][]int32),
-		Released: make(map[string][]int32),
-		Current:  current,
-	}
-	for topic, partitions := range current {
-		o.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(n.Current[topic]))
-	}
-	for topic, partitions := range n.Current {
-		o.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic]))
-	}
-	return o
-}
-
-// --------------------------------------------------------------------
-
-type topicInfo struct {
-	Partitions []int32
-	MemberIDs  []string
-}
-
-func (info topicInfo) Perform(s Strategy) map[string][]int32 {
-	if s == StrategyRoundRobin {
-		return info.RoundRobin()
-	}
-	return info.Ranges()
-}
-
-func (info topicInfo) Ranges() map[string][]int32 {
-	sort.Strings(info.MemberIDs)
-
-	mlen := len(info.MemberIDs)
-	plen := len(info.Partitions)
-	res := make(map[string][]int32, mlen)
-
-	for pos, memberID := range info.MemberIDs {
-		n, i := float64(plen)/float64(mlen), float64(pos)
-		min := int(math.Floor(i*n + 0.5))
-		max := int(math.Floor((i+1)*n + 0.5))
-		sub := info.Partitions[min:max]
-		if len(sub) > 0 {
-			res[memberID] = sub
-		}
-	}
-	return res
-}
-
-func (info topicInfo) RoundRobin() map[string][]int32 {
-	sort.Strings(info.MemberIDs)
-
-	mlen := len(info.MemberIDs)
-	res := make(map[string][]int32, mlen)
-	for i, pnum := range info.Partitions {
-		memberID := info.MemberIDs[i%mlen]
-		res[memberID] = append(res[memberID], pnum)
-	}
-	return res
-}
-
-// --------------------------------------------------------------------
-
-type balancer struct {
-	client sarama.Client
-	topics map[string]topicInfo
-}
-
-func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) {
-	balancer := newBalancer(client)
-	for memberID, meta := range members {
-		for _, topic := range meta.Topics {
-			if err := balancer.Topic(topic, memberID); err != nil {
-				return nil, err
-			}
-		}
-	}
-	return balancer, nil
-}
-
-func newBalancer(client sarama.Client) *balancer {
-	return &balancer{
-		client: client,
-		topics: make(map[string]topicInfo),
-	}
-}
-
-func (r *balancer) Topic(name string, memberID string) error {
-	topic, ok := r.topics[name]
-	if !ok {
-		nums, err := r.client.Partitions(name)
-		if err != nil {
-			return err
-		}
-		topic = topicInfo{
-			Partitions: nums,
-			MemberIDs:  make([]string, 0, 1),
-		}
-	}
-	topic.MemberIDs = append(topic.MemberIDs, memberID)
-	r.topics[name] = topic
-	return nil
-}
-
-func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 {
-	res := make(map[string]map[string][]int32, 1)
-	for topic, info := range r.topics {
-		for memberID, partitions := range info.Perform(s) {
-			if _, ok := res[memberID]; !ok {
-				res[memberID] = make(map[string][]int32, 1)
-			}
-			res[memberID][topic] = partitions
-		}
-	}
-	return res
-}
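As a sanity check on the arithmetic in `Ranges()`, here is a standalone sketch (the `rangeAssign` helper is hypothetical, mirroring the code above) that reproduces the range assignment documented in cluster.go below: six partitions across two members yield [0 1 2] and [3 4 5]:

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// rangeAssign mirrors topicInfo.Ranges: each member receives a contiguous
// slice of partitions, with boundaries at floor(i*n + 0.5).
func rangeAssign(memberIDs []string, partitions []int32) map[string][]int32 {
	sort.Strings(memberIDs)
	mlen, plen := len(memberIDs), len(partitions)
	res := make(map[string][]int32, mlen)
	for pos, memberID := range memberIDs {
		n, i := float64(plen)/float64(mlen), float64(pos)
		min := int(math.Floor(i*n + 0.5))
		max := int(math.Floor((i+1)*n + 0.5))
		if sub := partitions[min:max]; len(sub) > 0 {
			res[memberID] = sub
		}
	}
	return res
}

func main() {
	members := []string{"consumer-1", "consumer-2"}
	parts := []int32{0, 1, 2, 3, 4, 5}
	fmt.Println(rangeAssign(members, parts)) // consumer-1:[0 1 2] consumer-2:[3 4 5]
}
```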
diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go
deleted file mode 100644
index 42ffb30..0000000
--- a/vendor/github.com/bsm/sarama-cluster/client.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package cluster
-
-import (
-	"errors"
-	"sync/atomic"
-
-	"github.com/Shopify/sarama"
-)
-
-var errClientInUse = errors.New("cluster: client is already used by another consumer")
-
-// Client is a group client
-type Client struct {
-	sarama.Client
-	config Config
-
-	inUse uint32
-}
-
-// NewClient creates a new client instance
-func NewClient(addrs []string, config *Config) (*Client, error) {
-	if config == nil {
-		config = NewConfig()
-	}
-
-	if err := config.Validate(); err != nil {
-		return nil, err
-	}
-
-	client, err := sarama.NewClient(addrs, &config.Config)
-	if err != nil {
-		return nil, err
-	}
-
-	return &Client{Client: client, config: *config}, nil
-}
-
-// ClusterConfig returns the cluster configuration.
-func (c *Client) ClusterConfig() *Config {
-	cfg := c.config
-	return &cfg
-}
-
-func (c *Client) claim() bool {
-	return atomic.CompareAndSwapUint32(&c.inUse, 0, 1)
-}
-
-func (c *Client) release() {
-	atomic.CompareAndSwapUint32(&c.inUse, 1, 0)
-}
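A short sketch of the claim/release contract (see NewConsumerFromClient in consumer.go below; broker address, group, and topic are placeholders): a Client serves at most one consumer at a time and is only released by that consumer's Close():

```go
package main

import (
	"fmt"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	client, err := cluster.NewClient([]string{"127.0.0.1:9092"}, nil) // nil config falls back to NewConfig()
	if err != nil {
		panic(err)
	}
	defer client.Close()

	c1, err := cluster.NewConsumerFromClient(client, "group-a", []string{"my_topic"})
	if err != nil {
		panic(err)
	}

	// The client is now claimed; a second consumer on the same client
	// fails until c1.Close() releases it.
	if _, err := cluster.NewConsumerFromClient(client, "group-a", []string{"my_topic"}); err != nil {
		fmt.Println(err) // "cluster: client is already used by another consumer"
	}
	_ = c1.Close()
}
```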
diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go
deleted file mode 100644
index adcf0e9..0000000
--- a/vendor/github.com/bsm/sarama-cluster/cluster.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package cluster
-
-// Strategy for partition to consumer assignment
-type Strategy string
-
-const (
-	// StrategyRange is the default and assigns partition ranges to consumers.
-	// Example with six partitions and two consumers:
-	//   C1: [0, 1, 2]
-	//   C2: [3, 4, 5]
-	StrategyRange Strategy = "range"
-
-	// StrategyRoundRobin assigns partitions by alternating over consumers.
-	// Example with six partitions and two consumers:
-	//   C1: [0, 2, 4]
-	//   C2: [1, 3, 5]
-	StrategyRoundRobin Strategy = "roundrobin"
-)
-
-// Error instances are wrappers for internal errors with a context and
-// may be returned through the consumer's Errors() channel
-type Error struct {
-	Ctx string
-	error
-}
diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go
deleted file mode 100644
index 084b835..0000000
--- a/vendor/github.com/bsm/sarama-cluster/config.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package cluster
-
-import (
-	"regexp"
-	"time"
-
-	"github.com/Shopify/sarama"
-)
-
-var minVersion = sarama.V0_9_0_0
-
-type ConsumerMode uint8
-
-const (
-	ConsumerModeMultiplex ConsumerMode = iota
-	ConsumerModePartitions
-)
-
-// Config extends sarama.Config with Group specific namespace
-type Config struct {
-	sarama.Config
-
-	// Group is the namespace for group management properties
-	Group struct {
-
-		// The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange)
-		PartitionStrategy Strategy
-
-		// By default, messages and errors from the subscribed topics and partitions are all multiplexed and
-		// made available through the consumer's Messages() and Errors() channels.
-		//
-		// Users who require low-level access can enable ConsumerModePartitions where individual partitions
-		// are exposed on the Partitions() channel. Messages and errors must then be consumed on the partitions
-		// themselves.
-		Mode ConsumerMode
-
-		Offsets struct {
-			Retry struct {
-				// The number of retries when committing offsets (defaults to 3).
-				Max int
-			}
-			Synchronization struct {
-				// The duration allowed for other clients to commit their offsets before this client resumes, e.g. during a rebalance.
-				// NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration.
-				DwellTime time.Duration
-			}
-		}
-
-		Session struct {
-			// The allowed session timeout for registered consumers (defaults to 30s).
-			// Must be within the allowed server range.
-			Timeout time.Duration
-		}
-
-		Heartbeat struct {
-			// Interval between each heartbeat (defaults to 3s). It should be no more
-			// than 1/3rd of the Group.Session.Timeout setting.
-			Interval time.Duration
-		}
-
-		// Return specifies which group channels will be populated. If they are set to true,
-		// you must read from the respective channels to prevent deadlock.
-		Return struct {
-			// If enabled, rebalance notifications will be returned on the
-			// Notifications channel (default disabled).
-			Notifications bool
-		}
-
-		Topics struct {
-			// An additional whitelist of topics to subscribe to.
-			Whitelist *regexp.Regexp
-			// An additional blacklist of topics to avoid. If set, this takes precedence over
-			// the Whitelist setting.
-			Blacklist *regexp.Regexp
-		}
-
-		Member struct {
-			// Custom metadata to include when joining the group. The user data for all joined members
-			// can be retrieved by sending a DescribeGroupRequest to the broker that is the
-			// coordinator for the group.
-			UserData []byte
-		}
-	}
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
-	c := &Config{
-		Config: *sarama.NewConfig(),
-	}
-	c.Group.PartitionStrategy = StrategyRange
-	c.Group.Offsets.Retry.Max = 3
-	c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime
-	c.Group.Session.Timeout = 30 * time.Second
-	c.Group.Heartbeat.Interval = 3 * time.Second
-	c.Config.Version = minVersion
-	return c
-}
-
-// Validate checks a Config instance. It will return a
-// sarama.ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
-	if c.Group.Heartbeat.Interval%time.Millisecond != 0 {
-		sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Group.Session.Timeout%time.Millisecond != 0 {
-		sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
-	}
-	if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin {
-		sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.")
-	}
-	if !c.Version.IsAtLeast(minVersion) {
-		sarama.Logger.Println("Version is not supported; 0.9. will be assumed.")
-		c.Version = minVersion
-	}
-	if err := c.Config.Validate(); err != nil {
-		return err
-	}
-
-	// validate the Group values
-	switch {
-	case c.Group.Offsets.Retry.Max < 0:
-		return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0")
-	case c.Group.Offsets.Synchronization.DwellTime <= 0:
-		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0")
-	case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute:
-		return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m")
-	case c.Group.Heartbeat.Interval <= 0:
-		return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0")
-	case c.Group.Session.Timeout <= 0:
-		return sarama.ConfigurationError("Group.Session.Timeout must be > 0")
-	case !c.Metadata.Full && c.Group.Topics.Whitelist != nil:
-		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Whitelist is used")
-	case !c.Metadata.Full && c.Group.Topics.Blacklist != nil:
-		return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Blacklist is used")
-	}
-
-	// ensure offset is correct
-	switch c.Consumer.Offsets.Initial {
-	case sarama.OffsetOldest, sarama.OffsetNewest:
-	default:
-		return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest")
-	}
-
-	return nil
-}
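A minimal sketch of tuning the Group namespace shown above and validating the result (the values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	config := cluster.NewConfig()
	config.Group.PartitionStrategy = cluster.StrategyRoundRobin
	config.Group.Session.Timeout = 20 * time.Second
	config.Group.Heartbeat.Interval = 5 * time.Second // well under Session.Timeout/3

	// Validate logs warnings for soft issues and returns a
	// sarama.ConfigurationError for hard ones.
	if err := config.Validate(); err != nil {
		fmt.Println("invalid config:", err)
	}
}
```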
diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go
deleted file mode 100644
index e7a67da..0000000
--- a/vendor/github.com/bsm/sarama-cluster/consumer.go
+++ /dev/null
@@ -1,919 +0,0 @@
-package cluster
-
-import (
-	"sort"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/Shopify/sarama"
-)
-
-// Consumer is a cluster group consumer
-type Consumer struct {
-	client    *Client
-	ownClient bool
-
-	consumer sarama.Consumer
-	subs     *partitionMap
-
-	consumerID string
-	groupID    string
-
-	memberID     string
-	generationID int32
-	membershipMu sync.RWMutex
-
-	coreTopics  []string
-	extraTopics []string
-
-	dying, dead chan none
-	closeOnce   sync.Once
-
-	consuming     int32
-	messages      chan *sarama.ConsumerMessage
-	errors        chan error
-	partitions    chan PartitionConsumer
-	notifications chan *Notification
-
-	commitMu sync.Mutex
-}
-
-// NewConsumer initializes a new consumer
-func NewConsumer(addrs []string, groupID string, topics []string, config *Config) (*Consumer, error) {
-	client, err := NewClient(addrs, config)
-	if err != nil {
-		return nil, err
-	}
-
-	consumer, err := NewConsumerFromClient(client, groupID, topics)
-	if err != nil {
-		return nil, err
-	}
-	consumer.ownClient = true
-	return consumer, nil
-}
-
-// NewConsumerFromClient initializes a new consumer from an existing client.
-//
-// Please note that clients cannot be shared between consumers (due to Kafka internals);
-// they can only be re-used, which requires the user to call Close() on the first consumer
-// before using this method again to initialize another one. Attempts to use a client with
-// more than one consumer at a time will return errors.
-func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
-	if !client.claim() {
-		return nil, errClientInUse
-	}
-
-	consumer, err := sarama.NewConsumerFromClient(client.Client)
-	if err != nil {
-		client.release()
-		return nil, err
-	}
-
-	sort.Strings(topics)
-	c := &Consumer{
-		client:   client,
-		consumer: consumer,
-		subs:     newPartitionMap(),
-		groupID:  groupID,
-
-		coreTopics: topics,
-
-		dying: make(chan none),
-		dead:  make(chan none),
-
-		messages:      make(chan *sarama.ConsumerMessage),
-		errors:        make(chan error, client.config.ChannelBufferSize),
-		partitions:    make(chan PartitionConsumer, 1),
-		notifications: make(chan *Notification),
-	}
-	if err := c.client.RefreshCoordinator(groupID); err != nil {
-		client.release()
-		return nil, err
-	}
-
-	go c.mainLoop()
-	return c, nil
-}
-
-// Messages returns the read channel for the messages that are returned by
-// the broker.
-//
-// This channel will only be populated if the Config.Group.Mode option is set to
-// ConsumerModeMultiplex (default).
-func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
-
-// Partitions returns the read channels for individual partitions of this broker.
-//
-// This channel will only be populated if the Config.Group.Mode option is set to
-// ConsumerModePartitions.
-//
-// The Partitions() channel must be listened to for the life of this consumer;
-// when a rebalance happens old partitions will be closed (naturally come to
-// completion) and new ones will be emitted. The returned channel will only close
-// when the consumer is completely shut down.
-func (c *Consumer) Partitions() <-chan PartitionConsumer { return c.partitions }
-
-// Errors returns a read channel of errors that occur during offset management, if
-// enabled. By default, errors are logged and not returned over this channel. If
-// you want to implement any custom error handling, set your config's
-// Consumer.Return.Errors setting to true, and read from this channel.
-func (c *Consumer) Errors() <-chan error { return c.errors }
-
-// Notifications returns a channel of Notifications that occur during consumer
-// rebalancing. Notifications will only be emitted over this channel, if your config's
-// Group.Return.Notifications setting to true.
-func (c *Consumer) Notifications() <-chan *Notification { return c.notifications }
-
-// HighWaterMarks returns the current high water marks for each topic and partition.
-// Consistency between partitions is not guaranteed since high water marks are updated separately.
-func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.consumer.HighWaterMarks() }
-
-// MarkOffset marks the provided message as processed, alongside a metadata string
-// that represents the state of the partition consumer at that point in time. The
-// metadata string can be used by another consumer to restore that state, so it
-// can resume consumption.
-//
-// Note: calling MarkOffset does not necessarily commit the offset to the backend
-// store immediately for efficiency reasons, and it may never be committed if
-// your application crashes. This means that you may end up processing the same
-// message twice, and your processing should ideally be idempotent.
-func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
-	if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
-		sub.MarkOffset(msg.Offset, metadata)
-	}
-}
-
-// MarkPartitionOffset marks an offset of the provided topic/partition as processed.
-// See MarkOffset for additional explanation.
-func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	if sub := c.subs.Fetch(topic, partition); sub != nil {
-		sub.MarkOffset(offset, metadata)
-	}
-}
-
-// MarkOffsets marks stashed offsets as processed.
-// See MarkOffset for additional explanation.
-func (c *Consumer) MarkOffsets(s *OffsetStash) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	for tp, info := range s.offsets {
-		if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
-			sub.MarkOffset(info.Offset, info.Metadata)
-		}
-		delete(s.offsets, tp)
-	}
-}
-
-// ResetOffset marks the provided message as processed, alongside a metadata string
-// that represents the state of the partition consumer at that point in time. The
-// metadata string can be used by another consumer to restore that state, so it
-// can resume consumption.
-//
-// The difference between ResetOffset and MarkOffset is that ResetOffset allows rewinding to an earlier offset.
-func (c *Consumer) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
-	if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
-		sub.ResetOffset(msg.Offset, metadata)
-	}
-}
-
-// ResetPartitionOffset marks an offset of the provided topic/partition as processed.
-// See ResetOffset for additional explanation.
-func (c *Consumer) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	sub := c.subs.Fetch(topic, partition)
-	if sub != nil {
-		sub.ResetOffset(offset, metadata)
-	}
-}
-
-// ResetOffsets marks stashed offsets as processed.
-// See ResetOffset for additional explanation.
-func (c *Consumer) ResetOffsets(s *OffsetStash) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	for tp, info := range s.offsets {
-		if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
-			sub.ResetOffset(info.Offset, info.Metadata)
-		}
-		delete(s.offsets, tp)
-	}
-}
-
-// Subscriptions returns the consumed topics and partitions
-func (c *Consumer) Subscriptions() map[string][]int32 {
-	return c.subs.Info()
-}
-
-// CommitOffsets manually commits previously marked offsets. By default there is no
-// need to call this function as the consumer will commit offsets automatically
-// using the Config.Consumer.Offsets.CommitInterval setting.
-//
-// Please be aware that calling this function during an internal rebalance cycle may return
-// broker errors (e.g. sarama.ErrUnknownMemberId or sarama.ErrIllegalGeneration).
-func (c *Consumer) CommitOffsets() error {
-	c.commitMu.Lock()
-	defer c.commitMu.Unlock()
-
-	memberID, generationID := c.membership()
-	req := &sarama.OffsetCommitRequest{
-		Version:                 2,
-		ConsumerGroup:           c.groupID,
-		ConsumerGroupGeneration: generationID,
-		ConsumerID:              memberID,
-		RetentionTime:           -1,
-	}
-
-	if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 {
-		req.RetentionTime = int64(ns / time.Millisecond)
-	}
-
-	snap := c.subs.Snapshot()
-	dirty := false
-	for tp, state := range snap {
-		if state.Dirty {
-			dirty = true
-			req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata)
-		}
-	}
-	if !dirty {
-		return nil
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	resp, err := broker.CommitOffset(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	for topic, errs := range resp.Errors {
-		for partition, kerr := range errs {
-			if kerr != sarama.ErrNoError {
-				err = kerr
-			} else if state, ok := snap[topicPartition{topic, partition}]; ok {
-				if sub := c.subs.Fetch(topic, partition); sub != nil {
-					sub.markCommitted(state.Info.Offset)
-				}
-			}
-		}
-	}
-	return err
-}
-
-// Close safely closes the consumer and releases all resources
-func (c *Consumer) Close() (err error) {
-	c.closeOnce.Do(func() {
-		close(c.dying)
-		<-c.dead
-
-		if e := c.release(); e != nil {
-			err = e
-		}
-		if e := c.consumer.Close(); e != nil {
-			err = e
-		}
-		close(c.messages)
-		close(c.errors)
-
-		if e := c.leaveGroup(); e != nil {
-			err = e
-		}
-		close(c.partitions)
-		close(c.notifications)
-
-		// drain
-		for range c.messages {
-		}
-		for range c.errors {
-		}
-		for p := range c.partitions {
-			_ = p.Close()
-		}
-		for range c.notifications {
-		}
-
-		c.client.release()
-		if c.ownClient {
-			if e := c.client.Close(); e != nil {
-				err = e
-			}
-		}
-	})
-	return
-}
-
-func (c *Consumer) mainLoop() {
-	defer close(c.dead)
-	defer atomic.StoreInt32(&c.consuming, 0)
-
-	for {
-		atomic.StoreInt32(&c.consuming, 0)
-
-		// Check if close was requested
-		select {
-		case <-c.dying:
-			return
-		default:
-		}
-
-		// Start next consume cycle
-		c.nextTick()
-	}
-}
-
-func (c *Consumer) nextTick() {
-	// Remember previous subscriptions
-	var notification *Notification
-	if c.client.config.Group.Return.Notifications {
-		notification = newNotification(c.subs.Info())
-	}
-
-	// Refresh coordinator
-	if err := c.refreshCoordinator(); err != nil {
-		c.rebalanceError(err, nil)
-		return
-	}
-
-	// Release subscriptions
-	if err := c.release(); err != nil {
-		c.rebalanceError(err, nil)
-		return
-	}
-
-	// Issue rebalance start notification
-	if c.client.config.Group.Return.Notifications {
-		c.handleNotification(notification)
-	}
-
-	// Rebalance, fetch new subscriptions
-	subs, err := c.rebalance()
-	if err != nil {
-		c.rebalanceError(err, notification)
-		return
-	}
-
-	// Coordinate loops, make sure everything is
-	// stopped on exit
-	tomb := newLoopTomb()
-	defer tomb.Close()
-
-	// Start the heartbeat
-	tomb.Go(c.hbLoop)
-
-	// Subscribe to topic/partitions
-	if err := c.subscribe(tomb, subs); err != nil {
-		c.rebalanceError(err, notification)
-		return
-	}
-
-	// Update/issue notification with new claims
-	if c.client.config.Group.Return.Notifications {
-		notification = notification.success(subs)
-		c.handleNotification(notification)
-	}
-
-	// Start topic watcher loop
-	tomb.Go(c.twLoop)
-
-	// Start consuming and committing offsets
-	tomb.Go(c.cmLoop)
-	atomic.StoreInt32(&c.consuming, 1)
-
-	// Wait for signals
-	select {
-	case <-tomb.Dying():
-	case <-c.dying:
-	}
-}
-
-// heartbeat loop, triggered by the mainLoop
-func (c *Consumer) hbLoop(stopped <-chan none) {
-	ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			switch err := c.heartbeat(); err {
-			case nil, sarama.ErrNoError:
-			case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress:
-				return
-			default:
-				c.handleError(&Error{Ctx: "heartbeat", error: err})
-				return
-			}
-		case <-stopped:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-// topic watcher loop, triggered by the mainLoop
-func (c *Consumer) twLoop(stopped <-chan none) {
-	ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			topics, err := c.client.Topics()
-			if err != nil {
-				c.handleError(&Error{Ctx: "topics", error: err})
-				return
-			}
-
-			for _, topic := range topics {
-				if !c.isKnownCoreTopic(topic) &&
-					!c.isKnownExtraTopic(topic) &&
-					c.isPotentialExtraTopic(topic) {
-					return
-				}
-			}
-		case <-stopped:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-// commit loop, triggered by the mainLoop
-func (c *Consumer) cmLoop(stopped <-chan none) {
-	ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil {
-				c.handleError(&Error{Ctx: "commit", error: err})
-				return
-			}
-		case <-stopped:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *Consumer) rebalanceError(err error, n *Notification) {
-	if n != nil {
-		n.Type = RebalanceError
-		c.handleNotification(n)
-	}
-
-	switch err {
-	case sarama.ErrRebalanceInProgress:
-	default:
-		c.handleError(&Error{Ctx: "rebalance", error: err})
-	}
-
-	select {
-	case <-c.dying:
-	case <-time.After(c.client.config.Metadata.Retry.Backoff):
-	}
-}
-
-func (c *Consumer) handleNotification(n *Notification) {
-	if c.client.config.Group.Return.Notifications {
-		select {
-		case c.notifications <- n:
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *Consumer) handleError(e *Error) {
-	if c.client.config.Consumer.Return.Errors {
-		select {
-		case c.errors <- e:
-		case <-c.dying:
-			return
-		}
-	} else {
-		sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error())
-	}
-}
-
-// Releases the consumer and commits offsets, called from rebalance() and Close()
-func (c *Consumer) release() (err error) {
-	// Stop all consumers
-	c.subs.Stop()
-
-	// Clear subscriptions on exit
-	defer c.subs.Clear()
-
-	// Wait for messages to be processed
-	timeout := time.NewTimer(c.client.config.Group.Offsets.Synchronization.DwellTime)
-	defer timeout.Stop()
-
-	select {
-	case <-c.dying:
-	case <-timeout.C:
-	}
-
-	// Commit offsets, continue on errors
-	if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil {
-		err = e
-	}
-
-	return
-}
-
-// --------------------------------------------------------------------
-
-// Performs a heartbeat, part of the mainLoop()
-func (c *Consumer) heartbeat() error {
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	memberID, generationID := c.membership()
-	resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{
-		GroupId:      c.groupID,
-		MemberId:     memberID,
-		GenerationId: generationID,
-	})
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-	return resp.Err
-}
-
-// Performs a rebalance, part of the mainLoop()
-func (c *Consumer) rebalance() (map[string][]int32, error) {
-	memberID, _ := c.membership()
-	sarama.Logger.Printf("cluster/consumer %s rebalance\n", memberID)
-
-	allTopics, err := c.client.Topics()
-	if err != nil {
-		return nil, err
-	}
-	c.extraTopics = c.selectExtraTopics(allTopics)
-	sort.Strings(c.extraTopics)
-
-	// Re-join consumer group
-	strategy, err := c.joinGroup()
-	switch {
-	case err == sarama.ErrUnknownMemberId:
-		c.membershipMu.Lock()
-		c.memberID = ""
-		c.membershipMu.Unlock()
-		return nil, err
-	case err != nil:
-		return nil, err
-	}
-
-	// Sync consumer group state, fetch subscriptions
-	subs, err := c.syncGroup(strategy)
-	switch {
-	case err == sarama.ErrRebalanceInProgress:
-		return nil, err
-	case err != nil:
-		_ = c.leaveGroup()
-		return nil, err
-	}
-	return subs, nil
-}
-
-// Performs the subscription, part of the mainLoop()
-func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error {
-	// fetch offsets
-	offsets, err := c.fetchOffsets(subs)
-	if err != nil {
-		_ = c.leaveGroup()
-		return err
-	}
-
-	// create consumers in parallel
-	var mu sync.Mutex
-	var wg sync.WaitGroup
-
-	for topic, partitions := range subs {
-		for _, partition := range partitions {
-			wg.Add(1)
-
-			info := offsets[topic][partition]
-			go func(topic string, partition int32) {
-				if e := c.createConsumer(tomb, topic, partition, info); e != nil {
-					mu.Lock()
-					err = e
-					mu.Unlock()
-				}
-				wg.Done()
-			}(topic, partition)
-		}
-	}
-	wg.Wait()
-
-	if err != nil {
-		_ = c.release()
-		_ = c.leaveGroup()
-	}
-	return err
-}
-
-// --------------------------------------------------------------------
-
-// Send a request to the broker to join the group on rebalance()
-func (c *Consumer) joinGroup() (*balancer, error) {
-	memberID, _ := c.membership()
-	req := &sarama.JoinGroupRequest{
-		GroupId:        c.groupID,
-		MemberId:       memberID,
-		SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond),
-		ProtocolType:   "consumer",
-	}
-
-	meta := &sarama.ConsumerGroupMemberMetadata{
-		Version:  1,
-		Topics:   append(c.coreTopics, c.extraTopics...),
-		UserData: c.client.config.Group.Member.UserData,
-	}
-	err := req.AddGroupProtocolMetadata(string(StrategyRange), meta)
-	if err != nil {
-		return nil, err
-	}
-	err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta)
-	if err != nil {
-		return nil, err
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	resp, err := broker.JoinGroup(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	} else if resp.Err != sarama.ErrNoError {
-		c.closeCoordinator(broker, resp.Err)
-		return nil, resp.Err
-	}
-
-	var strategy *balancer
-	if resp.LeaderId == resp.MemberId {
-		members, err := resp.GetMembers()
-		if err != nil {
-			return nil, err
-		}
-
-		strategy, err = newBalancerFromMeta(c.client, members)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	c.membershipMu.Lock()
-	c.memberID = resp.MemberId
-	c.generationID = resp.GenerationId
-	c.membershipMu.Unlock()
-
-	return strategy, nil
-}
-
-// Send a request to the broker to sync the group on rebalance().
-// Returns a list of topics and partitions to consume.
-func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) {
-	memberID, generationID := c.membership()
-	req := &sarama.SyncGroupRequest{
-		GroupId:      c.groupID,
-		MemberId:     memberID,
-		GenerationId: generationID,
-	}
-
-	if strategy != nil {
-		for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) {
-			if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{
-				Topics: topics,
-			}); err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	resp, err := broker.SyncGroup(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	} else if resp.Err != sarama.ErrNoError {
-		c.closeCoordinator(broker, resp.Err)
-		return nil, resp.Err
-	}
-
-	// Return if there is nothing to subscribe to
-	if len(resp.MemberAssignment) == 0 {
-		return nil, nil
-	}
-
-	// Get assigned subscriptions
-	members, err := resp.GetMemberAssignment()
-	if err != nil {
-		return nil, err
-	}
-
-	// Sort partitions, for each topic
-	for topic := range members.Topics {
-		sort.Sort(int32Slice(members.Topics[topic]))
-	}
-	return members.Topics, nil
-}
-
-// Fetches latest committed offsets for all subscriptions
-func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
-	offsets := make(map[string]map[int32]offsetInfo, len(subs))
-	req := &sarama.OffsetFetchRequest{
-		Version:       1,
-		ConsumerGroup: c.groupID,
-	}
-
-	for topic, partitions := range subs {
-		offsets[topic] = make(map[int32]offsetInfo, len(partitions))
-		for _, partition := range partitions {
-			offsets[topic][partition] = offsetInfo{Offset: -1}
-			req.AddPartition(topic, partition)
-		}
-	}
-
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	resp, err := broker.FetchOffset(req)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return nil, err
-	}
-
-	for topic, partitions := range subs {
-		for _, partition := range partitions {
-			block := resp.GetBlock(topic, partition)
-			if block == nil {
-				return nil, sarama.ErrIncompleteResponse
-			}
-
-			if block.Err == sarama.ErrNoError {
-				offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
-			} else {
-				return nil, block.Err
-			}
-		}
-	}
-	return offsets, nil
-}
-
-// Send a request to the broker to leave the group on failed rebalance() and on Close()
-func (c *Consumer) leaveGroup() error {
-	broker, err := c.client.Coordinator(c.groupID)
-	if err != nil {
-		c.closeCoordinator(broker, err)
-		return err
-	}
-
-	memberID, _ := c.membership()
-	if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{
-		GroupId:  c.groupID,
-		MemberId: memberID,
-	}); err != nil {
-		c.closeCoordinator(broker, err)
-	}
-	return err
-}
-
-// --------------------------------------------------------------------
-
-func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
-	memberID, _ := c.membership()
-	sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))
-
-	// Create partitionConsumer
-	pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
-	if err != nil {
-		return err
-	}
-
-	// Store in subscriptions
-	c.subs.Store(topic, partition, pc)
-
-	// Start partition consumer goroutine
-	tomb.Go(func(stopper <-chan none) {
-		if c.client.config.Group.Mode == ConsumerModePartitions {
-			pc.waitFor(stopper, c.errors)
-		} else {
-			pc.multiplex(stopper, c.messages, c.errors)
-		}
-	})
-
-	if c.client.config.Group.Mode == ConsumerModePartitions {
-		c.partitions <- pc
-	}
-	return nil
-}
-
-func (c *Consumer) commitOffsetsWithRetry(retries int) error {
-	err := c.CommitOffsets()
-	if err != nil && retries > 0 {
-		return c.commitOffsetsWithRetry(retries - 1)
-	}
-	return err
-}
-
-func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) {
-	if broker != nil {
-		_ = broker.Close()
-	}
-
-	switch err {
-	case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer:
-		_ = c.client.RefreshCoordinator(c.groupID)
-	}
-}
-
-func (c *Consumer) selectExtraTopics(allTopics []string) []string {
-	extra := allTopics[:0]
-	for _, topic := range allTopics {
-		if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) {
-			extra = append(extra, topic)
-		}
-	}
-	return extra
-}
-
-func (c *Consumer) isKnownCoreTopic(topic string) bool {
-	pos := sort.SearchStrings(c.coreTopics, topic)
-	return pos < len(c.coreTopics) && c.coreTopics[pos] == topic
-}
-
-func (c *Consumer) isKnownExtraTopic(topic string) bool {
-	pos := sort.SearchStrings(c.extraTopics, topic)
-	return pos < len(c.extraTopics) && c.extraTopics[pos] == topic
-}
-
-func (c *Consumer) isPotentialExtraTopic(topic string) bool {
-	rx := c.client.config.Group.Topics
-	if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) {
-		return false
-	}
-	if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) {
-		return true
-	}
-	return false
-}
-
-func (c *Consumer) refreshCoordinator() error {
-	if err := c.refreshMetadata(); err != nil {
-		return err
-	}
-	return c.client.RefreshCoordinator(c.groupID)
-}
-
-func (c *Consumer) refreshMetadata() (err error) {
-	if c.client.config.Metadata.Full {
-		err = c.client.RefreshMetadata()
-	} else {
-		var topics []string
-		if topics, err = c.client.Topics(); err == nil && len(topics) != 0 {
-			err = c.client.RefreshMetadata(topics...)
-		}
-	}
-
-	// maybe we didn't have authorization to describe all topics
-	switch err {
-	case sarama.ErrTopicAuthorizationFailed:
-		err = c.client.RefreshMetadata(c.coreTopics...)
-	}
-	return
-}
-
-func (c *Consumer) membership() (memberID string, generationID int32) {
-	c.membershipMu.RLock()
-	memberID, generationID = c.memberID, c.generationID
-	c.membershipMu.RUnlock()
-	return
-}
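Tying the offset-management pieces together, a minimal sketch (broker, group, and topic are placeholders; `process` is a hypothetical handler): MarkOffset only stages offsets in memory, and they reach Kafka either via the automatic CommitInterval-driven loop or via an explicit CommitOffsets call:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func process(msg *sarama.ConsumerMessage) { /* hypothetical handler */ }

func main() {
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	for msg := range consumer.Messages() {
		process(msg)
		consumer.MarkOffset(msg, "") // staged in memory, not yet committed

		// Optional: commit synchronously instead of waiting for the
		// automatic commit loop.
		if err := consumer.CommitOffsets(); err != nil {
			log.Printf("commit failed: %v", err)
		}
	}
}
```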
diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go
deleted file mode 100644
index 9c8ff16..0000000
--- a/vendor/github.com/bsm/sarama-cluster/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-Package cluster provides cluster extensions for Sarama, enabling users
-to consume topics across multiple, balanced nodes.
-
-It requires Kafka v0.9+ and follows the steps guide described in:
-https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design
-*/
-package cluster
diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go
deleted file mode 100644
index 4223ac5..0000000
--- a/vendor/github.com/bsm/sarama-cluster/offsets.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package cluster
-
-import (
-	"sync"
-
-	"github.com/Shopify/sarama"
-)
-
-// OffsetStash allows you to accumulate offsets and
-// mark them as processed in bulk
-type OffsetStash struct {
-	offsets map[topicPartition]offsetInfo
-	mu      sync.Mutex
-}
-
-// NewOffsetStash inits a blank stash
-func NewOffsetStash() *OffsetStash {
-	return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)}
-}
-
-// MarkOffset stashes the provided message offset
-func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
-	s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
-}
-
-// MarkPartitionOffset stashes the offset for the provided topic/partition combination
-func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := topicPartition{Topic: topic, Partition: partition}
-	if info := s.offsets[key]; offset >= info.Offset {
-		info.Offset = offset
-		info.Metadata = metadata
-		s.offsets[key] = info
-	}
-}
-
-// ResetPartitionOffset stashes the offset for the provided topic/partition combination.
-// The difference between ResetPartitionOffset and MarkPartitionOffset is that ResetPartitionOffset supports earlier offsets.
-func (s *OffsetStash) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	key := topicPartition{Topic: topic, Partition: partition}
-	if info := s.offsets[key]; offset <= info.Offset {
-		info.Offset = offset
-		info.Metadata = metadata
-		s.offsets[key] = info
-	}
-}
-
-// ResetOffset stashes the provided message offset.
-// See ResetPartitionOffset for explanation
-func (s *OffsetStash) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
-	s.ResetPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
-}
-
-// Offsets returns the latest stashed offsets by topic-partition
-func (s *OffsetStash) Offsets() map[string]int64 {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	res := make(map[string]int64, len(s.offsets))
-	for tp, info := range s.offsets {
-		res[tp.String()] = info.Offset
-	}
-	return res
-}
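A small sketch of bulk marking with the stash (topic and offsets are illustrative; `consumer` is assumed to be an existing *cluster.Consumer as in the sketch above):

```go
// Accumulate offsets while processing a batch...
stash := cluster.NewOffsetStash()
stash.MarkOffset(&sarama.ConsumerMessage{Topic: "my_topic", Partition: 0, Offset: 42}, "")
stash.MarkPartitionOffset("my_topic", 1, 7, "")

// ...then mark them as processed in one go; MarkOffsets drains the stash.
consumer.MarkOffsets(stash)
```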
diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go
deleted file mode 100644
index bfaa587..0000000
--- a/vendor/github.com/bsm/sarama-cluster/partitions.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package cluster
-
-import (
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/Shopify/sarama"
-)
-
-// PartitionConsumer allows code to consume individual partitions from the cluster.
-//
-// See docs for Consumer.Partitions() for more on how to implement this.
-type PartitionConsumer interface {
-	sarama.PartitionConsumer
-
-	// Topic returns the consumed topic name
-	Topic() string
-
-	// Partition returns the consumed partition
-	Partition() int32
-
-	// InitialOffset returns the offset used for creating the PartitionConsumer instance.
-	// The returned offset can be a literal offset, or OffsetNewest, or OffsetOldest
-	InitialOffset() int64
-  
-	// MarkOffset marks the offset of a message as processed.
-	MarkOffset(offset int64, metadata string)
-
-	// ResetOffset resets the offset to a previously processed message.
-	ResetOffset(offset int64, metadata string)
-}
-
-type partitionConsumer struct {
-	sarama.PartitionConsumer
-
-	state partitionState
-	mu    sync.Mutex
-
-	topic         string
-	partition     int32
-	initialOffset int64
-
-	closeOnce sync.Once
-	closeErr  error
-
-	dying, dead chan none
-}
-
-func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) {
-	offset := info.NextOffset(defaultOffset)
-	pcm, err := manager.ConsumePartition(topic, partition, offset)
-
-	// Resume from default offset, if requested offset is out-of-range
-	if err == sarama.ErrOffsetOutOfRange {
-		info.Offset = -1
-		offset = defaultOffset
-		pcm, err = manager.ConsumePartition(topic, partition, offset)
-	}
-	if err != nil {
-		return nil, err
-	}
-
-	return &partitionConsumer{
-		PartitionConsumer: pcm,
-		state:             partitionState{Info: info},
-
-		topic:         topic,
-		partition:     partition,
-		initialOffset: offset,
-
-		dying: make(chan none),
-		dead:  make(chan none),
-	}, nil
-}
-
-// Topic implements PartitionConsumer
-func (c *partitionConsumer) Topic() string { return c.topic }
-
-// Partition implements PartitionConsumer
-func (c *partitionConsumer) Partition() int32 { return c.partition }
-
-// InitialOffset implements PartitionConsumer
-func (c *partitionConsumer) InitialOffset() int64 { return c.initialOffset }
-
-// AsyncClose implements PartitionConsumer
-func (c *partitionConsumer) AsyncClose() {
-	c.closeOnce.Do(func() {
-		c.closeErr = c.PartitionConsumer.Close()
-		close(c.dying)
-	})
-}
-
-// Close implements PartitionConsumer
-func (c *partitionConsumer) Close() error {
-	c.AsyncClose()
-	<-c.dead
-	return c.closeErr
-}
-
-func (c *partitionConsumer) waitFor(stopper <-chan none, errors chan<- error) {
-	defer close(c.dead)
-
-	for {
-		select {
-		case err, ok := <-c.Errors():
-			if !ok {
-				return
-			}
-			select {
-			case errors <- err:
-			case <-stopper:
-				return
-			case <-c.dying:
-				return
-			}
-		case <-stopper:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *partitionConsumer) multiplex(stopper <-chan none, messages chan<- *sarama.ConsumerMessage, errors chan<- error) {
-	defer close(c.dead)
-
-	for {
-		select {
-		case msg, ok := <-c.Messages():
-			if !ok {
-				return
-			}
-			select {
-			case messages <- msg:
-			case <-stopper:
-				return
-			case <-c.dying:
-				return
-			}
-		case err, ok := <-c.Errors():
-			if !ok {
-				return
-			}
-			select {
-			case errors <- err:
-			case <-stopper:
-				return
-			case <-c.dying:
-				return
-			}
-		case <-stopper:
-			return
-		case <-c.dying:
-			return
-		}
-	}
-}
-
-func (c *partitionConsumer) getState() partitionState {
-	c.mu.Lock()
-	state := c.state
-	c.mu.Unlock()
-
-	return state
-}
-
-func (c *partitionConsumer) markCommitted(offset int64) {
-	c.mu.Lock()
-	if offset == c.state.Info.Offset {
-		c.state.Dirty = false
-	}
-	c.mu.Unlock()
-}
-
-// MarkOffset implements PartitionConsumer
-func (c *partitionConsumer) MarkOffset(offset int64, metadata string) {
-	c.mu.Lock()
-	if next := offset + 1; next > c.state.Info.Offset {
-		c.state.Info.Offset = next
-		c.state.Info.Metadata = metadata
-		c.state.Dirty = true
-	}
-	c.mu.Unlock()
-}
-
-// ResetOffset implements PartitionConsumer
-func (c *partitionConsumer) ResetOffset(offset int64, metadata string) {
-	c.mu.Lock()
-	if next := offset + 1; next <= c.state.Info.Offset {
-		c.state.Info.Offset = next
-		c.state.Info.Metadata = metadata
-		c.state.Dirty = true
-	}
-	c.mu.Unlock()
-}
-
-// --------------------------------------------------------------------
-
-type partitionState struct {
-	Info       offsetInfo
-	Dirty      bool
-	LastCommit time.Time
-}
-
-// --------------------------------------------------------------------
-
-type partitionMap struct {
-	data map[topicPartition]*partitionConsumer
-	mu   sync.RWMutex
-}
-
-func newPartitionMap() *partitionMap {
-	return &partitionMap{
-		data: make(map[topicPartition]*partitionConsumer),
-	}
-}
-
-func (m *partitionMap) IsSubscribedTo(topic string) bool {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	for tp := range m.data {
-		if tp.Topic == topic {
-			return true
-		}
-	}
-	return false
-}
-
-func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer {
-	m.mu.RLock()
-	pc, _ := m.data[topicPartition{topic, partition}]
-	m.mu.RUnlock()
-	return pc
-}
-
-func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) {
-	m.mu.Lock()
-	m.data[topicPartition{topic, partition}] = pc
-	m.mu.Unlock()
-}
-
-func (m *partitionMap) Snapshot() map[topicPartition]partitionState {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	snap := make(map[topicPartition]partitionState, len(m.data))
-	for tp, pc := range m.data {
-		snap[tp] = pc.getState()
-	}
-	return snap
-}
-
-func (m *partitionMap) Stop() {
-	m.mu.RLock()
-	defer m.mu.RUnlock()
-
-	var wg sync.WaitGroup
-	for tp := range m.data {
-		wg.Add(1)
-		go func(p *partitionConsumer) {
-			_ = p.Close()
-			wg.Done()
-		}(m.data[tp])
-	}
-	wg.Wait()
-}
-
-func (m *partitionMap) Clear() {
-	m.mu.Lock()
-	for tp := range m.data {
-		delete(m.data, tp)
-	}
-	m.mu.Unlock()
-}
-
-func (m *partitionMap) Info() map[string][]int32 {
-	info := make(map[string][]int32)
-	m.mu.RLock()
-	for tp := range m.data {
-		info[tp.Topic] = append(info[tp.Topic], tp.Partition)
-	}
-	m.mu.RUnlock()
-
-	for topic := range info {
-		sort.Sort(int32Slice(info[topic]))
-	}
-	return info
-}
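The removed MarkOffset/ResetOffset pair encodes the resume convention: MarkOffset stores offset+1 (the next offset to fetch) and only ever moves forward, while ResetOffset can move it back. A hedged sketch of the consume loop this implies, with handle as a hypothetical processing function:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func handle(msg *sarama.ConsumerMessage) { log.Printf("offset %d", msg.Offset) }

// consumePartition marks each message only after it has been handled, so a
// restart resumes at the first unprocessed offset (msg.Offset+1).
func consumePartition(pc cluster.PartitionConsumer) {
	for msg := range pc.Messages() {
		handle(msg)
		pc.MarkOffset(msg.Offset, "")
	}
}
```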
diff --git a/vendor/github.com/bsm/sarama-cluster/util.go b/vendor/github.com/bsm/sarama-cluster/util.go
deleted file mode 100644
index e7cb5dd..0000000
--- a/vendor/github.com/bsm/sarama-cluster/util.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package cluster
-
-import (
-	"fmt"
-	"sort"
-	"sync"
-)
-
-type none struct{}
-
-type topicPartition struct {
-	Topic     string
-	Partition int32
-}
-
-func (tp *topicPartition) String() string {
-	return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition)
-}
-
-type offsetInfo struct {
-	Offset   int64
-	Metadata string
-}
-
-func (i offsetInfo) NextOffset(fallback int64) int64 {
-	if i.Offset > -1 {
-		return i.Offset
-	}
-	return fallback
-}
-
-type int32Slice []int32
-
-func (p int32Slice) Len() int           { return len(p) }
-func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-func (p int32Slice) Diff(o int32Slice) (res []int32) {
-	on := len(o)
-	for _, x := range p {
-		n := sort.Search(on, func(i int) bool { return o[i] >= x })
-		if n < on && o[n] == x {
-			continue
-		}
-		res = append(res, x)
-	}
-	return
-}
-
-// --------------------------------------------------------------------
-
-type loopTomb struct {
-	c chan none
-	o sync.Once
-	w sync.WaitGroup
-}
-
-func newLoopTomb() *loopTomb {
-	return &loopTomb{c: make(chan none)}
-}
-
-func (t *loopTomb) stop()  { t.o.Do(func() { close(t.c) }) }
-func (t *loopTomb) Close() { t.stop(); t.w.Wait() }
-
-func (t *loopTomb) Dying() <-chan none { return t.c }
-func (t *loopTomb) Go(f func(<-chan none)) {
-	t.w.Add(1)
-
-	go func() {
-		defer t.stop()
-		defer t.w.Done()
-
-		f(t.c)
-	}()
-}
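loopTomb above is the stop-and-wait primitive the consumer loops used: any loop exiting (or an explicit Close) closes the shared channel, and Close then waits for every loop to finish. Since the type is unexported and the file is going away, here is a self-contained copy of the pattern for illustration:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type none struct{}

type loopTomb struct {
	c chan none
	o sync.Once
	w sync.WaitGroup
}

func newLoopTomb() *loopTomb { return &loopTomb{c: make(chan none)} }

func (t *loopTomb) stop()  { t.o.Do(func() { close(t.c) }) }
func (t *loopTomb) Close() { t.stop(); t.w.Wait() }

func (t *loopTomb) Go(f func(<-chan none)) {
	t.w.Add(1)
	go func() {
		defer t.stop() // one loop exiting tears down the others too
		defer t.w.Done()
		f(t.c)
	}()
}

func main() {
	t := newLoopTomb()
	t.Go(func(dying <-chan none) {
		for {
			select {
			case <-dying:
				fmt.Println("loop stopped")
				return
			case <-time.After(10 * time.Millisecond):
				// periodic work would go here
			}
		}
	})
	t.Close() // signal and wait for the loop to exit
}
```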
diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE
new file mode 100644
index 0000000..553cbbf
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2020-2024 Buf Technologies, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go
new file mode 100644
index 0000000..ee054fa
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go
@@ -0,0 +1,420 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package editions contains helpers related to resolving features for
+// Protobuf editions. These are lower-level helpers. Higher-level helpers
+// (which use this package under the hood) can be found in the exported
+// protoutil package.
+package editions
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+	"google.golang.org/protobuf/types/dynamicpb"
+)
+
+const (
+	// MinSupportedEdition is the earliest edition supported by this module.
+	// It should be 2023 (the first edition) for the indefinite future.
+	MinSupportedEdition = descriptorpb.Edition_EDITION_2023
+
+	// MaxSupportedEdition is the most recent edition supported by this module.
+	MaxSupportedEdition = descriptorpb.Edition_EDITION_2023
+)
+
+var (
+	// SupportedEditions is the exhaustive set of editions that protocompile
+	// can support. We don't allow it to compile future/unknown editions, to
+	// make sure we don't generate incorrect descriptors, in the event that
+	// a future edition introduces a change or new feature that requires
+	// new logic in the compiler.
+	SupportedEditions = computeSupportedEditions(MinSupportedEdition, MaxSupportedEdition)
+
+	// FeatureSetDescriptor is the message descriptor for the compiled-in
+	// version (in the descriptorpb package) of the google.protobuf.FeatureSet
+	// message type.
+	FeatureSetDescriptor = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor()
+	// FeatureSetType is the message type for the compiled-in version (in
+	// the descriptorpb package) of google.protobuf.FeatureSet.
+	FeatureSetType = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Type()
+
+	editionDefaults     map[descriptorpb.Edition]*descriptorpb.FeatureSet
+	editionDefaultsInit sync.Once
+)
+
+// HasFeatures is implemented by all options messages and provides a
+// nil-receiver-safe way of accessing the features explicitly configured
+// in those options.
+type HasFeatures interface {
+	GetFeatures() *descriptorpb.FeatureSet
+}
+
+var _ HasFeatures = (*descriptorpb.FileOptions)(nil)
+var _ HasFeatures = (*descriptorpb.MessageOptions)(nil)
+var _ HasFeatures = (*descriptorpb.FieldOptions)(nil)
+var _ HasFeatures = (*descriptorpb.OneofOptions)(nil)
+var _ HasFeatures = (*descriptorpb.ExtensionRangeOptions)(nil)
+var _ HasFeatures = (*descriptorpb.EnumOptions)(nil)
+var _ HasFeatures = (*descriptorpb.EnumValueOptions)(nil)
+var _ HasFeatures = (*descriptorpb.ServiceOptions)(nil)
+var _ HasFeatures = (*descriptorpb.MethodOptions)(nil)
+
+// ResolveFeature resolves a feature for the given descriptor. This simple
+// helper examines the given element and its ancestors, searching for an
+// override. If there is no overridden value, it returns a zero value.
+func ResolveFeature(
+	element protoreflect.Descriptor,
+	fields ...protoreflect.FieldDescriptor,
+) (protoreflect.Value, error) {
+	for {
+		var features *descriptorpb.FeatureSet
+		if withFeatures, ok := element.Options().(HasFeatures); ok {
+			// It should not really be possible for 'ok' to ever be false...
+			features = withFeatures.GetFeatures()
+		}
+
+		// TODO: adaptFeatureSet is only looking at the first field. But if we needed to
+		//       support an extension field inside a custom feature, we'd really need
+		//       to check all fields. That gets particularly complicated if the traversal
+		//       path of fields includes list and map values. Luckily, features are not
+		//       supposed to be repeated and not supposed to themselves have extensions.
+		//       So this should be fine, at least for now.
+		msgRef, err := adaptFeatureSet(features, fields[0])
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		// Navigate the fields to find the value
+		var val protoreflect.Value
+		for i, field := range fields {
+			if i > 0 {
+				msgRef = val.Message()
+			}
+			if !msgRef.Has(field) {
+				val = protoreflect.Value{}
+				break
+			}
+			val = msgRef.Get(field)
+		}
+		if val.IsValid() {
+			// All fields were set!
+			return val, nil
+		}
+
+		parent := element.Parent()
+		if parent == nil {
+			// We've reached the end of the inheritance chain.
+			return protoreflect.Value{}, nil
+		}
+		element = parent
+	}
+}
+
+// HasEdition should be implemented by values that implement
+// [protoreflect.FileDescriptor], to provide access to the file's
+// edition when its syntax is [protoreflect.Editions].
+type HasEdition interface {
+	// Edition returns the numeric value of a google.protobuf.Edition enum
+	// value that corresponds to the edition of this file. If the file does
+	// not use editions, it should return the enum value that corresponds
+	// to the syntax level, EDITION_PROTO2 or EDITION_PROTO3.
+	Edition() int32
+}
+
+// GetEdition returns the edition for a given element. It returns
+// EDITION_PROTO2 or EDITION_PROTO3 if the element is in a file that
+// uses proto2 or proto3 syntax, respectively. It returns EDITION_UNKNOWN
+// if the syntax of the given element is not recognized or if the edition
+// cannot be ascertained from the element's [protoreflect.FileDescriptor].
+func GetEdition(d protoreflect.Descriptor) descriptorpb.Edition {
+	switch d.ParentFile().Syntax() {
+	case protoreflect.Proto2:
+		return descriptorpb.Edition_EDITION_PROTO2
+	case protoreflect.Proto3:
+		return descriptorpb.Edition_EDITION_PROTO3
+	case protoreflect.Editions:
+		withEdition, ok := d.ParentFile().(HasEdition)
+		if !ok {
+			// The parent file should always be a *result, so we should
+			// never be able to actually get in here. If we somehow did
+			// have another implementation of protoreflect.FileDescriptor,
+			// it doesn't provide a way to get the edition, other than the
+			// potentially expensive step of generating a FileDescriptorProto
+			// and then querying for the edition from that. :/
+			return descriptorpb.Edition_EDITION_UNKNOWN
+		}
+		return descriptorpb.Edition(withEdition.Edition())
+	default:
+		return descriptorpb.Edition_EDITION_UNKNOWN
+	}
+}
+
+// GetEditionDefaults returns the default feature values for the given edition.
+// It returns nil if the given edition is not known.
+//
+// This only populates known features, those that are fields of [*descriptorpb.FeatureSet].
+// It does not populate any extension fields.
+//
+// The returned value must not be mutated as it references shared package state.
+func GetEditionDefaults(edition descriptorpb.Edition) *descriptorpb.FeatureSet {
+	editionDefaultsInit.Do(func() {
+		editionDefaults = make(map[descriptorpb.Edition]*descriptorpb.FeatureSet, len(descriptorpb.Edition_name))
+		// Compute default for all known editions in descriptorpb.
+		for editionInt := range descriptorpb.Edition_name {
+			edition := descriptorpb.Edition(editionInt)
+			defaults := &descriptorpb.FeatureSet{}
+			defaultsRef := defaults.ProtoReflect()
+			fields := defaultsRef.Descriptor().Fields()
+			// Note: we are not computing defaults for extensions. Those are not needed
+			// by anything in the compiler, so we can get away with just computing
+			// defaults for these static, non-extension fields.
+			for i, length := 0, fields.Len(); i < length; i++ {
+				field := fields.Get(i)
+				val, err := GetFeatureDefault(edition, FeatureSetType, field)
+				if err != nil {
+					// No default is defined for this edition/field combination; skip the field rather than fail.
+					continue
+				}
+				defaultsRef.Set(field, val)
+			}
+			editionDefaults[edition] = defaults
+		}
+	})
+	return editionDefaults[edition]
+}
+
+// GetFeatureDefault computes the default value for a feature. The given container
+// is the message type that contains the field. This should usually be the descriptor
+// for google.protobuf.FeatureSet, but can be a different message for computing the
+// default value of custom features.
+//
+// Note that this always re-computes the default. For known fields of FeatureSet,
+// it is more efficient to query from the statically computed default messages,
+// like so:
+//
+//	editions.GetEditionDefaults(edition).ProtoReflect().Get(feature)
+func GetFeatureDefault(edition descriptorpb.Edition, container protoreflect.MessageType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	opts, ok := feature.Options().(*descriptorpb.FieldOptions)
+	if !ok {
+		// this is most likely impossible except for contrived use cases...
+		return protoreflect.Value{}, fmt.Errorf("options is %T instead of *descriptorpb.FieldOptions", feature.Options())
+	}
+	maxEdition := descriptorpb.Edition(-1)
+	var maxVal string
+	for _, def := range opts.EditionDefaults {
+		if def.GetEdition() <= edition && def.GetEdition() > maxEdition {
+			maxEdition = def.GetEdition()
+			maxVal = def.GetValue()
+		}
+	}
+	if maxEdition == -1 {
+		// no matching default found
+		return protoreflect.Value{}, fmt.Errorf("no relevant default for edition %s", edition)
+	}
+	// We use a typed nil so that it won't fall back to the global registry. Features
+	// should not use extensions or google.protobuf.Any, so a nil *Types is fine.
+	unmarshaler := prototext.UnmarshalOptions{Resolver: (*protoregistry.Types)(nil)}
+	// The string value is in the text format: either a field value literal or a
+	// message literal. (Repeated and map features aren't supported, so there's no
+	// array or map literal syntax to worry about.)
+	if feature.Kind() == protoreflect.MessageKind || feature.Kind() == protoreflect.GroupKind {
+		fldVal := container.Zero().NewField(feature)
+		err := unmarshaler.Unmarshal([]byte(maxVal), fldVal.Message().Interface())
+		if err != nil {
+			return protoreflect.Value{}, err
+		}
+		return fldVal, nil
+	}
+	// The value is the text format for the field. But prototext doesn't provide a way
+	// to unmarshal a single field value. To work around, we unmarshal into an enclosing
+	// message, which means we must prefix the value with the field name.
+	if feature.IsExtension() {
+		maxVal = fmt.Sprintf("[%s]: %s", feature.FullName(), maxVal)
+	} else {
+		maxVal = fmt.Sprintf("%s: %s", feature.Name(), maxVal)
+	}
+	empty := container.New()
+	err := unmarshaler.Unmarshal([]byte(maxVal), empty.Interface())
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	return empty.Get(feature), nil
+}
+
+func adaptFeatureSet(msg *descriptorpb.FeatureSet, field protoreflect.FieldDescriptor) (protoreflect.Message, error) {
+	msgRef := msg.ProtoReflect()
+	var actualField protoreflect.FieldDescriptor
+	switch {
+	case field.IsExtension():
+		// Extensions can be used directly with the feature set, even if
+		// field.ContainingMessage() != FeatureSetDescriptor. But only if
+		// the value is either not a message or is a message with the
+		// right descriptor, i.e. val.Descriptor() == field.Message().
+		if actualField = actualDescriptor(msgRef, field); actualField == nil || actualField == field {
+			if msgRef.Has(field) || len(msgRef.GetUnknown()) == 0 {
+				return msgRef, nil
+			}
+			// The field is not present, but the message has unrecognized values. So
+			// let's try to parse the unrecognized bytes, just in case they contain
+			// this extension.
+			temp := &descriptorpb.FeatureSet{}
+			unmarshaler := proto.UnmarshalOptions{
+				AllowPartial: true,
+				Resolver:     resolverForExtension{field},
+			}
+			if err := unmarshaler.Unmarshal(msgRef.GetUnknown(), temp); err != nil {
+				return nil, fmt.Errorf("failed to parse unrecognized fields of FeatureSet: %w", err)
+			}
+			return temp.ProtoReflect(), nil
+		}
+	case field.ContainingMessage() == FeatureSetDescriptor:
+		// Known field, not dynamically generated. Can directly use with the feature set.
+		return msgRef, nil
+	default:
+		actualField = FeatureSetDescriptor.Fields().ByNumber(field.Number())
+	}
+
+	// If we get here, we have a dynamic field descriptor or an extension
+	// descriptor whose message type does not match the descriptor of the
+	// stored value. We need to copy its value into a dynamic message,
+	// which requires marshalling/unmarshalling.
+	// We only need to copy over the unrecognized bytes (if any)
+	// and the same field (if present).
+	data := msgRef.GetUnknown()
+	if actualField != nil && msgRef.Has(actualField) {
+		subset := &descriptorpb.FeatureSet{}
+		subset.ProtoReflect().Set(actualField, msgRef.Get(actualField))
+		var err error
+		data, err = proto.MarshalOptions{AllowPartial: true}.MarshalAppend(data, subset)
+		if err != nil {
+			return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err)
+		}
+	}
+	if len(data) == 0 {
+		// No relevant data to copy over, so we can just return
+		// a zero value message
+		return dynamicpb.NewMessageType(field.ContainingMessage()).Zero(), nil
+	}
+
+	other := dynamicpb.NewMessage(field.ContainingMessage())
+	// We don't need to use a resolver for this step because we know that
+	// field is not an extension. And features are not allowed to themselves
+	// have extensions.
+	if err := (proto.UnmarshalOptions{AllowPartial: true}).Unmarshal(data, other); err != nil {
+		return nil, fmt.Errorf("failed to unmarshal FeatureSet field %s from bytes: %w", field.Name(), err)
+	}
+	return other, nil
+}
+
+type resolverForExtension struct {
+	ext protoreflect.ExtensionDescriptor
+}
+
+func (r resolverForExtension) FindMessageByName(_ protoreflect.FullName) (protoreflect.MessageType, error) {
+	return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindMessageByURL(_ string) (protoreflect.MessageType, error) {
+	return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	if field == r.ext.FullName() {
+		return asExtensionType(r.ext), nil
+	}
+	return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	if message == r.ext.ContainingMessage().FullName() && field == r.ext.Number() {
+		return asExtensionType(r.ext), nil
+	}
+	return nil, protoregistry.NotFound
+}
+
+func asExtensionType(ext protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+	if xtd, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok {
+		return xtd.Type()
+	}
+	return dynamicpb.NewExtensionType(ext)
+}
+
+func computeSupportedEditions(minEdition, maxEdition descriptorpb.Edition) map[string]descriptorpb.Edition {
+	supportedEditions := map[string]descriptorpb.Edition{}
+	for editionNum := range descriptorpb.Edition_name {
+		edition := descriptorpb.Edition(editionNum)
+		if edition >= minEdition && edition <= maxEdition {
+			name := strings.TrimPrefix(edition.String(), "EDITION_")
+			supportedEditions[name] = edition
+		}
+	}
+	return supportedEditions
+}
+
+// actualDescriptor returns the actual field descriptor referenced by msg that
+// corresponds to the given ext (i.e. same number). It returns nil if msg has
+// no reference, if the actual descriptor is the same as ext, or if ext is
+// otherwise safe to use as is.
+func actualDescriptor(msg protoreflect.Message, ext protoreflect.ExtensionDescriptor) protoreflect.FieldDescriptor {
+	if !msg.Has(ext) || ext.Message() == nil {
+		// nothing to match; safe as is
+		return nil
+	}
+	val := msg.Get(ext)
+	switch {
+	case ext.IsMap(): // should not actually be possible
+		expectedDescriptor := ext.MapValue().Message()
+		if expectedDescriptor == nil {
+			return nil // nothing to match
+		}
+		// We know msg.Has(ext) is true, from above, so there's at least one entry.
+		var matches bool
+		val.Map().Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool {
+			matches = val.Message().Descriptor() == expectedDescriptor
+			return false
+		})
+		if matches {
+			return nil
+		}
+	case ext.IsList():
+		// We know msg.Has(ext) is true, from above, so there's at least one entry.
+		if val.List().Get(0).Message().Descriptor() == ext.Message() {
+			return nil
+		}
+	default:
+		if val.Message().Descriptor() == ext.Message() {
+			return nil
+		}
+	}
+	// The underlying message descriptors do not match. So we need to return
+	// the actual field descriptor. Sadly, protoreflect.Message provides no way
+	// to query the field descriptor in a message by number. For non-extensions,
+	// one can query the associated message descriptor. But for extensions, we
+	// have to do the slow thing, and range through all fields looking for it.
+	var actualField protoreflect.FieldDescriptor
+	msg.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+		if fd.Number() == ext.Number() {
+			actualField = fd
+			return false
+		}
+		return true
+	})
+	return actualField
+}
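Because this package is internal, code outside protocompile reaches these helpers through the exported protoutil wrappers added next. Within the module, the default-lookup flow above boils down to a sketch like this (hypothetical in-module file; in edition 2023, field_presence defaults to EXPLICIT, enum number 1):

```go
package editions_test

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/bufbuild/protocompile/internal/editions"
)

func printFieldPresenceDefault() {
	defaults := editions.GetEditionDefaults(descriptorpb.Edition_EDITION_2023)
	fp := editions.FeatureSetDescriptor.Fields().ByName("field_presence")
	// The returned message is shared package state: read it, never mutate it.
	fmt.Println(defaults.ProtoReflect().Get(fp)) // prints the enum number (1 == EXPLICIT)
}
```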
diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/editions.go b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go
new file mode 100644
index 0000000..fb21dff
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go
@@ -0,0 +1,140 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package protoutil
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+	"google.golang.org/protobuf/types/dynamicpb"
+
+	"github.com/bufbuild/protocompile/internal/editions"
+)
+
+// GetFeatureDefault gets the default value for the given feature and the given
+// edition. The given feature must represent a field of the google.protobuf.FeatureSet
+// message and must not be an extension.
+//
+// If the given field is from a dynamically built descriptor (i.e. its containing
+// message descriptor is different from the linked-in descriptor for
+// [*descriptorpb.FeatureSet]), the returned value may be a dynamic value. In such
+// cases, the value may not be directly usable using [protoreflect.Message.Set] with
+// an instance of [*descriptorpb.FeatureSet] and must instead be used with a
+// [*dynamicpb.Message].
+//
+// To get the default value of a custom feature, use [GetCustomFeatureDefault]
+// instead.
+func GetFeatureDefault(edition descriptorpb.Edition, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	if feature.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
+			feature.Name(), feature.ContainingMessage().FullName(), editions.FeatureSetDescriptor.FullName())
+	}
+	var msgType protoreflect.MessageType
+	if feature.ContainingMessage() == editions.FeatureSetDescriptor {
+		msgType = editions.FeatureSetType
+	} else {
+		msgType = dynamicpb.NewMessageType(feature.ContainingMessage())
+	}
+	return editions.GetFeatureDefault(edition, msgType, feature)
+}
+
+// GetCustomFeatureDefault gets the default value for the given custom feature
+// and given edition. A custom feature is a field whose containing message is the
+// type of an extension field of google.protobuf.FeatureSet. The given extension
+// describes that extension field and message type. The given feature must be a
+// field of that extension's message type.
+func GetCustomFeatureDefault(edition descriptorpb.Edition, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	extDesc := extension.TypeDescriptor()
+	if extDesc.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
+		return protoreflect.Value{}, fmt.Errorf("extension %s does not extend %s", extDesc.FullName(), editions.FeatureSetDescriptor.FullName())
+	}
+	if extDesc.Message() == nil {
+		return protoreflect.Value{}, fmt.Errorf("extensions of %s should be messages; %s is instead %s",
+			editions.FeatureSetDescriptor.FullName(), extDesc.FullName(), extDesc.Kind().String())
+	}
+	if feature.IsExtension() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s is an extension, but feature extension %s may not itself have extensions",
+			feature.FullName(), extDesc.FullName())
+	}
+	if feature.ContainingMessage().FullName() != extDesc.Message().FullName() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
+			feature.Name(), feature.ContainingMessage().FullName(), extDesc.Message().FullName())
+	}
+	if feature.ContainingMessage() != extDesc.Message() {
+		return protoreflect.Value{}, fmt.Errorf("feature %s has a different message descriptor from the given extension type for %s",
+			feature.Name(), extDesc.Message().FullName())
+	}
+	return editions.GetFeatureDefault(edition, extension.Zero().Message().Type(), feature)
+}
+
+// ResolveFeature resolves a feature for the given descriptor.
+//
+// If the given element is in a proto2 or proto3 syntax file, this skips
+// resolution and just returns the relevant default (since such files are not
+// allowed to override features). If neither the given element nor any of its
+// ancestors override the given feature, the relevant default is returned.
+//
+// This has the same caveat as GetFeatureDefault if the given feature is from a
+// dynamically built descriptor.
+func ResolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	edition := editions.GetEdition(element)
+	defaultVal, err := GetFeatureDefault(edition, feature)
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	return resolveFeature(edition, defaultVal, element, feature)
+}
+
+// ResolveCustomFeature resolves a custom feature for the given extension and
+// field descriptor.
+//
+// The given extension must be an extension of google.protobuf.FeatureSet that
+// represents a non-repeated message value. The given feature is a field in
+// that extension's message type.
+//
+// If the given element is in a proto2 or proto3 syntax file, this skips
+// resolution and just returns the relevant default (since such files are not
+// allowed to override features). If neither the given element nor any of its
+// ancestors override the given feature, the relevant default is returned.
+func ResolveCustomFeature(element protoreflect.Descriptor, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	edition := editions.GetEdition(element)
+	defaultVal, err := GetCustomFeatureDefault(edition, extension, feature)
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	return resolveFeature(edition, defaultVal, element, extension.TypeDescriptor(), feature)
+}
+
+func resolveFeature(
+	edition descriptorpb.Edition,
+	defaultVal protoreflect.Value,
+	element protoreflect.Descriptor,
+	fields ...protoreflect.FieldDescriptor,
+) (protoreflect.Value, error) {
+	if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 {
+		// these syntax levels can't specify features, so we can short-circuit the search
+		// through the descriptor hierarchy for feature overrides
+		return defaultVal, nil
+	}
+	val, err := editions.ResolveFeature(element, fields...)
+	if err != nil {
+		return protoreflect.Value{}, err
+	}
+	if val.IsValid() {
+		return val, nil
+	}
+	return defaultVal, nil
+}
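A short sketch of resolving a standard feature with these helpers, given a field descriptor obtained elsewhere (fieldPresence is an illustrative name, not part of the API):

```go
package example

import (
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/bufbuild/protocompile/protoutil"
)

// fieldPresence resolves the field_presence feature for the given field,
// falling back to the edition default when nothing in the file overrides it.
func fieldPresence(field protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	featureSet := (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor()
	presence := featureSet.Fields().ByName("field_presence")
	return protoutil.ResolveFeature(field, presence)
}
```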
diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go
new file mode 100644
index 0000000..9c55999
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go
@@ -0,0 +1,262 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package protoutil contains useful functions for interacting with descriptors.
+// For now these include only functions for efficiently converting descriptors
+// produced by the compiler to descriptor protos and functions for resolving
+// "features" (a core concept of Protobuf Editions).
+//
+// Despite the fact that descriptor protos are mutable, calling code should NOT
+// mutate any of the protos returned from this package. For efficiency, some
+// values returned from this package may reference internal state of a compiler
+// result, and mutating the proto could corrupt or invalidate parts of that
+// result.
+package protoutil
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+// DescriptorProtoWrapper is a protoreflect.Descriptor that wraps an
+// underlying descriptor proto. It provides the same interface as
+// Descriptor but with one extra operation, to efficiently query for
+// the underlying descriptor proto.
+//
+// Descriptors that implement this will also implement another method
+// whose specified return type is the concrete type returned by the
+// AsProto method. The name of this method varies by the type of this
+// descriptor:
+//
+//	 Descriptor Type        Other Method Name
+//	---------------------+------------------------------------
+//	 FileDescriptor      |  FileDescriptorProto()
+//	 MessageDescriptor   |  MessageDescriptorProto()
+//	 FieldDescriptor     |  FieldDescriptorProto()
+//	 OneofDescriptor     |  OneofDescriptorProto()
+//	 EnumDescriptor      |  EnumDescriptorProto()
+//	 EnumValueDescriptor |  EnumValueDescriptorProto()
+//	 ServiceDescriptor   |  ServiceDescriptorProto()
+//	 MethodDescriptor    |  MethodDescriptorProto()
+//
+// For example, a DescriptorProtoWrapper that implements FileDescriptor
+// returns a *descriptorpb.FileDescriptorProto value from its AsProto
+// method and also provides a method with the following signature:
+//
+//	FileDescriptorProto() *descriptorpb.FileDescriptorProto
+type DescriptorProtoWrapper interface {
+	protoreflect.Descriptor
+	// AsProto returns the underlying descriptor proto. The concrete
+	// type of the proto message depends on the type of this
+	// descriptor:
+	//    Descriptor Type        Proto Message Type
+	//   ---------------------+------------------------------------
+	//    FileDescriptor      |  *descriptorpb.FileDescriptorProto
+	//    MessageDescriptor   |  *descriptorpb.DescriptorProto
+	//    FieldDescriptor     |  *descriptorpb.FieldDescriptorProto
+	//    OneofDescriptor     |  *descriptorpb.OneofDescriptorProto
+	//    EnumDescriptor      |  *descriptorpb.EnumDescriptorProto
+	//    EnumValueDescriptor |  *descriptorpb.EnumValueDescriptorProto
+	//    ServiceDescriptor   |  *descriptorpb.ServiceDescriptorProto
+	//    MethodDescriptor    |  *descriptorpb.MethodDescriptorProto
+	AsProto() proto.Message
+}
+
+// ProtoFromDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return ProtoFromFileDescriptor(d)
+	case protoreflect.MessageDescriptor:
+		return ProtoFromMessageDescriptor(d)
+	case protoreflect.FieldDescriptor:
+		return ProtoFromFieldDescriptor(d)
+	case protoreflect.OneofDescriptor:
+		return ProtoFromOneofDescriptor(d)
+	case protoreflect.EnumDescriptor:
+		return ProtoFromEnumDescriptor(d)
+	case protoreflect.EnumValueDescriptor:
+		return ProtoFromEnumValueDescriptor(d)
+	case protoreflect.ServiceDescriptor:
+		return ProtoFromServiceDescriptor(d)
+	case protoreflect.MethodDescriptor:
+		return ProtoFromMethodDescriptor(d)
+	default:
+		// Unknown descriptor type; fall back to the generic wrapper interface if it is implemented.
+		if res, ok := d.(DescriptorProtoWrapper); ok {
+			return res.AsProto()
+		}
+		return nil
+	}
+}
+
+// ProtoFromFileDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For file descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. File descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
+	if imp, ok := d.(protoreflect.FileImport); ok {
+		d = imp.FileDescriptor
+	}
+	type canProto interface {
+		FileDescriptorProto() *descriptorpb.FileDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.FileDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {
+			return fd
+		}
+	}
+	return protodesc.ToFileDescriptorProto(d)
+}
+
+// ProtoFromMessageDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For message descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Message descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
+	type canProto interface {
+		MessageDescriptorProto() *descriptorpb.DescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.MessageDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if md, ok := res.AsProto().(*descriptorpb.DescriptorProto); ok {
+			return md
+		}
+	}
+	return protodesc.ToDescriptorProto(d)
+}
+
+// ProtoFromFieldDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For field descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Field descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromFieldDescriptor(d protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
+	type canProto interface {
+		FieldDescriptorProto() *descriptorpb.FieldDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.FieldDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if fd, ok := res.AsProto().(*descriptorpb.FieldDescriptorProto); ok {
+			return fd
+		}
+	}
+	return protodesc.ToFieldDescriptorProto(d)
+}
+
+// ProtoFromOneofDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For oneof descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Oneof descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromOneofDescriptor(d protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
+	type canProto interface {
+		OneofDescriptorProto() *descriptorpb.OneofDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.OneofDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if ood, ok := res.AsProto().(*descriptorpb.OneofDescriptorProto); ok {
+			return ood
+		}
+	}
+	return protodesc.ToOneofDescriptorProto(d)
+}
+
+// ProtoFromEnumDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For enum descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Enum descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromEnumDescriptor(d protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
+	type canProto interface {
+		EnumDescriptorProto() *descriptorpb.EnumDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.EnumDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if ed, ok := res.AsProto().(*descriptorpb.EnumDescriptorProto); ok {
+			return ed
+		}
+	}
+	return protodesc.ToEnumDescriptorProto(d)
+}
+
+// ProtoFromEnumValueDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For enum value descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Enum value descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromEnumValueDescriptor(d protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
+	type canProto interface {
+		EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.EnumValueDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if ed, ok := res.AsProto().(*descriptorpb.EnumValueDescriptorProto); ok {
+			return ed
+		}
+	}
+	return protodesc.ToEnumValueDescriptorProto(d)
+}
+
+// ProtoFromServiceDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For service descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Service descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromServiceDescriptor(d protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
+	type canProto interface {
+		ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.ServiceDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if sd, ok := res.AsProto().(*descriptorpb.ServiceDescriptorProto); ok {
+			return sd
+		}
+	}
+	return protodesc.ToServiceDescriptorProto(d)
+}
+
+// ProtoFromMethodDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For method descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Method descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
+	type canProto interface {
+		MethodDescriptorProto() *descriptorpb.MethodDescriptorProto
+	}
+	if res, ok := d.(canProto); ok {
+		return res.MethodDescriptorProto()
+	}
+	if res, ok := d.(DescriptorProtoWrapper); ok {
+		if md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {
+			return md
+		}
+	}
+	return protodesc.ToMethodDescriptorProto(d)
+}
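All eight helpers share the same unwrap-or-convert shape; a small sketch of the file-level variant (dumpFile is illustrative):

```go
package example

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"

	"github.com/bufbuild/protocompile/protoutil"
)

// dumpFile extracts the FileDescriptorProto behind any FileDescriptor.
// For compiler-produced descriptors this is a cheap, lossless unwrap;
// otherwise it falls back to a protodesc conversion.
func dumpFile(fd protoreflect.FileDescriptor) {
	fdp := protoutil.ProtoFromFileDescriptor(fd)
	// Treat the result as read-only; it may alias compiler-internal state.
	fmt.Println(fdp.GetName(), fdp.GetSyntax())
}
```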
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b..33c8830 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@
 - [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
 - [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45..78bddf1 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@
 // Store the primes in an array as well.
 //
 // The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
 var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
 type Digest struct {
 	v1    uint64
 	v2    uint64
@@ -33,19 +36,31 @@
 	n     int // how much of mem is used
 }
 
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
 func New() *Digest {
+	return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
 	var d Digest
-	d.Reset()
+	d.ResetWithSeed(seed)
 	return &d
 }
 
 // Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
 func (d *Digest) Reset() {
-	d.v1 = primes[0] + prime2
-	d.v2 = prime2
-	d.v3 = 0
-	d.v4 = -primes[0]
+	d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+	d.v1 = seed + prime1 + prime2
+	d.v2 = seed + prime2
+	d.v3 = seed
+	d.v4 = seed - prime1
 	d.total = 0
 	d.n = 0
 }
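A quick sketch of the new seeded API (hash values are computed at run time, not shown here):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.NewWithSeed(42)
	d.WriteString("hello world")
	first := d.Sum64()

	// ResetWithSeed reuses the Digest without allocating a new one.
	d.ResetWithSeed(42)
	d.WriteString("hello world")
	fmt.Println(first == d.Sum64()) // true: same seed, same input
}
```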
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a..78f95f2 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
 
 package xxhash
 
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
 //
 //go:noescape
 func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13b..118e49e 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
 
 package xxhash
 
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
 func Sum64(b []byte) uint64 {
 	// A simpler version would be
 	//   d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5..05f5e7d 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
 
 package xxhash
 
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
 func Sum64String(s string) uint64 {
 	return Sum64([]byte(s))
 }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638f..cf9d42a 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@
 //
 // See https://github.com/golang/go/issues/42739 for discussion.
 
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
 // It may be faster than Sum64([]byte(s)) by avoiding a copy.
 func Sum64String(s string) uint64 {
 	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
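The two Sum64String variants now document the zero seed explicitly; the unsafe build above additionally avoids the []byte(s) copy by reinterpreting the string header. Either way the result must match the byte-slice path — a small check using only the public API:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "the quick brown fox"
	// Sum64String agrees with Sum64 on the equivalent bytes, copy or no copy.
	fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true
}
```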
diff --git a/vendor/github.com/cevaris/ordered_map/.travis.yml b/vendor/github.com/cevaris/ordered_map/.travis.yml
deleted file mode 100644
index 193242f..0000000
--- a/vendor/github.com/cevaris/ordered_map/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-language: go
-
-go:
-  - tip
-  - 1.12
-  - 1.11
-  - 1.10
-  - 1.9
-  - 1.8
-  - 1.7
-  - 1.6
-  - 1.5
-  - 1.4
-  - 1.3
-
-install:
-  - make
-  - make test
diff --git a/vendor/github.com/cevaris/ordered_map/README.md b/vendor/github.com/cevaris/ordered_map/README.md
index bc3e366..a77994d 100644
--- a/vendor/github.com/cevaris/ordered_map/README.md
+++ b/vendor/github.com/cevaris/ordered_map/README.md
@@ -9,7 +9,7 @@
 - Full support Key/Value for all data types
 - Exposes an Iterator that iterates in order of insertion
 - Full Get/Set/Delete map interface
-- Supports Golang v1.3 through v1.12
+- Supports Golang v1.3 through v1.19
 
 ## Download and Install 
   
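For context on the README's feature list, a short usage sketch of the package. The method names below (NewOrderedMap, Set, Get, and the insertion-order IterFunc returning *KVPair) reflect the upstream API as I understand it and should be treated as an assumption, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/cevaris/ordered_map"
)

func main() {
	om := ordered_map.NewOrderedMap()
	om.Set("a", 1)
	om.Set("b", 2)
	om.Set("c", 3)

	if v, ok := om.Get("b"); ok {
		fmt.Println("b =", v)
	}

	// Iteration yields entries in insertion order: a, b, c.
	iter := om.IterFunc()
	for kv, ok := iter(); ok; kv, ok = iter() {
		fmt.Println(kv.Key, kv.Value)
	}
}
```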
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/coreos/etcd/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/github.com/coreos/etcd/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
deleted file mode 100644
index c5faf00..0000000
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: auth.proto
-
-package authpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Permission_Type int32
-
-const (
-	READ      Permission_Type = 0
-	WRITE     Permission_Type = 1
-	READWRITE Permission_Type = 2
-)
-
-var Permission_Type_name = map[int32]string{
-	0: "READ",
-	1: "WRITE",
-	2: "READWRITE",
-}
-
-var Permission_Type_value = map[string]int32{
-	"READ":      0,
-	"WRITE":     1,
-	"READWRITE": 2,
-}
-
-func (x Permission_Type) String() string {
-	return proto.EnumName(Permission_Type_name, int32(x))
-}
-
-func (Permission_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{1, 0}
-}
-
-// User is a single entry in the bucket authUsers
-type User struct {
-	Name                 []byte   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password             []byte   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	Roles                []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *User) Reset()         { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage()    {}
-func (*User) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{0}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_User.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *User) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_User.Merge(m, src)
-}
-func (m *User) XXX_Size() int {
-	return m.Size()
-}
-func (m *User) XXX_DiscardUnknown() {
-	xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-// Permission is a single entity
-type Permission struct {
-	PermType             Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
-	Key                  []byte          `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
-	RangeEnd             []byte          `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *Permission) Reset()         { *m = Permission{} }
-func (m *Permission) String() string { return proto.CompactTextString(m) }
-func (*Permission) ProtoMessage()    {}
-func (*Permission) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{1}
-}
-func (m *Permission) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Permission) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Permission.Merge(m, src)
-}
-func (m *Permission) XXX_Size() int {
-	return m.Size()
-}
-func (m *Permission) XXX_DiscardUnknown() {
-	xxx_messageInfo_Permission.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Permission proto.InternalMessageInfo
-
-// Role is a single entry in the bucket authRoles
-type Role struct {
-	Name                 []byte        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	KeyPermission        []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *Role) Reset()         { *m = Role{} }
-func (m *Role) String() string { return proto.CompactTextString(m) }
-func (*Role) ProtoMessage()    {}
-func (*Role) Descriptor() ([]byte, []int) {
-	return fileDescriptor_8bbd6f3875b0e874, []int{2}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Role.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Role) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
-	return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
-	xxx_messageInfo_Role.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Role proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
-	proto.RegisterType((*User)(nil), "authpb.User")
-	proto.RegisterType((*Permission)(nil), "authpb.Permission")
-	proto.RegisterType((*Role)(nil), "authpb.Role")
-}
-
-func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
-
-var fileDescriptor_8bbd6f3875b0e874 = []byte{
-	// 288 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
-	0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
-	0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
-	0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
-	0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
-	0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
-	0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
-	0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
-	0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
-	0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
-	0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
-	0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
-	0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
-	0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
-	0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
-	0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
-	0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
-	0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
-}
-
-func (m *User) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *User) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Roles) > 0 {
-		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Roles[iNdEx])
-			copy(dAtA[i:], m.Roles[iNdEx])
-			i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Permission) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.PermType != 0 {
-		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Role) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Role) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.KeyPermission) > 0 {
-		for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintAuth(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
-	offset -= sovAuth(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *User) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	if len(m.Roles) > 0 {
-		for _, s := range m.Roles {
-			l = len(s)
-			n += 1 + l + sovAuth(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Permission) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.PermType != 0 {
-		n += 1 + sovAuth(uint64(m.PermType))
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Role) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovAuth(uint64(l))
-	}
-	if len(m.KeyPermission) > 0 {
-		for _, e := range m.KeyPermission {
-			l = e.Size()
-			n += 1 + l + sovAuth(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovAuth(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozAuth(x uint64) (n int) {
-	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *User) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: User: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
-			if m.Name == nil {
-				m.Name = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
-			if m.Password == nil {
-				m.Password = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipAuth(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Permission) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Permission: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
-			}
-			m.PermType = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.PermType |= Permission_Type(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipAuth(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Role) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Role: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
-			if m.Name == nil {
-				m.Name = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthAuth
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.KeyPermission = append(m.KeyPermission, &Permission{})
-			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipAuth(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthAuth
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipAuth(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowAuth
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowAuth
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthAuth
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthAuth
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowAuth
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipAuth(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthAuth
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowAuth   = fmt.Errorf("proto: integer overflow")
-)
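One detail in the generated code being removed above is worth unpacking: sovAuth computes the encoded size of a protobuf varint as (bits.Len64(x|1)+6)/7, i.e. ceil(bitlen/7), where x|1 guards the zero case (Len64(0) would be 0, but zero still takes one byte on the wire). A standalone check of that formula, stdlib only:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovAuth: the number of bytes needed to
// varint-encode x. Each wire byte carries 7 payload bits.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 127, 128, 16383, 16384} {
		fmt.Printf("sov(%d) = %d\n", x, sov(x))
	}
	// sov(0)=1, sov(127)=1, sov(128)=2, sov(16383)=2, sov(16384)=3
}
```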
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
deleted file mode 100644
index 001d334..0000000
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-syntax = "proto3";
-package authpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-// User is a single entry in the bucket authUsers
-message User {
-  bytes name = 1;
-  bytes password = 2;
-  repeated string roles = 3;
-}
-
-// Permission is a single entity
-message Permission {
-  enum Type {
-    READ = 0;
-    WRITE = 1;
-    READWRITE = 2;
-  }
-  Type permType = 1;
-
-  bytes key = 2;
-  bytes range_end = 3;
-}
-
-// Role is a single entry in the bucket authRoles
-message Role {
-  bytes name = 1;
-
-  repeated Permission keyPermission = 2;
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
deleted file mode 100644
index 9306385..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package balancer implements client balancer.
-package balancer
-
-import (
-	"strconv"
-	"sync"
-	"time"
-
-	"github.com/coreos/etcd/clientv3/balancer/connectivity"
-	"github.com/coreos/etcd/clientv3/balancer/picker"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc/balancer"
-	grpcconnectivity "google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/resolver"
-	_ "google.golang.org/grpc/resolver/dns"         // register DNS resolver
-	_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
-)
-
-// Config defines balancer configurations.
-type Config struct {
-	// Policy configures balancer policy.
-	Policy picker.Policy
-
-	// Picker implements gRPC picker.
-	// Leave empty if "Policy" field is not custom.
-	// TODO: currently custom policy is not supported.
-	// Picker picker.Picker
-
-	// Name defines an additional name for balancer.
-	// Useful for balancer testing to avoid register conflicts.
-	// If empty, defaults to policy name.
-	Name string
-
-	// Logger configures balancer logging.
-	// If nil, logs are discarded.
-	Logger *zap.Logger
-}
-
-// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
-// must be invoked at initialization time.
-func RegisterBuilder(cfg Config) {
-	bb := &builder{cfg}
-	balancer.Register(bb)
-
-	bb.cfg.Logger.Debug(
-		"registered balancer",
-		zap.String("policy", bb.cfg.Policy.String()),
-		zap.String("name", bb.cfg.Name),
-	)
-}
-
-type builder struct {
-	cfg Config
-}
-
-// Build is called initially when creating "ccBalancerWrapper".
-// "grpc.Dial" is called to this client connection.
-// Then, resolved addresses will be handled via "HandleResolvedAddrs".
-func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	bb := &baseBalancer{
-		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
-		policy: b.cfg.Policy,
-		name:   b.cfg.Name,
-		lg:     b.cfg.Logger,
-
-		addrToSc: make(map[resolver.Address]balancer.SubConn),
-		scToAddr: make(map[balancer.SubConn]resolver.Address),
-		scToSt:   make(map[balancer.SubConn]grpcconnectivity.State),
-
-		currentConn:          nil,
-		connectivityRecorder: connectivity.New(b.cfg.Logger),
-
-		// initialize picker always returns "ErrNoSubConnAvailable"
-		picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
-	}
-
-	// TODO: support multiple connections
-	bb.mu.Lock()
-	bb.currentConn = cc
-	bb.mu.Unlock()
-
-	bb.lg.Info(
-		"built balancer",
-		zap.String("balancer-id", bb.id),
-		zap.String("policy", bb.policy.String()),
-		zap.String("resolver-target", cc.Target()),
-	)
-	return bb
-}
-
-// Name implements "grpc/balancer.Builder" interface.
-func (b *builder) Name() string { return b.cfg.Name }
-
-// Balancer defines client balancer interface.
-type Balancer interface {
-	// Balancer is called on specified client connection. Client initiates gRPC
-	// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved
-	// addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
-	// For each resolved address, balancer calls "balancer.ClientConn.NewSubConn".
-	// "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state
-	// changes, thus requires failover logic in this method.
-	balancer.Balancer
-
-	// Picker calls "Pick" for every client request.
-	picker.Picker
-}
-
-type baseBalancer struct {
-	id     string
-	policy picker.Policy
-	name   string
-	lg     *zap.Logger
-
-	mu sync.RWMutex
-
-	addrToSc map[resolver.Address]balancer.SubConn
-	scToAddr map[balancer.SubConn]resolver.Address
-	scToSt   map[balancer.SubConn]grpcconnectivity.State
-
-	currentConn          balancer.ClientConn
-	connectivityRecorder connectivity.Recorder
-
-	picker picker.Picker
-}
-
-// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
-// gRPC sends initial or updated resolved addresses from "Build".
-func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
-		return
-	}
-	bb.lg.Info("resolved",
-		zap.String("picker", bb.picker.String()),
-		zap.String("balancer-id", bb.id),
-		zap.Strings("addresses", addrsToStrings(addrs)),
-	)
-
-	bb.mu.Lock()
-	defer bb.mu.Unlock()
-
-	resolved := make(map[resolver.Address]struct{})
-	for _, addr := range addrs {
-		resolved[addr] = struct{}{}
-		if _, ok := bb.addrToSc[addr]; !ok {
-			sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
-			if err != nil {
-				bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
-				continue
-			}
-			bb.lg.Info("created subconn", zap.String("address", addr.Addr))
-			bb.addrToSc[addr] = sc
-			bb.scToAddr[sc] = addr
-			bb.scToSt[sc] = grpcconnectivity.Idle
-			sc.Connect()
-		}
-	}
-
-	for addr, sc := range bb.addrToSc {
-		if _, ok := resolved[addr]; !ok {
-			// was removed by resolver or failed to create subconn
-			bb.currentConn.RemoveSubConn(sc)
-			delete(bb.addrToSc, addr)
-
-			bb.lg.Info(
-				"removed subconn",
-				zap.String("picker", bb.picker.String()),
-				zap.String("balancer-id", bb.id),
-				zap.String("address", addr.Addr),
-				zap.String("subconn", scToString(sc)),
-			)
-
-			// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
-			// (DO NOT) delete(bb.scToAddr, sc)
-			// (DO NOT) delete(bb.scToSt, sc)
-		}
-	}
-}
-
-// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
-func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
-	bb.mu.Lock()
-	defer bb.mu.Unlock()
-
-	old, ok := bb.scToSt[sc]
-	if !ok {
-		bb.lg.Warn(
-			"state change for an unknown subconn",
-			zap.String("picker", bb.picker.String()),
-			zap.String("balancer-id", bb.id),
-			zap.String("subconn", scToString(sc)),
-			zap.Int("subconn-size", len(bb.scToAddr)),
-			zap.String("state", s.String()),
-		)
-		return
-	}
-
-	bb.lg.Info(
-		"state changed",
-		zap.String("picker", bb.picker.String()),
-		zap.String("balancer-id", bb.id),
-		zap.Bool("connected", s == grpcconnectivity.Ready),
-		zap.String("subconn", scToString(sc)),
-		zap.Int("subconn-size", len(bb.scToAddr)),
-		zap.String("address", bb.scToAddr[sc].Addr),
-		zap.String("old-state", old.String()),
-		zap.String("new-state", s.String()),
-	)
-
-	bb.scToSt[sc] = s
-	switch s {
-	case grpcconnectivity.Idle:
-		sc.Connect()
-	case grpcconnectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scToSt. Remove state for this sc here.
-		delete(bb.scToAddr, sc)
-		delete(bb.scToSt, sc)
-	}
-
-	oldAggrState := bb.connectivityRecorder.GetCurrentState()
-	bb.connectivityRecorder.RecordTransition(old, s)
-
-	// Update balancer picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
-	if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
-		(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
-		bb.updatePicker()
-	}
-
-	bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
-}
-
-func (bb *baseBalancer) updatePicker() {
-	if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
-		bb.picker = picker.NewErr(balancer.ErrTransientFailure)
-		bb.lg.Info(
-			"updated picker to transient error picker",
-			zap.String("picker", bb.picker.String()),
-			zap.String("balancer-id", bb.id),
-			zap.String("policy", bb.policy.String()),
-		)
-		return
-	}
-
-	// only pass ready subconns to picker
-	scToAddr := make(map[balancer.SubConn]resolver.Address)
-	for addr, sc := range bb.addrToSc {
-		if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
-			scToAddr[sc] = addr
-		}
-	}
-
-	bb.picker = picker.New(picker.Config{
-		Policy:                   bb.policy,
-		Logger:                   bb.lg,
-		SubConnToResolverAddress: scToAddr,
-	})
-	bb.lg.Info(
-		"updated picker",
-		zap.String("picker", bb.picker.String()),
-		zap.String("balancer-id", bb.id),
-		zap.String("policy", bb.policy.String()),
-		zap.Strings("subconn-ready", scsToStrings(scToAddr)),
-		zap.Int("subconn-size", len(scToAddr)),
-	)
-}
-
-// Close implements "grpc/balancer.Balancer" interface.
-// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
-func (bb *baseBalancer) Close() {
-	// TODO
-}
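The deleted balancer rebuilds its picker only under the four conditions listed in HandleSubConnStateChange: this SubConn's readiness flips in either direction, or the aggregated state crosses into or out of TransientFailure. A compact restatement of that rule as a predicate — a sketch for illustration, not the etcd API:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// shouldUpdatePicker restates the refresh rule: update when readiness of the
// SubConn flips, or when the aggregated balancer state enters or leaves
// TransientFailure.
func shouldUpdatePicker(oldSt, newSt, aggOld, aggNew connectivity.State) bool {
	readyFlipped := (newSt == connectivity.Ready) != (oldSt == connectivity.Ready)
	tfFlipped := (aggNew == connectivity.TransientFailure) != (aggOld == connectivity.TransientFailure)
	return readyFlipped || tfFlipped
}

func main() {
	// SubConn became ready: the picker must be rebuilt.
	fmt.Println(shouldUpdatePicker(connectivity.Connecting, connectivity.Ready,
		connectivity.Connecting, connectivity.Ready)) // true
	// Neither readiness nor the TransientFailure boundary changed: keep it.
	fmt.Println(shouldUpdatePicker(connectivity.Connecting, connectivity.Idle,
		connectivity.Connecting, connectivity.Connecting)) // false
}
```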
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
deleted file mode 100644
index 4c4ad36..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package connectivity implements client connectivity operations.
-package connectivity
-
-import (
-	"sync"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc/connectivity"
-)
-
-// Recorder records gRPC connectivity.
-type Recorder interface {
-	GetCurrentState() connectivity.State
-	RecordTransition(oldState, newState connectivity.State)
-}
-
-// New returns a new Recorder.
-func New(lg *zap.Logger) Recorder {
-	return &recorder{lg: lg}
-}
-
-// recorder takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
-type recorder struct {
-	lg *zap.Logger
-
-	mu sync.RWMutex
-
-	cur connectivity.State
-
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-func (rc *recorder) GetCurrentState() (state connectivity.State) {
-	rc.mu.RLock()
-	defer rc.mu.RUnlock()
-	return rc.cur
-}
-
-// RecordTransition records state change happening in subConn and based on that
-// it evaluates what aggregated state should be.
-//
-//  - If at least one SubConn in Ready, the aggregated state is Ready;
-//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-//  - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-//
-// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
-func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
-	rc.mu.Lock()
-	defer rc.mu.Unlock()
-
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			rc.numReady += updateVal
-		case connectivity.Connecting:
-			rc.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			rc.numTransientFailure += updateVal
-		default:
-			rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
-		}
-	}
-
-	switch { // must be exclusive, no overlap
-	case rc.numReady > 0:
-		rc.cur = connectivity.Ready
-	case rc.numConnecting > 0:
-		rc.cur = connectivity.Connecting
-	default:
-		rc.cur = connectivity.TransientFailure
-	}
-}
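RecordTransition's doc comment spells out the aggregation order: any Ready SubConn makes the balancer Ready; failing that, any Connecting SubConn makes it Connecting; otherwise it is TransientFailure, with Idle and Shutdown not counted at all. A worked restatement of just that decision — a sketch; the real recorder also maintains the three counters under a mutex:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/connectivity"
)

// aggregate restates the recorder's rule: Ready wins, then Connecting,
// otherwise TransientFailure (Idle/Shutdown are not counted).
func aggregate(numReady, numConnecting uint64) connectivity.State {
	switch {
	case numReady > 0:
		return connectivity.Ready
	case numConnecting > 0:
		return connectivity.Connecting
	default:
		return connectivity.TransientFailure
	}
}

func main() {
	fmt.Println(aggregate(1, 3)) // READY: one ready SubConn suffices
	fmt.Println(aggregate(0, 2)) // CONNECTING: no ready, some connecting
	fmt.Println(aggregate(0, 0)) // TRANSIENT_FAILURE: nothing usable
}
```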
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
deleted file mode 100644
index 35dabf5..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package picker defines/implements client balancer picker policy.
-package picker
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
deleted file mode 100644
index 9e04378..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
-	"context"
-
-	"google.golang.org/grpc/balancer"
-)
-
-// NewErr returns a picker that always returns err on "Pick".
-func NewErr(err error) Picker {
-	return &errPicker{p: Error, err: err}
-}
-
-type errPicker struct {
-	p   Policy
-	err error
-}
-
-func (ep *errPicker) String() string {
-	return ep.p.String()
-}
-
-func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, ep.err
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
deleted file mode 100644
index bd1a5d2..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
-	"fmt"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-// Picker defines balancer Picker methods.
-type Picker interface {
-	balancer.Picker
-	String() string
-}
-
-// Config defines picker configuration.
-type Config struct {
-	// Policy specifies etcd clientv3's built-in balancer policy.
-	Policy Policy
-
-	// Logger defines the picker logging object.
-	Logger *zap.Logger
-
-	// SubConnToResolverAddress maps each gRPC sub-connection to an address.
-	// Basically, it is a list of addresses that the Picker can pick from.
-	SubConnToResolverAddress map[balancer.SubConn]resolver.Address
-}
-
-// Policy defines balancer picker policy.
-type Policy uint8
-
-const (
-	// Error is the error picker policy.
-	Error Policy = iota
-
-	// RoundrobinBalanced balances loads over multiple endpoints
-	// and implements failover in roundrobin fashion.
-	RoundrobinBalanced
-
-	// Custom defines a custom balancer picker.
-	// TODO: custom picker is not supported yet.
-	Custom
-)
-
-func (p Policy) String() string {
-	switch p {
-	case Error:
-		return "picker-error"
-
-	case RoundrobinBalanced:
-		return "picker-roundrobin-balanced"
-
-	case Custom:
-		panic("'custom' picker policy is not supported yet")
-
-	default:
-		panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
-	}
-}
-
-// New creates a new Picker.
-func New(cfg Config) Picker {
-	switch cfg.Policy {
-	case Error:
-		panic("'error' picker policy is not supported here; use 'picker.NewErr'")
-
-	case RoundrobinBalanced:
-		return newRoundrobinBalanced(cfg)
-
-	case Custom:
-		panic("'custom' picker policy is not supported yet")
-
-	default:
-		panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
-	}
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
deleted file mode 100644
index 1b8b285..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
-	"context"
-	"sync"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-// newRoundrobinBalanced returns a new roundrobin balanced picker.
-func newRoundrobinBalanced(cfg Config) Picker {
-	scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
-	for sc := range cfg.SubConnToResolverAddress {
-		scs = append(scs, sc)
-	}
-	return &rrBalanced{
-		p:        RoundrobinBalanced,
-		lg:       cfg.Logger,
-		scs:      scs,
-		scToAddr: cfg.SubConnToResolverAddress,
-	}
-}
-
-type rrBalanced struct {
-	p Policy
-
-	lg *zap.Logger
-
-	mu       sync.RWMutex
-	next     int
-	scs      []balancer.SubConn
-	scToAddr map[balancer.SubConn]resolver.Address
-}
-
-func (rb *rrBalanced) String() string { return rb.p.String() }
-
-// Pick is called for every client request.
-func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	rb.mu.RLock()
-	n := len(rb.scs)
-	rb.mu.RUnlock()
-	if n == 0 {
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-
-	rb.mu.Lock()
-	cur := rb.next
-	sc := rb.scs[cur]
-	picked := rb.scToAddr[sc].Addr
-	rb.next = (rb.next + 1) % len(rb.scs)
-	rb.mu.Unlock()
-
-	rb.lg.Debug(
-		"picked",
-		zap.String("picker", rb.p.String()),
-		zap.String("address", picked),
-		zap.Int("subconn-index", cur),
-		zap.Int("subconn-size", n),
-	)
-
-	doneFunc := func(info balancer.DoneInfo) {
-		// TODO: error handling?
-		fss := []zapcore.Field{
-			zap.Error(info.Err),
-			zap.String("picker", rb.p.String()),
-			zap.String("address", picked),
-			zap.Bool("success", info.Err == nil),
-			zap.Bool("bytes-sent", info.BytesSent),
-			zap.Bool("bytes-received", info.BytesReceived),
-		}
-		if info.Err == nil {
-			rb.lg.Debug("balancer done", fss...)
-		} else {
-			rb.lg.Warn("balancer failed", fss...)
-		}
-	}
-	return sc, doneFunc, nil
-}
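For readers tracing the removal: the core of the round-robin policy above is a mutex-guarded index walk over the sub-connection slice. Below is a minimal standalone sketch of that selection logic, stripped of the gRPC and zap plumbing (the rrPicker type and addresses are illustrative, not part of the etcd API). Unlike the deleted code, which takes an RLock for the length check and then a separate write lock, the sketch holds a single lock for the whole operation.

package main

import (
	"fmt"
	"sync"
)

// rrPicker cycles through a fixed address list; a sketch of the
// round-robin policy deleted above, without the gRPC plumbing.
type rrPicker struct {
	mu    sync.Mutex
	next  int
	addrs []string
}

// Pick returns the next address in round-robin order, or "" when the
// list is empty (the deleted code returns ErrNoSubConnAvailable instead).
func (p *rrPicker) Pick() string {
	p.mu.Lock()
	defer p.mu.Unlock()
	if len(p.addrs) == 0 {
		return ""
	}
	addr := p.addrs[p.next]
	p.next = (p.next + 1) % len(p.addrs)
	return addr
}

func main() {
	p := &rrPicker{addrs: []string{"10.0.0.1:2379", "10.0.0.2:2379"}}
	for i := 0; i < 4; i++ {
		fmt.Println(p.Pick()) // alternates between the two endpoints
	}
}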
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
deleted file mode 100644
index 864b5df..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
-package endpoint
-
-import (
-	"context"
-	"fmt"
-	"net"
-	"net/url"
-	"strings"
-	"sync"
-
-	"google.golang.org/grpc/resolver"
-)
-
-const scheme = "endpoint"
-
-var (
-	targetPrefix = fmt.Sprintf("%s://", scheme)
-
-	bldr *builder
-)
-
-func init() {
-	bldr = &builder{
-		resolverGroups: make(map[string]*ResolverGroup),
-	}
-	resolver.Register(bldr)
-}
-
-type builder struct {
-	mu             sync.RWMutex
-	resolverGroups map[string]*ResolverGroup
-}
-
-// NewResolverGroup creates a new ResolverGroup with the given id.
-func NewResolverGroup(id string) (*ResolverGroup, error) {
-	return bldr.newResolverGroup(id)
-}
-
-// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
-// up-to-date.
-type ResolverGroup struct {
-	mu        sync.RWMutex
-	id        string
-	endpoints []string
-	resolvers []*Resolver
-}
-
-func (e *ResolverGroup) addResolver(r *Resolver) {
-	e.mu.Lock()
-	addrs := epsToAddrs(e.endpoints...)
-	e.resolvers = append(e.resolvers, r)
-	e.mu.Unlock()
-	r.cc.NewAddress(addrs)
-}
-
-func (e *ResolverGroup) removeResolver(r *Resolver) {
-	e.mu.Lock()
-	for i, er := range e.resolvers {
-		if er == r {
-			e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
-			break
-		}
-	}
-	e.mu.Unlock()
-}
-
-// SetEndpoints updates the endpoints for the ResolverGroup. All registered resolvers are updated
-// immediately with the new endpoints.
-func (e *ResolverGroup) SetEndpoints(endpoints []string) {
-	addrs := epsToAddrs(endpoints...)
-	e.mu.Lock()
-	e.endpoints = endpoints
-	for _, r := range e.resolvers {
-		r.cc.NewAddress(addrs)
-	}
-	e.mu.Unlock()
-}
-
-// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
-func (e *ResolverGroup) Target(endpoint string) string {
-	return Target(e.id, endpoint)
-}
-
-// Target constructs an endpoint resolver target.
-func Target(id, endpoint string) string {
-	return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
-}
-
-// IsTarget checks whether a given target string is an endpoint resolver target.
-func IsTarget(target string) bool {
-	return strings.HasPrefix(target, "endpoint://")
-}
-
-func (e *ResolverGroup) Close() {
-	bldr.close(e.id)
-}
-
-// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
-	if len(target.Authority) < 1 {
-		return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
-	}
-	id := target.Authority
-	es, err := b.getResolverGroup(id)
-	if err != nil {
-		return nil, fmt.Errorf("failed to build resolver: %v", err)
-	}
-	r := &Resolver{
-		endpointID: id,
-		cc:         cc,
-	}
-	es.addResolver(r)
-	return r, nil
-}
-
-func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
-	b.mu.RLock()
-	_, ok := b.resolverGroups[id]
-	b.mu.RUnlock()
-	if ok {
-		return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
-	}
-
-	es := &ResolverGroup{id: id}
-	b.mu.Lock()
-	b.resolverGroups[id] = es
-	b.mu.Unlock()
-	return es, nil
-}
-
-func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
-	b.mu.RLock()
-	es, ok := b.resolverGroups[id]
-	b.mu.RUnlock()
-	if !ok {
-		return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
-	}
-	return es, nil
-}
-
-func (b *builder) close(id string) {
-	b.mu.Lock()
-	delete(b.resolverGroups, id)
-	b.mu.Unlock()
-}
-
-func (b *builder) Scheme() string {
-	return scheme
-}
-
-// Resolver provides a resolver for a single etcd cluster, identified by name.
-type Resolver struct {
-	endpointID string
-	cc         resolver.ClientConn
-	sync.RWMutex
-}
-
-// TODO: use balancer.epsToAddrs
-func epsToAddrs(eps ...string) (addrs []resolver.Address) {
-	addrs = make([]resolver.Address, 0, len(eps))
-	for _, ep := range eps {
-		addrs = append(addrs, resolver.Address{Addr: ep})
-	}
-	return addrs
-}
-
-func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
-
-func (r *Resolver) Close() {
-	es, err := bldr.getResolverGroup(r.endpointID)
-	if err != nil {
-		return
-	}
-	es.removeResolver(r)
-}
-
-// ParseEndpoint parses an endpoint of the form
-// (http|https)://<host>*|(unix|unixs)://<path>)
-// and returns a protocol ('tcp' or 'unix'),
-// host (or filepath if a unix socket),
-// scheme (http, https, unix, unixs).
-func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
-	proto = "tcp"
-	host = endpoint
-	url, uerr := url.Parse(endpoint)
-	if uerr != nil || !strings.Contains(endpoint, "://") {
-		return proto, host, scheme
-	}
-	scheme = url.Scheme
-
-	// strip scheme:// prefix since grpc dials by host
-	host = url.Host
-	switch url.Scheme {
-	case "http", "https":
-	case "unix", "unixs":
-		proto = "unix"
-		host = url.Host + url.Path
-	default:
-		proto, host = "", ""
-	}
-	return proto, host, scheme
-}
-
-// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
-// If the target is malformed, an error is returned.
-func ParseTarget(target string) (string, string, error) {
-	noPrefix := strings.TrimPrefix(target, targetPrefix)
-	if noPrefix == target {
-		return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
-	}
-	parts := strings.SplitN(noPrefix, "/", 2)
-	if len(parts) != 2 {
-		return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
-	}
-	return parts[0], parts[1], nil
-}
-
-// Dialer dials an endpoint using net.Dialer.
-// Context cancelation and timeout are supported.
-func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
-	proto, host, _ := ParseEndpoint(dialEp)
-	select {
-	case <-ctx.Done():
-		return nil, ctx.Err()
-	default:
-	}
-	dialer := &net.Dialer{}
-	if deadline, ok := ctx.Deadline(); ok {
-		dialer.Deadline = deadline
-	}
-	return dialer.DialContext(ctx, proto, host)
-}
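The endpoint://<id>/<endpoint> target format handled by the deleted resolver can be exercised in isolation. Here is a self-contained sketch of the same prefix-strip-and-split parsing that ParseTarget performed (parseTarget is a local stand-in, not the etcd function):

package main

import (
	"fmt"
	"strings"
)

// parseTarget splits "endpoint://<id>/<endpoint>" into its id and
// endpoint parts, mirroring the ParseTarget helper deleted above.
func parseTarget(target string) (id, ep string, err error) {
	const prefix = "endpoint://"
	rest := strings.TrimPrefix(target, prefix)
	if rest == target {
		return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", prefix, target)
	}
	parts := strings.SplitN(rest, "/", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("malformed target, expected endpoint://<id>/<endpoint>, got %s", target)
	}
	return parts[0], parts[1], nil
}

func main() {
	id, ep, err := parseTarget("endpoint://cluster-1/10.0.0.1:2379")
	fmt.Println(id, ep, err) // cluster-1 10.0.0.1:2379 <nil>
}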
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
deleted file mode 100644
index 48eb875..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package balancer
-
-import (
-	"fmt"
-	"net/url"
-	"sort"
-	"sync/atomic"
-	"time"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/resolver"
-)
-
-func scToString(sc balancer.SubConn) string {
-	return fmt.Sprintf("%p", sc)
-}
-
-func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
-	ss = make([]string, 0, len(scs))
-	for sc, a := range scs {
-		ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
-	}
-	sort.Strings(ss)
-	return ss
-}
-
-func addrsToStrings(addrs []resolver.Address) (ss []string) {
-	ss = make([]string, len(addrs))
-	for i := range addrs {
-		ss[i] = addrs[i].Addr
-	}
-	sort.Strings(ss)
-	return ss
-}
-
-func epsToAddrs(eps ...string) (addrs []resolver.Address) {
-	addrs = make([]resolver.Address, 0, len(eps))
-	for _, ep := range eps {
-		u, err := url.Parse(ep)
-		if err != nil {
-			addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
-			continue
-		}
-		addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
-	}
-	return addrs
-}
-
-var genN = new(uint32)
-
-func genName() string {
-	now := time.Now().UnixNano()
-	return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
-}
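The fallback in epsToAddrs above is easy to miss: endpoints that fail URL parsing are kept verbatim, while parseable URLs are reduced to their host. A small illustrative sketch of that behavior (epToAddr is a hypothetical helper, which additionally keeps the raw string when the parsed host is empty):

package main

import (
	"fmt"
	"net/url"
)

// epToAddr reduces an endpoint to a dialable address: the URL host when
// the endpoint parses as a URL, otherwise the raw string, echoing the
// epsToAddrs fallback deleted above.
func epToAddr(ep string) string {
	u, err := url.Parse(ep)
	if err != nil || u.Host == "" {
		return ep
	}
	return u.Host
}

func main() {
	fmt.Println(epToAddr("http://10.0.0.1:2379")) // 10.0.0.1:2379
	fmt.Println(epToAddr("10.0.0.1:2379"))        // 10.0.0.1:2379 (parse fails, kept as-is)
}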
diff --git a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
deleted file mode 100644
index 2dc2012..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credentials implements the gRPC credential interface with etcd-specific logic,
-// e.g., a client handshake with a custom authority parameter.
-package credentials
-
-import (
-	"context"
-	"crypto/tls"
-	"net"
-	"sync"
-
-	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	grpccredentials "google.golang.org/grpc/credentials"
-)
-
-// Config defines gRPC credential configuration.
-type Config struct {
-	TLSConfig *tls.Config
-}
-
-// Bundle defines gRPC credential interface.
-type Bundle interface {
-	grpccredentials.Bundle
-	UpdateAuthToken(token string)
-}
-
-// NewBundle constructs a new gRPC credential bundle.
-func NewBundle(cfg Config) Bundle {
-	return &bundle{
-		tc: newTransportCredential(cfg.TLSConfig),
-		rc: newPerRPCCredential(),
-	}
-}
-
-// bundle implements the "grpccredentials.Bundle" interface.
-type bundle struct {
-	tc *transportCredential
-	rc *perRPCCredential
-}
-
-func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
-	return b.tc
-}
-
-func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
-	return b.rc
-}
-
-func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
-	// no-op
-	return nil, nil
-}
-
-// transportCredential implements the "grpccredentials.TransportCredentials" interface.
-// transportCredential wraps TransportCredentials to track which
-// addresses are dialed for which endpoints, and then sets the authority to the hostname or IP
-// of the dialed endpoint when checking the endpoint's cert.
-// This is a workaround for a gRPC load balancer issue: gRPC uses the dialed target's service name as the authority when
-// checking all endpoint certs, which does not work for etcd servers that use their hostname or IP as the Subject Alternative Name
-// in their TLS certs.
-// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
-// when dialing.
-type transportCredential struct {
-	gtc grpccredentials.TransportCredentials
-	mu  sync.Mutex
-	// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
-	// endpoint provided to the dialer when dialing
-	addrToEndpoint map[string]string
-}
-
-func newTransportCredential(cfg *tls.Config) *transportCredential {
-	return &transportCredential{
-		gtc:            grpccredentials.NewTLS(cfg),
-		addrToEndpoint: map[string]string{},
-	}
-}
-
-func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
-	// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
-	tc.mu.Lock()
-	dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
-	tc.mu.Unlock()
-	if ok {
-		_, host, _ := endpoint.ParseEndpoint(dialEp)
-		authority = host
-	}
-	return tc.gtc.ClientHandshake(ctx, authority, rawConn)
-}
-
-// isIP returns true if the given string is an IP.
-func isIP(ep string) bool {
-	return net.ParseIP(ep) != nil
-}
-
-func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
-	return tc.gtc.ServerHandshake(rawConn)
-}
-
-func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
-	return tc.gtc.Info()
-}
-
-func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
-	copy := map[string]string{}
-	tc.mu.Lock()
-	for k, v := range tc.addrToEndpoint {
-		copy[k] = v
-	}
-	tc.mu.Unlock()
-	return &transportCredential{
-		gtc:            tc.gtc.Clone(),
-		addrToEndpoint: copy,
-	}
-}
-
-func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
-	return tc.gtc.OverrideServerName(serverNameOverride)
-}
-
-func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
-	// Keep track of which addresses are dialed for which endpoints
-	conn, err := endpoint.Dialer(ctx, dialEp)
-	if conn != nil {
-		tc.mu.Lock()
-		tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
-		tc.mu.Unlock()
-	}
-	return conn, err
-}
-
-// perRPCCredential implements the "grpccredentials.PerRPCCredentials" interface.
-type perRPCCredential struct {
-	authToken   string
-	authTokenMu sync.RWMutex
-}
-
-func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
-
-func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
-
-func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
-	rc.authTokenMu.RLock()
-	authToken := rc.authToken
-	rc.authTokenMu.RUnlock()
-	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
-}
-
-func (b *bundle) UpdateAuthToken(token string) {
-	if b.rc == nil {
-		return
-	}
-	b.rc.UpdateAuthToken(token)
-}
-
-func (rc *perRPCCredential) UpdateAuthToken(token string) {
-	rc.authTokenMu.Lock()
-	rc.authToken = token
-	rc.authTokenMu.Unlock()
-}
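The per-RPC half of the deleted bundle reduces to a read-mostly token behind an RWMutex: UpdateAuthToken takes the write lock, GetRequestMetadata the read lock. A standalone sketch of that pattern (tokenStore is illustrative; the real code attaches the token under the rpctypes.TokenFieldNameGRPC key):

package main

import (
	"fmt"
	"sync"
)

// tokenStore mirrors the perRPCCredential pattern deleted above:
// writers replace the auth token, readers attach it to request metadata.
type tokenStore struct {
	mu    sync.RWMutex
	token string
}

// Update replaces the token under the write lock.
func (t *tokenStore) Update(token string) {
	t.mu.Lock()
	t.token = token
	t.mu.Unlock()
}

// Metadata returns the metadata map a per-RPC credential would attach.
func (t *tokenStore) Metadata() map[string]string {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return map[string]string{"token": t.token}
}

func main() {
	ts := &tokenStore{}
	ts.Update("abc123")
	fmt.Println(ts.Metadata()) // map[token:abc123]
}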
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
deleted file mode 100644
index f72c6a6..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
-package rpctypes
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
deleted file mode 100644
index bc1ad7b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-// server-side error
-var (
-	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
-	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
-	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
-	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
-	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
-	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
-	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
-	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
-	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
-
-	ErrGRPCLeaseNotFound    = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
-	ErrGRPCLeaseExist       = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
-	ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
-
-	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
-	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
-	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
-	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
-	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
-
-	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
-	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
-
-	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
-	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
-	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
-	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
-	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
-	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
-	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
-	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
-	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
-	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
-	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
-	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
-	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
-	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
-
-	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
-	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
-	ErrGRPCLeaderChanged              = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
-	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
-	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
-	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
-	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
-	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
-	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
-	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
-
-	errStringToError = map[string]error{
-		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
-		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
-		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
-		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
-
-		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
-		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
-		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
-		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
-		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
-
-		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
-		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
-		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
-
-		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
-		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
-		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
-		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
-		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
-
-		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
-		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
-
-		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
-		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
-		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
-		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
-		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
-		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
-		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
-		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
-		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
-		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
-		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
-		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
-		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
-		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
-
-		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
-		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
-		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
-		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
-		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
-		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
-		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
-		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
-		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
-	}
-)
-
-// client-side error
-var (
-	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
-	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
-	ErrValueProvided = Error(ErrGRPCValueProvided)
-	ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
-	ErrTooManyOps    = Error(ErrGRPCTooManyOps)
-	ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
-	ErrCompacted     = Error(ErrGRPCCompacted)
-	ErrFutureRev     = Error(ErrGRPCFutureRev)
-	ErrNoSpace       = Error(ErrGRPCNoSpace)
-
-	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
-	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
-	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
-
-	ErrMemberExist            = Error(ErrGRPCMemberExist)
-	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
-	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
-	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
-	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
-
-	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
-	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
-
-	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
-	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
-	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
-	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
-	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
-	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
-	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
-	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
-	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
-	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
-	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
-	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
-	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
-	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
-
-	ErrNoLeader                   = Error(ErrGRPCNoLeader)
-	ErrNotLeader                  = Error(ErrGRPCNotLeader)
-	ErrLeaderChanged              = Error(ErrGRPCLeaderChanged)
-	ErrNotCapable                 = Error(ErrGRPCNotCapable)
-	ErrStopped                    = Error(ErrGRPCStopped)
-	ErrTimeout                    = Error(ErrGRPCTimeout)
-	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
-	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
-	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
-	ErrCorrupt                    = Error(ErrGRPCCorrupt)
-)
-
-// EtcdError defines gRPC server errors.
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
-type EtcdError struct {
-	code codes.Code
-	desc string
-}
-
-// Code returns grpc/codes.Code.
-// TODO: define clientv3/codes.Code.
-func (e EtcdError) Code() codes.Code {
-	return e.code
-}
-
-func (e EtcdError) Error() string {
-	return e.desc
-}
-
-func Error(err error) error {
-	if err == nil {
-		return nil
-	}
-	verr, ok := errStringToError[ErrorDesc(err)]
-	if !ok { // not gRPC error
-		return err
-	}
-	ev, ok := status.FromError(verr)
-	var desc string
-	if ok {
-		desc = ev.Message()
-	} else {
-		desc = verr.Error()
-	}
-	return EtcdError{code: ev.Code(), desc: desc}
-}
-
-func ErrorDesc(err error) string {
-	if s, ok := status.FromError(err); ok {
-		return s.Message()
-	}
-	return err.Error()
-}
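The mapping between the server-side and client-side error lists above hinges on one detail: the lookup key is the gRPC status message, recovered via status.FromError. A reduced sketch of that round trip using a single entry of the table (the grpc status and codes packages are the same ones the deleted file imports):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// errNoLeader stands in for one entry of the errStringToError table
// deleted above; the map key is the status message, not the error value.
var errNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()

var byDesc = map[string]error{
	desc(errNoLeader): errNoLeader,
}

// desc extracts the status message, falling back to Error(), exactly as
// the deleted ErrorDesc helper did.
func desc(err error) string {
	if s, ok := status.FromError(err); ok {
		return s.Message()
	}
	return err.Error()
}

func main() {
	// An error received over the wire with the same message maps back
	// to the canonical client-side value.
	wire := status.New(codes.Unavailable, "etcdserver: no leader").Err()
	fmt.Println(byDesc[desc(wire)] == errNoLeader) // true
}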
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
deleted file mode 100644
index 90b8b83..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
-	MetadataRequireLeaderKey = "hasleader"
-	MetadataHasLeader        = "true"
-
-	MetadataClientAPIVersionKey = "client-api-version"
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
deleted file mode 100644
index 8f8ac60..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
-	TokenFieldNameGRPC    = "token"
-	TokenFieldNameSwagger = "authorization"
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
deleted file mode 100644
index 12b6763..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
+++ /dev/null
@@ -1,1034 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: etcdserver.proto
-
-package etcdserverpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Request struct {
-	ID                   uint64   `protobuf:"varint,1,opt,name=ID" json:"ID"`
-	Method               string   `protobuf:"bytes,2,opt,name=Method" json:"Method"`
-	Path                 string   `protobuf:"bytes,3,opt,name=Path" json:"Path"`
-	Val                  string   `protobuf:"bytes,4,opt,name=Val" json:"Val"`
-	Dir                  bool     `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
-	PrevValue            string   `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
-	PrevIndex            uint64   `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
-	PrevExist            *bool    `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
-	Expiration           int64    `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
-	Wait                 bool     `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
-	Since                uint64   `protobuf:"varint,11,opt,name=Since" json:"Since"`
-	Recursive            bool     `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
-	Sorted               bool     `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
-	Quorum               bool     `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
-	Time                 int64    `protobuf:"varint,15,opt,name=Time" json:"Time"`
-	Stream               bool     `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
-	Refresh              *bool    `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Request) Reset()         { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage()    {}
-func (*Request) Descriptor() ([]byte, []int) {
-	return fileDescriptor_09ffbeb3bebbce7e, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Request.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Request) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Request.Merge(m, src)
-}
-func (m *Request) XXX_Size() int {
-	return m.Size()
-}
-func (m *Request) XXX_DiscardUnknown() {
-	xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-type Metadata struct {
-	NodeID               uint64   `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
-	ClusterID            uint64   `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Metadata) Reset()         { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage()    {}
-func (*Metadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_09ffbeb3bebbce7e, []int{1}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Metadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metadata.Merge(m, src)
-}
-func (m *Metadata) XXX_Size() int {
-	return m.Size()
-}
-func (m *Metadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_Metadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
-	proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
-}
-
-func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
-
-var fileDescriptor_09ffbeb3bebbce7e = []byte{
-	// 380 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
-	0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
-	0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
-	0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
-	0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
-	0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
-	0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
-	0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
-	0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
-	0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
-	0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
-	0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
-	0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
-	0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
-	0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
-	0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
-	0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
-	0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
-	0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
-	0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
-	0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
-	0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
-	0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
-	0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
-}
-
-func (m *Request) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Request) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Refresh != nil {
-		i--
-		if *m.Refresh {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x1
-		i--
-		dAtA[i] = 0x88
-	}
-	i--
-	if m.Stream {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x1
-	i--
-	dAtA[i] = 0x80
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
-	i--
-	dAtA[i] = 0x78
-	i--
-	if m.Quorum {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x70
-	i--
-	if m.Sorted {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x68
-	i--
-	if m.Recursive {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x60
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
-	i--
-	dAtA[i] = 0x58
-	i--
-	if m.Wait {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x50
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
-	i--
-	dAtA[i] = 0x48
-	if m.PrevExist != nil {
-		i--
-		if *m.PrevExist {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x40
-	}
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
-	i--
-	dAtA[i] = 0x38
-	i -= len(m.PrevValue)
-	copy(dAtA[i:], m.PrevValue)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
-	i--
-	dAtA[i] = 0x32
-	i--
-	if m.Dir {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x28
-	i -= len(m.Val)
-	copy(dAtA[i:], m.Val)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
-	i--
-	dAtA[i] = 0x22
-	i -= len(m.Path)
-	copy(dAtA[i:], m.Path)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
-	i--
-	dAtA[i] = 0x1a
-	i -= len(m.Method)
-	copy(dAtA[i:], m.Method)
-	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
-	i--
-	dAtA[i] = 0x12
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *Metadata) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
-	offset -= sovEtcdserver(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *Request) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovEtcdserver(uint64(m.ID))
-	l = len(m.Method)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	l = len(m.Path)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	l = len(m.Val)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	n += 2
-	l = len(m.PrevValue)
-	n += 1 + l + sovEtcdserver(uint64(l))
-	n += 1 + sovEtcdserver(uint64(m.PrevIndex))
-	if m.PrevExist != nil {
-		n += 2
-	}
-	n += 1 + sovEtcdserver(uint64(m.Expiration))
-	n += 2
-	n += 1 + sovEtcdserver(uint64(m.Since))
-	n += 2
-	n += 2
-	n += 2
-	n += 1 + sovEtcdserver(uint64(m.Time))
-	n += 3
-	if m.Refresh != nil {
-		n += 3
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Metadata) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovEtcdserver(uint64(m.NodeID))
-	n += 1 + sovEtcdserver(uint64(m.ClusterID))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovEtcdserver(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozEtcdserver(x uint64) (n int) {
-	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Request) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowEtcdserver
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Request: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Method = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Path = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Val = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Dir = bool(v != 0)
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PrevValue = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
-			}
-			m.PrevIndex = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.PrevIndex |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			b := bool(v != 0)
-			m.PrevExist = &b
-		case 9:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
-			}
-			m.Expiration = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Expiration |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Wait = bool(v != 0)
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
-			}
-			m.Since = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Since |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 12:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Recursive = bool(v != 0)
-		case 13:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Sorted = bool(v != 0)
-		case 14:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Quorum = bool(v != 0)
-		case 15:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
-			}
-			m.Time = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Time |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 16:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Stream = bool(v != 0)
-		case 17:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			b := bool(v != 0)
-			m.Refresh = &b
-		default:
-			iNdEx = preIndex
-			skippy, err := skipEtcdserver(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Metadata) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowEtcdserver
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
-			}
-			m.NodeID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.NodeID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
-			}
-			m.ClusterID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ClusterID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipEtcdserver(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthEtcdserver
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipEtcdserver(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowEtcdserver
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowEtcdserver
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthEtcdserver
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthEtcdserver
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowEtcdserver
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipEtcdserver(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthEtcdserver
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowEtcdserver   = fmt.Errorf("proto: integer overflow")
-)
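
(Aside, not part of the patch: the generated Unmarshal methods deleted above inline the same base-128 varint loop for every scalar field. The standalone sketch below reproduces that decode loop outside the generated code; decodeUvarint is a hypothetical name, not an etcd API.)

package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the generated "for shift := uint(0); ; shift += 7"
// loops: each byte contributes its low 7 bits, and a set high bit means
// another byte follows.
func decodeUvarint(buf []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			// same guard the generated code reports as ErrIntOverflowEtcdserver
			return 0, 0, errors.New("varint overflows a 64-bit integer")
		}
		if n >= len(buf) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := buf[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
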
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
deleted file mode 100644
index 25e0aca..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-syntax = "proto2";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message Request {
-	optional uint64 ID         =  1 [(gogoproto.nullable) = false];
-	optional string Method     =  2 [(gogoproto.nullable) = false];
-	optional string Path       =  3 [(gogoproto.nullable) = false];
-	optional string Val        =  4 [(gogoproto.nullable) = false];
-	optional bool   Dir        =  5 [(gogoproto.nullable) = false];
-	optional string PrevValue  =  6 [(gogoproto.nullable) = false];
-	optional uint64 PrevIndex  =  7 [(gogoproto.nullable) = false];
-	optional bool   PrevExist  =  8 [(gogoproto.nullable) = true];
-	optional int64  Expiration =  9 [(gogoproto.nullable) = false];
-	optional bool   Wait       = 10 [(gogoproto.nullable) = false];
-	optional uint64 Since      = 11 [(gogoproto.nullable) = false];
-	optional bool   Recursive  = 12 [(gogoproto.nullable) = false];
-	optional bool   Sorted     = 13 [(gogoproto.nullable) = false];
-	optional bool   Quorum     = 14 [(gogoproto.nullable) = false];
-	optional int64  Time       = 15 [(gogoproto.nullable) = false];
-	optional bool   Stream     = 16 [(gogoproto.nullable) = false];
-	optional bool   Refresh    = 17 [(gogoproto.nullable) = true];
-}
-
-message Metadata {
-	optional uint64 NodeID    = 1 [(gogoproto.nullable) = false];
-	optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
-}
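
(Aside, not part of the patch: each field number declared in the Request and Metadata messages above combines with a wire type to form the tag that the generated Unmarshal splits back apart. A minimal sketch follows; tagKey is an illustrative helper, not etcd code.)

package main

import "fmt"

// tagKey builds a protobuf wire tag; the generated Unmarshal reverses it
// with fieldNum := int32(wire >> 3) and wireType := int(wire & 0x7).
func tagKey(fieldNum, wireType uint64) uint64 {
	return fieldNum<<3 | wireType
}

func main() {
	// Refresh is field 17 with wire type 0 (varint): tag 17<<3|0 = 136,
	// which itself encodes on the wire as the varint bytes 0x88 0x01.
	fmt.Printf("%#x\n", tagKey(17, 0)) // 0x88
}
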
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
deleted file mode 100644
index b3a199e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
+++ /dev/null
@@ -1,2427 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft_internal.proto
-
-package etcdserverpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RequestHeader struct {
-	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// username is a username that is associated with an auth token of a gRPC connection

-	Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
-	// auth_revision is a revision number of auth.authStore. It is not related to mvcc
-	AuthRevision         uint64   `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RequestHeader) Reset()         { *m = RequestHeader{} }
-func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
-func (*RequestHeader) ProtoMessage()    {}
-func (*RequestHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{0}
-}
-func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RequestHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RequestHeader.Merge(m, src)
-}
-func (m *RequestHeader) XXX_Size() int {
-	return m.Size()
-}
-func (m *RequestHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_RequestHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-type InternalRaftRequest struct {
-	Header                   *RequestHeader                   `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
-	ID                       uint64                           `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	V2                       *Request                         `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
-	Range                    *RangeRequest                    `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
-	Put                      *PutRequest                      `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
-	DeleteRange              *DeleteRangeRequest              `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
-	Txn                      *TxnRequest                      `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
-	Compaction               *CompactionRequest               `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
-	LeaseGrant               *LeaseGrantRequest               `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
-	LeaseRevoke              *LeaseRevokeRequest              `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
-	Alarm                    *AlarmRequest                    `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
-	AuthEnable               *AuthEnableRequest               `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
-	AuthDisable              *AuthDisableRequest              `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
-	Authenticate             *InternalAuthenticateRequest     `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
-	AuthUserAdd              *AuthUserAddRequest              `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
-	AuthUserDelete           *AuthUserDeleteRequest           `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
-	AuthUserGet              *AuthUserGetRequest              `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
-	AuthUserChangePassword   *AuthUserChangePasswordRequest   `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
-	AuthUserGrantRole        *AuthUserGrantRoleRequest        `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
-	AuthUserRevokeRole       *AuthUserRevokeRoleRequest       `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
-	AuthUserList             *AuthUserListRequest             `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
-	AuthRoleList             *AuthRoleListRequest             `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
-	AuthRoleAdd              *AuthRoleAddRequest              `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
-	AuthRoleDelete           *AuthRoleDeleteRequest           `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
-	AuthRoleGet              *AuthRoleGetRequest              `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
-	AuthRoleGrantPermission  *AuthRoleGrantPermissionRequest  `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
-	AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
-	XXX_NoUnkeyedLiteral     struct{}                         `json:"-"`
-	XXX_unrecognized         []byte                           `json:"-"`
-	XXX_sizecache            int32                            `json:"-"`
-}
-
-func (m *InternalRaftRequest) Reset()         { *m = InternalRaftRequest{} }
-func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalRaftRequest) ProtoMessage()    {}
-func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{1}
-}
-func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InternalRaftRequest.Merge(m, src)
-}
-func (m *InternalRaftRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *InternalRaftRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
-
-type EmptyResponse struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *EmptyResponse) Reset()         { *m = EmptyResponse{} }
-func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
-func (*EmptyResponse) ProtoMessage()    {}
-func (*EmptyResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{2}
-}
-func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *EmptyResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_EmptyResponse.Merge(m, src)
-}
-func (m *EmptyResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *EmptyResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
-// To avoid misuse of that field, an internal version of AuthenticateRequest is kept.
-type InternalAuthenticateRequest struct {
-	Name     string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	// simple_token is generated in the API layer (etcdserver/v3_server.go)
-	SimpleToken          string   `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *InternalAuthenticateRequest) Reset()         { *m = InternalAuthenticateRequest{} }
-func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalAuthenticateRequest) ProtoMessage()    {}
-func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b4c9a9be0cfca103, []int{3}
-}
-func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
-}
-func (m *InternalAuthenticateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
-	proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
-	proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
-	proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
-}
-
-func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
-
-var fileDescriptor_b4c9a9be0cfca103 = []byte{
-	// 840 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48,
-	0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd,
-	0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52,
-	0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e,
-	0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6,
-	0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4,
-	0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08,
-	0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86,
-	0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd,
-	0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3,
-	0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4,
-	0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d,
-	0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee,
-	0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63,
-	0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42,
-	0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34,
-	0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f,
-	0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08,
-	0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5,
-	0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99,
-	0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d,
-	0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c,
-	0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63,
-	0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44,
-	0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63,
-	0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02,
-	0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22,
-	0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b,
-	0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b,
-	0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b,
-	0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9,
-	0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5,
-	0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88,
-	0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c,
-	0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd,
-	0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17,
-	0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d,
-	0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60,
-	0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd,
-	0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25,
-	0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a,
-	0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29,
-	0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba,
-	0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54,
-	0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1,
-	0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1,
-	0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c,
-	0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f,
-	0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74,
-	0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74,
-	0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15,
-	0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff,
-	0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00,
-}
-
-func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.AuthRevision != 0 {
-		i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Username) > 0 {
-		i -= len(m.Username)
-		copy(dAtA[i:], m.Username)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ID != 0 {
-		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.AuthRoleRevokePermission != nil {
-		{
-			size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0xa2
-	}
-	if m.AuthRoleGrantPermission != nil {
-		{
-			size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x9a
-	}
-	if m.AuthRoleGet != nil {
-		{
-			size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x92
-	}
-	if m.AuthRoleDelete != nil {
-		{
-			size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x8a
-	}
-	if m.AuthRoleAdd != nil {
-		{
-			size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4b
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.AuthRoleList != nil {
-		{
-			size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x9a
-	}
-	if m.AuthUserList != nil {
-		{
-			size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x92
-	}
-	if m.AuthUserRevokeRole != nil {
-		{
-			size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x8a
-	}
-	if m.AuthUserGrantRole != nil {
-		{
-			size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x45
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.AuthUserChangePassword != nil {
-		{
-			size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xfa
-	}
-	if m.AuthUserGet != nil {
-		{
-			size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xf2
-	}
-	if m.AuthUserDelete != nil {
-		{
-			size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xea
-	}
-	if m.AuthUserAdd != nil {
-		{
-			size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x44
-		i--
-		dAtA[i] = 0xe2
-	}
-	if m.Authenticate != nil {
-		{
-			size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3f
-		i--
-		dAtA[i] = 0xa2
-	}
-	if m.AuthDisable != nil {
-		{
-			size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3f
-		i--
-		dAtA[i] = 0x9a
-	}
-	if m.AuthEnable != nil {
-		{
-			size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3e
-		i--
-		dAtA[i] = 0xc2
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x6
-		i--
-		dAtA[i] = 0xa2
-	}
-	if m.Alarm != nil {
-		{
-			size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x52
-	}
-	if m.LeaseRevoke != nil {
-		{
-			size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x4a
-	}
-	if m.LeaseGrant != nil {
-		{
-			size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x42
-	}
-	if m.Compaction != nil {
-		{
-			size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x3a
-	}
-	if m.Txn != nil {
-		{
-			size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.DeleteRange != nil {
-		{
-			size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.Put != nil {
-		{
-			size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	if m.Range != nil {
-		{
-			size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.V2 != nil {
-		{
-			size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ID != 0 {
-		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.SimpleToken) > 0 {
-		i -= len(m.SimpleToken)
-		copy(dAtA[i:], m.SimpleToken)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
-	offset -= sovRaftInternal(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
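// Aside (illustrative, not part of the generated file): encodeVarintRaftInternal
// above writes backwards because every MarshalToSizedBuffer fills dAtA from the
// end toward the front. A minimal standalone sketch of that contract, with
// hypothetical names only:
//
//	buf := make([]byte, 4)
//	i := len(buf)                      // free space ends here
//	i = putVarintBackward(buf, i, 300) // returns the new start offset
//	fmt.Printf("% x\n", buf[i:])       // prints "ac 02"
//
// where putVarintBackward subtracts the encoded size first, then emits the
// bytes left to right, exactly as the helper above does:
//
//	func putVarintBackward(buf []byte, offset int, v uint64) int {
//		offset -= varintSize(v) // varintSize(v) == sovRaftInternal(v)
//		base := offset
//		for v >= 1<<7 {
//			buf[offset] = uint8(v&0x7f | 0x80)
//			v >>= 7
//			offset++
//		}
//		buf[offset] = uint8(v)
//		return base
//	}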
-func (m *RequestHeader) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRaftInternal(uint64(m.ID))
-	}
-	l = len(m.Username)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRevision != 0 {
-		n += 1 + sovRaftInternal(uint64(m.AuthRevision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *InternalRaftRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRaftInternal(uint64(m.ID))
-	}
-	if m.V2 != nil {
-		l = m.V2.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Range != nil {
-		l = m.Range.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Put != nil {
-		l = m.Put.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.DeleteRange != nil {
-		l = m.DeleteRange.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Txn != nil {
-		l = m.Txn.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Compaction != nil {
-		l = m.Compaction.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.LeaseGrant != nil {
-		l = m.LeaseGrant.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.LeaseRevoke != nil {
-		l = m.LeaseRevoke.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Alarm != nil {
-		l = m.Alarm.Size()
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthEnable != nil {
-		l = m.AuthEnable.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthDisable != nil {
-		l = m.AuthDisable.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.Authenticate != nil {
-		l = m.Authenticate.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserAdd != nil {
-		l = m.AuthUserAdd.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserDelete != nil {
-		l = m.AuthUserDelete.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserGet != nil {
-		l = m.AuthUserGet.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserChangePassword != nil {
-		l = m.AuthUserChangePassword.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserGrantRole != nil {
-		l = m.AuthUserGrantRole.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserRevokeRole != nil {
-		l = m.AuthUserRevokeRole.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthUserList != nil {
-		l = m.AuthUserList.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleList != nil {
-		l = m.AuthRoleList.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleAdd != nil {
-		l = m.AuthRoleAdd.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleDelete != nil {
-		l = m.AuthRoleDelete.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleGet != nil {
-		l = m.AuthRoleGet.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleGrantPermission != nil {
-		l = m.AuthRoleGrantPermission.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.AuthRoleRevokePermission != nil {
-		l = m.AuthRoleRevokePermission.Size()
-		n += 2 + l + sovRaftInternal(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *EmptyResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *InternalAuthenticateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	l = len(m.SimpleToken)
-	if l > 0 {
-		n += 1 + l + sovRaftInternal(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovRaftInternal(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaftInternal(x uint64) (n int) {
-	return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
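// Aside (illustrative, not part of the generated file): sovRaftInternal above
// sizes an unsigned varint as ceil(bits/7) via math/bits.Len64, and
// sozRaftInternal first applies the zigzag transform (x<<1)^(x>>63) so that
// sint values near zero, positive or negative, stay small. A hedged sketch of
// the transform and its inverse, with hypothetical names:
//
//	func zigzag(x int64) uint64   { return uint64(x<<1) ^ uint64(x>>63) }
//	func unzigzag(u uint64) int64 { return int64(u>>1) ^ -int64(u&1) }
//
//	// zigzag maps 0,-1,1,-2,2 ... to 0,1,2,3,4, so small magnitudes
//	// encode in one byte either way; unzigzag inverts the mapping.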
-func (m *RequestHeader) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Username = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
-			}
-			m.AuthRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.AuthRevision |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.V2 == nil {
-				m.V2 = &Request{}
-			}
-			if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Range == nil {
-				m.Range = &RangeRequest{}
-			}
-			if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Put == nil {
-				m.Put = &PutRequest{}
-			}
-			if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.DeleteRange == nil {
-				m.DeleteRange = &DeleteRangeRequest{}
-			}
-			if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Txn == nil {
-				m.Txn = &TxnRequest{}
-			}
-			if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Compaction == nil {
-				m.Compaction = &CompactionRequest{}
-			}
-			if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LeaseGrant == nil {
-				m.LeaseGrant = &LeaseGrantRequest{}
-			}
-			if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.LeaseRevoke == nil {
-				m.LeaseRevoke = &LeaseRevokeRequest{}
-			}
-			if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 10:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Alarm == nil {
-				m.Alarm = &AlarmRequest{}
-			}
-			if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 100:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &RequestHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1000:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthEnable == nil {
-				m.AuthEnable = &AuthEnableRequest{}
-			}
-			if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1011:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthDisable == nil {
-				m.AuthDisable = &AuthDisableRequest{}
-			}
-			if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1012:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Authenticate == nil {
-				m.Authenticate = &InternalAuthenticateRequest{}
-			}
-			if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1100:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserAdd == nil {
-				m.AuthUserAdd = &AuthUserAddRequest{}
-			}
-			if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1101:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserDelete == nil {
-				m.AuthUserDelete = &AuthUserDeleteRequest{}
-			}
-			if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1102:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserGet == nil {
-				m.AuthUserGet = &AuthUserGetRequest{}
-			}
-			if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1103:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserChangePassword == nil {
-				m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
-			}
-			if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1104:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserGrantRole == nil {
-				m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
-			}
-			if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1105:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserRevokeRole == nil {
-				m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
-			}
-			if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1106:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthUserList == nil {
-				m.AuthUserList = &AuthUserListRequest{}
-			}
-			if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1107:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleList == nil {
-				m.AuthRoleList = &AuthRoleListRequest{}
-			}
-			if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1200:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleAdd == nil {
-				m.AuthRoleAdd = &AuthRoleAddRequest{}
-			}
-			if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1201:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleDelete == nil {
-				m.AuthRoleDelete = &AuthRoleDeleteRequest{}
-			}
-			if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1202:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleGet == nil {
-				m.AuthRoleGet = &AuthRoleGetRequest{}
-			}
-			if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1203:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleGrantPermission == nil {
-				m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
-			}
-			if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 1204:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.AuthRoleRevokePermission == nil {
-				m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
-			}
-			if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.SimpleToken = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaftInternal(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaftInternal
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRaftInternal(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRaftInternal
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaftInternal
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRaftInternal
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthRaftInternal
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowRaftInternal
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipRaftInternal(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthRaftInternal
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRaftInternal   = fmt.Errorf("proto: integer overflow")
-)
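
For reference, the Unmarshal loops deleted above all inline the same base-128 varint decode. A minimal standalone sketch of that step (illustrative only; `decodeVarint` is not a name from the vendored code):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inline loops in the generated Unmarshal
// methods: each byte contributes its low 7 bits, and a set high bit
// means another byte follows. The shift >= 64 check guards against
// overflow, like the ErrIntOverflowRaftInternal checks above.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte
			return value, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02: 44 | (2 << 7) = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```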
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
deleted file mode 100644
index 25d45d3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
+++ /dev/null
@@ -1,74 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcdserver.proto";
-import "rpc.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message RequestHeader {
-  uint64 ID = 1;
-  // username is the user name associated with the auth token of the gRPC connection
-  string username = 2;
-  // auth_revision is the revision number of auth.authStore. It is not related to mvcc
-  uint64 auth_revision = 3;
-}
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-message InternalRaftRequest {
-  RequestHeader header = 100;
-  uint64 ID = 1;
-
-  Request v2 = 2;
-
-  RangeRequest range = 3;
-  PutRequest put = 4;
-  DeleteRangeRequest delete_range = 5;
-  TxnRequest txn = 6;
-  CompactionRequest compaction = 7;
-
-  LeaseGrantRequest lease_grant = 8;
-  LeaseRevokeRequest lease_revoke = 9;
-
-  AlarmRequest alarm = 10;
-
-  AuthEnableRequest auth_enable = 1000;
-  AuthDisableRequest auth_disable = 1011;
-
-  InternalAuthenticateRequest authenticate = 1012;
-
-  AuthUserAddRequest auth_user_add = 1100;
-  AuthUserDeleteRequest auth_user_delete = 1101;
-  AuthUserGetRequest auth_user_get = 1102;
-  AuthUserChangePasswordRequest auth_user_change_password = 1103;
-  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
-  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
-  AuthUserListRequest auth_user_list = 1106;
-  AuthRoleListRequest auth_role_list = 1107;
-
-  AuthRoleAddRequest auth_role_add = 1200;
-  AuthRoleDeleteRequest auth_role_delete = 1201;
-  AuthRoleGetRequest auth_role_get = 1202;
-  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
-  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
-}
-
-message EmptyResponse {
-}
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
-// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
-message InternalAuthenticateRequest {
-  string name = 1;
-  string password = 2;
-
-  // simple_token is generated in API layer (etcdserver/v3_server.go)
-  string simple_token = 3;
-}
-
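
The field numbers in this message (3 for range, 1100 for auth_user_add, 1204 for auth_role_revoke_permission) are exactly the case labels matched by the generated Unmarshal deleted earlier: on the wire each key is the varint `(field_number << 3) | wire_type`, and embedded messages are length-delimited (wire type 2). A small illustrative sketch of that mapping:

```go
package main

import "fmt"

// Shows how the proto field numbers above become wire-format tag
// keys, and how the generated code recovers fieldNum and wireType.
func main() {
	const wireBytes = 2 // length-delimited: messages, strings, bytes
	for _, field := range []uint64{3, 1100, 1204} {
		tag := field<<3 | wireBytes
		fmt.Printf("field %4d -> key %5d -> fieldNum %4d wireType %d\n",
			field, tag, tag>>3, tag&0x7)
	}
}
```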
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
deleted file mode 100644
index 31e121e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserverpb
-
-import (
-	"fmt"
-	"strings"
-
-	proto "github.com/golang/protobuf/proto"
-)
-
-// InternalRaftStringer implements a custom proto Stringer: it redacts
-// passwords and replaces value fields with value_size fields.
-type InternalRaftStringer struct {
-	Request *InternalRaftRequest
-}
-
-func (as *InternalRaftStringer) String() string {
-	switch {
-	case as.Request.LeaseGrant != nil:
-		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
-			as.Request.Header.String(),
-			as.Request.LeaseGrant.TTL,
-			as.Request.LeaseGrant.ID,
-		)
-	case as.Request.LeaseRevoke != nil:
-		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
-			as.Request.Header.String(),
-			as.Request.LeaseRevoke.ID,
-		)
-	case as.Request.Authenticate != nil:
-		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
-			as.Request.Header.String(),
-			as.Request.Authenticate.Name,
-			as.Request.Authenticate.SimpleToken,
-		)
-	case as.Request.AuthUserAdd != nil:
-		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
-			as.Request.Header.String(),
-			as.Request.AuthUserAdd.Name,
-		)
-	case as.Request.AuthUserChangePassword != nil:
-		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
-			as.Request.Header.String(),
-			as.Request.AuthUserChangePassword.Name,
-		)
-	case as.Request.Put != nil:
-		return fmt.Sprintf("header:<%s> put:<%s>",
-			as.Request.Header.String(),
-			NewLoggablePutRequest(as.Request.Put).String(),
-		)
-	case as.Request.Txn != nil:
-		return fmt.Sprintf("header:<%s> txn:<%s>",
-			as.Request.Header.String(),
-			NewLoggableTxnRequest(as.Request.Txn).String(),
-		)
-	default:
-		// nothing to redact
-	}
-	return as.Request.String()
-}
-
-// txnRequestStringer implements a custom proto String method to replace value bytes
-// fields with value size fields in any nested txn and put operations.
-type txnRequestStringer struct {
-	Request *TxnRequest
-}
-
-func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
-	return &txnRequestStringer{request}
-}
-
-func (as *txnRequestStringer) String() string {
-	var compare []string
-	for _, c := range as.Request.Compare {
-		switch cv := c.TargetUnion.(type) {
-		case *Compare_Value:
-			compare = append(compare, newLoggableValueCompare(c, cv).String())
-		default:
-			// nothing to redact
-			compare = append(compare, c.String())
-		}
-	}
-	var success []string
-	for _, s := range as.Request.Success {
-		success = append(success, newLoggableRequestOp(s).String())
-	}
-	var failure []string
-	for _, f := range as.Request.Failure {
-		failure = append(failure, newLoggableRequestOp(f).String())
-	}
-	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
-		strings.Join(compare, " "),
-		strings.Join(success, " "),
-		strings.Join(failure, " "),
-	)
-}
-
-// requestOpStringer implements a custom proto String method to replace value bytes
-// fields with value size fields in any nested txn and put operations.
-type requestOpStringer struct {
-	Op *RequestOp
-}
-
-func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
-	return &requestOpStringer{op}
-}
-
-func (as *requestOpStringer) String() string {
-	switch op := as.Op.Request.(type) {
-	case *RequestOp_RequestPut:
-		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
-	case *RequestOp_RequestTxn:
-		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
-	default:
-		// nothing to redact
-	}
-	return as.Op.String()
-}
-
-// loggableValueCompare implements a custom proto String for Compare.Value union member types to
-// replace the value bytes field with a value size field.
-// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
-type loggableValueCompare struct {
-	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
-	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
-	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
-	ValueSize int64                 `protobuf:"varint,7,opt,name=value_size,proto3"`
-	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
-}
-
-func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
-	return &loggableValueCompare{
-		c.Result,
-		c.Target,
-		c.Key,
-		int64(len(cv.Value)),
-		c.RangeEnd,
-	}
-}
-
-func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
-func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
-func (*loggableValueCompare) ProtoMessage()    {}
-
-// loggablePutRequest implements a custom proto String to replace value bytes field with a value
-// size field.
-// To preserve proto encoding of the key bytes, a faked out proto type is used here.
-type loggablePutRequest struct {
-	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
-	ValueSize   int64  `protobuf:"varint,2,opt,name=value_size,proto3"`
-	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
-	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
-	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
-	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
-}
-
-func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
-	return &loggablePutRequest{
-		request.Key,
-		int64(len(request.Value)),
-		request.Lease,
-		request.PrevKv,
-		request.IgnoreValue,
-		request.IgnoreLease,
-	}
-}
-
-func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
-func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
-func (*loggablePutRequest) ProtoMessage()    {}
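
The two "loggable" types above swap raw value bytes for a value_size field before anything is logged. The same shape without the proto plumbing, as an illustrative sketch (`putRequest` and `loggablePut` are made-up names):

```go
package main

import "fmt"

// A log-safe view of a request: the raw value never reaches the log,
// only its length does, mirroring loggablePutRequest above.
type putRequest struct {
	Key   []byte
	Value []byte
}

type loggablePut struct {
	Key       []byte
	ValueSize int
}

func newLoggablePut(r *putRequest) *loggablePut {
	return &loggablePut{Key: r.Key, ValueSize: len(r.Value)}
}

func main() {
	r := &putRequest{Key: []byte("k"), Value: []byte("secret-payload")}
	fmt.Printf("%+v\n", newLoggablePut(r)) // &{Key:[107] ValueSize:14}
}
```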
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
deleted file mode 100644
index a0cff8f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
+++ /dev/null
@@ -1,24010 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: rpc.proto
-
-package etcdserverpb
-
-import (
-	context "context"
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	authpb "github.com/coreos/etcd/auth/authpb"
-	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-	_ "google.golang.org/genproto/googleapis/api/annotations"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type AlarmType int32
-
-const (
-	AlarmType_NONE    AlarmType = 0
-	AlarmType_NOSPACE AlarmType = 1
-	AlarmType_CORRUPT AlarmType = 2
-)
-
-var AlarmType_name = map[int32]string{
-	0: "NONE",
-	1: "NOSPACE",
-	2: "CORRUPT",
-}
-
-var AlarmType_value = map[string]int32{
-	"NONE":    0,
-	"NOSPACE": 1,
-	"CORRUPT": 2,
-}
-
-func (x AlarmType) String() string {
-	return proto.EnumName(AlarmType_name, int32(x))
-}
-
-func (AlarmType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
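
The generated enum ships with name/value lookup tables; a short illustrative round trip through them (the import path is the upstream package that this change drops from the vendor tree):

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// String() consults AlarmType_name ...
	fmt.Println(pb.AlarmType_NOSPACE) // NOSPACE
	// ... and AlarmType_value maps the text form back to the constant.
	fmt.Println(pb.AlarmType(pb.AlarmType_value["CORRUPT"])) // CORRUPT
}
```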
-
-type RangeRequest_SortOrder int32
-
-const (
-	RangeRequest_NONE    RangeRequest_SortOrder = 0
-	RangeRequest_ASCEND  RangeRequest_SortOrder = 1
-	RangeRequest_DESCEND RangeRequest_SortOrder = 2
-)
-
-var RangeRequest_SortOrder_name = map[int32]string{
-	0: "NONE",
-	1: "ASCEND",
-	2: "DESCEND",
-}
-
-var RangeRequest_SortOrder_value = map[string]int32{
-	"NONE":    0,
-	"ASCEND":  1,
-	"DESCEND": 2,
-}
-
-func (x RangeRequest_SortOrder) String() string {
-	return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
-}
-
-func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
-}
-
-type RangeRequest_SortTarget int32
-
-const (
-	RangeRequest_KEY     RangeRequest_SortTarget = 0
-	RangeRequest_VERSION RangeRequest_SortTarget = 1
-	RangeRequest_CREATE  RangeRequest_SortTarget = 2
-	RangeRequest_MOD     RangeRequest_SortTarget = 3
-	RangeRequest_VALUE   RangeRequest_SortTarget = 4
-)
-
-var RangeRequest_SortTarget_name = map[int32]string{
-	0: "KEY",
-	1: "VERSION",
-	2: "CREATE",
-	3: "MOD",
-	4: "VALUE",
-}
-
-var RangeRequest_SortTarget_value = map[string]int32{
-	"KEY":     0,
-	"VERSION": 1,
-	"CREATE":  2,
-	"MOD":     3,
-	"VALUE":   4,
-}
-
-func (x RangeRequest_SortTarget) String() string {
-	return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
-}
-
-func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
-}
-
-type Compare_CompareResult int32
-
-const (
-	Compare_EQUAL     Compare_CompareResult = 0
-	Compare_GREATER   Compare_CompareResult = 1
-	Compare_LESS      Compare_CompareResult = 2
-	Compare_NOT_EQUAL Compare_CompareResult = 3
-)
-
-var Compare_CompareResult_name = map[int32]string{
-	0: "EQUAL",
-	1: "GREATER",
-	2: "LESS",
-	3: "NOT_EQUAL",
-}
-
-var Compare_CompareResult_value = map[string]int32{
-	"EQUAL":     0,
-	"GREATER":   1,
-	"LESS":      2,
-	"NOT_EQUAL": 3,
-}
-
-func (x Compare_CompareResult) String() string {
-	return proto.EnumName(Compare_CompareResult_name, int32(x))
-}
-
-func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
-}
-
-type Compare_CompareTarget int32
-
-const (
-	Compare_VERSION Compare_CompareTarget = 0
-	Compare_CREATE  Compare_CompareTarget = 1
-	Compare_MOD     Compare_CompareTarget = 2
-	Compare_VALUE   Compare_CompareTarget = 3
-	Compare_LEASE   Compare_CompareTarget = 4
-)
-
-var Compare_CompareTarget_name = map[int32]string{
-	0: "VERSION",
-	1: "CREATE",
-	2: "MOD",
-	3: "VALUE",
-	4: "LEASE",
-}
-
-var Compare_CompareTarget_value = map[string]int32{
-	"VERSION": 0,
-	"CREATE":  1,
-	"MOD":     2,
-	"VALUE":   3,
-	"LEASE":   4,
-}
-
-func (x Compare_CompareTarget) String() string {
-	return proto.EnumName(Compare_CompareTarget_name, int32(x))
-}
-
-func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
-}
-
-type WatchCreateRequest_FilterType int32
-
-const (
-	// filter out put event.
-	WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
-	// filter out delete event.
-	WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
-)
-
-var WatchCreateRequest_FilterType_name = map[int32]string{
-	0: "NOPUT",
-	1: "NODELETE",
-}
-
-var WatchCreateRequest_FilterType_value = map[string]int32{
-	"NOPUT":    0,
-	"NODELETE": 1,
-}
-
-func (x WatchCreateRequest_FilterType) String() string {
-	return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
-}
-
-func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
-}
-
-type AlarmRequest_AlarmAction int32
-
-const (
-	AlarmRequest_GET        AlarmRequest_AlarmAction = 0
-	AlarmRequest_ACTIVATE   AlarmRequest_AlarmAction = 1
-	AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
-)
-
-var AlarmRequest_AlarmAction_name = map[int32]string{
-	0: "GET",
-	1: "ACTIVATE",
-	2: "DEACTIVATE",
-}
-
-var AlarmRequest_AlarmAction_value = map[string]int32{
-	"GET":        0,
-	"ACTIVATE":   1,
-	"DEACTIVATE": 2,
-}
-
-func (x AlarmRequest_AlarmAction) String() string {
-	return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
-}
-
-func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{49, 0}
-}
-
-type ResponseHeader struct {
-	// cluster_id is the ID of the cluster which sent the response.
-	ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
-	// member_id is the ID of the member which sent the response.
-	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
-	// revision is the key-value store revision when the request was applied.
-	// For watch progress responses, the header.revision indicates progress. All future events
-	// received in this stream are guaranteed to have a higher revision number than the
-	// header.revision number.
-	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
-	// raft_term is the raft term when the request was applied.
-	RaftTerm             uint64   `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ResponseHeader) Reset()         { *m = ResponseHeader{} }
-func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
-func (*ResponseHeader) ProtoMessage()    {}
-func (*ResponseHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ResponseHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ResponseHeader.Merge(m, src)
-}
-func (m *ResponseHeader) XXX_Size() int {
-	return m.Size()
-}
-func (m *ResponseHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
-
-func (m *ResponseHeader) GetClusterId() uint64 {
-	if m != nil {
-		return m.ClusterId
-	}
-	return 0
-}
-
-func (m *ResponseHeader) GetMemberId() uint64 {
-	if m != nil {
-		return m.MemberId
-	}
-	return 0
-}
-
-func (m *ResponseHeader) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-func (m *ResponseHeader) GetRaftTerm() uint64 {
-	if m != nil {
-		return m.RaftTerm
-	}
-	return 0
-}
-
-type RangeRequest struct {
-	// key is the first key for the range. If range_end is not given, the request only looks up key.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// range_end is the upper bound on the requested range [key, range_end).
-	// If range_end is '\0', the range is all keys >= key.
-	// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
-	// then the range request gets all keys prefixed with key.
-	// If both key and range_end are '\0', then the range request returns all keys.
-	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	// limit is a limit on the number of keys returned for the request. When limit is set to 0,
-	// it is treated as no limit.
-	Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
-	// revision is the point-in-time of the key-value store to use for the range.
-	// If revision is less than or equal to zero, the range is over the newest key-value store.
-	// If the revision has been compacted, ErrCompacted is returned as a response.
-	Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
-	// sort_order is the order for returned sorted results.
-	SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
-	// sort_target is the key-value field to use for sorting.
-	SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
-	// serializable sets the range request to use serializable member-local reads.
-	// Range requests are linearizable by default; linearizable requests have higher
-	// latency and lower throughput than serializable requests but reflect the current
-	// consensus of the cluster. For better performance, in exchange for possible stale reads,
-	// a serializable range request is served locally without needing to reach consensus
-	// with other nodes in the cluster.
-	Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
-	// keys_only when set returns only the keys and not the values.
-	KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
-	// count_only when set returns only the count of the keys in the range.
-	CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
-	// min_mod_revision is the lower bound for returned key mod revisions; all keys with
-	// lesser mod revisions will be filtered away.
-	MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
-	// max_mod_revision is the upper bound for returned key mod revisions; all keys with
-	// greater mod revisions will be filtered away.
-	MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
-	// min_create_revision is the lower bound for returned key create revisions; all keys with
-	// lesser create revisions will be filtered away.
-	MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
-	// max_create_revision is the upper bound for returned key create revisions; all keys with
-	// greater create revisions will be filtered away.
-	MaxCreateRevision    int64    `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
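
The range_end comment above gives the "key plus one" prefix rule. A standalone sketch of that computation (clientv3 ships an equivalent helper; this version is illustrative):

```go
package main

import "fmt"

// prefixRangeEnd implements "key plus one": increment the last byte
// that is not 0xff and truncate after it. A prefix of all 0xff bytes
// has no successor; "\x00" then means "to the end of the keyspace".
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("aa")))    // "ab"
	fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff"))) // "b"
}
```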
-
-func (m *RangeRequest) Reset()         { *m = RangeRequest{} }
-func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
-func (*RangeRequest) ProtoMessage()    {}
-func (*RangeRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{1}
-}
-func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RangeRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RangeRequest.Merge(m, src)
-}
-func (m *RangeRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *RangeRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_RangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
-
-func (m *RangeRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *RangeRequest) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-func (m *RangeRequest) GetLimit() int64 {
-	if m != nil {
-		return m.Limit
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
-	if m != nil {
-		return m.SortOrder
-	}
-	return RangeRequest_NONE
-}
-
-func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
-	if m != nil {
-		return m.SortTarget
-	}
-	return RangeRequest_KEY
-}
-
-func (m *RangeRequest) GetSerializable() bool {
-	if m != nil {
-		return m.Serializable
-	}
-	return false
-}
-
-func (m *RangeRequest) GetKeysOnly() bool {
-	if m != nil {
-		return m.KeysOnly
-	}
-	return false
-}
-
-func (m *RangeRequest) GetCountOnly() bool {
-	if m != nil {
-		return m.CountOnly
-	}
-	return false
-}
-
-func (m *RangeRequest) GetMinModRevision() int64 {
-	if m != nil {
-		return m.MinModRevision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetMaxModRevision() int64 {
-	if m != nil {
-		return m.MaxModRevision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetMinCreateRevision() int64 {
-	if m != nil {
-		return m.MinCreateRevision
-	}
-	return 0
-}
-
-func (m *RangeRequest) GetMaxCreateRevision() int64 {
-	if m != nil {
-		return m.MaxCreateRevision
-	}
-	return 0
-}
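
The serializable field documented above is normally set through the client rather than by hand. A hedged sketch using the clientv3 read option (the endpoint is illustrative; the option exists in the coreos/etcd client of this vintage):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // illustrative endpoint
		DialTimeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// WithSerializable sets RangeRequest.Serializable, so the read is
	// served from the contacted member's local store without consensus.
	resp, err := cli.Get(ctx, "foo", clientv3.WithSerializable())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Kvs)
}
```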
-
-type RangeResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// kvs is the list of key-value pairs matched by the range request.
-	// kvs is empty when count is requested.
-	Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
-	// more indicates if there are more keys to return in the requested range.
-	More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
-	// count is set to the number of keys within the range when requested.
-	Count                int64    `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *RangeResponse) Reset()         { *m = RangeResponse{} }
-func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
-func (*RangeResponse) ProtoMessage()    {}
-func (*RangeResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{2}
-}
-func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RangeResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RangeResponse.Merge(m, src)
-}
-func (m *RangeResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *RangeResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_RangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
-
-func (m *RangeResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
-	if m != nil {
-		return m.Kvs
-	}
-	return nil
-}
-
-func (m *RangeResponse) GetMore() bool {
-	if m != nil {
-		return m.More
-	}
-	return false
-}
-
-func (m *RangeResponse) GetCount() int64 {
-	if m != nil {
-		return m.Count
-	}
-	return 0
-}
-
-type PutRequest struct {
-	// key is the key, in bytes, to put into the key-value store.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// value is the value, in bytes, to associate with the key in the key-value store.
-	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	// lease is the lease ID to associate with the key in the key-value store. A lease
-	// value of 0 indicates no lease.
-	Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
-	// If prev_kv is set, etcd gets the previous key-value pair before changing it.
-	// The previous key-value pair will be returned in the put response.
-	PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	// If ignore_value is set, etcd updates the key using its current value.
-	// Returns an error if the key does not exist.
-	IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
-	// If ignore_lease is set, etcd updates the key using its current lease.
-	// Returns an error if the key does not exist.
-	IgnoreLease          bool     `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *PutRequest) Reset()         { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage()    {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{3}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *PutRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PutRequest.Merge(m, src)
-}
-func (m *PutRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-func (m *PutRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *PutRequest) GetValue() []byte {
-	if m != nil {
-		return m.Value
-	}
-	return nil
-}
-
-func (m *PutRequest) GetLease() int64 {
-	if m != nil {
-		return m.Lease
-	}
-	return 0
-}
-
-func (m *PutRequest) GetPrevKv() bool {
-	if m != nil {
-		return m.PrevKv
-	}
-	return false
-}
-
-func (m *PutRequest) GetIgnoreValue() bool {
-	if m != nil {
-		return m.IgnoreValue
-	}
-	return false
-}
-
-func (m *PutRequest) GetIgnoreLease() bool {
-	if m != nil {
-		return m.IgnoreLease
-	}
-	return false
-}
-
-type PutResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// if prev_kv is set in the request, the previous key-value pair will be returned.
-	PrevKv               *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
-}
-
-func (m *PutResponse) Reset()         { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage()    {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{4}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *PutResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_PutResponse.Merge(m, src)
-}
-func (m *PutResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
-	if m != nil {
-		return m.PrevKv
-	}
-	return nil
-}
-
-type DeleteRangeRequest struct {
-	// key is the first key to delete in the range.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// range_end is the key following the last key to delete for the range [key, range_end).
-	// If range_end is not given, the range is defined to contain only the key argument.
-	// If range_end is key plus one (e.g., "aa"+1 == "ab"), then the range is
-	// all keys prefixed with the given key.
-	// If range_end is '\0', the range is all keys greater than or equal to the key argument.
-	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	// If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
-	// The previous key-value pairs will be returned in the delete response.
-	PrevKv               bool     `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DeleteRangeRequest) Reset()         { *m = DeleteRangeRequest{} }
-func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeRequest) ProtoMessage()    {}
-func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{5}
-}
-func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
-}
-func (m *DeleteRangeRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
-
-func (m *DeleteRangeRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *DeleteRangeRequest) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-func (m *DeleteRangeRequest) GetPrevKv() bool {
-	if m != nil {
-		return m.PrevKv
-	}
-	return false
-}
-
-type DeleteRangeResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// deleted is the number of keys deleted by the delete range request.
-	Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
-	// if prev_kv is set in the request, the previous key-value pairs will be returned.
-	PrevKvs              []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
-}
-
-func (m *DeleteRangeResponse) Reset()         { *m = DeleteRangeResponse{} }
-func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeResponse) ProtoMessage()    {}
-func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{6}
-}
-func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
-}
-func (m *DeleteRangeResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
-
-func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *DeleteRangeResponse) GetDeleted() int64 {
-	if m != nil {
-		return m.Deleted
-	}
-	return 0
-}
-
-func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
-	if m != nil {
-		return m.PrevKvs
-	}
-	return nil
-}
-
-type RequestOp struct {
-	// request is a union of request types accepted by a transaction.
-	//
-	// Types that are valid to be assigned to Request:
-	//	*RequestOp_RequestRange
-	//	*RequestOp_RequestPut
-	//	*RequestOp_RequestDeleteRange
-	//	*RequestOp_RequestTxn
-	Request              isRequestOp_Request `protobuf_oneof:"request"`
-	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
-	XXX_unrecognized     []byte              `json:"-"`
-	XXX_sizecache        int32               `json:"-"`
-}
-
-func (m *RequestOp) Reset()         { *m = RequestOp{} }
-func (m *RequestOp) String() string { return proto.CompactTextString(m) }
-func (*RequestOp) ProtoMessage()    {}
-func (*RequestOp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{7}
-}
-func (m *RequestOp) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *RequestOp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_RequestOp.Merge(m, src)
-}
-func (m *RequestOp) XXX_Size() int {
-	return m.Size()
-}
-func (m *RequestOp) XXX_DiscardUnknown() {
-	xxx_messageInfo_RequestOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestOp proto.InternalMessageInfo
-
-type isRequestOp_Request interface {
-	isRequestOp_Request()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type RequestOp_RequestRange struct {
-	RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"`
-}
-type RequestOp_RequestPut struct {
-	RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"`
-}
-type RequestOp_RequestDeleteRange struct {
-	RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"`
-}
-type RequestOp_RequestTxn struct {
-	RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"`
-}
-
-func (*RequestOp_RequestRange) isRequestOp_Request()       {}
-func (*RequestOp_RequestPut) isRequestOp_Request()         {}
-func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
-func (*RequestOp_RequestTxn) isRequestOp_Request()         {}
-
-func (m *RequestOp) GetRequest() isRequestOp_Request {
-	if m != nil {
-		return m.Request
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestRange() *RangeRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
-		return x.RequestRange
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestPut() *PutRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
-		return x.RequestPut
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
-		return x.RequestDeleteRange
-	}
-	return nil
-}
-
-func (m *RequestOp) GetRequestTxn() *TxnRequest {
-	if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
-		return x.RequestTxn
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
-		(*RequestOp_RequestRange)(nil),
-		(*RequestOp_RequestPut)(nil),
-		(*RequestOp_RequestDeleteRange)(nil),
-		(*RequestOp_RequestTxn)(nil),
-	}
-}
-
-func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*RequestOp)
-	// request
-	switch x := m.Request.(type) {
-	case *RequestOp_RequestRange:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestRange); err != nil {
-			return err
-		}
-	case *RequestOp_RequestPut:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestPut); err != nil {
-			return err
-		}
-	case *RequestOp_RequestDeleteRange:
-		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
-			return err
-		}
-	case *RequestOp_RequestTxn:
-		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.RequestTxn); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*RequestOp)
-	switch tag {
-	case 1: // request.request_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(RangeRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestRange{msg}
-		return true, err
-	case 2: // request.request_put
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(PutRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestPut{msg}
-		return true, err
-	case 3: // request.request_delete_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(DeleteRangeRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestDeleteRange{msg}
-		return true, err
-	case 4: // request.request_txn
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(TxnRequest)
-		err := b.DecodeMessage(msg)
-		m.Request = &RequestOp_RequestTxn{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _RequestOp_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*RequestOp)
-	// request
-	switch x := m.Request.(type) {
-	case *RequestOp_RequestRange:
-		s := proto.Size(x.RequestRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *RequestOp_RequestPut:
-		s := proto.Size(x.RequestPut)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *RequestOp_RequestDeleteRange:
-		s := proto.Size(x.RequestDeleteRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *RequestOp_RequestTxn:
-		s := proto.Size(x.RequestTxn)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
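The `EncodeVarint(1<<3 | proto.WireBytes)` calls in the marshaler above, and the `n += 1 // tag and wire` lines in the sizer, both rest on the protobuf key layout: a field key is `field_number<<3 | wire_type`, which fits in one byte for small field numbers. A quick sketch of the arithmetic:

```go
package main

import "fmt"

const wireBytes = 2 // mirrors proto.WireBytes, the length-delimited wire type

func main() {
	for field := uint64(1); field <= 4; field++ {
		// A protobuf field key is the field number shifted left three bits,
		// OR-ed with the wire type.
		fmt.Printf("field %d -> key 0x%02x\n", field, field<<3|wireBytes)
	}
	// Prints 0x0a, 0x12, 0x1a, 0x22: the four one-byte keys the oneof
	// marshaler above emits for request fields 1 through 4.
}
```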
-type ResponseOp struct {
-	// response is a union of response types returned by a transaction.
-	//
-	// Types that are valid to be assigned to Response:
-	//	*ResponseOp_ResponseRange
-	//	*ResponseOp_ResponsePut
-	//	*ResponseOp_ResponseDeleteRange
-	//	*ResponseOp_ResponseTxn
-	Response             isResponseOp_Response `protobuf_oneof:"response"`
-	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
-	XXX_unrecognized     []byte                `json:"-"`
-	XXX_sizecache        int32                 `json:"-"`
-}
-
-func (m *ResponseOp) Reset()         { *m = ResponseOp{} }
-func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
-func (*ResponseOp) ProtoMessage()    {}
-func (*ResponseOp) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{8}
-}
-func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ResponseOp) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ResponseOp.Merge(m, src)
-}
-func (m *ResponseOp) XXX_Size() int {
-	return m.Size()
-}
-func (m *ResponseOp) XXX_DiscardUnknown() {
-	xxx_messageInfo_ResponseOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
-
-type isResponseOp_Response interface {
-	isResponseOp_Response()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type ResponseOp_ResponseRange struct {
-	ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"`
-}
-type ResponseOp_ResponsePut struct {
-	ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"`
-}
-type ResponseOp_ResponseDeleteRange struct {
-	ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"`
-}
-type ResponseOp_ResponseTxn struct {
-	ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"`
-}
-
-func (*ResponseOp_ResponseRange) isResponseOp_Response()       {}
-func (*ResponseOp_ResponsePut) isResponseOp_Response()         {}
-func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
-func (*ResponseOp_ResponseTxn) isResponseOp_Response()         {}
-
-func (m *ResponseOp) GetResponse() isResponseOp_Response {
-	if m != nil {
-		return m.Response
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponseRange() *RangeResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
-		return x.ResponseRange
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponsePut() *PutResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
-		return x.ResponsePut
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
-		return x.ResponseDeleteRange
-	}
-	return nil
-}
-
-func (m *ResponseOp) GetResponseTxn() *TxnResponse {
-	if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
-		return x.ResponseTxn
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
-		(*ResponseOp_ResponseRange)(nil),
-		(*ResponseOp_ResponsePut)(nil),
-		(*ResponseOp_ResponseDeleteRange)(nil),
-		(*ResponseOp_ResponseTxn)(nil),
-	}
-}
-
-func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ResponseOp)
-	// response
-	switch x := m.Response.(type) {
-	case *ResponseOp_ResponseRange:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponseRange); err != nil {
-			return err
-		}
-	case *ResponseOp_ResponsePut:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponsePut); err != nil {
-			return err
-		}
-	case *ResponseOp_ResponseDeleteRange:
-		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
-			return err
-		}
-	case *ResponseOp_ResponseTxn:
-		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ResponseTxn); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ResponseOp)
-	switch tag {
-	case 1: // response.response_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(RangeResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponseRange{msg}
-		return true, err
-	case 2: // response.response_put
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(PutResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponsePut{msg}
-		return true, err
-	case 3: // response.response_delete_range
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(DeleteRangeResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponseDeleteRange{msg}
-		return true, err
-	case 4: // response.response_txn
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(TxnResponse)
-		err := b.DecodeMessage(msg)
-		m.Response = &ResponseOp_ResponseTxn{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ResponseOp)
-	// response
-	switch x := m.Response.(type) {
-	case *ResponseOp_ResponseRange:
-		s := proto.Size(x.ResponseRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseOp_ResponsePut:
-		s := proto.Size(x.ResponsePut)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseOp_ResponseDeleteRange:
-		s := proto.Size(x.ResponseDeleteRange)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *ResponseOp_ResponseTxn:
-		s := proto.Size(x.ResponseTxn)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type Compare struct {
-	// result is logical comparison operation for this comparison.
-	Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
-	// target is the key-value field to inspect for the comparison.
-	Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
-	// key is the subject key for the comparison operation.
-	Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
-	// Types that are valid to be assigned to TargetUnion:
-	//	*Compare_Version
-	//	*Compare_CreateRevision
-	//	*Compare_ModRevision
-	//	*Compare_Value
-	//	*Compare_Lease
-	TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
-	// range_end compares the given target to all keys in the range [key, range_end).
-	// See RangeRequest for more details on key ranges.
-	RangeEnd             []byte   `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Compare) Reset()         { *m = Compare{} }
-func (m *Compare) String() string { return proto.CompactTextString(m) }
-func (*Compare) ProtoMessage()    {}
-func (*Compare) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{9}
-}
-func (m *Compare) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Compare) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Compare.Merge(m, src)
-}
-func (m *Compare) XXX_Size() int {
-	return m.Size()
-}
-func (m *Compare) XXX_DiscardUnknown() {
-	xxx_messageInfo_Compare.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Compare proto.InternalMessageInfo
-
-type isCompare_TargetUnion interface {
-	isCompare_TargetUnion()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type Compare_Version struct {
-	Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
-}
-type Compare_CreateRevision struct {
-	CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
-}
-type Compare_ModRevision struct {
-	ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
-}
-type Compare_Value struct {
-	Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
-}
-type Compare_Lease struct {
-	Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
-}
-
-func (*Compare_Version) isCompare_TargetUnion()        {}
-func (*Compare_CreateRevision) isCompare_TargetUnion() {}
-func (*Compare_ModRevision) isCompare_TargetUnion()    {}
-func (*Compare_Value) isCompare_TargetUnion()          {}
-func (*Compare_Lease) isCompare_TargetUnion()          {}
-
-func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
-	if m != nil {
-		return m.TargetUnion
-	}
-	return nil
-}
-
-func (m *Compare) GetResult() Compare_CompareResult {
-	if m != nil {
-		return m.Result
-	}
-	return Compare_EQUAL
-}
-
-func (m *Compare) GetTarget() Compare_CompareTarget {
-	if m != nil {
-		return m.Target
-	}
-	return Compare_VERSION
-}
-
-func (m *Compare) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *Compare) GetVersion() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
-		return x.Version
-	}
-	return 0
-}
-
-func (m *Compare) GetCreateRevision() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
-		return x.CreateRevision
-	}
-	return 0
-}
-
-func (m *Compare) GetModRevision() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
-		return x.ModRevision
-	}
-	return 0
-}
-
-func (m *Compare) GetValue() []byte {
-	if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
-		return x.Value
-	}
-	return nil
-}
-
-func (m *Compare) GetLease() int64 {
-	if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
-		return x.Lease
-	}
-	return 0
-}
-
-func (m *Compare) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
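`TargetUnion` follows the same union pattern: assign one wrapper struct, and each getter falls back to its zero value when a different member is set. A sketch, reusing the assumed `pb` alias from the earlier example (`Compare_MOD` is the enum name as I recall it from the etcd API and should be verified):

```go
// cmp asserts that key "k" is still at mod revision 5.
cmp := &pb.Compare{
	Result:      pb.Compare_EQUAL,
	Target:      pb.Compare_MOD, // compare the key's mod_revision
	Key:         []byte("k"),
	TargetUnion: &pb.Compare_ModRevision{ModRevision: 5},
}

_ = cmp.GetModRevision() // 5
_ = cmp.GetVersion()     // 0: the union holds a mod_revision, not a version
```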
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
-		(*Compare_Version)(nil),
-		(*Compare_CreateRevision)(nil),
-		(*Compare_ModRevision)(nil),
-		(*Compare_Value)(nil),
-		(*Compare_Lease)(nil),
-	}
-}
-
-func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*Compare)
-	// target_union
-	switch x := m.TargetUnion.(type) {
-	case *Compare_Version:
-		_ = b.EncodeVarint(4<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.Version))
-	case *Compare_CreateRevision:
-		_ = b.EncodeVarint(5<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.CreateRevision))
-	case *Compare_ModRevision:
-		_ = b.EncodeVarint(6<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.ModRevision))
-	case *Compare_Value:
-		_ = b.EncodeVarint(7<<3 | proto.WireBytes)
-		_ = b.EncodeRawBytes(x.Value)
-	case *Compare_Lease:
-		_ = b.EncodeVarint(8<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(uint64(x.Lease))
-	case nil:
-	default:
-		return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*Compare)
-	switch tag {
-	case 4: // target_union.version
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_Version{int64(x)}
-		return true, err
-	case 5: // target_union.create_revision
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_CreateRevision{int64(x)}
-		return true, err
-	case 6: // target_union.mod_revision
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_ModRevision{int64(x)}
-		return true, err
-	case 7: // target_union.value
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeRawBytes(true)
-		m.TargetUnion = &Compare_Value{x}
-		return true, err
-	case 8: // target_union.lease
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.TargetUnion = &Compare_Lease{int64(x)}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _Compare_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*Compare)
-	// target_union
-	switch x := m.TargetUnion.(type) {
-	case *Compare_Version:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.Version))
-	case *Compare_CreateRevision:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.CreateRevision))
-	case *Compare_ModRevision:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.ModRevision))
-	case *Compare_Value:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(len(x.Value)))
-		n += len(x.Value)
-	case *Compare_Lease:
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(x.Lease))
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-// From the Google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
-// and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
-// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
-// may apply to the same or different entries in the database. All tests in the guard are applied and
-// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
-// it executes f op (see item 3 below).
-// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
-// lookup operation, and applies to a single database entry. Two different operations in the list may apply
-// to the same or different entries in the database. These operations are
-// executed if guard evaluates to true.
-// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
-type TxnRequest struct {
-	// compare is a list of predicates representing a conjunction of terms.
-	// If the comparisons succeed, then the success requests will be processed in order,
-	// and the response will contain their respective responses in order.
-	// If the comparisons fail, then the failure requests will be processed in order,
-	// and the response will contain their respective responses in order.
-	Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
-	// success is a list of requests which will be applied when compare evaluates to true.
-	Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
-	// failure is a list of requests which will be applied when compare evaluates to false.
-	Failure              []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
-	XXX_unrecognized     []byte       `json:"-"`
-	XXX_sizecache        int32        `json:"-"`
-}
-
-func (m *TxnRequest) Reset()         { *m = TxnRequest{} }
-func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
-func (*TxnRequest) ProtoMessage()    {}
-func (*TxnRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{10}
-}
-func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *TxnRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TxnRequest.Merge(m, src)
-}
-func (m *TxnRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *TxnRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_TxnRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
-
-func (m *TxnRequest) GetCompare() []*Compare {
-	if m != nil {
-		return m.Compare
-	}
-	return nil
-}
-
-func (m *TxnRequest) GetSuccess() []*RequestOp {
-	if m != nil {
-		return m.Success
-	}
-	return nil
-}
-
-func (m *TxnRequest) GetFailure() []*RequestOp {
-	if m != nil {
-		return m.Failure
-	}
-	return nil
-}
-
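The MultiOp description above maps directly onto this struct: `Compare` is the guard, `Success` is t op, and `Failure` is f op. A minimal sketch built from the types in this file (import path assumed as before):

```go
package txnexample

import pb "go.etcd.io/etcd/api/v3/etcdserverpb" // assumed import path

// buildTxn: if key "k" is unmodified since revision 5, put a new value;
// otherwise read the current value back.
func buildTxn() *pb.TxnRequest {
	return &pb.TxnRequest{
		Compare: []*pb.Compare{{ // the guard
			Result:      pb.Compare_EQUAL,
			Target:      pb.Compare_MOD,
			Key:         []byte("k"),
			TargetUnion: &pb.Compare_ModRevision{ModRevision: 5},
		}},
		Success: []*pb.RequestOp{{ // "t op": runs when the guard holds
			Request: &pb.RequestOp_RequestPut{
				RequestPut: &pb.PutRequest{Key: []byte("k"), Value: []byte("v2")},
			},
		}},
		Failure: []*pb.RequestOp{{ // "f op": runs when it does not
			Request: &pb.RequestOp_RequestRange{
				RequestRange: &pb.RangeRequest{Key: []byte("k")},
			},
		}},
	}
}
```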
-type TxnResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// succeeded is set to true if the compare evaluated to true, and false otherwise.
-	Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
-	// responses is a list of responses corresponding to the results from applying
-	// success if succeeded is true or failure if succeeded is false.
-	Responses            []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
-	XXX_unrecognized     []byte        `json:"-"`
-	XXX_sizecache        int32         `json:"-"`
-}
-
-func (m *TxnResponse) Reset()         { *m = TxnResponse{} }
-func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
-func (*TxnResponse) ProtoMessage()    {}
-func (*TxnResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{11}
-}
-func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *TxnResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_TxnResponse.Merge(m, src)
-}
-func (m *TxnResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *TxnResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_TxnResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
-
-func (m *TxnResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *TxnResponse) GetSucceeded() bool {
-	if m != nil {
-		return m.Succeeded
-	}
-	return false
-}
-
-func (m *TxnResponse) GetResponses() []*ResponseOp {
-	if m != nil {
-		return m.Responses
-	}
-	return nil
-}
-
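On the response side, `Succeeded` reports which branch ran and `Responses` mirrors that branch's request list index-for-index. A sketch of reading the result (the header accessors follow the usual generated getter pattern):

```go
package txnexample

import pb "go.etcd.io/etcd/api/v3/etcdserverpb" // assumed import path

func handleTxn(resp *pb.TxnResponse) {
	if !resp.GetSucceeded() {
		return // the Failure ops ran; inspect their responses analogously
	}
	// Responses correspond to the Success ops, in order.
	for _, op := range resp.GetResponses() {
		if put := op.GetResponsePut(); put != nil {
			_ = put.GetHeader().GetRevision() // revision the put applied at
		}
	}
}
```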
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-type CompactionRequest struct {
-	// revision is the key-value store revision for the compaction operation.
-	Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
-	// physical is set so the RPC will wait until the compaction is physically
-	// applied to the local database such that compacted entries are totally
-	// removed from the backend database.
-	Physical             bool     `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *CompactionRequest) Reset()         { *m = CompactionRequest{} }
-func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
-func (*CompactionRequest) ProtoMessage()    {}
-func (*CompactionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{12}
-}
-func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *CompactionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompactionRequest.Merge(m, src)
-}
-func (m *CompactionRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *CompactionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
-
-func (m *CompactionRequest) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-func (m *CompactionRequest) GetPhysical() bool {
-	if m != nil {
-		return m.Physical
-	}
-	return false
-}
-
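The etcd v3 client wraps this RPC; a hedged sketch of requesting a compaction that waits for physical removal, matching the `physical` field above (the clientv3 import path and option name are from the client API as I recall it and should be checked against the vendored version):

```go
package compactexample

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path
)

func compactTo(ctx context.Context, cli *clientv3.Client, rev int64) error {
	// WithCompactPhysical blocks the call until compacted entries are
	// physically removed from the backend database.
	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}
```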
-type CompactionResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *CompactionResponse) Reset()         { *m = CompactionResponse{} }
-func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
-func (*CompactionResponse) ProtoMessage()    {}
-func (*CompactionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{13}
-}
-func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *CompactionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_CompactionResponse.Merge(m, src)
-}
-func (m *CompactionResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *CompactionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
-
-func (m *CompactionResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type HashRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashRequest) Reset()         { *m = HashRequest{} }
-func (m *HashRequest) String() string { return proto.CompactTextString(m) }
-func (*HashRequest) ProtoMessage()    {}
-func (*HashRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{14}
-}
-func (m *HashRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashRequest.Merge(m, src)
-}
-func (m *HashRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashRequest proto.InternalMessageInfo
-
-type HashKVRequest struct {
-	// revision is the key-value store revision for the hash operation.
-	Revision             int64    `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashKVRequest) Reset()         { *m = HashKVRequest{} }
-func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
-func (*HashKVRequest) ProtoMessage()    {}
-func (*HashKVRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{15}
-}
-func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashKVRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashKVRequest.Merge(m, src)
-}
-func (m *HashKVRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashKVRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
-
-func (m *HashKVRequest) GetRevision() int64 {
-	if m != nil {
-		return m.Revision
-	}
-	return 0
-}
-
-type HashKVResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
-	Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
-	// compact_revision is the compacted revision of the key-value store when the hash begins.
-	CompactRevision      int64    `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashKVResponse) Reset()         { *m = HashKVResponse{} }
-func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
-func (*HashKVResponse) ProtoMessage()    {}
-func (*HashKVResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{16}
-}
-func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashKVResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashKVResponse.Merge(m, src)
-}
-func (m *HashKVResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashKVResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
-
-func (m *HashKVResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *HashKVResponse) GetHash() uint32 {
-	if m != nil {
-		return m.Hash
-	}
-	return 0
-}
-
-func (m *HashKVResponse) GetCompactRevision() int64 {
-	if m != nil {
-		return m.CompactRevision
-	}
-	return 0
-}
-
-type HashResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// hash is the hash value computed from the responding member's key-value store backend.
-	Hash                 uint32   `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HashResponse) Reset()         { *m = HashResponse{} }
-func (m *HashResponse) String() string { return proto.CompactTextString(m) }
-func (*HashResponse) ProtoMessage()    {}
-func (*HashResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{17}
-}
-func (m *HashResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HashResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HashResponse.Merge(m, src)
-}
-func (m *HashResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *HashResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_HashResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashResponse proto.InternalMessageInfo
-
-func (m *HashResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *HashResponse) GetHash() uint32 {
-	if m != nil {
-		return m.Hash
-	}
-	return 0
-}
-
-type SnapshotRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *SnapshotRequest) Reset()         { *m = SnapshotRequest{} }
-func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
-func (*SnapshotRequest) ProtoMessage()    {}
-func (*SnapshotRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{18}
-}
-func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SnapshotRequest.Merge(m, src)
-}
-func (m *SnapshotRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *SnapshotRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
-
-type SnapshotResponse struct {
-	// header has the current key-value store information. The first header in the snapshot
-	// stream indicates the point in time of the snapshot.
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// remaining_bytes is the number of blob bytes to be sent after this message
-	RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
-	// blob contains the next chunk of the snapshot in the snapshot stream.
-	Blob                 []byte   `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *SnapshotResponse) Reset()         { *m = SnapshotResponse{} }
-func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
-func (*SnapshotResponse) ProtoMessage()    {}
-func (*SnapshotResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{19}
-}
-func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SnapshotResponse.Merge(m, src)
-}
-func (m *SnapshotResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *SnapshotResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
-
-func (m *SnapshotResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *SnapshotResponse) GetRemainingBytes() uint64 {
-	if m != nil {
-		return m.RemainingBytes
-	}
-	return 0
-}
-
-func (m *SnapshotResponse) GetBlob() []byte {
-	if m != nil {
-		return m.Blob
-	}
-	return nil
-}
-
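A snapshot arrives as a stream of these messages: each carries the next `blob` chunk, and `remaining_bytes` counts what is still to come, so zero marks the final chunk. A sketch of draining the stream (the stream client type name follows the usual generated naming and is an assumption):

```go
package snapexample

import (
	"io"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb" // assumed import path
)

func readSnapshot(stream pb.Maintenance_SnapshotClient, w io.Writer) error {
	for {
		resp, err := stream.Recv()
		if err != nil {
			return err // servers may also terminate the stream with io.EOF
		}
		if _, err := w.Write(resp.GetBlob()); err != nil {
			return err
		}
		if resp.GetRemainingBytes() == 0 {
			return nil // final chunk: nothing follows this message
		}
	}
}
```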
-type WatchRequest struct {
-	// request_union is a request to either create a new watcher or cancel an existing watcher.
-	//
-	// Types that are valid to be assigned to RequestUnion:
-	//	*WatchRequest_CreateRequest
-	//	*WatchRequest_CancelRequest
-	//	*WatchRequest_ProgressRequest
-	RequestUnion         isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
-	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
-	XXX_unrecognized     []byte                      `json:"-"`
-	XXX_sizecache        int32                       `json:"-"`
-}
-
-func (m *WatchRequest) Reset()         { *m = WatchRequest{} }
-func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchRequest) ProtoMessage()    {}
-func (*WatchRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{20}
-}
-func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchRequest.Merge(m, src)
-}
-func (m *WatchRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
-
-type isWatchRequest_RequestUnion interface {
-	isWatchRequest_RequestUnion()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type WatchRequest_CreateRequest struct {
-	CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"`
-}
-type WatchRequest_CancelRequest struct {
-	CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"`
-}
-type WatchRequest_ProgressRequest struct {
-	ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"`
-}
-
-func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion()   {}
-func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion()   {}
-func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
-
-func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
-	if m != nil {
-		return m.RequestUnion
-	}
-	return nil
-}
-
-func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
-	if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
-		return x.CreateRequest
-	}
-	return nil
-}
-
-func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
-	if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
-		return x.CancelRequest
-	}
-	return nil
-}
-
-func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
-	if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
-		return x.ProgressRequest
-	}
-	return nil
-}
-
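One Watch stream multiplexes all three request kinds through this union. A sketch of the create and cancel messages, reusing the assumed `pb` alias:

```go
// Create a watcher on key "k" starting from revision 1 (inclusive).
create := &pb.WatchRequest{
	RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("k"), StartRevision: 1},
	},
}

// Cancel it later using the watch_id the server assigned on creation.
cancel := &pb.WatchRequest{
	RequestUnion: &pb.WatchRequest_CancelRequest{
		CancelRequest: &pb.WatchCancelRequest{WatchId: 7},
	},
}
_, _ = create, cancel
```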
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
-		(*WatchRequest_CreateRequest)(nil),
-		(*WatchRequest_CancelRequest)(nil),
-		(*WatchRequest_ProgressRequest)(nil),
-	}
-}
-
-func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*WatchRequest)
-	// request_union
-	switch x := m.RequestUnion.(type) {
-	case *WatchRequest_CreateRequest:
-		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.CreateRequest); err != nil {
-			return err
-		}
-	case *WatchRequest_CancelRequest:
-		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.CancelRequest); err != nil {
-			return err
-		}
-	case *WatchRequest_ProgressRequest:
-		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ProgressRequest); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*WatchRequest)
-	switch tag {
-	case 1: // request_union.create_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(WatchCreateRequest)
-		err := b.DecodeMessage(msg)
-		m.RequestUnion = &WatchRequest_CreateRequest{msg}
-		return true, err
-	case 2: // request_union.cancel_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(WatchCancelRequest)
-		err := b.DecodeMessage(msg)
-		m.RequestUnion = &WatchRequest_CancelRequest{msg}
-		return true, err
-	case 3: // request_union.progress_request
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(WatchProgressRequest)
-		err := b.DecodeMessage(msg)
-		m.RequestUnion = &WatchRequest_ProgressRequest{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*WatchRequest)
-	// request_union
-	switch x := m.RequestUnion.(type) {
-	case *WatchRequest_CreateRequest:
-		s := proto.Size(x.CreateRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *WatchRequest_CancelRequest:
-		s := proto.Size(x.CancelRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *WatchRequest_ProgressRequest:
-		s := proto.Size(x.ProgressRequest)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
-type WatchCreateRequest struct {
-	// key is the key to register for watching.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
-	// only the key argument is watched. If range_end is equal to '\0', all keys greater than
-	// or equal to the key argument are watched.
-	// If range_end is the given key with its last byte incremented by one,
-	// then all keys with the prefix (the given key) will be watched.
-	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	// start_revision is an optional revision to watch from (inclusive). Omitting start_revision means "now".
-	StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
-	// progress_notify is set so that the etcd server will periodically send a WatchResponse with
-	// no events to the new watcher if there are no recent events. It is useful when clients
-	// wish to recover a disconnected watcher starting from a recent known revision.
-	// The etcd server may decide how often it will send notifications based on current load.
-	ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
-	// filters filter the events on the server side before they are sent back to the watcher.
-	Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
-	// If prev_kv is set, the created watcher gets the previous KV before the event happens.
-	// If the previous KV is already compacted, nothing will be returned.
-	PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	// If watch_id is provided and non-zero, it will be assigned to this watcher.
-	// Since creating a watcher in etcd is not a synchronous operation,
-	// this can be used to ensure that ordering is correct when creating multiple
-	// watchers on the same stream. Creating a watcher with an ID already in
-	// use on the stream will cause an error to be returned.
-	WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
-	// fragment enables splitting large revisions into multiple watch responses.
-	Fragment             bool     `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *WatchCreateRequest) Reset()         { *m = WatchCreateRequest{} }
-func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCreateRequest) ProtoMessage()    {}
-func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{21}
-}
-func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchCreateRequest.Merge(m, src)
-}
-func (m *WatchCreateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchCreateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
-
-func (m *WatchCreateRequest) GetKey() []byte {
-	if m != nil {
-		return m.Key
-	}
-	return nil
-}
-
-func (m *WatchCreateRequest) GetRangeEnd() []byte {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return nil
-}
-
-func (m *WatchCreateRequest) GetStartRevision() int64 {
-	if m != nil {
-		return m.StartRevision
-	}
-	return 0
-}
-
-func (m *WatchCreateRequest) GetProgressNotify() bool {
-	if m != nil {
-		return m.ProgressNotify
-	}
-	return false
-}
-
-func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
-	if m != nil {
-		return m.Filters
-	}
-	return nil
-}
-
-func (m *WatchCreateRequest) GetPrevKv() bool {
-	if m != nil {
-		return m.PrevKv
-	}
-	return false
-}
-
-func (m *WatchCreateRequest) GetWatchId() int64 {
-	if m != nil {
-		return m.WatchId
-	}
-	return 0
-}
-
-func (m *WatchCreateRequest) GetFragment() bool {
-	if m != nil {
-		return m.Fragment
-	}
-	return false
-}
-
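The `range_end` comment above describes a prefix watch: the end of the range is the key with its last non-0xff byte incremented. A sketch of that computation; the function here is illustrative, not part of this package (clientv3 exposes an equivalent helper):

```go
// prefixRangeEnd returns the range_end that makes [key, range_end) cover
// every key with the given prefix.
func prefixRangeEnd(key []byte) []byte {
	end := make([]byte, len(key))
	copy(end, key)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1] // drop any trailing 0xff bytes that carried over
		}
	}
	// The key is all 0xff: watch everything >= key, encoded as '\0'.
	return []byte{0}
}
```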
-type WatchCancelRequest struct {
-	// watch_id is the watcher id to cancel so that no more events are transmitted.
-	WatchId              int64    `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *WatchCancelRequest) Reset()         { *m = WatchCancelRequest{} }
-func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCancelRequest) ProtoMessage()    {}
-func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{22}
-}
-func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchCancelRequest.Merge(m, src)
-}
-func (m *WatchCancelRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchCancelRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
-
-func (m *WatchCancelRequest) GetWatchId() int64 {
-	if m != nil {
-		return m.WatchId
-	}
-	return 0
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon as
-// possible.
-type WatchProgressRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *WatchProgressRequest) Reset()         { *m = WatchProgressRequest{} }
-func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchProgressRequest) ProtoMessage()    {}
-func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{23}
-}
-func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchProgressRequest.Merge(m, src)
-}
-func (m *WatchProgressRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchProgressRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
-
-type WatchResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// watch_id is the ID of the watcher that corresponds to the response.
-	WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
-	// created is set to true if the response is for a create watch request.
-	// The client should record the watch_id and expect to receive events for
-	// the created watcher from the same stream.
-	// All events sent to the created watcher will carry the same watch_id.
-	Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
-	// canceled is set to true if the response is for a cancel watch request.
-	// No further events will be sent to the canceled watcher.
-	Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
-	// compact_revision is set to the minimum index if a watcher tries to watch
-	// at a compacted index.
-	//
-	// This happens when creating a watcher at a compacted revision or the watcher cannot
-	// catch up with the progress of the key-value store.
-	//
-	// The client should treat the watcher as canceled and should not try to create any
-	// watcher with the same start_revision again.
-	CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
-	// cancel_reason indicates the reason for canceling the watcher.
-	CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
-	// fragment is set to true if a large watch response was split over multiple responses.
-	Fragment             bool            `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
-	Events               []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *WatchResponse) Reset()         { *m = WatchResponse{} }
-func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
-func (*WatchResponse) ProtoMessage()    {}
-func (*WatchResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{24}
-}
-func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *WatchResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_WatchResponse.Merge(m, src)
-}
-func (m *WatchResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *WatchResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_WatchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
-
-func (m *WatchResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *WatchResponse) GetWatchId() int64 {
-	if m != nil {
-		return m.WatchId
-	}
-	return 0
-}
-
-func (m *WatchResponse) GetCreated() bool {
-	if m != nil {
-		return m.Created
-	}
-	return false
-}
-
-func (m *WatchResponse) GetCanceled() bool {
-	if m != nil {
-		return m.Canceled
-	}
-	return false
-}
-
-func (m *WatchResponse) GetCompactRevision() int64 {
-	if m != nil {
-		return m.CompactRevision
-	}
-	return 0
-}
-
-func (m *WatchResponse) GetCancelReason() string {
-	if m != nil {
-		return m.CancelReason
-	}
-	return ""
-}
-
-func (m *WatchResponse) GetFragment() bool {
-	if m != nil {
-		return m.Fragment
-	}
-	return false
-}
-
-func (m *WatchResponse) GetEvents() []*mvccpb.Event {
-	if m != nil {
-		return m.Events
-	}
-	return nil
-}
-
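A sketch of one common recovery strategy for the compaction case described above: when a watch is canceled with a non-zero `compact_revision`, the watcher fell behind the compaction and must be re-created at or after that revision rather than at its old `start_revision`:

```go
package watchexample

import pb "go.etcd.io/etcd/api/v3/etcdserverpb" // assumed import path

// nextStartRev picks the revision to resume watching from.
func nextStartRev(resp *pb.WatchResponse, current int64) int64 {
	if resp.GetCanceled() && resp.GetCompactRevision() != 0 {
		return resp.GetCompactRevision() // resume at the compaction point
	}
	return current
}
```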
-type LeaseGrantRequest struct {
-	// TTL is the advisory time-to-live in seconds. An expired lease will return -1.
-	TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
-	ID                   int64    `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseGrantRequest) Reset()         { *m = LeaseGrantRequest{} }
-func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantRequest) ProtoMessage()    {}
-func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{25}
-}
-func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
-}
-func (m *LeaseGrantRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
-
-func (m *LeaseGrantRequest) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-func (m *LeaseGrantRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseGrantResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// ID is the lease ID for the granted lease.
-	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	// TTL is the server chosen lease time-to-live in seconds.
-	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	Error                string   `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseGrantResponse) Reset()         { *m = LeaseGrantResponse{} }
-func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantResponse) ProtoMessage()    {}
-func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{26}
-}
-func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
-}
-func (m *LeaseGrantResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
-
-func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseGrantResponse) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseGrantResponse) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-func (m *LeaseGrantResponse) GetError() string {
-	if m != nil {
-		return m.Error
-	}
-	return ""
-}
-
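In practice leases are granted through the v3 client, which wraps this RPC; a hedged sketch of granting a 10-second lease and attaching a key to it (clientv3 names as I recall them, to be checked against the vendored client):

```go
package leaseexample

import (
	"context"

	clientv3 "go.etcd.io/etcd/client/v3" // assumed import path
)

func grantAndAttach(ctx context.Context, cli *clientv3.Client) error {
	// TTL is in seconds; leaving the lease ID at 0 lets the lessor choose one.
	lease, err := cli.Grant(ctx, 10)
	if err != nil {
		return err
	}
	// The key is deleted automatically when the lease expires or is revoked.
	_, err = cli.Put(ctx, "k", "v", clientv3.WithLease(lease.ID))
	return err
}
```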
-type LeaseRevokeRequest struct {
-	// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
-	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseRevokeRequest) Reset()         { *m = LeaseRevokeRequest{} }
-func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeRequest) ProtoMessage()    {}
-func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{27}
-}
-func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
-}
-func (m *LeaseRevokeRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
-
-func (m *LeaseRevokeRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseRevokeResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *LeaseRevokeResponse) Reset()         { *m = LeaseRevokeResponse{} }
-func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeResponse) ProtoMessage()    {}
-func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{28}
-}
-func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
-}
-func (m *LeaseRevokeResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
-
-func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type LeaseKeepAliveRequest struct {
-	// ID is the lease ID for the lease to keep alive.
-	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseKeepAliveRequest) Reset()         { *m = LeaseKeepAliveRequest{} }
-func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveRequest) ProtoMessage()    {}
-func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{29}
-}
-func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
-}
-func (m *LeaseKeepAliveRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseKeepAliveResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// ID is the lease ID from the keep alive request.
-	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	// TTL is the new time-to-live for the lease.
-	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseKeepAliveResponse) Reset()         { *m = LeaseKeepAliveResponse{} }
-func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveResponse) ProtoMessage()    {}
-func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{30}
-}
-func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
-}
-func (m *LeaseKeepAliveResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseKeepAliveResponse) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseKeepAliveResponse) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-type LeaseTimeToLiveRequest struct {
-	// ID is the lease ID for the lease.
-	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// keys is true to query all the keys attached to this lease.
-	Keys                 bool     `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseTimeToLiveRequest) Reset()         { *m = LeaseTimeToLiveRequest{} }
-func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveRequest) ProtoMessage()    {}
-func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{31}
-}
-func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveRequest) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveRequest) GetKeys() bool {
-	if m != nil {
-		return m.Keys
-	}
-	return false
-}
-
-type LeaseTimeToLiveResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// ID is the lease ID from the keep alive request.
-	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
-	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
-	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
-	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
-	GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
-	// Keys is the list of keys attached to this lease.
-	Keys                 [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseTimeToLiveResponse) Reset()         { *m = LeaseTimeToLiveResponse{} }
-func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveResponse) ProtoMessage()    {}
-func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{32}
-}
-func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseTimeToLiveResponse) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
-	if m != nil {
-		return m.TTL
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
-	if m != nil {
-		return m.GrantedTTL
-	}
-	return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
-	if m != nil {
-		return m.Keys
-	}
-	return nil
-}
-
-type LeaseLeasesRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseLeasesRequest) Reset()         { *m = LeaseLeasesRequest{} }
-func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesRequest) ProtoMessage()    {}
-func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{33}
-}
-func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
-}
-func (m *LeaseLeasesRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
-
-type LeaseStatus struct {
-	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *LeaseStatus) Reset()         { *m = LeaseStatus{} }
-func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
-func (*LeaseStatus) ProtoMessage()    {}
-func (*LeaseStatus) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{34}
-}
-func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseStatus) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseStatus.Merge(m, src)
-}
-func (m *LeaseStatus) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseStatus) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
-
-func (m *LeaseStatus) GetID() int64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type LeaseLeasesResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Leases               []*LeaseStatus  `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *LeaseLeasesResponse) Reset()         { *m = LeaseLeasesResponse{} }
-func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesResponse) ProtoMessage()    {}
-func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{35}
-}
-func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
-}
-func (m *LeaseLeasesResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
-
-func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
-	if m != nil {
-		return m.Leases
-	}
-	return nil
-}
-
-type Member struct {
-	// ID is the member ID for this member.
-	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-	// peerURLs is the list of URLs the member exposes to the cluster for communication.
-	PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
-	// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
-	ClientURLs           []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *Member) Reset()         { *m = Member{} }
-func (m *Member) String() string { return proto.CompactTextString(m) }
-func (*Member) ProtoMessage()    {}
-func (*Member) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{36}
-}
-func (m *Member) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Member.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Member) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Member.Merge(m, src)
-}
-func (m *Member) XXX_Size() int {
-	return m.Size()
-}
-func (m *Member) XXX_DiscardUnknown() {
-	xxx_messageInfo_Member.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Member proto.InternalMessageInfo
-
-func (m *Member) GetID() uint64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *Member) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Member) GetPeerURLs() []string {
-	if m != nil {
-		return m.PeerURLs
-	}
-	return nil
-}
-
-func (m *Member) GetClientURLs() []string {
-	if m != nil {
-		return m.ClientURLs
-	}
-	return nil
-}
-
-type MemberAddRequest struct {
-	// peerURLs is the list of URLs the added member will use to communicate with the cluster.
-	PeerURLs             []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberAddRequest) Reset()         { *m = MemberAddRequest{} }
-func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberAddRequest) ProtoMessage()    {}
-func (*MemberAddRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{37}
-}
-func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberAddRequest.Merge(m, src)
-}
-func (m *MemberAddRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberAddRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
-
-func (m *MemberAddRequest) GetPeerURLs() []string {
-	if m != nil {
-		return m.PeerURLs
-	}
-	return nil
-}
-
-type MemberAddResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// member is the member information for the added member.
-	Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
-	// members is a list of all members after adding the new member.
-	Members              []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberAddResponse) Reset()         { *m = MemberAddResponse{} }
-func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberAddResponse) ProtoMessage()    {}
-func (*MemberAddResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{38}
-}
-func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberAddResponse.Merge(m, src)
-}
-func (m *MemberAddResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberAddResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
-
-func (m *MemberAddResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberAddResponse) GetMember() *Member {
-	if m != nil {
-		return m.Member
-	}
-	return nil
-}
-
-func (m *MemberAddResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type MemberRemoveRequest struct {
-	// ID is the member ID of the member to remove.
-	ID                   uint64   `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberRemoveRequest) Reset()         { *m = MemberRemoveRequest{} }
-func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveRequest) ProtoMessage()    {}
-func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{39}
-}
-func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
-}
-func (m *MemberRemoveRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
-
-func (m *MemberRemoveRequest) GetID() uint64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-type MemberRemoveResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// members is a list of all members after removing the member.
-	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberRemoveResponse) Reset()         { *m = MemberRemoveResponse{} }
-func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveResponse) ProtoMessage()    {}
-func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{40}
-}
-func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
-}
-func (m *MemberRemoveResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
-
-func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberRemoveResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type MemberUpdateRequest struct {
-	// ID is the member ID of the member to update.
-	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// peerURLs is the new list of URLs the member will use to communicate with the cluster.
-	PeerURLs             []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberUpdateRequest) Reset()         { *m = MemberUpdateRequest{} }
-func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateRequest) ProtoMessage()    {}
-func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{41}
-}
-func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
-}
-func (m *MemberUpdateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
-
-func (m *MemberUpdateRequest) GetID() uint64 {
-	if m != nil {
-		return m.ID
-	}
-	return 0
-}
-
-func (m *MemberUpdateRequest) GetPeerURLs() []string {
-	if m != nil {
-		return m.PeerURLs
-	}
-	return nil
-}
-
-type MemberUpdateResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// members is a list of all members after updating the member.
-	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberUpdateResponse) Reset()         { *m = MemberUpdateResponse{} }
-func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateResponse) ProtoMessage()    {}
-func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{42}
-}
-func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
-}
-func (m *MemberUpdateResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
-
-func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberUpdateResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type MemberListRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MemberListRequest) Reset()         { *m = MemberListRequest{} }
-func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberListRequest) ProtoMessage()    {}
-func (*MemberListRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{43}
-}
-func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberListRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberListRequest.Merge(m, src)
-}
-func (m *MemberListRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberListRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
-
-type MemberListResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// members is a list of all members associated with the cluster.
-	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *MemberListResponse) Reset()         { *m = MemberListResponse{} }
-func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberListResponse) ProtoMessage()    {}
-func (*MemberListResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{44}
-}
-func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MemberListResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MemberListResponse.Merge(m, src)
-}
-func (m *MemberListResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MemberListResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
-
-func (m *MemberListResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *MemberListResponse) GetMembers() []*Member {
-	if m != nil {
-		return m.Members
-	}
-	return nil
-}
-
-type DefragmentRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *DefragmentRequest) Reset()         { *m = DefragmentRequest{} }
-func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
-func (*DefragmentRequest) ProtoMessage()    {}
-func (*DefragmentRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{45}
-}
-func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DefragmentRequest.Merge(m, src)
-}
-func (m *DefragmentRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *DefragmentRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
-
-type DefragmentResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *DefragmentResponse) Reset()         { *m = DefragmentResponse{} }
-func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
-func (*DefragmentResponse) ProtoMessage()    {}
-func (*DefragmentResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{46}
-}
-func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_DefragmentResponse.Merge(m, src)
-}
-func (m *DefragmentResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *DefragmentResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
-
-func (m *DefragmentResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type MoveLeaderRequest struct {
-	// targetID is the node ID for the new leader.
-	TargetID             uint64   `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *MoveLeaderRequest) Reset()         { *m = MoveLeaderRequest{} }
-func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderRequest) ProtoMessage()    {}
-func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{47}
-}
-func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
-}
-func (m *MoveLeaderRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
-
-func (m *MoveLeaderRequest) GetTargetID() uint64 {
-	if m != nil {
-		return m.TargetID
-	}
-	return 0
-}
-
-type MoveLeaderResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *MoveLeaderResponse) Reset()         { *m = MoveLeaderResponse{} }
-func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderResponse) ProtoMessage()    {}
-func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{48}
-}
-func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
-}
-func (m *MoveLeaderResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
-
-func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AlarmRequest struct {
-	// action is the kind of alarm request to issue. The action
-	// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
-	// raised alarm.
-	Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
-	// memberID is the ID of the member associated with the alarm. If memberID is 0, the
-	// alarm request covers all members.
-	MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
-	// alarm is the type of alarm to consider for this request.
-	Alarm                AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *AlarmRequest) Reset()         { *m = AlarmRequest{} }
-func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
-func (*AlarmRequest) ProtoMessage()    {}
-func (*AlarmRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{49}
-}
-func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AlarmRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AlarmRequest.Merge(m, src)
-}
-func (m *AlarmRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AlarmRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
-
-func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
-	if m != nil {
-		return m.Action
-	}
-	return AlarmRequest_GET
-}
-
-func (m *AlarmRequest) GetMemberID() uint64 {
-	if m != nil {
-		return m.MemberID
-	}
-	return 0
-}
-
-func (m *AlarmRequest) GetAlarm() AlarmType {
-	if m != nil {
-		return m.Alarm
-	}
-	return AlarmType_NONE
-}
-
-type AlarmMember struct {
-	// memberID is the ID of the member associated with the raised alarm.
-	MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
-	// alarm is the type of alarm which has been raised.
-	Alarm                AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *AlarmMember) Reset()         { *m = AlarmMember{} }
-func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
-func (*AlarmMember) ProtoMessage()    {}
-func (*AlarmMember) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{50}
-}
-func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AlarmMember) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AlarmMember.Merge(m, src)
-}
-func (m *AlarmMember) XXX_Size() int {
-	return m.Size()
-}
-func (m *AlarmMember) XXX_DiscardUnknown() {
-	xxx_messageInfo_AlarmMember.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
-
-func (m *AlarmMember) GetMemberID() uint64 {
-	if m != nil {
-		return m.MemberID
-	}
-	return 0
-}
-
-func (m *AlarmMember) GetAlarm() AlarmType {
-	if m != nil {
-		return m.Alarm
-	}
-	return AlarmType_NONE
-}
-
-type AlarmResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// alarms is a list of alarms associated with the alarm request.
-	Alarms               []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *AlarmResponse) Reset()         { *m = AlarmResponse{} }
-func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
-func (*AlarmResponse) ProtoMessage()    {}
-func (*AlarmResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{51}
-}
-func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AlarmResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AlarmResponse.Merge(m, src)
-}
-func (m *AlarmResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AlarmResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
-
-func (m *AlarmResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AlarmResponse) GetAlarms() []*AlarmMember {
-	if m != nil {
-		return m.Alarms
-	}
-	return nil
-}
-
-type StatusRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
-func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
-func (*StatusRequest) ProtoMessage()    {}
-func (*StatusRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{52}
-}
-func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StatusRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StatusRequest.Merge(m, src)
-}
-func (m *StatusRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *StatusRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_StatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
-
-type StatusResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// version is the cluster protocol version used by the responding member.
-	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
-	// dbSize is the size of the backend database, in bytes, of the responding member.
-	DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
-	// leader is the member ID which the responding member believes is the current leader.
-	Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
-	// raftIndex is the current raft index of the responding member.
-	RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
-	// raftTerm is the current raft term of the responding member.
-	RaftTerm             uint64   `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
-func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
-func (*StatusResponse) ProtoMessage()    {}
-func (*StatusResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{53}
-}
-func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *StatusResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_StatusResponse.Merge(m, src)
-}
-func (m *StatusResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *StatusResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_StatusResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
-
-func (m *StatusResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *StatusResponse) GetVersion() string {
-	if m != nil {
-		return m.Version
-	}
-	return ""
-}
-
-func (m *StatusResponse) GetDbSize() int64 {
-	if m != nil {
-		return m.DbSize
-	}
-	return 0
-}
-
-func (m *StatusResponse) GetLeader() uint64 {
-	if m != nil {
-		return m.Leader
-	}
-	return 0
-}
-
-func (m *StatusResponse) GetRaftIndex() uint64 {
-	if m != nil {
-		return m.RaftIndex
-	}
-	return 0
-}
-
-func (m *StatusResponse) GetRaftTerm() uint64 {
-	if m != nil {
-		return m.RaftTerm
-	}
-	return 0
-}
-
-type AuthEnableRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthEnableRequest) Reset()         { *m = AuthEnableRequest{} }
-func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableRequest) ProtoMessage()    {}
-func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{54}
-}
-func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthEnableRequest.Merge(m, src)
-}
-func (m *AuthEnableRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthEnableRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
-
-type AuthDisableRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthDisableRequest) Reset()         { *m = AuthDisableRequest{} }
-func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableRequest) ProtoMessage()    {}
-func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{55}
-}
-func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthDisableRequest.Merge(m, src)
-}
-func (m *AuthDisableRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthDisableRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
-
-type AuthenticateRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthenticateRequest) Reset()         { *m = AuthenticateRequest{} }
-func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateRequest) ProtoMessage()    {}
-func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{56}
-}
-func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthenticateRequest.Merge(m, src)
-}
-func (m *AuthenticateRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthenticateRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
-
-func (m *AuthenticateRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthenticateRequest) GetPassword() string {
-	if m != nil {
-		return m.Password
-	}
-	return ""
-}
-
-type AuthUserAddRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserAddRequest) Reset()         { *m = AuthUserAddRequest{} }
-func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddRequest) ProtoMessage()    {}
-func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{57}
-}
-func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
-}
-func (m *AuthUserAddRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
-
-func (m *AuthUserAddRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthUserAddRequest) GetPassword() string {
-	if m != nil {
-		return m.Password
-	}
-	return ""
-}
-
-type AuthUserGetRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserGetRequest) Reset()         { *m = AuthUserGetRequest{} }
-func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetRequest) ProtoMessage()    {}
-func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{58}
-}
-func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
-}
-func (m *AuthUserGetRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
-
-func (m *AuthUserGetRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-type AuthUserDeleteRequest struct {
-	// name is the name of the user to delete.
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserDeleteRequest) Reset()         { *m = AuthUserDeleteRequest{} }
-func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteRequest) ProtoMessage()    {}
-func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{59}
-}
-func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
-}
-func (m *AuthUserDeleteRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthUserDeleteRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-type AuthUserChangePasswordRequest struct {
-	// name is the name of the user whose password is being changed.
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	// password is the new password for the user.
-	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserChangePasswordRequest) Reset()         { *m = AuthUserChangePasswordRequest{} }
-func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordRequest) ProtoMessage()    {}
-func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{60}
-}
-func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthUserChangePasswordRequest) GetPassword() string {
-	if m != nil {
-		return m.Password
-	}
-	return ""
-}
-
-type AuthUserGrantRoleRequest struct {
-	// user is the name of the user which should be granted a given role.
-	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
-	// role is the name of the role to grant to the user.
-	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserGrantRoleRequest) Reset()         { *m = AuthUserGrantRoleRequest{} }
-func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleRequest) ProtoMessage()    {}
-func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{61}
-}
-func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleRequest) GetUser() string {
-	if m != nil {
-		return m.User
-	}
-	return ""
-}
-
-func (m *AuthUserGrantRoleRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthUserRevokeRoleRequest struct {
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleRequest) Reset()         { *m = AuthUserRevokeRoleRequest{} }
-func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleRequest) ProtoMessage()    {}
-func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{62}
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthUserRevokeRoleRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthRoleAddRequest struct {
-	// name is the name of the role to add to the authentication system.
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleAddRequest) Reset()         { *m = AuthRoleAddRequest{} }
-func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddRequest) ProtoMessage()    {}
-func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{63}
-}
-func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
-}
-func (m *AuthRoleAddRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
-
-func (m *AuthRoleAddRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-type AuthRoleGetRequest struct {
-	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleGetRequest) Reset()         { *m = AuthRoleGetRequest{} }
-func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetRequest) ProtoMessage()    {}
-func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{64}
-}
-func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
-}
-func (m *AuthRoleGetRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGetRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthUserListRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthUserListRequest) Reset()         { *m = AuthUserListRequest{} }
-func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListRequest) ProtoMessage()    {}
-func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{65}
-}
-func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserListRequest.Merge(m, src)
-}
-func (m *AuthUserListRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserListRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
-
-type AuthRoleListRequest struct {
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleListRequest) Reset()         { *m = AuthRoleListRequest{} }
-func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListRequest) ProtoMessage()    {}
-func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{66}
-}
-func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
-}
-func (m *AuthRoleListRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
-
-type AuthRoleDeleteRequest struct {
-	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleDeleteRequest) Reset()         { *m = AuthRoleDeleteRequest{} }
-func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteRequest) ProtoMessage()    {}
-func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{67}
-}
-func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
-}
-func (m *AuthRoleDeleteRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-type AuthRoleGrantPermissionRequest struct {
-	// name is the name of the role which will be granted the permission.
-	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	// perm is the permission to grant to the role.
-	Perm                 *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionRequest) Reset()         { *m = AuthRoleGrantPermissionRequest{} }
-func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionRequest) ProtoMessage()    {}
-func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{68}
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
-	if m != nil {
-		return m.Perm
-	}
-	return nil
-}
-
-type AuthRoleRevokePermissionRequest struct {
-	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
-	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
-	RangeEnd             string   `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionRequest) Reset()         { *m = AuthRoleRevokePermissionRequest{} }
-func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionRequest) ProtoMessage()    {}
-func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{69}
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionRequest) GetRole() string {
-	if m != nil {
-		return m.Role
-	}
-	return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetKey() string {
-	if m != nil {
-		return m.Key
-	}
-	return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
-	if m != nil {
-		return m.RangeEnd
-	}
-	return ""
-}
-
-type AuthEnableResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthEnableResponse) Reset()         { *m = AuthEnableResponse{} }
-func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableResponse) ProtoMessage()    {}
-func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{70}
-}
-func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthEnableResponse.Merge(m, src)
-}
-func (m *AuthEnableResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthEnableResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
-
-func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthDisableResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthDisableResponse) Reset()         { *m = AuthDisableResponse{} }
-func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableResponse) ProtoMessage()    {}
-func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{71}
-}
-func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthDisableResponse.Merge(m, src)
-}
-func (m *AuthDisableResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthDisableResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
-
-func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthenticateResponse struct {
-	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// token is an authorized token that can be used in succeeding RPCs
-	Token                string   `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *AuthenticateResponse) Reset()         { *m = AuthenticateResponse{} }
-func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateResponse) ProtoMessage()    {}
-func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{72}
-}
-func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthenticateResponse.Merge(m, src)
-}
-func (m *AuthenticateResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthenticateResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
-
-func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthenticateResponse) GetToken() string {
-	if m != nil {
-		return m.Token
-	}
-	return ""
-}
-
-type AuthUserAddResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserAddResponse) Reset()         { *m = AuthUserAddResponse{} }
-func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddResponse) ProtoMessage()    {}
-func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{73}
-}
-func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
-}
-func (m *AuthUserAddResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
-
-func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserGetResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserGetResponse) Reset()         { *m = AuthUserGetResponse{} }
-func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetResponse) ProtoMessage()    {}
-func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{74}
-}
-func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
-}
-func (m *AuthUserGetResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
-
-func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthUserGetResponse) GetRoles() []string {
-	if m != nil {
-		return m.Roles
-	}
-	return nil
-}
-
-type AuthUserDeleteResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserDeleteResponse) Reset()         { *m = AuthUserDeleteResponse{} }
-func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteResponse) ProtoMessage()    {}
-func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{75}
-}
-func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
-}
-func (m *AuthUserDeleteResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserChangePasswordResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserChangePasswordResponse) Reset()         { *m = AuthUserChangePasswordResponse{} }
-func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordResponse) ProtoMessage()    {}
-func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{76}
-}
-func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserGrantRoleResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserGrantRoleResponse) Reset()         { *m = AuthUserGrantRoleResponse{} }
-func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleResponse) ProtoMessage()    {}
-func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{77}
-}
-func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthUserRevokeRoleResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleResponse) Reset()         { *m = AuthUserRevokeRoleResponse{} }
-func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleResponse) ProtoMessage()    {}
-func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{78}
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleAddResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleAddResponse) Reset()         { *m = AuthRoleAddResponse{} }
-func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddResponse) ProtoMessage()    {}
-func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{79}
-}
-func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
-}
-func (m *AuthRoleAddResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
-
-func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleGetResponse struct {
-	Header               *ResponseHeader      `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Perm                 []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
-	XXX_unrecognized     []byte               `json:"-"`
-	XXX_sizecache        int32                `json:"-"`
-}
-
-func (m *AuthRoleGetResponse) Reset()         { *m = AuthRoleGetResponse{} }
-func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetResponse) ProtoMessage()    {}
-func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{80}
-}
-func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
-}
-func (m *AuthRoleGetResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
-	if m != nil {
-		return m.Perm
-	}
-	return nil
-}
-
-type AuthRoleListResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleListResponse) Reset()         { *m = AuthRoleListResponse{} }
-func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListResponse) ProtoMessage()    {}
-func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{81}
-}
-func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
-}
-func (m *AuthRoleListResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
-
-func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthRoleListResponse) GetRoles() []string {
-	if m != nil {
-		return m.Roles
-	}
-	return nil
-}
-
-type AuthUserListResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	Users                []string        `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthUserListResponse) Reset()         { *m = AuthUserListResponse{} }
-func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListResponse) ProtoMessage()    {}
-func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{82}
-}
-func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthUserListResponse.Merge(m, src)
-}
-func (m *AuthUserListResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthUserListResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
-
-func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func (m *AuthUserListResponse) GetUsers() []string {
-	if m != nil {
-		return m.Users
-	}
-	return nil
-}
-
-type AuthRoleDeleteResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleDeleteResponse) Reset()         { *m = AuthRoleDeleteResponse{} }
-func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteResponse) ProtoMessage()    {}
-func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{83}
-}
-func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
-}
-func (m *AuthRoleDeleteResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleGrantPermissionResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionResponse) Reset()         { *m = AuthRoleGrantPermissionResponse{} }
-func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionResponse) ProtoMessage()    {}
-func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{84}
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-type AuthRoleRevokePermissionResponse struct {
-	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
-	XXX_unrecognized     []byte          `json:"-"`
-	XXX_sizecache        int32           `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionResponse) Reset()         { *m = AuthRoleRevokePermissionResponse{} }
-func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionResponse) ProtoMessage()    {}
-func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_77a6da22d6a3feb1, []int{85}
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
-	return m.Size()
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
-	if m != nil {
-		return m.Header
-	}
-	return nil
-}
-
-func init() {
-	proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
-	proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
-	proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
-	proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
-	proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
-	proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
-	proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
-	proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
-	proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
-	proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
-	proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
-	proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
-	proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
-	proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
-	proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
-	proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
-	proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
-	proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
-	proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
-	proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
-	proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
-	proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
-	proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
-	proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
-	proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
-	proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
-	proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
-	proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
-	proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
-	proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
-	proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
-	proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
-	proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
-	proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
-	proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
-	proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
-	proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
-	proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
-	proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
-	proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
-	proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
-	proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
-	proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
-	proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
-	proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
-	proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
-	proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
-	proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
-	proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
-	proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
-	proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
-	proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
-	proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
-	proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
-	proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
-	proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
-	proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
-	proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
-	proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
-	proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
-	proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
-	proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
-	proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
-	proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
-	proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
-	proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
-	proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
-	proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
-	proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
-	proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
-	proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
-	proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
-	proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
-	proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
-	proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
-	proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
-	proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
-	proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
-	proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
-	proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
-	proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
-	proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
-	proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
-	proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
-	proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
-	proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
-	proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
-	proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
-	proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
-	proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
-	proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
-	proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
-	proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
-}
-
-func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
-
-var fileDescriptor_77a6da22d6a3feb1 = []byte{
-	// 3711 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47,
-	0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd,
-	0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4,
-	0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 0x00, 0x22,
-	0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff,
-	0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6,
-	0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf,
-	0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9,
-	0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d,
-	0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd,
-	0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f,
-	0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6,
-	0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f,
-	0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f,
-	0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf,
-	0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9,
-	0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3,
-	0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c,
-	0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42,
-	0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2,
-	0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde,
-	0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae,
-	0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c,
-	0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f,
-	0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23,
-	0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d,
-	0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62,
-	0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6,
-	0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21,
-	0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90,
-	0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8,
-	0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97,
-	0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67,
-	0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8,
-	0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1,
-	0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d,
-	0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95,
-	0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64,
-	0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17,
-	0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d,
-	0xda, 0x24, 0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61,
-	0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a,
-	0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62,
-	0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19,
-	0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93,
-	0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2,
-	0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51,
-	0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24,
-	0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13,
-	0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d,
-	0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc,
-	0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32,
-	0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa,
-	0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34,
-	0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92,
-	0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43,
-	0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05,
-	0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a,
-	0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0,
-	0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53,
-	0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2,
-	0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75,
-	0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42,
-	0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90,
-	0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e,
-	0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15,
-	0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8,
-	0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a,
-	0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9,
-	0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e,
-	0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64,
-	0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c,
-	0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72,
-	0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c,
-	0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74,
-	0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41,
-	0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d,
-	0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac,
-	0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3,
-	0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00,
-	0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0,
-	0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2,
-	0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08,
-	0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c,
-	0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58,
-	0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71,
-	0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7,
-	0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72,
-	0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12,
-	0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02,
-	0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0,
-	0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf,
-	0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9,
-	0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38,
-	0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35,
-	0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e,
-	0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f,
-	0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2,
-	0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65,
-	0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03,
-	0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89,
-	0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c,
-	0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26,
-	0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46,
-	0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5,
-	0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd,
-	0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90,
-	0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c,
-	0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d,
-	0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba,
-	0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9,
-	0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8,
-	0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf,
-	0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e,
-	0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5,
-	0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b,
-	0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56,
-	0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2,
-	0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd,
-	0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb,
-	0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27,
-	0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63,
-	0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2,
-	0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73,
-	0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4,
-	0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13,
-	0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40,
-	0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b,
-	0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde,
-	0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76,
-	0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f,
-	0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4,
-	0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7,
-	0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc,
-	0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef,
-	0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f,
-	0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff,
-	0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d,
-	0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf,
-	0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd,
-	0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c,
-	0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a,
-	0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62,
-	0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28,
-	0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4,
-	0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28,
-	0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23,
-	0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc,
-	0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 0x3a, 0xda,
-	0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad,
-	0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60,
-	0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef,
-	0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa,
-	0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b,
-	0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56,
-	0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c,
-	0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c,
-	0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae,
-	0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f,
-	0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74,
-	0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20,
-	0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f,
-	0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3,
-	0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a,
-	0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56,
-	0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f,
-	0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6,
-	0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce,
-	0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4,
-	0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14,
-	0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c,
-	0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e,
-	0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc,
-	0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac,
-	0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9,
-	0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a,
-	0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8,
-	0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2,
-	0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14,
-	0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31,
-	0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca,
-	0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0,
-	0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89,
-	0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8,
-	0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97,
-	0x21, 0x00, 0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc,
-	0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0,
-	0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4,
-	0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55,
-	0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c,
-	0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0,
-	0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f,
-	0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52,
-	0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a,
-	0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02,
-	0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85,
-	0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99,
-	0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11,
-	0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d,
-	0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1,
-	0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0,
-	0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06,
-	0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe,
-	0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27,
-	0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf,
-	0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b,
-	0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a,
-	0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89,
-	0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae,
-	0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e,
-	0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33,
-	0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54,
-	0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88,
-	0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e,
-	0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87,
-	0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f,
-	0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34,
-	0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2,
-	0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2,
-	0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4,
-	0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca,
-	0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff,
-	0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9,
-	0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86,
-	0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1,
-	0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71,
-	0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e,
-	0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc,
-	0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b,
-	0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6,
-	0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd,
-	0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
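A hand-written companion to the version guard above, in the same compile-time-assertion idiom (illustrative only; protoc-gen-go does not emit this line), is to assert that the embeddable stub defined further down satisfies the service interface at zero runtime cost:

// Compile-time check: *UnimplementedKVServer must satisfy KVServer.
// This fails to build, rather than panic, if the two ever drift apart.
var _ KVServer = (*UnimplementedKVServer)(nil)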
-
-// KVClient is the client API for KV service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type KVClient interface {
-	// Range gets the keys in the range from the key-value store.
-	Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
-	// Put puts the given key into the key-value store.
-	// A put request increments the revision of the key-value store
-	// and generates one event in the event history.
-	Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
-	// DeleteRange deletes the given range from the key-value store.
-	// A delete request increments the revision of the key-value store
-	// and generates a delete event in the event history for every deleted key.
-	DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
-	// Txn processes multiple requests in a single transaction.
-	// A txn request increments the revision of the key-value store
-	// and generates events with the same revision for every completed request.
-	// It is not allowed to modify the same key several times within one txn.
-	Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
-	// Compact compacts the event history in the etcd key-value store. The key-value
-	// store should be periodically compacted or the event history will continue to grow
-	// indefinitely.
-	Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
-}
-
-type kVClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewKVClient(cc *grpc.ClientConn) KVClient {
-	return &kVClient{cc}
-}
-
-func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
-	out := new(RangeResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
-	out := new(PutResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
-	out := new(DeleteRangeResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
-	out := new(TxnResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
-	out := new(CompactionResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// KVServer is the server API for KV service.
-type KVServer interface {
-	// Range gets the keys in the range from the key-value store.
-	Range(context.Context, *RangeRequest) (*RangeResponse, error)
-	// Put puts the given key into the key-value store.
-	// A put request increments the revision of the key-value store
-	// and generates one event in the event history.
-	Put(context.Context, *PutRequest) (*PutResponse, error)
-	// DeleteRange deletes the given range from the key-value store.
-	// A delete request increments the revision of the key-value store
-	// and generates a delete event in the event history for every deleted key.
-	DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
-	// Txn processes multiple requests in a single transaction.
-	// A txn request increments the revision of the key-value store
-	// and generates events with the same revision for every completed request.
-	// It is not allowed to modify the same key several times within one txn.
-	Txn(context.Context, *TxnRequest) (*TxnResponse, error)
-	// Compact compacts the event history in the etcd key-value store. The key-value
-	// store should be periodically compacted or the event history will continue to grow
-	// indefinitely.
-	Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
-}
-
-// UnimplementedKVServer can be embedded to have forward compatible implementations.
-type UnimplementedKVServer struct {
-}
-
-func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
-}
-func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
-}
-func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
-}
-func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
-}
-func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
-}
-
-func RegisterKVServer(s *grpc.Server, srv KVServer) {
-	s.RegisterService(&_KV_serviceDesc, srv)
-}
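To make the forward-compatibility comment on UnimplementedKVServer concrete, here is a minimal sketch of a handler that embeds the stub and overrides only Range; demoKVServer and its behavior are illustrative assumptions, not part of this package:

// demoKVServer is hypothetical user code. Embedding UnimplementedKVServer
// means methods added to the KV proto later are inherited as Unimplemented
// stubs instead of breaking this type's build.
type demoKVServer struct {
	UnimplementedKVServer
}

// Range overrides a single method; all others fall through to the stub.
func (s *demoKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
	return &RangeResponse{Count: 0}, nil // a real server would query its store
}

// Wiring it up (sketch):
//   s := grpc.NewServer()
//   RegisterKVServer(s, &demoKVServer{})
//   s.Serve(lis)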
-
-func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(RangeRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Range(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Range",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Range(ctx, req.(*RangeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(PutRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Put(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Put",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Put(ctx, req.(*PutRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteRangeRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).DeleteRange(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/DeleteRange",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(TxnRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Txn(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Txn",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CompactionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(KVServer).Compact(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.KV/Compact",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
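Each unary handler above threads an optional grpc.UnaryServerInterceptor around the real method call. A minimal sketch of such an interceptor (the logging behavior and the "log" import are assumptions for illustration):

// loggingInterceptor wraps every unary RPC, logging the full method name
// (e.g. "/etcdserverpb.KV/Range") before delegating to the actual handler.
func loggingInterceptor(ctx context.Context, req interface{},
	info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary rpc: %s", info.FullMethod)
	return handler(ctx, req)
}

// Installed once at server construction:
//   s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))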
-
-var _KV_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.KV",
-	HandlerType: (*KVServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Range",
-			Handler:    _KV_Range_Handler,
-		},
-		{
-			MethodName: "Put",
-			Handler:    _KV_Put_Handler,
-		},
-		{
-			MethodName: "DeleteRange",
-			Handler:    _KV_DeleteRange_Handler,
-		},
-		{
-			MethodName: "Txn",
-			Handler:    _KV_Txn_Handler,
-		},
-		{
-			MethodName: "Compact",
-			Handler:    _KV_Compact_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "rpc.proto",
-}
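A minimal client-side sketch tying the KV pieces above together. The endpoint "localhost:2379", the timeout, the key, and the context/log/time imports are assumptions; grpc.WithInsecure matches the grpc-go generation this file targets, and the unqualified generated names assume the snippet lives in (or imports) this package:

conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
if err != nil {
	log.Fatalf("dial: %v", err)
}
defer conn.Close()

kv := NewKVClient(conn)
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// Put bumps the store revision and records one event in the history.
if _, err := kv.Put(ctx, &PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
	log.Fatalf("put: %v", err)
}
// Range reads the key back; resp.Kvs holds the matching key-value pairs.
resp, err := kv.Range(ctx, &RangeRequest{Key: []byte("foo")})
if err != nil {
	log.Fatalf("range: %v", err)
}
log.Printf("got %d kvs at revision %d", resp.Count, resp.Header.Revision)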
-
-// WatchClient is the client API for Watch service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type WatchClient interface {
-	// Watch watches for events happening or that have happened. Both input and output
-	// are streams; the input stream is for creating and canceling watchers and the output
-	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
-	// for several watches at once. The entire event history can be watched starting from the
-	// last compaction revision.
-	Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
-}
-
-type watchClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewWatchClient(cc *grpc.ClientConn) WatchClient {
-	return &watchClient{cc}
-}
-
-func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &watchWatchClient{stream}
-	return x, nil
-}
-
-type Watch_WatchClient interface {
-	Send(*WatchRequest) error
-	Recv() (*WatchResponse, error)
-	grpc.ClientStream
-}
-
-type watchWatchClient struct {
-	grpc.ClientStream
-}
-
-func (x *watchWatchClient) Send(m *WatchRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *watchWatchClient) Recv() (*WatchResponse, error) {
-	m := new(WatchResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-// WatchServer is the server API for Watch service.
-type WatchServer interface {
-	// Watch watches for events happening or that have happened. Both input and output
-	// are streams; the input stream is for creating and canceling watchers and the output
-	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
-	// for several watches at once. The entire event history can be watched starting from the
-	// last compaction revision.
-	Watch(Watch_WatchServer) error
-}
-
-// UnimplementedWatchServer can be embedded to have forward compatible implementations.
-type UnimplementedWatchServer struct {
-}
-
-func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
-	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
-}
-
-func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
-	s.RegisterService(&_Watch_serviceDesc, srv)
-}
-
-func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(WatchServer).Watch(&watchWatchServer{stream})
-}
-
-type Watch_WatchServer interface {
-	Send(*WatchResponse) error
-	Recv() (*WatchRequest, error)
-	grpc.ServerStream
-}
-
-type watchWatchServer struct {
-	grpc.ServerStream
-}
-
-func (x *watchWatchServer) Send(m *WatchResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func (x *watchWatchServer) Recv() (*WatchRequest, error) {
-	m := new(WatchRequest)
-	if err := x.ServerStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-var _Watch_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Watch",
-	HandlerType: (*WatchServer)(nil),
-	Methods:     []grpc.MethodDesc{},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "Watch",
-			Handler:       _Watch_Watch_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
-		},
-	},
-	Metadata: "rpc.proto",
-}
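Because Watch is a bidirectional stream, the client drives it with Send for watcher creation or cancellation and a Recv loop for events. A hedged sketch, reusing conn from the KV sketch above; the watched key and the bare break on error are assumptions:

wc := NewWatchClient(conn)
stream, err := wc.Watch(context.Background())
if err != nil {
	log.Fatalf("watch: %v", err)
}
// The input stream creates (or cancels) watchers...
if err := stream.Send(&WatchRequest{
	RequestUnion: &WatchRequest_CreateRequest{
		CreateRequest: &WatchCreateRequest{Key: []byte("foo")},
	},
}); err != nil {
	log.Fatalf("send: %v", err)
}
// ...and the output stream delivers events, possibly for several watchers.
for {
	resp, err := stream.Recv()
	if err != nil {
		break // stream closed or context canceled
	}
	for _, ev := range resp.Events {
		log.Printf("%s %s=%s", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}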
-
-// LeaseClient is the client API for Lease service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LeaseClient interface {
-	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
-	// within a given time to live period. All keys attached to the lease will be expired and
-	// deleted if the lease expires. Each expired key generates a delete event in the event history.
-	LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
-	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
-	LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
-	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
-	// to the server and streaming keep alive responses from the server to the client.
-	LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
-	// LeaseTimeToLive retrieves lease information.
-	LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
-	// LeaseLeases lists all existing leases.
-	LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
-}
-
-type leaseClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
-	return &leaseClient{cc}
-}
-
-func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
-	out := new(LeaseGrantResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
-	out := new(LeaseRevokeResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &leaseLeaseKeepAliveClient{stream}
-	return x, nil
-}
-
-type Lease_LeaseKeepAliveClient interface {
-	Send(*LeaseKeepAliveRequest) error
-	Recv() (*LeaseKeepAliveResponse, error)
-	grpc.ClientStream
-}
-
-type leaseLeaseKeepAliveClient struct {
-	grpc.ClientStream
-}
-
-func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
-	m := new(LeaseKeepAliveResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
-	out := new(LeaseTimeToLiveResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
-	out := new(LeaseLeasesResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// LeaseServer is the server API for Lease service.
-type LeaseServer interface {
-	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
-	// within a given time to live period. All keys attached to the lease will be expired and
-	// deleted if the lease expires. Each expired key generates a delete event in the event history.
-	LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
-	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
-	LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
-	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
-	// to the server and streaming keep alive responses from the server to the client.
-	LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
-	// LeaseTimeToLive retrieves lease information.
-	LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
-	// LeaseLeases lists all existing leases.
-	LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
-}
-
-// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
-type UnimplementedLeaseServer struct {
-}
-
-func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
-	return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
-}
-
-func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
-	s.RegisterService(&_Lease_serviceDesc, srv)
-}
-
-func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseGrantRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseGrant(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseGrant",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseRevokeRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseRevoke(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
-}
-
-type Lease_LeaseKeepAliveServer interface {
-	Send(*LeaseKeepAliveResponse) error
-	Recv() (*LeaseKeepAliveRequest, error)
-	grpc.ServerStream
-}
-
-type leaseLeaseKeepAliveServer struct {
-	grpc.ServerStream
-}
-
-func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
-	m := new(LeaseKeepAliveRequest)
-	if err := x.ServerStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseTimeToLiveRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(LeaseLeasesRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(LeaseServer).LeaseLeases(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Lease/LeaseLeases",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Lease_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Lease",
-	HandlerType: (*LeaseServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "LeaseGrant",
-			Handler:    _Lease_LeaseGrant_Handler,
-		},
-		{
-			MethodName: "LeaseRevoke",
-			Handler:    _Lease_LeaseRevoke_Handler,
-		},
-		{
-			MethodName: "LeaseTimeToLive",
-			Handler:    _Lease_LeaseTimeToLive_Handler,
-		},
-		{
-			MethodName: "LeaseLeases",
-			Handler:    _Lease_LeaseLeases_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "LeaseKeepAlive",
-			Handler:       _Lease_LeaseKeepAlive_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
-		},
-	},
-	Metadata: "rpc.proto",
-}
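A sketch of the lease lifecycle across the unary and streaming RPCs above: grant a TTL, attach a key, then refresh over the LeaseKeepAlive stream. The 10-second TTL and the key are assumptions, and conn, ctx, and kv are reused from the KV sketch:

lc := NewLeaseClient(conn)
grant, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{TTL: 10})
if err != nil {
	log.Fatalf("grant: %v", err)
}
// Attach a key to the lease; it is deleted when the lease expires.
if _, err := kv.Put(ctx, &PutRequest{Key: []byte("foo"), Value: []byte("bar"), Lease: grant.ID}); err != nil {
	log.Fatalf("put: %v", err)
}
// Keep the lease alive over the bidirectional stream.
ka, err := lc.LeaseKeepAlive(context.Background())
if err != nil {
	log.Fatalf("keepalive: %v", err)
}
if err := ka.Send(&LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
	log.Fatalf("send: %v", err)
}
resp, err := ka.Recv()
if err != nil {
	log.Fatalf("recv: %v", err)
}
log.Printf("lease %x refreshed, ttl=%ds", resp.ID, resp.TTL)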
-
-// ClusterClient is the client API for Cluster service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ClusterClient interface {
-	// MemberAdd adds a member into the cluster.
-	MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
-	// MemberRemove removes an existing member from the cluster.
-	MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
-	// MemberUpdate updates the member configuration.
-	MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
-	// MemberList lists all the members in the cluster.
-	MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
-}
-
-type clusterClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
-	return &clusterClient{cc}
-}
-
-func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
-	out := new(MemberAddResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
-	out := new(MemberRemoveResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
-	out := new(MemberUpdateResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
-	out := new(MemberListResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// ClusterServer is the server API for Cluster service.
-type ClusterServer interface {
-	// MemberAdd adds a member into the cluster.
-	MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
-	// MemberRemove removes an existing member from the cluster.
-	MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
-	// MemberUpdate updates the member configuration.
-	MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
-	// MemberList lists all the members in the cluster.
-	MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
-}
-
-// UnimplementedClusterServer can be embedded to have forward compatible implementations.
-type UnimplementedClusterServer struct {
-}
-
-func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
-}
-func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
-}
-func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
-}
-func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
-}
-
-func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
-	s.RegisterService(&_Cluster_serviceDesc, srv)
-}
-
-func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberAddRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberAdd(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberAdd",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberRemoveRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberRemove(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberRemove",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberUpdateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberUpdate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MemberListRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(ClusterServer).MemberList(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Cluster/MemberList",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Cluster_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Cluster",
-	HandlerType: (*ClusterServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "MemberAdd",
-			Handler:    _Cluster_MemberAdd_Handler,
-		},
-		{
-			MethodName: "MemberRemove",
-			Handler:    _Cluster_MemberRemove_Handler,
-		},
-		{
-			MethodName: "MemberUpdate",
-			Handler:    _Cluster_MemberUpdate_Handler,
-		},
-		{
-			MethodName: "MemberList",
-			Handler:    _Cluster_MemberList_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "rpc.proto",
-}
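A short sketch of the Cluster API, listing the current membership (conn and ctx reused from the sketches above is an assumption):

cl := NewClusterClient(conn)
members, err := cl.MemberList(ctx, &MemberListRequest{})
if err != nil {
	log.Fatalf("member list: %v", err)
}
for _, m := range members.Members {
	log.Printf("member %x name=%q peers=%v clients=%v", m.ID, m.Name, m.PeerURLs, m.ClientURLs)
}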
-
-// MaintenanceClient is the client API for Maintenance service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MaintenanceClient interface {
-	// Alarm activates, deactivates, and queries alarms regarding cluster health.
-	Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
-	// Status gets the status of the member.
-	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
-	// Defragment defragments a member's backend database to recover storage space.
-	Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
-	// Hash computes the hash of the KV's backend.
-	// This is designed for testing; do not use this in production when there
-	// are ongoing transactions.
-	Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
-	// HashKV computes the hash of all MVCC keys up to a given revision.
-	HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
-	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
-	Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
-	// MoveLeader requests the current leader node to transfer its leadership to the given transferee.
-	MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
-}
-
-type maintenanceClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
-	return &maintenanceClient{cc}
-}
-
-func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
-	out := new(AlarmResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
-	out := new(StatusResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
-	out := new(DefragmentResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
-	out := new(HashResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
-	out := new(HashKVResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &maintenanceSnapshotClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type Maintenance_SnapshotClient interface {
-	Recv() (*SnapshotResponse, error)
-	grpc.ClientStream
-}
-
-type maintenanceSnapshotClient struct {
-	grpc.ClientStream
-}
-
-func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
-	m := new(SnapshotResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
-	out := new(MoveLeaderResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// MaintenanceServer is the server API for Maintenance service.
-type MaintenanceServer interface {
-	// Alarm activates, deactivates, and queries alarms regarding cluster health.
-	Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
-	// Status gets the status of the member.
-	Status(context.Context, *StatusRequest) (*StatusResponse, error)
-	// Defragment defragments a member's backend database to recover storage space.
-	Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
-	// Hash computes the hash of the KV's backend.
-	// This is designed for testing; do not use this in production when there
-	// are ongoing transactions.
-	Hash(context.Context, *HashRequest) (*HashResponse, error)
-	// HashKV computes the hash of all MVCC keys up to a given revision.
-	HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
-	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
-	Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
-	// MoveLeader requests the current leader node to transfer its leadership to the given transferee.
-	MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
-}
-
-// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
-type UnimplementedMaintenanceServer struct {
-}
-
-func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
-}
-func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
-}
-func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
-}
-func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
-}
-func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
-}
-func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
-	return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
-}
-func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
-}
-
-func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
-	s.RegisterService(&_Maintenance_serviceDesc, srv)
-}
-
-func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AlarmRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Alarm(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Alarm",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(StatusRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Status(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Status",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DefragmentRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Defragment(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Defragment",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(HashRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).Hash(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/Hash",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(HashKVRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).HashKV(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/HashKV",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
-	m := new(SnapshotRequest)
-	if err := stream.RecvMsg(m); err != nil {
-		return err
-	}
-	return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
-}
-
-type Maintenance_SnapshotServer interface {
-	Send(*SnapshotResponse) error
-	grpc.ServerStream
-}
-
-type maintenanceSnapshotServer struct {
-	grpc.ServerStream
-}
-
-func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(MoveLeaderRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(MaintenanceServer).MoveLeader(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Maintenance_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Maintenance",
-	HandlerType: (*MaintenanceServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "Alarm",
-			Handler:    _Maintenance_Alarm_Handler,
-		},
-		{
-			MethodName: "Status",
-			Handler:    _Maintenance_Status_Handler,
-		},
-		{
-			MethodName: "Defragment",
-			Handler:    _Maintenance_Defragment_Handler,
-		},
-		{
-			MethodName: "Hash",
-			Handler:    _Maintenance_Hash_Handler,
-		},
-		{
-			MethodName: "HashKV",
-			Handler:    _Maintenance_HashKV_Handler,
-		},
-		{
-			MethodName: "MoveLeader",
-			Handler:    _Maintenance_MoveLeader_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "Snapshot",
-			Handler:       _Maintenance_Snapshot_Handler,
-			ServerStreams: true,
-		},
-	},
-	Metadata: "rpc.proto",
-}
-
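The ServiceDesc above is how older grpc-go codegen wires handlers into a server: RegisterMaintenanceServer hands the table to a concrete *grpc.Server, and each MethodDesc/StreamDesc maps a method name to the matching _Maintenance_*_Handler. A minimal sketch of consuming it, with net and google.golang.org/grpc imported; the maintenanceServer type and the listen address are illustrative and not part of this diff:

	// maintenanceServer embeds the generated stub so any RPC left
	// unimplemented returns codes.Unimplemented instead of breaking the build.
	type maintenanceServer struct {
		UnimplementedMaintenanceServer
	}

	func serveMaintenance() error {
		lis, err := net.Listen("tcp", "localhost:2379") // illustrative address
		if err != nil {
			return err
		}
		s := grpc.NewServer()
		// Installs _Maintenance_serviceDesc into the server's method table.
		RegisterMaintenanceServer(s, &maintenanceServer{})
		return s.Serve(lis)
	}
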
-// AuthClient is the client API for Auth service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type AuthClient interface {
-	// AuthEnable enables authentication.
-	AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
-	// AuthDisable disables authentication.
-	AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
-	// Authenticate processes an authenticate request.
-	Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
-	// UserAdd adds a new user.
-	UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
-	// UserGet gets detailed user information.
-	UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
-	// UserList gets a list of all users.
-	UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
-	// UserDelete deletes a specified user.
-	UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
-	// UserChangePassword changes the password of a specified user.
-	UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
-	// UserGrantRole grants a role to a specified user.
-	UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
-	// UserRevokeRole revokes a role from a specified user.
-	UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
-	// RoleAdd adds a new role.
-	RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
-	// RoleGet gets detailed role information.
-	RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
-	// RoleList gets a list of all roles.
-	RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
-	// RoleDelete deletes a specified role.
-	RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
-	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
-	RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
-	// RoleRevokePermission revokes a key or range permission of a specified role.
-	RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
-}
-
-type authClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewAuthClient(cc *grpc.ClientConn) AuthClient {
-	return &authClient{cc}
-}
-
-func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
-	out := new(AuthEnableResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
-	out := new(AuthDisableResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
-	out := new(AuthenticateResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
-	out := new(AuthUserAddResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
-	out := new(AuthUserGetResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
-	out := new(AuthUserListResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
-	out := new(AuthUserDeleteResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
-	out := new(AuthUserChangePasswordResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
-	out := new(AuthUserGrantRoleResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
-	out := new(AuthUserRevokeRoleResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
-	out := new(AuthRoleAddResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
-	out := new(AuthRoleGetResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
-	out := new(AuthRoleListResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
-	out := new(AuthRoleDeleteResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
-	out := new(AuthRoleGrantPermissionResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
-	out := new(AuthRoleRevokePermissionResponse)
-	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
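Each of these client methods is a thin wrapper over ClientConn.Invoke with a hard-coded full method name. A hedged usage sketch, assuming context, log, time, grpc, and google.golang.org/grpc/credentials/insecure are imported; the endpoint, credentials choice, and login values are illustrative:

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.Dial("localhost:2379",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	auth := NewAuthClient(conn)
	// Issues /etcdserverpb.Auth/Authenticate over the connection.
	resp, err := auth.Authenticate(ctx, &AuthenticateRequest{Name: "root", Password: "secret"})
	if err != nil {
		log.Fatal(err)
	}
	_ = resp.Token // AuthenticateResponse carries the session token
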
-// AuthServer is the server API for Auth service.
-type AuthServer interface {
-	// AuthEnable enables authentication.
-	AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
-	// AuthDisable disables authentication.
-	AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
-	// Authenticate processes an authenticate request.
-	Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
-	// UserAdd adds a new user.
-	UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
-	// UserGet gets detailed user information.
-	UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
-	// UserList gets a list of all users.
-	UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
-	// UserDelete deletes a specified user.
-	UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
-	// UserChangePassword changes the password of a specified user.
-	UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
-	// UserGrantRole grants a role to a specified user.
-	UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
-	// UserRevokeRole revokes a role from a specified user.
-	UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
-	// RoleAdd adds a new role.
-	RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
-	// RoleGet gets detailed role information.
-	RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
-	// RoleList gets a list of all roles.
-	RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
-	// RoleDelete deletes a specified role.
-	RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
-	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
-	RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
-	// RoleRevokePermission revokes a key or range permission of a specified role.
-	RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
-}
-
-// UnimplementedAuthServer can be embedded to have forward-compatible implementations.
-type UnimplementedAuthServer struct {
-}
-
-func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
-}
-func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
-}
-func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
-}
-func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
-}
-func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
-}
-func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
-}
-func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
-}
-func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
-}
-func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
-}
-func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
-}
-func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
-}
-func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
-}
-func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
-}
-func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
-}
-func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
-}
-func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
-}
-
-func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
-	s.RegisterService(&_Auth_serviceDesc, srv)
-}
-
-func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthEnableRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).AuthEnable(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/AuthEnable",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthDisableRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).AuthDisable(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/AuthDisable",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthenticateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).Authenticate(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/Authenticate",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserAddRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserAdd(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserAdd",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserGetRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserGet(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserGet",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserListRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserList(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserList",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserDeleteRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserDelete(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserDelete",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserChangePasswordRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserChangePassword(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserChangePassword",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserGrantRoleRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserGrantRole(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserGrantRole",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthUserRevokeRoleRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).UserRevokeRole(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleAddRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleAdd(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleAdd",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleGetRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleGet(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleGet",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleListRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleList(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleList",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleDeleteRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleDelete(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleDelete",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleGrantPermissionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleGrantPermission(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AuthRoleRevokePermissionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(AuthServer).RoleRevokePermission(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _Auth_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "etcdserverpb.Auth",
-	HandlerType: (*AuthServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "AuthEnable",
-			Handler:    _Auth_AuthEnable_Handler,
-		},
-		{
-			MethodName: "AuthDisable",
-			Handler:    _Auth_AuthDisable_Handler,
-		},
-		{
-			MethodName: "Authenticate",
-			Handler:    _Auth_Authenticate_Handler,
-		},
-		{
-			MethodName: "UserAdd",
-			Handler:    _Auth_UserAdd_Handler,
-		},
-		{
-			MethodName: "UserGet",
-			Handler:    _Auth_UserGet_Handler,
-		},
-		{
-			MethodName: "UserList",
-			Handler:    _Auth_UserList_Handler,
-		},
-		{
-			MethodName: "UserDelete",
-			Handler:    _Auth_UserDelete_Handler,
-		},
-		{
-			MethodName: "UserChangePassword",
-			Handler:    _Auth_UserChangePassword_Handler,
-		},
-		{
-			MethodName: "UserGrantRole",
-			Handler:    _Auth_UserGrantRole_Handler,
-		},
-		{
-			MethodName: "UserRevokeRole",
-			Handler:    _Auth_UserRevokeRole_Handler,
-		},
-		{
-			MethodName: "RoleAdd",
-			Handler:    _Auth_RoleAdd_Handler,
-		},
-		{
-			MethodName: "RoleGet",
-			Handler:    _Auth_RoleGet_Handler,
-		},
-		{
-			MethodName: "RoleList",
-			Handler:    _Auth_RoleList_Handler,
-		},
-		{
-			MethodName: "RoleDelete",
-			Handler:    _Auth_RoleDelete_Handler,
-		},
-		{
-			MethodName: "RoleGrantPermission",
-			Handler:    _Auth_RoleGrantPermission_Handler,
-		},
-		{
-			MethodName: "RoleRevokePermission",
-			Handler:    _Auth_RoleRevokePermission_Handler,
-		},
-	},
-	Streams:  []grpc.StreamDesc{},
-	Metadata: "rpc.proto",
-}
-
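This whole client/server section is being dropped because it targets grpc-go's older code-generation surface: NewAuthClient takes a concrete *grpc.ClientConn and RegisterAuthServer takes a concrete *grpc.Server, both of which newer generators replace with interfaces. A sketch of the typical replacement shape produced by current protoc-gen-go-grpc, not a quote of the regenerated code in this change:

	// Newer generated code accepts interfaces, so any conforming
	// implementation (including interceptor-wrapping ones) can be passed in.
	func NewAuthClient(cc grpc.ClientConnInterface) AuthClient { return &authClient{cc} }

	func RegisterAuthServer(s grpc.ServiceRegistrar, srv AuthServer) {
		s.RegisterService(&Auth_ServiceDesc, srv)
	}
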
-func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.RaftTerm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.MemberId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.ClusterId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
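The MarshalToSizedBuffer functions in the rest of this file all share one gogo/protobuf idiom: they fill the buffer from the end toward index 0, writing each field's payload first and its key byte after (so the key lands in front of the payload when read forward), and they visit fields in descending field-number order so the wire output comes out ascending. The key byte is (fieldNumber << 3) | wireType, which is why ResponseHeader writes 0x20, 0x18, 0x10, 0x8 for fields 4 down to 1. A tiny illustrative helper; the name tag is hypothetical and not from this file:

	// tag computes a single-byte protobuf field key: (fieldNumber << 3) | wireType.
	// For the varint (wire type 0) fields of ResponseHeader:
	//   tag(1, 0) == 0x08 (cluster_id)   tag(3, 0) == 0x18 (revision)
	//   tag(2, 0) == 0x10 (member_id)    tag(4, 0) == 0x20 (raft_term)
	func tag(fieldNumber, wireType int) byte {
		return byte(fieldNumber<<3 | wireType)
	}
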
-func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.MaxCreateRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
-		i--
-		dAtA[i] = 0x68
-	}
-	if m.MinCreateRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
-		i--
-		dAtA[i] = 0x60
-	}
-	if m.MaxModRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
-		i--
-		dAtA[i] = 0x58
-	}
-	if m.MinModRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
-		i--
-		dAtA[i] = 0x50
-	}
-	if m.CountOnly {
-		i--
-		if m.CountOnly {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x48
-	}
-	if m.KeysOnly {
-		i--
-		if m.KeysOnly {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x40
-	}
-	if m.Serializable {
-		i--
-		if m.Serializable {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.SortTarget != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
-		i--
-		dAtA[i] = 0x30
-	}
-	if m.SortOrder != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Limit != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Count != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Count))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.More {
-		i--
-		if m.More {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Kvs) > 0 {
-		for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *PutRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.IgnoreLease {
-		i--
-		if m.IgnoreLease {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x30
-	}
-	if m.IgnoreValue {
-		i--
-		if m.IgnoreValue {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.PrevKv {
-		i--
-		if m.PrevKv {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Lease != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Value) > 0 {
-		i -= len(m.Value)
-		copy(dAtA[i:], m.Value)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *PutResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.PrevKv != nil {
-		{
-			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.PrevKv {
-		i--
-		if m.PrevKv {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.PrevKvs) > 0 {
-		for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.Deleted != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RequestOp) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Request != nil {
-		{
-			size := m.Request.Size()
-			i -= size
-			if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestRange != nil {
-		{
-			size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestPut != nil {
-		{
-			size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestDeleteRange != nil {
-		{
-			size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.RequestTxn != nil {
-		{
-			size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	return len(dAtA) - i, nil
-}
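RequestOp's oneof works by storing a wrapper type in the Request interface field; the outer MarshalToSizedBuffer delegates to the wrapper, which writes its own length-delimited key (0xa, 0x12, 0x1a, 0x22 for fields 1 through 4 at wire type 2). A short sketch of building and encoding one, using only types declared in this file (log assumed imported):

	op := &RequestOp{
		Request: &RequestOp_RequestPut{
			RequestPut: &PutRequest{Key: []byte("k"), Value: []byte("v")},
		},
	}
	b, err := op.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	// b[0] == 0x12: field 2 (request_put), wire type 2 — written last
	// during the backward fill and therefore first on the wire.
	_ = b
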
-func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Response != nil {
-		{
-			size := m.Response.Size()
-			i -= size
-			if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponseRange != nil {
-		{
-			size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponsePut != nil {
-		{
-			size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponseDeleteRange != nil {
-		{
-			size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ResponseTxn != nil {
-		{
-			size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x22
-	}
-	return len(dAtA) - i, nil
-}
-func (m *Compare) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x4
-		i--
-		dAtA[i] = 0x82
-	}
-	if m.TargetUnion != nil {
-		{
-			size := m.TargetUnion.Size()
-			i -= size
-			if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.Target != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Target))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Result != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Result))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
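Compare is the one message here whose key does not fit in a single byte: range_end is field 64, so its key 64<<3|2 = 514 needs a two-byte varint, which the backward fill writes as 0x4 then 0x82 (reading forward: 0x82 0x04, exactly the pair above). A quick check with the standard library, assuming encoding/binary and fmt are imported:

	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(64<<3|2)) // key for field 64, wire type 2
	fmt.Printf("% x\n", buf[:n])                 // prints: 82 04
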
-func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.Version))
-	i--
-	dAtA[i] = 0x20
-	return len(dAtA) - i, nil
-}
-func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
-	i--
-	dAtA[i] = 0x28
-	return len(dAtA) - i, nil
-}
-func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
-	i--
-	dAtA[i] = 0x30
-	return len(dAtA) - i, nil
-}
-func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.Value != nil {
-		i -= len(m.Value)
-		copy(dAtA[i:], m.Value)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
-		i--
-		dAtA[i] = 0x3a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
-	i--
-	dAtA[i] = 0x40
-	return len(dAtA) - i, nil
-}
-func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Failure) > 0 {
-		for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Success) > 0 {
-		for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if len(m.Compare) > 0 {
-		for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Responses) > 0 {
-		for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.Succeeded {
-		i--
-		if m.Succeeded {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Physical {
-		i--
-		if m.Physical {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Revision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.CompactRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.Hash != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *HashResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Hash != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Blob) > 0 {
-		i -= len(m.Blob)
-		copy(dAtA[i:], m.Blob)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.RemainingBytes != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.RequestUnion != nil {
-		{
-			size := m.RequestUnion.Size()
-			i -= size
-			if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
-				return 0, err
-			}
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.CreateRequest != nil {
-		{
-			size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.CancelRequest != nil {
-		{
-			size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	return len(dAtA) - i, nil
-}
-func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
-	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	if m.ProgressRequest != nil {
-		{
-			size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	return len(dAtA) - i, nil
-}
-func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Fragment {
-		i--
-		if m.Fragment {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x40
-	}
-	if m.WatchId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
-		i--
-		dAtA[i] = 0x38
-	}
-	if m.PrevKv {
-		i--
-		if m.PrevKv {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x30
-	}
-	if len(m.Filters) > 0 {
-		dAtA22 := make([]byte, len(m.Filters)*10)
-		var j21 int
-		for _, num := range m.Filters {
-			for num >= 1<<7 {
-				dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
-				num >>= 7
-				j21++
-			}
-			dAtA22[j21] = uint8(num)
-			j21++
-		}
-		i -= j21
-		copy(dAtA[i:], dAtA22[:j21])
-		i = encodeVarintRpc(dAtA, i, uint64(j21))
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.ProgressNotify {
-		i--
-		if m.ProgressNotify {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.StartRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.WatchId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Events) > 0 {
-		for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x5a
-		}
-	}
-	if m.Fragment {
-		i--
-		if m.Fragment {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x38
-	}
-	if len(m.CancelReason) > 0 {
-		i -= len(m.CancelReason)
-		copy(dAtA[i:], m.CancelReason)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
-		i--
-		dAtA[i] = 0x32
-	}
-	if m.CompactRevision != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Canceled {
-		i--
-		if m.Canceled {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.Created {
-		i--
-		if m.Created {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.WatchId != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Error) > 0 {
-		i -= len(m.Error)
-		copy(dAtA[i:], m.Error)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
-		i--
-		dAtA[i] = 0x22
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Keys {
-		i--
-		if m.Keys {
-			dAtA[i] = 1
-		} else {
-			dAtA[i] = 0
-		}
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Keys) > 0 {
-		for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Keys[iNdEx])
-			copy(dAtA[i:], m.Keys[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
-			i--
-			dAtA[i] = 0x2a
-		}
-	}
-	if m.GrantedTTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.TTL != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Leases) > 0 {
-		for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Member) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Member) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.ClientURLs) > 0 {
-		for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.ClientURLs[iNdEx])
-			copy(dAtA[i:], m.ClientURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
-			i--
-			dAtA[i] = 0x22
-		}
-	}
-	if len(m.PeerURLs) > 0 {
-		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.PeerURLs[iNdEx])
-			copy(dAtA[i:], m.PeerURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.PeerURLs) > 0 {
-		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.PeerURLs[iNdEx])
-			copy(dAtA[i:], m.PeerURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
-			i--
-			dAtA[i] = 0xa
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x1a
-		}
-	}
-	if m.Member != nil {
-		{
-			size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.PeerURLs) > 0 {
-		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.PeerURLs[iNdEx])
-			copy(dAtA[i:], m.PeerURLs[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.ID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Members) > 0 {
-		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.TargetID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Alarm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.MemberID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.Action != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Action))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Alarm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
-		i--
-		dAtA[i] = 0x10
-	}
-	if m.MemberID != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Alarms) > 0 {
-		for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.RaftTerm != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
-		i--
-		dAtA[i] = 0x30
-	}
-	if m.RaftIndex != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
-		i--
-		dAtA[i] = 0x28
-	}
-	if m.Leader != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.DbSize != 0 {
-		i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
-		i--
-		dAtA[i] = 0x18
-	}
-	if len(m.Version) > 0 {
-		i -= len(m.Version)
-		copy(dAtA[i:], m.Version)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Password) > 0 {
-		i -= len(m.Password)
-		copy(dAtA[i:], m.Password)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.User) > 0 {
-		i -= len(m.User)
-		copy(dAtA[i:], m.User)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Perm != nil {
-		{
-			size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Name) > 0 {
-		i -= len(m.Name)
-		copy(dAtA[i:], m.Name)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.RangeEnd) > 0 {
-		i -= len(m.RangeEnd)
-		copy(dAtA[i:], m.RangeEnd)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
-		i--
-		dAtA[i] = 0x1a
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if len(m.Role) > 0 {
-		i -= len(m.Role)
-		copy(dAtA[i:], m.Role)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Token) > 0 {
-		i -= len(m.Token)
-		copy(dAtA[i:], m.Token)
-		i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Roles) > 0 {
-		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Roles[iNdEx])
-			copy(dAtA[i:], m.Roles[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Perm) > 0 {
-		for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRpc(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Roles) > 0 {
-		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Roles[iNdEx])
-			copy(dAtA[i:], m.Roles[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Users) > 0 {
-		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
-			i -= len(m.Users[iNdEx])
-			copy(dAtA[i:], m.Users[iNdEx])
-			i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
-			i--
-			dAtA[i] = 0x12
-		}
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Header != nil {
-		{
-			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintRpc(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
-	offset -= sovRpc(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *ResponseHeader) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ClusterId != 0 {
-		n += 1 + sovRpc(uint64(m.ClusterId))
-	}
-	if m.MemberId != 0 {
-		n += 1 + sovRpc(uint64(m.MemberId))
-	}
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.RaftTerm != 0 {
-		n += 1 + sovRpc(uint64(m.RaftTerm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RangeRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Limit != 0 {
-		n += 1 + sovRpc(uint64(m.Limit))
-	}
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.SortOrder != 0 {
-		n += 1 + sovRpc(uint64(m.SortOrder))
-	}
-	if m.SortTarget != 0 {
-		n += 1 + sovRpc(uint64(m.SortTarget))
-	}
-	if m.Serializable {
-		n += 2
-	}
-	if m.KeysOnly {
-		n += 2
-	}
-	if m.CountOnly {
-		n += 2
-	}
-	if m.MinModRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MinModRevision))
-	}
-	if m.MaxModRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MaxModRevision))
-	}
-	if m.MinCreateRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MinCreateRevision))
-	}
-	if m.MaxCreateRevision != 0 {
-		n += 1 + sovRpc(uint64(m.MaxCreateRevision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RangeResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Kvs) > 0 {
-		for _, e := range m.Kvs {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.More {
-		n += 2
-	}
-	if m.Count != 0 {
-		n += 1 + sovRpc(uint64(m.Count))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *PutRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Value)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Lease != 0 {
-		n += 1 + sovRpc(uint64(m.Lease))
-	}
-	if m.PrevKv {
-		n += 2
-	}
-	if m.IgnoreValue {
-		n += 2
-	}
-	if m.IgnoreLease {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *PutResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.PrevKv != nil {
-		l = m.PrevKv.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DeleteRangeRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.PrevKv {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DeleteRangeResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Deleted != 0 {
-		n += 1 + sovRpc(uint64(m.Deleted))
-	}
-	if len(m.PrevKvs) > 0 {
-		for _, e := range m.PrevKvs {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RequestOp) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Request != nil {
-		n += m.Request.Size()
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *RequestOp_RequestRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestRange != nil {
-		l = m.RequestRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *RequestOp_RequestPut) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestPut != nil {
-		l = m.RequestPut.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *RequestOp_RequestDeleteRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestDeleteRange != nil {
-		l = m.RequestDeleteRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *RequestOp_RequestTxn) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestTxn != nil {
-		l = m.RequestTxn.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Response != nil {
-		n += m.Response.Size()
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *ResponseOp_ResponseRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponseRange != nil {
-		l = m.ResponseRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp_ResponsePut) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponsePut != nil {
-		l = m.ResponsePut.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponseDeleteRange != nil {
-		l = m.ResponseDeleteRange.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *ResponseOp_ResponseTxn) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ResponseTxn != nil {
-		l = m.ResponseTxn.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *Compare) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Result != 0 {
-		n += 1 + sovRpc(uint64(m.Result))
-	}
-	if m.Target != 0 {
-		n += 1 + sovRpc(uint64(m.Target))
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.TargetUnion != nil {
-		n += m.TargetUnion.Size()
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 2 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Compare_Version) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.Version))
-	return n
-}
-func (m *Compare_CreateRevision) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.CreateRevision))
-	return n
-}
-func (m *Compare_ModRevision) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.ModRevision))
-	return n
-}
-func (m *Compare_Value) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Value != nil {
-		l = len(m.Value)
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *Compare_Lease) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRpc(uint64(m.Lease))
-	return n
-}
-func (m *TxnRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Compare) > 0 {
-		for _, e := range m.Compare {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if len(m.Success) > 0 {
-		for _, e := range m.Success {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if len(m.Failure) > 0 {
-		for _, e := range m.Failure {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *TxnResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Succeeded {
-		n += 2
-	}
-	if len(m.Responses) > 0 {
-		for _, e := range m.Responses {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *CompactionRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.Physical {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *CompactionResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashKVRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Revision != 0 {
-		n += 1 + sovRpc(uint64(m.Revision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashKVResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Hash != 0 {
-		n += 1 + sovRpc(uint64(m.Hash))
-	}
-	if m.CompactRevision != 0 {
-		n += 1 + sovRpc(uint64(m.CompactRevision))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HashResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Hash != 0 {
-		n += 1 + sovRpc(uint64(m.Hash))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *SnapshotRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *SnapshotResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.RemainingBytes != 0 {
-		n += 1 + sovRpc(uint64(m.RemainingBytes))
-	}
-	l = len(m.Blob)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.RequestUnion != nil {
-		n += m.RequestUnion.Size()
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchRequest_CreateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.CreateRequest != nil {
-		l = m.CreateRequest.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *WatchRequest_CancelRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.CancelRequest != nil {
-		l = m.CancelRequest.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *WatchRequest_ProgressRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ProgressRequest != nil {
-		l = m.ProgressRequest.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	return n
-}
-func (m *WatchCreateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.StartRevision != 0 {
-		n += 1 + sovRpc(uint64(m.StartRevision))
-	}
-	if m.ProgressNotify {
-		n += 2
-	}
-	if len(m.Filters) > 0 {
-		l = 0
-		for _, e := range m.Filters {
-			l += sovRpc(uint64(e))
-		}
-		n += 1 + sovRpc(uint64(l)) + l
-	}
-	if m.PrevKv {
-		n += 2
-	}
-	if m.WatchId != 0 {
-		n += 1 + sovRpc(uint64(m.WatchId))
-	}
-	if m.Fragment {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchCancelRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.WatchId != 0 {
-		n += 1 + sovRpc(uint64(m.WatchId))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchProgressRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *WatchResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.WatchId != 0 {
-		n += 1 + sovRpc(uint64(m.WatchId))
-	}
-	if m.Created {
-		n += 2
-	}
-	if m.Canceled {
-		n += 2
-	}
-	if m.CompactRevision != 0 {
-		n += 1 + sovRpc(uint64(m.CompactRevision))
-	}
-	l = len(m.CancelReason)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Fragment {
-		n += 2
-	}
-	if len(m.Events) > 0 {
-		for _, e := range m.Events {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseGrantRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseGrantResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	l = len(m.Error)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseRevokeRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseRevokeResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseKeepAliveRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseKeepAliveResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseTimeToLiveRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.Keys {
-		n += 2
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseTimeToLiveResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.TTL != 0 {
-		n += 1 + sovRpc(uint64(m.TTL))
-	}
-	if m.GrantedTTL != 0 {
-		n += 1 + sovRpc(uint64(m.GrantedTTL))
-	}
-	if len(m.Keys) > 0 {
-		for _, b := range m.Keys {
-			l = len(b)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseLeasesRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseStatus) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *LeaseLeasesResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Leases) > 0 {
-		for _, e := range m.Leases {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Member) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.PeerURLs) > 0 {
-		for _, s := range m.PeerURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if len(m.ClientURLs) > 0 {
-		for _, s := range m.ClientURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberAddRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.PeerURLs) > 0 {
-		for _, s := range m.PeerURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberAddResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Member != nil {
-		l = m.Member.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberRemoveRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberRemoveResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberUpdateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.ID != 0 {
-		n += 1 + sovRpc(uint64(m.ID))
-	}
-	if len(m.PeerURLs) > 0 {
-		for _, s := range m.PeerURLs {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberUpdateResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberListRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MemberListResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Members) > 0 {
-		for _, e := range m.Members {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DefragmentRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *DefragmentResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MoveLeaderRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.TargetID != 0 {
-		n += 1 + sovRpc(uint64(m.TargetID))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *MoveLeaderResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AlarmRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Action != 0 {
-		n += 1 + sovRpc(uint64(m.Action))
-	}
-	if m.MemberID != 0 {
-		n += 1 + sovRpc(uint64(m.MemberID))
-	}
-	if m.Alarm != 0 {
-		n += 1 + sovRpc(uint64(m.Alarm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AlarmMember) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.MemberID != 0 {
-		n += 1 + sovRpc(uint64(m.MemberID))
-	}
-	if m.Alarm != 0 {
-		n += 1 + sovRpc(uint64(m.Alarm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AlarmResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Alarms) > 0 {
-		for _, e := range m.Alarms {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *StatusRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *StatusResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Version)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.DbSize != 0 {
-		n += 1 + sovRpc(uint64(m.DbSize))
-	}
-	if m.Leader != 0 {
-		n += 1 + sovRpc(uint64(m.Leader))
-	}
-	if m.RaftIndex != 0 {
-		n += 1 + sovRpc(uint64(m.RaftIndex))
-	}
-	if m.RaftTerm != 0 {
-		n += 1 + sovRpc(uint64(m.RaftTerm))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthEnableRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthDisableRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthenticateRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserAddRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGetRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserDeleteRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserChangePasswordRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Password)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGrantRoleRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.User)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserRevokeRoleRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleAddRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGetRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserListRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleListRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleDeleteRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Name)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.Perm != nil {
-		l = m.Perm.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Role)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.RangeEnd)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthEnableResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthDisableResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthenticateResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	l = len(m.Token)
-	if l > 0 {
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserAddResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGetResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Roles) > 0 {
-		for _, s := range m.Roles {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserDeleteResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserChangePasswordResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserGrantRoleResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserRevokeRoleResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleAddResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGetResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Perm) > 0 {
-		for _, e := range m.Perm {
-			l = e.Size()
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleListResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Roles) > 0 {
-		for _, s := range m.Roles {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthUserListResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if len(m.Users) > 0 {
-		for _, s := range m.Users {
-			l = len(s)
-			n += 1 + l + sovRpc(uint64(l))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleDeleteResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Header != nil {
-		l = m.Header.Size()
-		n += 1 + l + sovRpc(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovRpc(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRpc(x uint64) (n int) {
-	return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
-			}
-			m.ClusterId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ClusterId |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
-			}
-			m.MemberId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MemberId |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
-			}
-			m.RaftTerm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RaftTerm |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RangeRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
-			}
-			m.Limit = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Limit |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
-			}
-			m.SortOrder = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
-			}
-			m.SortTarget = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Serializable = bool(v != 0)
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.KeysOnly = bool(v != 0)
-		case 9:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.CountOnly = bool(v != 0)
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
-			}
-			m.MinModRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MinModRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
-			}
-			m.MaxModRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MaxModRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 12:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
-			}
-			m.MinCreateRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MinCreateRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 13:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
-			}
-			m.MaxCreateRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MaxCreateRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RangeResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
-			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.More = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
-			}
-			m.Count = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Count |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *PutRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
-			if m.Value == nil {
-				m.Value = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
-			}
-			m.Lease = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Lease |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.PrevKv = bool(v != 0)
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.IgnoreValue = bool(v != 0)
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.IgnoreLease = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *PutResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.PrevKv == nil {
-				m.PrevKv = &mvccpb.KeyValue{}
-			}
-			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.PrevKv = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
-			}
-			m.Deleted = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Deleted |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
-			if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *RequestOp) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &RangeRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestRange{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &PutRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestPut{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &DeleteRangeRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestDeleteRange{v}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &TxnRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Request = &RequestOp_RequestTxn{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ResponseOp) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &RangeResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponseRange{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &PutResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponsePut{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &DeleteRangeResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponseDeleteRange{v}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &TxnResponse{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.Response = &ResponseOp_ResponseTxn{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Compare) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Compare: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
-			}
-			m.Result = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Result |= Compare_CompareResult(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
-			}
-			m.Target = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Target |= Compare_CompareTarget(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_Version{v}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_CreateRevision{v}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_ModRevision{v}
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := make([]byte, postIndex-iNdEx)
-			copy(v, dAtA[iNdEx:postIndex])
-			m.TargetUnion = &Compare_Value{v}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
-			}
-			var v int64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.TargetUnion = &Compare_Lease{v}
-		case 64:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TxnRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Compare = append(m.Compare, &Compare{})
-			if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Success = append(m.Success, &RequestOp{})
-			if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Failure = append(m.Failure, &RequestOp{})
-			if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *TxnResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Succeeded = bool(v != 0)
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Responses = append(m.Responses, &ResponseOp{})
-			if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Physical = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
-			}
-			m.Revision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Revision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
-			}
-			m.Hash = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Hash |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
-			}
-			m.CompactRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CompactRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HashResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
-			}
-			m.Hash = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Hash |= uint32(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
-			}
-			m.RemainingBytes = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RemainingBytes |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
-			if m.Blob == nil {
-				m.Blob = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &WatchCreateRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.RequestUnion = &WatchRequest_CreateRequest{v}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &WatchCancelRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.RequestUnion = &WatchRequest_CancelRequest{v}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			v := &WatchProgressRequest{}
-			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			m.RequestUnion = &WatchRequest_ProgressRequest{v}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
-			if m.RangeEnd == nil {
-				m.RangeEnd = []byte{}
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
-			}
-			m.StartRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.StartRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.ProgressNotify = bool(v != 0)
-		case 5:
-			if wireType == 0 {
-				var v WatchCreateRequest_FilterType
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRpc
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= WatchCreateRequest_FilterType(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.Filters = append(m.Filters, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRpc
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthRpc
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthRpc
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				if elementCount != 0 && len(m.Filters) == 0 {
-					m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v WatchCreateRequest_FilterType
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowRpc
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= WatchCreateRequest_FilterType(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.Filters = append(m.Filters, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.PrevKv = bool(v != 0)
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
-			}
-			m.WatchId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.WatchId |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Fragment = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
-			}
-			m.WatchId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.WatchId |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *WatchResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
-			}
-			m.WatchId = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.WatchId |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Created = bool(v != 0)
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Canceled = bool(v != 0)
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
-			}
-			m.CompactRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CompactRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.CancelReason = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 7:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Fragment = bool(v != 0)
-		case 11:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Events = append(m.Events, &mvccpb.Event{})
-			if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Error = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Keys = bool(v != 0)
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
-			}
-			m.TTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
-			}
-			m.GrantedTTL = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.GrantedTTL |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
-			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Leases = append(m.Leases, &LeaseStatus{})
-			if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Member) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Member: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Member == nil {
-				m.Member = &Member{}
-			}
-			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Members = append(m.Members, &Member{})
-			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
-			}
-			m.TargetID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.TargetID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
-			}
-			m.Action = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
-			}
-			m.MemberID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MemberID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
-			}
-			m.Alarm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Alarm |= AlarmType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AlarmMember) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
-			}
-			m.MemberID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.MemberID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
-			}
-			m.Alarm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Alarm |= AlarmType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Alarms = append(m.Alarms, &AlarmMember{})
-			if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *StatusRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *StatusResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Version = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
-			}
-			m.DbSize = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.DbSize |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
-			}
-			m.Leader = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Leader |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
-			}
-			m.RaftIndex = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RaftIndex |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
-			}
-			m.RaftTerm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RaftTerm |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Password = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.User = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Name = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Perm == nil {
-				m.Perm = &authpb.Permission{}
-			}
-			if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Role = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.RangeEnd = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Token = string(dAtA[iNdEx:postIndex])
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Perm = append(m.Perm, &authpb.Permission{})
-			if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
-			}
-			var stringLen uint64
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				stringLen |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			intStringLen := int(stringLen)
-			if intStringLen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + intStringLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRpc
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Header == nil {
-				m.Header = &ResponseHeader{}
-			}
-			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRpc(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRpc
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRpc(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRpc
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRpc
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRpc
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthRpc
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowRpc
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipRpc(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthRpc
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
deleted file mode 100644
index d9da43c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
+++ /dev/null
@@ -1,1075 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/mvcc/mvccpb/kv.proto";
-import "etcd/auth/authpb/auth.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-service KV {
-  // Range gets the keys in the range from the key-value store.
-  rpc Range(RangeRequest) returns (RangeResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/range"
-        body: "*"
-    };
-  }
-
-  // Put puts the given key into the key-value store.
-  // A put request increments the revision of the key-value store
-  // and generates one event in the event history.
-  rpc Put(PutRequest) returns (PutResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/put"
-        body: "*"
-    };
-  }
-
-  // DeleteRange deletes the given range from the key-value store.
-  // A delete request increments the revision of the key-value store
-  // and generates a delete event in the event history for every deleted key.
-  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/deleterange"
-        body: "*"
-    };
-  }
-
-  // Txn processes multiple requests in a single transaction.
-  // A txn request increments the revision of the key-value store
-  // and generates events with the same revision for every completed request.
-  // It is not allowed to modify the same key several times within one txn.
-  rpc Txn(TxnRequest) returns (TxnResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/txn"
-        body: "*"
-    };
-  }
-
-  // Compact compacts the event history in the etcd key-value store. The key-value
-  // store should be periodically compacted or the event history will continue to grow
-  // indefinitely.
-  rpc Compact(CompactionRequest) returns (CompactionResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/compaction"
-        body: "*"
-    };
-  }
-}
-
-service Watch {
-  // Watch watches for events happening or that have happened. Both input and output
-  // are streams; the input stream is for creating and canceling watchers and the output
-  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
-  // for several watches at once. The entire event history can be watched starting from the
-  // last compaction revision.
-  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/watch"
-        body: "*"
-    };
-  }
-}
-
-service Lease {
-  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
-  // within a given time to live period. All keys attached to the lease will be expired and
-  // deleted if the lease expires. Each expired key generates a delete event in the event history.
-  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/lease/grant"
-        body: "*"
-    };
-  }
-
-  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
-  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/lease/revoke"
-        body: "*"
-    };
-  }
-
-  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
-  // to the server and streaming keep alive responses from the server to the client.
-  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/lease/keepalive"
-        body: "*"
-    };
-  }
-
-  // LeaseTimeToLive retrieves lease information.
-  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/lease/timetolive"
-        body: "*"
-    };
-  }
-
-  // LeaseLeases lists all existing leases.
-  rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/kv/lease/leases"
-        body: "*"
-    };
-  }
-}
-
-service Cluster {
-  // MemberAdd adds a member into the cluster.
-  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/add"
-        body: "*"
-    };
-  }
-
-  // MemberRemove removes an existing member from the cluster.
-  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/remove"
-        body: "*"
-    };
-  }
-
-  // MemberUpdate updates the member configuration.
-  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/update"
-        body: "*"
-    };
-  }
-
-  // MemberList lists all the members in the cluster.
-  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/cluster/member/list"
-        body: "*"
-    };
-  }
-}
-
-service Maintenance {
-  // Alarm activates, deactivates, and queries alarms regarding cluster health.
-  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/alarm"
-        body: "*"
-    };
-  }
-
-  // Status gets the status of the member.
-  rpc Status(StatusRequest) returns (StatusResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/status"
-        body: "*"
-    };
-  }
-
-  // Defragment defragments a member's backend database to recover storage space.
-  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/defragment"
-        body: "*"
-    };
-  }
-
-  // Hash computes the hash of the KV's backend.
-  // This is designed for testing; do not use this in production when there
-  // are ongoing transactions.
-  rpc Hash(HashRequest) returns (HashResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/hash"
-        body: "*"
-    };
-  }
-
-  // HashKV computes the hash of all MVCC keys up to a given revision.
-  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/hash"
-        body: "*"
-    };
-  }
-
-  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
-  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/snapshot"
-        body: "*"
-    };
-  }
-
-  // MoveLeader requests current leader node to transfer its leadership to transferee.
-  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/maintenance/transfer-leadership"
-        body: "*"
-    };
-  }
-}
-
-service Auth {
-  // AuthEnable enables authentication.
-  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/enable"
-        body: "*"
-    };
-  }
-
-  // AuthDisable disables authentication.
-  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/disable"
-        body: "*"
-    };
-  }
-
-  // Authenticate processes an authenticate request.
-  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/authenticate"
-        body: "*"
-    };
-  }
-
-  // UserAdd adds a new user.
-  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/add"
-        body: "*"
-    };
-  }
-
-  // UserGet gets detailed user information.
-  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/get"
-        body: "*"
-    };
-  }
-
-  // UserList gets a list of all users.
-  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/list"
-        body: "*"
-    };
-  }
-
-  // UserDelete deletes a specified user.
-  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/delete"
-        body: "*"
-    };
-  }
-
-  // UserChangePassword changes the password of a specified user.
-  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/changepw"
-        body: "*"
-    };
-  }
-
-  // UserGrant grants a role to a specified user.
-  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/grant"
-        body: "*"
-    };
-  }
-
-  // UserRevokeRole revokes a role of specified user.
-  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/user/revoke"
-        body: "*"
-    };
-  }
-
-  // RoleAdd adds a new role.
-  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/add"
-        body: "*"
-    };
-  }
-
-  // RoleGet gets detailed role information.
-  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/get"
-        body: "*"
-    };
-  }
-
-  // RoleList gets lists of all roles.
-  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/list"
-        body: "*"
-    };
-  }
-
-  // RoleDelete deletes a specified role.
-  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/delete"
-        body: "*"
-    };
-  }
-
-  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
-  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/grant"
-        body: "*"
-    };
-  }
-
-  // RoleRevokePermission revokes a key or range permission of a specified role.
-  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
-      option (google.api.http) = {
-        post: "/v3beta/auth/role/revoke"
-        body: "*"
-    };
-  }
-}
-
-message ResponseHeader {
-  // cluster_id is the ID of the cluster which sent the response.
-  uint64 cluster_id = 1;
-  // member_id is the ID of the member which sent the response.
-  uint64 member_id = 2;
-  // revision is the key-value store revision when the request was applied.
-  // For watch progress responses, the header.revision indicates progress. All future events
-  // received in this stream are guaranteed to have a higher revision number than the
-  // header.revision number.
-  int64 revision = 3;
-  // raft_term is the raft term when the request was applied.
-  uint64 raft_term = 4;
-}
-
-message RangeRequest {
-  enum SortOrder {
-	NONE = 0; // default, no sorting
-	ASCEND = 1; // lowest target value first
-	DESCEND = 2; // highest target value first
-  }
-  enum SortTarget {
-	KEY = 0;
-	VERSION = 1;
-	CREATE = 2;
-	MOD = 3;
-	VALUE = 4;
-  }
-
-  // key is the first key for the range. If range_end is not given, the request only looks up key.
-  bytes key = 1;
-  // range_end is the upper bound on the requested range [key, range_end).
-  // If range_end is '\0', the range is all keys >= key.
-  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
-  // then the range request gets all keys prefixed with key.
-  // If both key and range_end are '\0', then the range request returns all keys.
-  bytes range_end = 2;
-  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
-  // it is treated as no limit.
-  int64 limit = 3;
-  // revision is the point-in-time of the key-value store to use for the range.
-  // If revision is less or equal to zero, the range is over the newest key-value store.
-  // If the revision has been compacted, ErrCompacted is returned as a response.
-  int64 revision = 4;
-
-  // sort_order is the order for returned sorted results.
-  SortOrder sort_order = 5;
-
-  // sort_target is the key-value field to use for sorting.
-  SortTarget sort_target = 6;
-
-  // serializable sets the range request to use serializable member-local reads.
-  // Range requests are linearizable by default; linearizable requests have higher
-  // latency and lower throughput than serializable requests but reflect the current
-  // consensus of the cluster. For better performance, in exchange for possible stale reads,
-  // a serializable range request is served locally without needing to reach consensus
-  // with other nodes in the cluster.
-  bool serializable = 7;
-
-  // keys_only when set returns only the keys and not the values.
-  bool keys_only = 8;
-
-  // count_only when set returns only the count of the keys in the range.
-  bool count_only = 9;
-
-  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
-  // lesser mod revisions will be filtered away.
-  int64 min_mod_revision = 10;
-
-  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
-  // greater mod revisions will be filtered away.
-  int64 max_mod_revision = 11;
-
-  // min_create_revision is the lower bound for returned key create revisions; all keys with
-  // lesser create revisions will be filtered away.
-  int64 min_create_revision = 12;
-
-  // max_create_revision is the upper bound for returned key create revisions; all keys with
-  // greater create revisions will be filtered away.
-  int64 max_create_revision = 13;
-}
-
-message RangeResponse {
-  ResponseHeader header = 1;
-  // kvs is the list of key-value pairs matched by the range request.
-  // kvs is empty when count is requested.
-  repeated mvccpb.KeyValue kvs = 2;
-  // more indicates if there are more keys to return in the requested range.
-  bool more = 3;
-  // count is set to the number of keys within the range when requested.
-  int64 count = 4;
-}
-
-message PutRequest {
-  // key is the key, in bytes, to put into the key-value store.
-  bytes key = 1;
-  // value is the value, in bytes, to associate with the key in the key-value store.
-  bytes value = 2;
-  // lease is the lease ID to associate with the key in the key-value store. A lease
-  // value of 0 indicates no lease.
-  int64 lease = 3;
-
-  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
-  // The previous key-value pair will be returned in the put response.
-  bool prev_kv = 4;
-
-  // If ignore_value is set, etcd updates the key using its current value.
-  // Returns an error if the key does not exist.
-  bool ignore_value = 5;
-
-  // If ignore_lease is set, etcd updates the key using its current lease.
-  // Returns an error if the key does not exist.
-  bool ignore_lease = 6;
-}
-
-message PutResponse {
-  ResponseHeader header = 1;
-  // if prev_kv is set in the request, the previous key-value pair will be returned.
-  mvccpb.KeyValue prev_kv = 2;
-}
-
-message DeleteRangeRequest {
-  // key is the first key to delete in the range.
-  bytes key = 1;
-  // range_end is the key following the last key to delete for the range [key, range_end).
-  // If range_end is not given, the range is defined to contain only the key argument.
-  // If range_end is one bit larger than the given key, then the range is all the keys
-  // with the prefix (the given key).
-  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
-  bytes range_end = 2;
-
-  // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
-  // The previous key-value pairs will be returned in the delete response.
-  bool prev_kv = 3;
-}
-
-message DeleteRangeResponse {
-  ResponseHeader header = 1;
-  // deleted is the number of keys deleted by the delete range request.
-  int64 deleted = 2;
-  // if prev_kv is set in the request, the previous key-value pairs will be returned.
-  repeated mvccpb.KeyValue prev_kvs = 3;
-}
-
-message RequestOp {
-  // request is a union of request types accepted by a transaction.
-  oneof request {
-    RangeRequest request_range = 1;
-    PutRequest request_put = 2;
-    DeleteRangeRequest request_delete_range = 3;
-    TxnRequest request_txn = 4;
-  }
-}
-
-message ResponseOp {
-  // response is a union of response types returned by a transaction.
-  oneof response {
-    RangeResponse response_range = 1;
-    PutResponse response_put = 2;
-    DeleteRangeResponse response_delete_range = 3;
-    TxnResponse response_txn = 4;
-  }
-}
-
-message Compare {
-  enum CompareResult {
-    EQUAL = 0;
-    GREATER = 1;
-    LESS = 2;
-    NOT_EQUAL = 3;
-  }
-  enum CompareTarget {
-    VERSION = 0;
-    CREATE = 1;
-    MOD = 2;
-    VALUE = 3;
-    LEASE = 4;
-  }
-  // result is logical comparison operation for this comparison.
-  CompareResult result = 1;
-  // target is the key-value field to inspect for the comparison.
-  CompareTarget target = 2;
-  // key is the subject key for the comparison operation.
-  bytes key = 3;
-  oneof target_union {
-    // version is the version of the given key
-    int64 version = 4;
-    // create_revision is the creation revision of the given key
-    int64 create_revision = 5;
-    // mod_revision is the last modified revision of the given key.
-    int64 mod_revision = 6;
-    // value is the value of the given key, in bytes.
-    bytes value = 7;
-    // lease is the lease id of the given key.
-    int64 lease = 8;
-    // leave room for more target_union field tags, jump to 64
-  }
-
-  // range_end compares the given target to all keys in the range [key, range_end).
-  // See RangeRequest for more details on key ranges.
-  bytes range_end = 64;
-  // TODO: fill out with most of the rest of RangeRequest fields when needed.
-}
-
-// From the Google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied
-// atomically and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may
-// check for the absence or presence of a value, or compare with a given value. Two different tests
-// in the guard may apply to the same or different entries in the database. All tests in the guard
-// are applied and MultiOp returns the results. If all tests are true, MultiOp executes t_op (see
-// item 2 below), otherwise it executes f_op (see item 3 below).
-// 2. A list of database operations called t_op. Each operation in the list is either an insert,
-// delete, or lookup operation, and applies to a single database entry. Two different operations in
-// the list may apply to the same or different entries in the database. These operations are
-// executed if guard evaluates to true.
-// 3. A list of database operations called f_op. Like t_op, but executed if guard evaluates to false.
-message TxnRequest {
-  // compare is a list of predicates representing a conjunction of terms.
-  // If the comparisons succeed, then the success requests will be processed in order,
-  // and the response will contain their respective responses in order.
-  // If the comparisons fail, then the failure requests will be processed in order,
-  // and the response will contain their respective responses in order.
-  repeated Compare compare = 1;
-  // success is a list of requests which will be applied when compare evaluates to true.
-  repeated RequestOp success = 2;
-  // failure is a list of requests which will be applied when compare evaluates to false.
-  repeated RequestOp failure = 3;
-}
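
Mapped onto the Go client, the guard/success/failure triple looks roughly like this (a sketch assuming github.com/coreos/etcd/clientv3; the If/Then/Else builder populates compare, success, and failure respectively, and the key name is a placeholder):

package main

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// createOnce puts the key only if it has never been created
// (CreateRevision == 0); otherwise it reads the existing value.
func createOnce(ctx context.Context, cli *clientv3.Client) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision("lock"), "=", 0)). // compare
		Then(clientv3.OpPut("lock", "owned")).                         // success
		Else(clientv3.OpGet("lock")).                                  // failure
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}
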
-
-message TxnResponse {
-  ResponseHeader header = 1;
-  // succeeded is set to true if the compare evaluated to true, and false otherwise.
-  bool succeeded = 2;
-  // responses is a list of responses corresponding to the results from applying
-  // success if succeeded is true or failure if succeeded is false.
-  repeated ResponseOp responses = 3;
-}
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-message CompactionRequest {
-  // revision is the key-value store revision for the compaction operation.
-  int64 revision = 1;
-  // physical is set so the RPC will wait until the compaction is physically
-  // applied to the local database such that compacted entries are totally
-  // removed from the backend database.
-  bool physical = 2;
-}
-
-message CompactionResponse {
-  ResponseHeader header = 1;
-}
-
-message HashRequest {
-}
-
-message HashKVRequest {
-  // revision is the key-value store revision for the hash operation.
-  int64 revision = 1;
-}
-
-message HashKVResponse {
-  ResponseHeader header = 1;
-  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
-  uint32 hash = 2;
-  // compact_revision is the compacted revision of key-value store when hash begins.
-  int64 compact_revision = 3;
-}
-
-message HashResponse {
-  ResponseHeader header = 1;
-  // hash is the hash value computed from the responding member's KV's backend.
-  uint32 hash = 2;
-}
-
-message SnapshotRequest {
-}
-
-message SnapshotResponse {
-  // header has the current key-value store information. The first header in the snapshot
-  // stream indicates the point in time of the snapshot.
-  ResponseHeader header = 1;
-
-  // remaining_bytes is the number of blob bytes to be sent after this message
-  uint64 remaining_bytes = 2;
-
-  // blob contains the next chunk of the snapshot in the snapshot stream.
-  bytes blob = 3;
-}
-
-message WatchRequest {
-  // request_union is a request to either create a new watcher or cancel an existing watcher.
-  oneof request_union {
-    WatchCreateRequest create_request = 1;
-    WatchCancelRequest cancel_request = 2;
-    WatchProgressRequest progress_request = 3;
-  }
-}
-
-message WatchCreateRequest {
-  // key is the key to register for watching.
-  bytes key = 1;
-  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
-  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
-  // or equal to the key argument are watched.
-  // If range_end is one larger than the given key (the key plus one),
-  // then all keys with the prefix (the given key) will be watched.
-  bytes range_end = 2;
-  // start_revision is an optional revision to watch from (inclusive).
-  // If no start_revision is given, watching starts from "now".
-  int64 start_revision = 3;
-  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
-  // no events to the new watcher if there are no recent events. It is useful when clients
-  // wish to recover a disconnected watcher starting from a recent known revision.
-  // The etcd server may decide how often it will send notifications based on current load.
-  bool progress_notify = 4;
-
-  enum FilterType {
-  // filter out put event.
-  NOPUT = 0;
-  // filter out delete event.
-  NODELETE = 1;
-  }
-  // filters filter the events on the server side before they are sent back to the watcher.
-  repeated FilterType filters = 5;
-
-  // If prev_kv is set, created watcher gets the previous KV before the event happens.
-  // If the previous KV is already compacted, nothing will be returned.
-  bool prev_kv = 6;
-
-  // If watch_id is provided and non-zero, it will be assigned to this watcher.
-  // Since creating a watcher in etcd is not a synchronous operation,
-  // this can be used to ensure that ordering is correct when creating multiple
-  // watchers on the same stream. Creating a watcher with an ID already in
-  // use on the stream will cause an error to be returned.
-  int64 watch_id = 7;
-
-  // fragment enables splitting large revisions into multiple watch responses.
-  bool fragment = 8;
-}
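
A hedged sketch of a create request built through the Go client (assuming github.com/coreos/etcd/clientv3): WithPrefix fills in range_end, WithPrevKV sets prev_kv, and WithProgressNotify sets progress_notify. The watched prefix is a placeholder.

package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

func watchPrefix(ctx context.Context, cli *clientv3.Client) {
	ch := cli.Watch(ctx, "config/",
		clientv3.WithPrefix(),
		clientv3.WithPrevKV(),
		clientv3.WithProgressNotify(),
	)
	for resp := range ch {
		for _, ev := range resp.Events {
			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
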
-
-message WatchCancelRequest {
-  // watch_id is the watcher id to cancel so that no more events are transmitted.
-  int64 watch_id = 1;
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon as
-// possible.
-message WatchProgressRequest {
-}
-
-message WatchResponse {
-  ResponseHeader header = 1;
-  // watch_id is the ID of the watcher that corresponds to the response.
-  int64 watch_id = 2;
-  // created is set to true if the response is for a create watch request.
-  // The client should record the watch_id and expect to receive events for
-  // the created watcher from the same stream.
-  // All events sent to the created watcher will attach with the same watch_id.
-  bool created = 3;
-  // canceled is set to true if the response is for a cancel watch request.
-  // No further events will be sent to the canceled watcher.
-  bool canceled = 4;
-  // compact_revision is set to the minimum index if a watcher tries to watch
-  // at a compacted index.
-  //
-  // This happens when creating a watcher at a compacted revision or the watcher cannot
-  // catch up with the progress of the key-value store.
-  //
-  // The client should treat the watcher as canceled and should not try to create any
-  // watcher with the same start_revision again.
-  int64 compact_revision = 5;
-
-  // cancel_reason indicates the reason for canceling the watcher.
-  string cancel_reason = 6;
-
-  // fragment is true if a large watch response was split over multiple responses.
-  bool fragment = 7;
-
-  repeated mvccpb.Event events = 11;
-}
-
-message LeaseGrantRequest {
-  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
-  int64 TTL = 1;
-  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
-  int64 ID = 2;
-}
-
-message LeaseGrantResponse {
-  ResponseHeader header = 1;
-  // ID is the lease ID for the granted lease.
-  int64 ID = 2;
-  // TTL is the server chosen lease time-to-live in seconds.
-  int64 TTL = 3;
-  string error = 4;
-}
-
-message LeaseRevokeRequest {
-  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
-  int64 ID = 1;
-}
-
-message LeaseRevokeResponse {
-  ResponseHeader header = 1;
-}
-
-message LeaseKeepAliveRequest {
-  // ID is the lease ID for the lease to keep alive.
-  int64 ID = 1;
-}
-
-message LeaseKeepAliveResponse {
-  ResponseHeader header = 1;
-  // ID is the lease ID from the keep alive request.
-  int64 ID = 2;
-  // TTL is the new time-to-live for the lease.
-  int64 TTL = 3;
-}
-
-message LeaseTimeToLiveRequest {
-  // ID is the lease ID for the lease.
-  int64 ID = 1;
-  // keys is true to query all the keys attached to this lease.
-  bool keys = 2;
-}
-
-message LeaseTimeToLiveResponse {
-  ResponseHeader header = 1;
-  // ID is the lease ID from the time-to-live request.
-  int64 ID = 2;
-  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
-  int64 TTL = 3;
-  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
-  int64 grantedTTL = 4;
-  // Keys is the list of keys attached to this lease.
-  repeated bytes keys = 5;
-}
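
Tying the lease RPCs together, a minimal Go sketch (again assuming the vendored clientv3; Grant, Put with WithLease, and KeepAlive drive LeaseGrantRequest, PutRequest.lease, and LeaseKeepAliveRequest, and the key name is a placeholder):

package main

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

func leaseDemo(ctx context.Context, cli *clientv3.Client) error {
	grant, err := cli.Grant(ctx, 10) // LeaseGrantRequest{TTL: 10}
	if err != nil {
		return err
	}
	// The key is deleted automatically once the lease expires.
	if _, err := cli.Put(ctx, "session", "alive", clientv3.WithLease(grant.ID)); err != nil {
		return err
	}
	// KeepAlive streams keep-alive requests, refreshing the TTL.
	_, err = cli.KeepAlive(ctx, grant.ID)
	return err
}
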
-
-message LeaseLeasesRequest {
-}
-
-message LeaseStatus {
-  int64 ID = 1;
-  // TODO: int64 TTL = 2;
-}
-
-message LeaseLeasesResponse {
-  ResponseHeader header = 1;
-  repeated LeaseStatus leases = 2;
-}
-
-message Member {
-  // ID is the member ID for this member.
-  uint64 ID = 1;
-  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
-  string name = 2;
-  // peerURLs is the list of URLs the member exposes to the cluster for communication.
-  repeated string peerURLs = 3;
-  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
-  repeated string clientURLs = 4;
-}
-
-message MemberAddRequest {
-  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
-  repeated string peerURLs = 1;
-}
-
-message MemberAddResponse {
-  ResponseHeader header = 1;
-  // member is the member information for the added member.
-  Member member = 2;
-  // members is a list of all members after adding the new member.
-  repeated Member members = 3;
-}
-
-message MemberRemoveRequest {
-  // ID is the member ID of the member to remove.
-  uint64 ID = 1;
-}
-
-message MemberRemoveResponse {
-  ResponseHeader header = 1;
-  // members is a list of all members after removing the member.
-  repeated Member members = 2;
-}
-
-message MemberUpdateRequest {
-  // ID is the member ID of the member to update.
-  uint64 ID = 1;
-  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
-  repeated string peerURLs = 2;
-}
-
-message MemberUpdateResponse{
-  ResponseHeader header = 1;
-  // members is a list of all members after updating the member.
-  repeated Member members = 2;
-}
-
-message MemberListRequest {
-}
-
-message MemberListResponse {
-  ResponseHeader header = 1;
-  // members is a list of all members associated with the cluster.
-  repeated Member members = 2;
-}
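
For completeness, a hedged sketch of the membership RPCs via the Go client (MemberAdd carries peerURLs, MemberList returns the members list above; the peer URL is a placeholder):

package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

func addAndList(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.MemberAdd(ctx, []string{"http://10.0.0.4:2380"}); err != nil {
		return err
	}
	resp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("%x %s %v\n", m.ID, m.Name, m.PeerURLs)
	}
	return nil
}
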
-
-message DefragmentRequest {
-}
-
-message DefragmentResponse {
-  ResponseHeader header = 1;
-}
-
-message MoveLeaderRequest {
-  // targetID is the node ID for the new leader.
-  uint64 targetID = 1;
-}
-
-message MoveLeaderResponse {
-  ResponseHeader header = 1;
-}
-
-enum AlarmType {
-  NONE = 0; // default, used to query if any alarm is active
-  NOSPACE = 1; // space quota is exhausted
-  CORRUPT = 2; // kv store corruption detected
-}
-
-message AlarmRequest {
-  enum AlarmAction {
-    GET = 0;
-    ACTIVATE = 1;
-    DEACTIVATE = 2;
-  }
-  // action is the kind of alarm request to issue. The action
-  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
-  // raised alarm.
-  AlarmAction action = 1;
-  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
-  // alarm request covers all members.
-  uint64 memberID = 2;
-  // alarm is the type of alarm to consider for this request.
-  AlarmType alarm = 3;
-}
-
-message AlarmMember {
-  // memberID is the ID of the member associated with the raised alarm.
-  uint64 memberID = 1;
-  // alarm is the type of alarm which has been raised.
-  AlarmType alarm = 2;
-}
-
-message AlarmResponse {
-  ResponseHeader header = 1;
-  // alarms is a list of alarms associated with the alarm request.
-  repeated AlarmMember alarms = 2;
-}
-
-message StatusRequest {
-}
-
-message StatusResponse {
-  ResponseHeader header = 1;
-  // version is the cluster protocol version used by the responding member.
-  string version = 2;
-  // dbSize is the size of the backend database, in bytes, of the responding member.
-  int64 dbSize = 3;
-  // leader is the member ID which the responding member believes is the current leader.
-  uint64 leader = 4;
-  // raftIndex is the current raft index of the responding member.
-  uint64 raftIndex = 5;
-  // raftTerm is the current raft term of the responding member.
-  uint64 raftTerm = 6;
-}
-
-message AuthEnableRequest {
-}
-
-message AuthDisableRequest {
-}
-
-message AuthenticateRequest {
-  string name = 1;
-  string password = 2;
-}
-
-message AuthUserAddRequest {
-  string name = 1;
-  string password = 2;
-}
-
-message AuthUserGetRequest {
-  string name = 1;
-}
-
-message AuthUserDeleteRequest {
-  // name is the name of the user to delete.
-  string name = 1;
-}
-
-message AuthUserChangePasswordRequest {
-  // name is the name of the user whose password is being changed.
-  string name = 1;
-  // password is the new password for the user.
-  string password = 2;
-}
-
-message AuthUserGrantRoleRequest {
-  // user is the name of the user which should be granted a given role.
-  string user = 1;
-  // role is the name of the role to grant to the user.
-  string role = 2;
-}
-
-message AuthUserRevokeRoleRequest {
-  string name = 1;
-  string role = 2;
-}
-
-message AuthRoleAddRequest {
-  // name is the name of the role to add to the authentication system.
-  string name = 1;
-}
-
-message AuthRoleGetRequest {
-  string role = 1;
-}
-
-message AuthUserListRequest {
-}
-
-message AuthRoleListRequest {
-}
-
-message AuthRoleDeleteRequest {
-  string role = 1;
-}
-
-message AuthRoleGrantPermissionRequest {
-  // name is the name of the role which will be granted the permission.
-  string name = 1;
-  // perm is the permission to grant to the role.
-  authpb.Permission perm = 2;
-}
-
-message AuthRoleRevokePermissionRequest {
-  string role = 1;
-  string key = 2;
-  string range_end = 3;
-}
-
-message AuthEnableResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthDisableResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthenticateResponse {
-  ResponseHeader header = 1;
-  // token is an authorized token that can be used in succeeding RPCs.
-  string token = 2;
-}
-
-message AuthUserAddResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserGetResponse {
-  ResponseHeader header = 1;
-
-  repeated string roles = 2;
-}
-
-message AuthUserDeleteResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserChangePasswordResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserGrantRoleResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthUserRevokeRoleResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleAddResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleGetResponse {
-  ResponseHeader header = 1;
-
-  repeated authpb.Permission perm = 2;
-}
-
-message AuthRoleListResponse {
-  ResponseHeader header = 1;
-
-  repeated string roles = 2;
-}
-
-message AuthUserListResponse {
-  ResponseHeader header = 1;
-
-  repeated string users = 2;
-}
-
-message AuthRoleDeleteResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleGrantPermissionResponse {
-  ResponseHeader header = 1;
-}
-
-message AuthRoleRevokePermissionResponse {
-  ResponseHeader header = 1;
-}
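
And a short sketch of bootstrapping auth with these RPCs from the Go client (the user name and password are placeholders; etcd expects the root user to hold the root role before auth is enabled):

package main

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

func enableAuth(ctx context.Context, cli *clientv3.Client) error {
	if _, err := cli.UserAdd(ctx, "root", "changeme"); err != nil { // AuthUserAddRequest
		return err
	}
	if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil { // AuthUserGrantRoleRequest
		return err
	}
	_, err := cli.AuthEnable(ctx) // AuthEnableRequest
	return err
}
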
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
deleted file mode 100644
index 4679da5..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
+++ /dev/null
@@ -1,830 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: kv.proto
-
-package mvccpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Event_EventType int32
-
-const (
-	PUT    Event_EventType = 0
-	DELETE Event_EventType = 1
-)
-
-var Event_EventType_name = map[int32]string{
-	0: "PUT",
-	1: "DELETE",
-}
-
-var Event_EventType_value = map[string]int32{
-	"PUT":    0,
-	"DELETE": 1,
-}
-
-func (x Event_EventType) String() string {
-	return proto.EnumName(Event_EventType_name, int32(x))
-}
-
-func (Event_EventType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_2216fe83c9c12408, []int{1, 0}
-}
-
-type KeyValue struct {
-	// key is the key in bytes. An empty key is not allowed.
-	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	// create_revision is the revision of last creation on this key.
-	CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
-	// mod_revision is the revision of last modification on this key.
-	ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
-	// version is the version of the key. A deletion resets
-	// the version to zero and any modification of the key
-	// increases its version.
-	Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
-	// value is the value held by the key, in bytes.
-	Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
-	// lease is the ID of the lease attached to the key.
-	// When the attached lease expires, the key will be deleted.
-	// If lease is 0, then no lease is attached to the key.
-	Lease                int64    `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *KeyValue) Reset()         { *m = KeyValue{} }
-func (m *KeyValue) String() string { return proto.CompactTextString(m) }
-func (*KeyValue) ProtoMessage()    {}
-func (*KeyValue) Descriptor() ([]byte, []int) {
-	return fileDescriptor_2216fe83c9c12408, []int{0}
-}
-func (m *KeyValue) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *KeyValue) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_KeyValue.Merge(m, src)
-}
-func (m *KeyValue) XXX_Size() int {
-	return m.Size()
-}
-func (m *KeyValue) XXX_DiscardUnknown() {
-	xxx_messageInfo_KeyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValue proto.InternalMessageInfo
-
-type Event struct {
-	// type is the kind of event. If type is a PUT, it indicates
-	// new data has been stored to the key. If type is a DELETE,
-	// it indicates the key was deleted.
-	Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
-	// kv holds the KeyValue for the event.
-	// A PUT event contains the current kv pair.
-	// A PUT event with kv.Version=1 indicates the creation of a key.
-	// A DELETE/EXPIRE event contains the deleted key with
-	// its modification revision set to the revision of deletion.
-	Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
-	// prev_kv holds the key-value pair before the event happens.
-	PrevKv               *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *Event) Reset()         { *m = Event{} }
-func (m *Event) String() string { return proto.CompactTextString(m) }
-func (*Event) ProtoMessage()    {}
-func (*Event) Descriptor() ([]byte, []int) {
-	return fileDescriptor_2216fe83c9c12408, []int{1}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Event.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Event) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
-	return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
-	xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
-	proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
-	proto.RegisterType((*Event)(nil), "mvccpb.Event")
-}
-
-func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
-
-var fileDescriptor_2216fe83c9c12408 = []byte{
-	// 303 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
-	0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
-	0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
-	0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
-	0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
-	0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
-	0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
-	0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
-	0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
-	0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
-	0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
-	0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
-	0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
-	0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
-	0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
-	0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
-	0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
-	0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
-	0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
-}
-
-func (m *KeyValue) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Lease != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.Lease))
-		i--
-		dAtA[i] = 0x30
-	}
-	if len(m.Value) > 0 {
-		i -= len(m.Value)
-		copy(dAtA[i:], m.Value)
-		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
-		i--
-		dAtA[i] = 0x2a
-	}
-	if m.Version != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.Version))
-		i--
-		dAtA[i] = 0x20
-	}
-	if m.ModRevision != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
-		i--
-		dAtA[i] = 0x18
-	}
-	if m.CreateRevision != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
-		i--
-		dAtA[i] = 0x10
-	}
-	if len(m.Key) > 0 {
-		i -= len(m.Key)
-		copy(dAtA[i:], m.Key)
-		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Event) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Event) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.PrevKv != nil {
-		{
-			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintKv(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x1a
-	}
-	if m.Kv != nil {
-		{
-			size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
-			if err != nil {
-				return 0, err
-			}
-			i -= size
-			i = encodeVarintKv(dAtA, i, uint64(size))
-		}
-		i--
-		dAtA[i] = 0x12
-	}
-	if m.Type != 0 {
-		i = encodeVarintKv(dAtA, i, uint64(m.Type))
-		i--
-		dAtA[i] = 0x8
-	}
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
-	offset -= sovKv(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
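
encodeVarintKv writes protobuf base-128 varints back-to-front into a sized buffer, which makes the byte layout hard to read; the same encoding written forward, as an illustrative helper only (not part of the generated code):

// varintBytes encodes v as a protobuf varint: low 7 bits first, with the
// continuation bit 0x80 set on every byte but the last. For example,
// varintBytes(300) returns [0xAC, 0x02], matching sovKv(300) == 2.
func varintBytes(v uint64) []byte {
	var out []byte
	for v >= 1<<7 {
		out = append(out, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(out, byte(v))
}
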
-func (m *KeyValue) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = len(m.Key)
-	if l > 0 {
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.CreateRevision != 0 {
-		n += 1 + sovKv(uint64(m.CreateRevision))
-	}
-	if m.ModRevision != 0 {
-		n += 1 + sovKv(uint64(m.ModRevision))
-	}
-	if m.Version != 0 {
-		n += 1 + sovKv(uint64(m.Version))
-	}
-	l = len(m.Value)
-	if l > 0 {
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.Lease != 0 {
-		n += 1 + sovKv(uint64(m.Lease))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Event) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Type != 0 {
-		n += 1 + sovKv(uint64(m.Type))
-	}
-	if m.Kv != nil {
-		l = m.Kv.Size()
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.PrevKv != nil {
-		l = m.PrevKv.Size()
-		n += 1 + l + sovKv(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovKv(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozKv(x uint64) (n int) {
-	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KeyValue) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowKv
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
-			if m.Key == nil {
-				m.Key = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
-			}
-			m.CreateRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.CreateRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
-			}
-			m.ModRevision = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ModRevision |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
-			}
-			m.Version = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Version |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
-			if m.Value == nil {
-				m.Value = []byte{}
-			}
-			iNdEx = postIndex
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
-			}
-			m.Lease = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Lease |= int64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipKv(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Event) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowKv
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Event: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= Event_EventType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.Kv == nil {
-				m.Kv = &KeyValue{}
-			}
-			if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 3:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthKv
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthKv
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if m.PrevKv == nil {
-				m.PrevKv = &KeyValue{}
-			}
-			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipKv(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthKv
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipKv(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowKv
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowKv
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthKv
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthKv
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowKv
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipKv(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthKv
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowKv   = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
deleted file mode 100644
index 23c911b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-syntax = "proto3";
-package mvccpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-message KeyValue {
-  // key is the key in bytes. An empty key is not allowed.
-  bytes key = 1;
-  // create_revision is the revision of last creation on this key.
-  int64 create_revision = 2;
-  // mod_revision is the revision of last modification on this key.
-  int64 mod_revision = 3;
-  // version is the version of the key. A deletion resets
-  // the version to zero and any modification of the key
-  // increases its version.
-  int64 version = 4;
-  // value is the value held by the key, in bytes.
-  bytes value = 5;
-  // lease is the ID of the lease attached to the key.
-  // When the attached lease expires, the key will be deleted.
-  // If lease is 0, then no lease is attached to the key.
-  int64 lease = 6;
-}
-
-message Event {
-  enum EventType {
-    PUT = 0;
-    DELETE = 1;
-  }
-  // type is the kind of event. If type is a PUT, it indicates
-  // new data has been stored to the key. If type is a DELETE,
-  // it indicates the key was deleted.
-  EventType type = 1;
-  // kv holds the KeyValue for the event.
-  // A PUT event contains the current kv pair.
-  // A PUT event with kv.Version=1 indicates the creation of a key.
-  // A DELETE/EXPIRE event contains the deleted key with
-  // its modification revision set to the revision of deletion.
-  KeyValue kv = 2;
-
-  // prev_kv holds the key-value pair before the event happens.
-  KeyValue prev_kv = 3;
-}
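
The create-versus-update distinction encoded in kv.Version above is worth spelling out; a minimal Go sketch against the generated mvccpb types (the wording of the output strings is illustrative):

package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

// classify interprets an event per the comments above: a PUT whose
// Version == 1 is a creation, any later PUT is an update.
func classify(ev *mvccpb.Event) string {
	switch {
	case ev.Type == mvccpb.PUT && ev.Kv.Version == 1:
		return fmt.Sprintf("created %q", ev.Kv.Key)
	case ev.Type == mvccpb.PUT:
		return fmt.Sprintf("updated %q (version %d)", ev.Kv.Key, ev.Kv.Version)
	default: // mvccpb.DELETE
		return fmt.Sprintf("deleted %q at revision %d", ev.Kv.Key, ev.Kv.ModRevision)
	}
}
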
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
deleted file mode 100644
index 81b0a9d..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"log"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-// assert that "discardLogger" satisfies the "Logger" interface
-var _ Logger = &discardLogger{}
-
-// NewDiscardLogger returns a new Logger that discards everything except "fatal".
-func NewDiscardLogger() Logger { return &discardLogger{} }
-
-type discardLogger struct{}
-
-func (l *discardLogger) Info(args ...interface{})                    {}
-func (l *discardLogger) Infoln(args ...interface{})                  {}
-func (l *discardLogger) Infof(format string, args ...interface{})    {}
-func (l *discardLogger) Warning(args ...interface{})                 {}
-func (l *discardLogger) Warningln(args ...interface{})               {}
-func (l *discardLogger) Warningf(format string, args ...interface{}) {}
-func (l *discardLogger) Error(args ...interface{})                   {}
-func (l *discardLogger) Errorln(args ...interface{})                 {}
-func (l *discardLogger) Errorf(format string, args ...interface{})   {}
-func (l *discardLogger) Fatal(args ...interface{})                   { log.Fatal(args...) }
-func (l *discardLogger) Fatalln(args ...interface{})                 { log.Fatalln(args...) }
-func (l *discardLogger) Fatalf(format string, args ...interface{})   { log.Fatalf(format, args...) }
-func (l *discardLogger) V(lvl int) bool {
-	return false
-}
-func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
deleted file mode 100644
index d57e173..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"fmt"
-
-	"github.com/coreos/pkg/capnslog"
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-)
-
-var DefaultLogLevel = "info"
-
-// ConvertToZapLevel converts a log level string to a zapcore.Level.
-func ConvertToZapLevel(lvl string) zapcore.Level {
-	switch lvl {
-	case "debug":
-		return zap.DebugLevel
-	case "info":
-		return zap.InfoLevel
-	case "warn":
-		return zap.WarnLevel
-	case "error":
-		return zap.ErrorLevel
-	case "dpanic":
-		return zap.DPanicLevel
-	case "panic":
-		return zap.PanicLevel
-	case "fatal":
-		return zap.FatalLevel
-	default:
-		panic(fmt.Sprintf("unknown level %q", lvl))
-	}
-}
-
-// ConvertToCapnslogLogLevel converts a log level string to a capnslog.LogLevel.
-// TODO: deprecate this in 3.5
-func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
-	switch lvl {
-	case "debug":
-		return capnslog.DEBUG
-	case "info":
-		return capnslog.INFO
-	case "warn":
-		return capnslog.WARNING
-	case "error":
-		return capnslog.ERROR
-	case "dpanic":
-		return capnslog.CRITICAL
-	case "panic":
-		return capnslog.CRITICAL
-	case "fatal":
-		return capnslog.CRITICAL
-	default:
-		panic(fmt.Sprintf("unknown level %q", lvl))
-	}
-}
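
A small sketch of how ConvertToZapLevel is typically consumed (note that both converters panic on an unknown string, so callers should validate user input first):

package main

import (
	"go.uber.org/zap"

	"github.com/coreos/etcd/pkg/logutil"
)

// warnLevel builds an atomic zap level from the string form.
func warnLevel() zap.AtomicLevel {
	return zap.NewAtomicLevelAt(logutil.ConvertToZapLevel("warn"))
}
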
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
deleted file mode 100644
index e7da80e..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import "google.golang.org/grpc/grpclog"
-
-// Logger defines logging interface.
-// TODO: deprecate in v3.5.
-type Logger interface {
-	grpclog.LoggerV2
-
-	// Lvl returns the logger if the logger's verbosity level is >= "lvl".
-	// Otherwise, it returns a logger that discards everything.
-	Lvl(lvl int) grpclog.LoggerV2
-}
-
-// assert that "defaultLogger" satisfies the "Logger" interface
-var _ Logger = &defaultLogger{}
-
-// NewLogger wraps "grpclog.LoggerV2" so that it implements the "Logger" interface.
-//
-// For example:
-//
-//  var defaultLogger Logger
-//  g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
-//  defaultLogger = NewLogger(g)
-//
-func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
-
-type defaultLogger struct {
-	g grpclog.LoggerV2
-}
-
-func (l *defaultLogger) Info(args ...interface{})                    { l.g.Info(args...) }
-func (l *defaultLogger) Infoln(args ...interface{})                  { l.g.Info(args...) }
-func (l *defaultLogger) Infof(format string, args ...interface{})    { l.g.Infof(format, args...) }
-func (l *defaultLogger) Warning(args ...interface{})                 { l.g.Warning(args...) }
-func (l *defaultLogger) Warningln(args ...interface{})               { l.g.Warning(args...) }
-func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
-func (l *defaultLogger) Error(args ...interface{})                   { l.g.Error(args...) }
-func (l *defaultLogger) Errorln(args ...interface{})                 { l.g.Error(args...) }
-func (l *defaultLogger) Errorf(format string, args ...interface{})   { l.g.Errorf(format, args...) }
-func (l *defaultLogger) Fatal(args ...interface{})                   { l.g.Fatal(args...) }
-func (l *defaultLogger) Fatalln(args ...interface{})                 { l.g.Fatal(args...) }
-func (l *defaultLogger) Fatalf(format string, args ...interface{})   { l.g.Fatalf(format, args...) }
-func (l *defaultLogger) V(lvl int) bool                              { return l.g.V(lvl) }
-func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
-	if l.g.V(lvl) {
-		return l
-	}
-	return &discardLogger{}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
deleted file mode 100644
index 866b6f7..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/coreos/pkg/capnslog"
-)
-
-var (
-	defaultMergePeriod     = time.Second
-	defaultTimeOutputScale = 10 * time.Millisecond
-
-	outputInterval = time.Second
-)
-
-// line represents a log line that can be printed out
-// through capnslog.PackageLogger.
-type line struct {
-	level capnslog.LogLevel
-	str   string
-}
-
-func (l line) append(s string) line {
-	return line{
-		level: l.level,
-		str:   l.str + " " + s,
-	}
-}
-
-// status represents the merge status of a line.
-type status struct {
-	period time.Duration
-
-	start time.Time // start time of latest merge period
-	count int       // number of merged lines from starting
-}
-
-func (s *status) isInMergePeriod(now time.Time) bool {
-	return s.period == 0 || s.start.Add(s.period).After(now)
-}
-
-func (s *status) isEmpty() bool { return s.count == 0 }
-
-func (s *status) summary(now time.Time) string {
-	ts := s.start.Round(defaultTimeOutputScale)
-	took := now.Round(defaultTimeOutputScale).Sub(ts)
-	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
-}
-
-func (s *status) reset(now time.Time) {
-	s.start = now
-	s.count = 0
-}
-
-// MergeLogger supports merge logging, which merges repeated log lines
-// and prints summary log lines instead.
-//
-// For merge logging, MergeLogger prints out the line the first time it
-// appears. It holds subsequent identical lines printed within
-// defaultMergePeriod and prints a summary log line at the end of
-// defaultMergePeriod. It stops merging once the line no longer appears
-// within defaultMergePeriod.
-type MergeLogger struct {
-	*capnslog.PackageLogger
-
-	mu      sync.Mutex // protects statusm
-	statusm map[line]*status
-}
-
-func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
-	l := &MergeLogger{
-		PackageLogger: logger,
-		statusm:       make(map[line]*status),
-	}
-	go l.outputLoop()
-	return l
-}
-
-func (l *MergeLogger) MergeInfo(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.INFO,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.INFO,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) MergeNotice(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.NOTICE,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.NOTICE,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) MergeWarning(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.WARNING,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.WARNING,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) MergeError(entries ...interface{}) {
-	l.merge(line{
-		level: capnslog.ERROR,
-		str:   fmt.Sprint(entries...),
-	})
-}
-
-func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
-	l.merge(line{
-		level: capnslog.ERROR,
-		str:   fmt.Sprintf(format, args...),
-	})
-}
-
-func (l *MergeLogger) merge(ln line) {
-	l.mu.Lock()
-
-	// increase count if the logger is merging the line
-	if status, ok := l.statusm[ln]; ok {
-		status.count++
-		l.mu.Unlock()
-		return
-	}
-
-	// initialize status of the line
-	l.statusm[ln] = &status{
-		period: defaultMergePeriod,
-		start:  time.Now(),
-	}
-	// release the lock before IO operation
-	l.mu.Unlock()
-	// print out the line the first time it appears
-	l.PackageLogger.Logf(ln.level, ln.str)
-}
-
-func (l *MergeLogger) outputLoop() {
-	for now := range time.Tick(outputInterval) {
-		var outputs []line
-
-		l.mu.Lock()
-		for ln, status := range l.statusm {
-			if status.isInMergePeriod(now) {
-				continue
-			}
-			if status.isEmpty() {
-				delete(l.statusm, ln)
-				continue
-			}
-			outputs = append(outputs, ln.append(status.summary(now)))
-			status.reset(now)
-		}
-		l.mu.Unlock()
-
-		for _, o := range outputs {
-			l.PackageLogger.Logf(o.level, o.str)
-		}
-	}
-}
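
A hedged usage sketch for the merge logger (assuming capnslog; the repo and package names are placeholders): identical lines within defaultMergePeriod collapse into one "[merged N repeated lines in ...]" summary.

package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"github.com/coreos/pkg/capnslog"
)

func mergeDemo() {
	plog := capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp")
	ml := logutil.NewMergeLogger(plog)
	for i := 0; i < 1000; i++ {
		// Printed once, then merged into a periodic summary line.
		ml.MergeWarning("lost the TCP streaming connection with peer 1234")
	}
}
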
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
deleted file mode 100644
index 378bee0..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"github.com/coreos/pkg/capnslog"
-	"google.golang.org/grpc/grpclog"
-)
-
-// assert that "packageLogger" satisfies the "Logger" interface
-var _ Logger = &packageLogger{}
-
-// NewPackageLogger wraps "*capnslog.PackageLogger" so that it implements the "Logger" interface.
-//
-// For example:
-//
-//  var defaultLogger Logger
-//  defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot")
-//
-func NewPackageLogger(repo, pkg string) Logger {
-	return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
-}
-
-type packageLogger struct {
-	p *capnslog.PackageLogger
-}
-
-func (l *packageLogger) Info(args ...interface{})                    { l.p.Info(args...) }
-func (l *packageLogger) Infoln(args ...interface{})                  { l.p.Info(args...) }
-func (l *packageLogger) Infof(format string, args ...interface{})    { l.p.Infof(format, args...) }
-func (l *packageLogger) Warning(args ...interface{})                 { l.p.Warning(args...) }
-func (l *packageLogger) Warningln(args ...interface{})               { l.p.Warning(args...) }
-func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
-func (l *packageLogger) Error(args ...interface{})                   { l.p.Error(args...) }
-func (l *packageLogger) Errorln(args ...interface{})                 { l.p.Error(args...) }
-func (l *packageLogger) Errorf(format string, args ...interface{})   { l.p.Errorf(format, args...) }
-func (l *packageLogger) Fatal(args ...interface{})                   { l.p.Fatal(args...) }
-func (l *packageLogger) Fatalln(args ...interface{})                 { l.p.Fatal(args...) }
-func (l *packageLogger) Fatalf(format string, args ...interface{})   { l.p.Fatalf(format, args...) }
-func (l *packageLogger) V(lvl int) bool {
-	return l.p.LevelAt(capnslog.LogLevel(lvl))
-}
-func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
-	if l.p.LevelAt(capnslog.LogLevel(lvl)) {
-		return l
-	}
-	return &discardLogger{}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
deleted file mode 100644
index 2f69223..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"sort"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-)
-
-// DefaultZapLoggerConfig defines the default zap logger configuration.
-var DefaultZapLoggerConfig = zap.Config{
-	Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
-
-	Development: false,
-	Sampling: &zap.SamplingConfig{
-		Initial:    100,
-		Thereafter: 100,
-	},
-
-	Encoding: "json",
-
-	// copied from "zap.NewProductionEncoderConfig" with some updates
-	EncoderConfig: zapcore.EncoderConfig{
-		TimeKey:        "ts",
-		LevelKey:       "level",
-		NameKey:        "logger",
-		CallerKey:      "caller",
-		MessageKey:     "msg",
-		StacktraceKey:  "stacktrace",
-		LineEnding:     zapcore.DefaultLineEnding,
-		EncodeLevel:    zapcore.LowercaseLevelEncoder,
-		EncodeTime:     zapcore.ISO8601TimeEncoder,
-		EncodeDuration: zapcore.StringDurationEncoder,
-		EncodeCaller:   zapcore.ShortCallerEncoder,
-	},
-
-	// Use "/dev/null" to discard all
-	OutputPaths:      []string{"stderr"},
-	ErrorOutputPaths: []string{"stderr"},
-}
-
-// AddOutputPaths merges the given output paths into the existing ones,
-// deduplicating entries and collapsing everything to "/dev/null" when it is present.
-func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
-	outputs := make(map[string]struct{})
-	for _, v := range cfg.OutputPaths {
-		outputs[v] = struct{}{}
-	}
-	for _, v := range outputPaths {
-		outputs[v] = struct{}{}
-	}
-	outputSlice := make([]string, 0)
-	if _, ok := outputs["/dev/null"]; ok {
-		// "/dev/null" to discard all
-		outputSlice = []string{"/dev/null"}
-	} else {
-		for k := range outputs {
-			outputSlice = append(outputSlice, k)
-		}
-	}
-	cfg.OutputPaths = outputSlice
-	sort.Strings(cfg.OutputPaths)
-
-	errOutputs := make(map[string]struct{})
-	for _, v := range cfg.ErrorOutputPaths {
-		errOutputs[v] = struct{}{}
-	}
-	for _, v := range errorOutputPaths {
-		errOutputs[v] = struct{}{}
-	}
-	errOutputSlice := make([]string, 0)
-	if _, ok := errOutputs["/dev/null"]; ok {
-		// "/dev/null" to discard all
-		errOutputSlice = []string{"/dev/null"}
-	} else {
-		for k := range errOutputs {
-			errOutputSlice = append(errOutputSlice, k)
-		}
-	}
-	cfg.ErrorOutputPaths = errOutputSlice
-	sort.Strings(cfg.ErrorOutputPaths)
-
-	return cfg
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
deleted file mode 100644
index 3f48d81..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-	"google.golang.org/grpc/grpclog"
-)
-
-// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
-// It discards all INFO level gRPC logging unless debug level
-// is enabled in "*zap.Logger".
-func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
-	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
-	if err != nil {
-		return nil, err
-	}
-	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zapcore.Core"
-// and "zapcore.WriteSyncer". It discards all INFO level gRPC logging
-// unless debug level is enabled in "*zap.Logger".
-func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
-	// "AddCallerSkip" to annotate caller outside of "logutil"
-	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
-	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapGRPCLogger struct {
-	lg    *zap.Logger
-	sugar *zap.SugaredLogger
-}
-
-func (zl *zapGRPCLogger) Info(args ...interface{}) {
-	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
-		return
-	}
-	zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
-	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
-		return
-	}
-	zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
-	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
-		return
-	}
-	zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapGRPCLogger) Warning(args ...interface{}) {
-	zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
-	zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
-	zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Error(args ...interface{}) {
-	zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
-	zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
-	zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
-	zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
-	zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
-	zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapGRPCLogger) V(l int) bool {
-	// infoLog == 0
-	if l <= 0 { // debug level, then we ignore info level in gRPC
-		return !zl.lg.Core().Enabled(zapcore.DebugLevel)
-	}
-	return true
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
deleted file mode 100644
index b1788bc..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package logutil
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"os"
-	"path/filepath"
-
-	"github.com/coreos/etcd/pkg/systemd"
-
-	"github.com/coreos/go-systemd/journal"
-	"go.uber.org/zap/zapcore"
-)
-
-// NewJournalWriter wraps "io.Writer" to redirect log output
-// to the local systemd journal. If the journald send fails, it falls
-// back to writing to the original writer.
-// The decode overhead is only <30µs per write.
-// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
-func NewJournalWriter(wr io.Writer) (io.Writer, error) {
-	return &journalWriter{Writer: wr}, systemd.DialJournal()
-}
-
-type journalWriter struct {
-	io.Writer
-}
-
-// WARN: assumes that etcd uses the default field names in the zap encoder
-// config; make sure to keep this up-to-date!
-type logLine struct {
-	Level  string `json:"level"`
-	Caller string `json:"caller"`
-}
-
-func (w *journalWriter) Write(p []byte) (int, error) {
-	line := &logLine{}
-	if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
-		return 0, err
-	}
-
-	var pri journal.Priority
-	switch line.Level {
-	case zapcore.DebugLevel.String():
-		pri = journal.PriDebug
-	case zapcore.InfoLevel.String():
-		pri = journal.PriInfo
-
-	case zapcore.WarnLevel.String():
-		pri = journal.PriWarning
-	case zapcore.ErrorLevel.String():
-		pri = journal.PriErr
-
-	case zapcore.DPanicLevel.String():
-		pri = journal.PriCrit
-	case zapcore.PanicLevel.String():
-		pri = journal.PriCrit
-	case zapcore.FatalLevel.String():
-		pri = journal.PriCrit
-
-	default:
-		panic(fmt.Errorf("unknown log level: %q", line.Level))
-	}
-
-	err := journal.Send(string(p), pri, map[string]string{
-		"PACKAGE":           filepath.Dir(line.Caller),
-		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
-	})
-	if err != nil {
-		// "journal" also falls back to stderr
-		// "fmt.Fprintln(os.Stderr, s)"
-		return w.Writer.Write(p)
-	}
-	return 0, nil
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
deleted file mode 100644
index 012d688..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
-	"errors"
-
-	"github.com/coreos/etcd/raft"
-
-	"go.uber.org/zap"
-	"go.uber.org/zap/zapcore"
-)
-
-// NewRaftLogger builds "raft.Logger" from "*zap.Config".
-func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
-	if lcfg == nil {
-		return nil, errors.New("nil zap.Config")
-	}
-	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
-	if err != nil {
-		return nil, err
-	}
-	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
-func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
-	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-// NewRaftLoggerFromZapCore creates "raft.Logger" from "zapcore.Core"
-// and "zapcore.WriteSyncer".
-func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
-	// "AddCallerSkip" to annotate caller outside of "logutil"
-	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
-	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapRaftLogger struct {
-	lg    *zap.Logger
-	sugar *zap.SugaredLogger
-}
-
-func (zl *zapRaftLogger) Debug(args ...interface{}) {
-	zl.sugar.Debug(args...)
-}
-
-func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
-	zl.sugar.Debugf(format, args...)
-}
-
-func (zl *zapRaftLogger) Error(args ...interface{}) {
-	zl.sugar.Error(args...)
-}
-
-func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
-	zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapRaftLogger) Info(args ...interface{}) {
-	zl.sugar.Info(args...)
-}
-
-func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
-	zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapRaftLogger) Warning(args ...interface{}) {
-	zl.sugar.Warn(args...)
-}
-
-func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
-	zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapRaftLogger) Fatal(args ...interface{}) {
-	zl.sugar.Fatal(args...)
-}
-
-func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
-	zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapRaftLogger) Panic(args ...interface{}) {
-	zl.sugar.Panic(args...)
-}
-
-func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
-	zl.sugar.Panicf(format, args...)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
deleted file mode 100644
index b861c69..0000000
--- a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package systemd
-
-import "net"
-
-// DialJournal returns no error if the process can dial the journal socket.
-// Returns an error if the dial failed, which indicates journald is not
-// available (e.g. when running embedded etcd inside the docker daemon).
-// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
-func DialJournal() error {
-	conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
-	if conn != nil {
-		defer conn.Close()
-	}
-	return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go
deleted file mode 100644
index 1b042d9..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/id.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"strconv"
-)
-
-// ID represents a generic identifier which is canonically
-// stored as a uint64 but is typically represented as a
-// base-16 string for input/output.
-type ID uint64
-
-func (i ID) String() string {
-	return strconv.FormatUint(uint64(i), 16)
-}
-
-// IDFromString attempts to create an ID from a base-16 string.
-func IDFromString(s string) (ID, error) {
-	i, err := strconv.ParseUint(s, 16, 64)
-	return ID(i), err
-}
-
-// IDSlice implements the sort interface
-type IDSlice []ID
-
-func (p IDSlice) Len() int           { return len(p) }
-func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
-func (p IDSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go
deleted file mode 100644
index c111b0c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/set.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"reflect"
-	"sort"
-	"sync"
-)
-
-type Set interface {
-	Add(string)
-	Remove(string)
-	Contains(string) bool
-	Equals(Set) bool
-	Length() int
-	Values() []string
-	Copy() Set
-	Sub(Set) Set
-}
-
-func NewUnsafeSet(values ...string) *unsafeSet {
-	set := &unsafeSet{make(map[string]struct{})}
-	for _, v := range values {
-		set.Add(v)
-	}
-	return set
-}
-
-func NewThreadsafeSet(values ...string) *tsafeSet {
-	us := NewUnsafeSet(values...)
-	return &tsafeSet{us, sync.RWMutex{}}
-}
-
-type unsafeSet struct {
-	d map[string]struct{}
-}
-
-// Add adds a new value to the set (no-op if the value is already present)
-func (us *unsafeSet) Add(value string) {
-	us.d[value] = struct{}{}
-}
-
-// Remove removes the given value from the set
-func (us *unsafeSet) Remove(value string) {
-	delete(us.d, value)
-}
-
-// Contains returns whether the set contains the given value
-func (us *unsafeSet) Contains(value string) (exists bool) {
-	_, exists = us.d[value]
-	return exists
-}
-
-// ContainsAll returns whether the set contains all given values
-func (us *unsafeSet) ContainsAll(values []string) bool {
-	for _, s := range values {
-		if !us.Contains(s) {
-			return false
-		}
-	}
-	return true
-}
-
-// Equals returns whether the contents of two sets are identical
-func (us *unsafeSet) Equals(other Set) bool {
-	v1 := sort.StringSlice(us.Values())
-	v2 := sort.StringSlice(other.Values())
-	v1.Sort()
-	v2.Sort()
-	return reflect.DeepEqual(v1, v2)
-}
-
-// Length returns the number of elements in the set
-func (us *unsafeSet) Length() int {
-	return len(us.d)
-}
-
-// Values returns the values of the Set in an unspecified order.
-func (us *unsafeSet) Values() (values []string) {
-	values = make([]string, 0)
-	for val := range us.d {
-		values = append(values, val)
-	}
-	return values
-}
-
-// Copy creates a new Set containing the values of the original
-func (us *unsafeSet) Copy() Set {
-	cp := NewUnsafeSet()
-	for val := range us.d {
-		cp.Add(val)
-	}
-
-	return cp
-}
-
-// Sub returns a copy of the set with all elements in other removed
-func (us *unsafeSet) Sub(other Set) Set {
-	oValues := other.Values()
-	result := us.Copy().(*unsafeSet)
-
-	for _, val := range oValues {
-		if _, ok := result.d[val]; !ok {
-			continue
-		}
-		delete(result.d, val)
-	}
-
-	return result
-}
-
-type tsafeSet struct {
-	us *unsafeSet
-	m  sync.RWMutex
-}
-
-func (ts *tsafeSet) Add(value string) {
-	ts.m.Lock()
-	defer ts.m.Unlock()
-	ts.us.Add(value)
-}
-
-func (ts *tsafeSet) Remove(value string) {
-	ts.m.Lock()
-	defer ts.m.Unlock()
-	ts.us.Remove(value)
-}
-
-func (ts *tsafeSet) Contains(value string) (exists bool) {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Contains(value)
-}
-
-func (ts *tsafeSet) Equals(other Set) bool {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Equals(other)
-}
-
-func (ts *tsafeSet) Length() int {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Length()
-}
-
-func (ts *tsafeSet) Values() (values []string) {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	return ts.us.Values()
-}
-
-func (ts *tsafeSet) Copy() Set {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	usResult := ts.us.Copy().(*unsafeSet)
-	return &tsafeSet{usResult, sync.RWMutex{}}
-}
-
-func (ts *tsafeSet) Sub(other Set) Set {
-	ts.m.RLock()
-	defer ts.m.RUnlock()
-	usResult := ts.us.Sub(other).(*unsafeSet)
-	return &tsafeSet{usResult, sync.RWMutex{}}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go
deleted file mode 100644
index 9e5d03f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/urls.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"net/url"
-	"sort"
-	"strings"
-)
-
-type URLs []url.URL
-
-func NewURLs(strs []string) (URLs, error) {
-	all := make([]url.URL, len(strs))
-	if len(all) == 0 {
-		return nil, errors.New("no valid URLs given")
-	}
-	for i, in := range strs {
-		in = strings.TrimSpace(in)
-		u, err := url.Parse(in)
-		if err != nil {
-			return nil, err
-		}
-		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
-			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
-		}
-		if _, _, err := net.SplitHostPort(u.Host); err != nil {
-			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
-		}
-		if u.Path != "" {
-			return nil, fmt.Errorf("URL must not contain a path: %s", in)
-		}
-		all[i] = *u
-	}
-	us := URLs(all)
-	us.Sort()
-
-	return us, nil
-}
-
-func MustNewURLs(strs []string) URLs {
-	urls, err := NewURLs(strs)
-	if err != nil {
-		panic(err)
-	}
-	return urls
-}
-
-func (us URLs) String() string {
-	return strings.Join(us.StringSlice(), ",")
-}
-
-func (us *URLs) Sort() {
-	sort.Sort(us)
-}
-func (us URLs) Len() int           { return len(us) }
-func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
-func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }
-
-func (us URLs) StringSlice() []string {
-	out := make([]string, len(us))
-	for i := range us {
-		out[i] = us[i].String()
-	}
-
-	return out
-}
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
deleted file mode 100644
index fde22b1..0000000
--- a/vendor/github.com/coreos/etcd/raft/README.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Raft library
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
-
-Most Raft implementations have a monolithic design, including storage handling, message serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
-
-To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
-
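-A user-supplied storage layer only needs to satisfy this package's `Storage` interface. As a rough, hedged sketch (not etcd's actual persistence code; `diskStorage` and `newDiskStorage` are hypothetical names), one simple approach is to embed the provided `MemoryStorage` and layer durability on top:
-
-```go
-  // diskStorage satisfies raft.Storage by embedding the in-memory
-  // implementation; the embedding application is expected to persist
-  // entries elsewhere and replay them via Append on restart.
-  type diskStorage struct {
-    *raft.MemoryStorage
-  }
-
-  func newDiskStorage() *diskStorage {
-    return &diskStorage{raft.NewMemoryStorage()}
-  }
-```
-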
-In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
-
-A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-# Features
-
-This raft implementation is a full-featured implementation of the Raft protocol. Features include:
-
-- Leader election
-- Log replication
-- Log compaction 
-- Membership changes
-- Leadership transfer extension
-- Efficient linearizable read-only queries served by both the leader and followers
-  - leader checks with quorum and bypasses Raft log before processing read-only queries
-  - followers ask the leader for a safe read index before processing read-only queries
-- More efficient lease-based linearizable read-only queries served by both the leader and followers
-  - leader bypasses Raft log and processes read-only queries locally
-  - followers ask the leader for a safe read index before processing read-only queries
-  - this approach relies on the clocks of all the machines in the raft group
-
-This raft implementation also includes a few optional enhancements:
-
-- Optimistic pipelining to reduce log replication latency
-- Flow control for log replication
-- Batching Raft messages to reduce synchronized network I/O calls
-- Batching log entries to reduce disk synchronized I/O
-- Writing to leader's disk in parallel
-- Internal proposal redirection from followers to leader
-- Automatic stepping down when the leader loses quorum 
-
-## Notable Users
-
-- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
-- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
-- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
-- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
-- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
-- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
-
-## Usage
-
-The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a three-node cluster:
-```go
-  storage := raft.NewMemoryStorage()
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-  // Set peer list to the other nodes in the cluster.
-  // Note that they need to be started separately as well.
-  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-```
-
-To start a single-node cluster:
-```go
-  // Create storage and config as shown above.
-  // Set peer list to itself, so this node can become the leader of this single-node cluster.
-  peers := []raft.Peer{{ID: 0x01}}
-  n := raft.StartNode(c, peers)
-```
-
-To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
-```go
-  // Create storage and config as shown above.
-  n := raft.StartNode(c, nil)
-```
-
-To restart a node from previous state:
-```go
-  storage := raft.NewMemoryStorage()
-
-  // Recover the in-memory storage from persistent snapshot, state and entries.
-  storage.ApplySnapshot(snapshot)
-  storage.SetHardState(state)
-  storage.Append(entries)
-
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-
-  // Restart raft without peer information.
-  // Peer information is already included in the storage.
-  n := raft.RestartNode(c)
-```
-
-After creating a Node, the user has a few responsibilities:
-
-First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
-
-1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
-
-Third, after receiving a message from another node, pass it to Node.Step:
-
-```go
-	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
-		n.Step(ctx, m)
-	}
-```
-
-Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
-```go
-  for {
-    select {
-    case <-s.Ticker:
-      n.Tick()
-    case rd := <-s.Node.Ready():
-      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
-      send(rd.Messages)
-      if !raft.IsEmptySnap(rd.Snapshot) {
-        processSnapshot(rd.Snapshot)
-      }
-      for _, entry := range rd.CommittedEntries {
-        process(entry)
-        if entry.Type == raftpb.EntryConfChange {
-          var cc raftpb.ConfChange
-          cc.Unmarshal(entry.Data)
-          s.Node.ApplyConfChange(cc)
-        }
-      }
-      s.Node.Advance()
-    case <-s.done:
-      return
-    }
-  }
-```
-
-To propose changes to the state machine, take application data, serialize it into a byte slice, and call:
-
-```go
-	n.Propose(ctx, data)
-```
-
-If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. 
-
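-As a hedged sketch of that retry pattern (assuming the application tracks commitment itself, e.g. by watching for its own entries in CommittedEntries; `handleProposalTimeout` is a hypothetical application hook), a proposal attempt can be bounded with a context deadline:
-
-```go
-  ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
-  defer cancel()
-  if err := n.Propose(ctx, data); err != nil {
-    // Not accepted before the deadline; the caller may re-propose.
-    // Note that a nil error alone does not guarantee the entry committed.
-    handleProposalTimeout(err)
-  }
-```
-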
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
-```go
-	n.ProposeConfChange(ctx, cc)
-```
-
-After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. It must be applied to the node through:
-
-```go
-	var cc raftpb.ConfChange
-	cc.Unmarshal(data)
-	n.ApplyConfChange(cc)
-```
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-## Implementation notes
-
-This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
-
-To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
-
-This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/github.com/coreos/etcd/raft/design.md
deleted file mode 100644
index 7bc0531..0000000
--- a/vendor/github.com/coreos/etcd/raft/design.md
+++ /dev/null
@@ -1,57 +0,0 @@
-## Progress
-
-Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of every follower, and sends `replication message`s to a follower based on its progress.
-
-`replication message` is a `msgApp` with log entries.
-
-A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about a follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest entry in the next `replication message`.
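-
-In code form, this bookkeeping boils down to two counters per follower (a minimal illustration; the package's actual `Progress` struct carries more fields than shown here):
-
-```
-type Progress struct {
-	Match, Next uint64
-}
-```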
-
-A progress is in one of three states: `probe`, `replicate`, `snapshot`.
-
-```
-                            +--------------------------------------------------------+          
-                            |                  send snapshot                         |          
-                            |                                                        |          
-                  +---------+----------+                                  +----------v---------+
-              +--->       probe        |                                  |      snapshot      |
-              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
-              |   +---------+----------+                                  +--------------------+
-              |             |            1. snapshot success                                    
-              |             |               (next=snapshot.index + 1)                           
-              |             |            2. snapshot failure                                    
-              |             |               (no change)                                         
-              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
-              |             |               (match=m.index,next=match+1)                        
-receives msgAppResp(rej=true)                                                                   
-(next=match+1)|             |                                                                   
-              |             |                                                                   
-              |             |                                                                   
-              |             |   receives msgAppResp(rej=false&&index>match)                     
-              |             |   (match=m.index,next=match+1)                                    
-              |             |                                                                   
-              |             |                                                                   
-              |             |                                                                   
-              |   +---------v----------+                                                        
-              |   |     replicate      |                                                        
-              +---+  max inflight = n  |                                                        
-                  +--------------------+                                                        
-```
-
-When the progress of a follower is in the `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`.
-
-When the progress of a follower is in the `replicate` state, the leader sends `replication message`s, then optimistically increases `next` to the latest entry sent. This is an optimized state for fast replication of log entries to the follower.
-
-When the progress of a follower is in the `snapshot` state, the leader stops sending any `replication message`.
-
-A newly elected leader sets the progress of every follower to the `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message`s to the follower and probes its progress.
-
-A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress will fall back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see the open question.)
-
-A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits for the previously sent snapshot to succeed, fail, or be aborted. The progress goes back to `probe` after the sending result is applied.
-
-### Flow Control
-
-1. Limit the max size of each message sent. The max should be configurable.
-This lowers the cost in the probing state, since we limit the size per message; it also lowers the penalty when `next` is aggressively decreased too low.
-
-2. Limit the number of in-flight messages to < N when in the `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which can cause messages to be dropped and trigger a bunch of unnecessary repeated resending.
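-
-Both limits surface as knobs on this library's `Config`, as seen in the README usage examples (a sketch; only the relevant fields are shown):
-
-```
-c := &raft.Config{
-	// ...other fields as in the README examples...
-	MaxSizePerMsg:   4096, // cap on entry bytes per replication message
-	MaxInflightMsgs: 256,  // cap on in-flight messages in the replicate state
-}
-```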
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go
deleted file mode 100644
index b55c591..0000000
--- a/vendor/github.com/coreos/etcd/raft/doc.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package raft sends and receives messages in the Protocol Buffer format
-defined in the raftpb package.
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-A simple example application, _raftexample_, is also available to help illustrate
-how to use this package in practice:
-https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-Usage
-
-The primary object in raft is a Node. You either start a Node from scratch
-using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a node from scratch:
-
-  storage := raft.NewMemoryStorage()
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-
-To restart a node from previous state:
-
-  storage := raft.NewMemoryStorage()
-
-  // recover the in-memory storage from persistent
-  // snapshot, state and entries.
-  storage.ApplySnapshot(snapshot)
-  storage.SetHardState(state)
-  storage.Append(entries)
-
-  c := &Config{
-    ID:              0x01,
-    ElectionTick:    10,
-    HeartbeatTick:   1,
-    Storage:         storage,
-    MaxSizePerMsg:   4096,
-    MaxInflightMsgs: 256,
-  }
-
-  // restart raft without peer information.
-  // peer information is already included in the storage.
-  n := raft.RestartNode(c)
-
-Now that you are holding onto a Node you have a few responsibilities:
-
-First, you must read from the Node.Ready() channel and process the updates
-it contains. These steps may be performed in parallel, except as noted in step
-2.
-
-1. Write HardState, Entries, and Snapshot to persistent storage if they are
-not empty. Note that when writing an Entry with Index i, any
-previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that
-no messages be sent until the latest HardState has been persisted to disk,
-and all Entries written by any previous Ready batch (Messages may be sent while
-entries from the same batch are being persisted). To reduce the I/O latency, an
-optimization can be applied to make leader write to disk in parallel with its
-followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
-MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
-large).
-
-Note: Marshalling messages is not thread-safe; it is important that you
-make sure that no new entries are persisted while marshalling.
-The easiest way to achieve this is to serialise the messages directly inside
-your main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine.
-If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
-to apply it to the node. The configuration change may be cancelled at this point
-by setting the NodeID field to zero before calling ApplyConfChange
-(but ApplyConfChange must be called one way or the other, and the decision to cancel
-must be based solely on the state machine and not external information such as
-the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates.
-This may be done at any time after step 1, although all updates must be processed
-in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an
-implementation of the Storage interface. The provided MemoryStorage
-type can be used for this (if you repopulate its state upon a
-restart), or you can supply your own disk-backed implementation.
-
-Third, when you receive a message from another node, pass it to Node.Step:
-
-	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
-		n.Step(ctx, m)
-	}
-
-Finally, you need to call Node.Tick() at regular intervals (probably
-via a time.Ticker). Raft has two important timeouts: heartbeat and the
-election timeout. However, internally to the raft package time is
-represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
-  for {
-    select {
-    case <-s.Ticker:
-      n.Tick()
-    case rd := <-s.Node.Ready():
-      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
-      send(rd.Messages)
-      if !raft.IsEmptySnap(rd.Snapshot) {
-        processSnapshot(rd.Snapshot)
-      }
-      for _, entry := range rd.CommittedEntries {
-        process(entry)
-        if entry.Type == raftpb.EntryConfChange {
-          var cc raftpb.ConfChange
-          cc.Unmarshal(entry.Data)
-          s.Node.ApplyConfChange(cc)
-        }
-      }
-      s.Node.Advance()
-    case <-s.done:
-      return
-    }
-  }
-
-To propose changes to the state machine from your node, take your application
-data, serialize it into a byte slice, and call:
-
-	n.Propose(ctx, data)
-
-If the proposal is committed, data will appear in committed entries with type
-raftpb.EntryNormal. There is no guarantee that a proposed command will be
-committed; you may have to re-propose after a timeout.
-
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
-	n.ProposeConfChange(ctx, cc)
-
-After the config change is committed, a committed entry with type
-raftpb.EntryConfChange will be returned. You must apply it to the node through:
-
-	var cc raftpb.ConfChange
-	cc.Unmarshal(data)
-	n.ApplyConfChange(cc)
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-Implementation notes
-
-This implementation is up to date with the final Raft thesis
-(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
-implementation of the membership change protocol differs somewhat from
-that described in chapter 4. The key invariant that membership changes
-happen one node at a time is preserved, but in our implementation the
-membership change takes effect when its entry is applied, not when it
-is added to the log (so the entry is committed under the old
-membership instead of the new). This is equivalent in terms of safety,
-since the old and new configurations are guaranteed to overlap.
-
-To ensure that we do not attempt to commit two membership changes at
-once by matching log positions (which would be unsafe since they
-should have different quorum requirements), we simply disallow any
-proposed membership change while any uncommitted change appears in
-the leader's log.
-
-This approach introduces a problem when you try to remove a member
-from a two-member cluster: If one of the members dies before the
-other one receives the commit of the confchange entry, then the member
-cannot be removed any more since the cluster cannot make progress.
-For this reason it is highly recommended to use three or more nodes in
-every cluster.
-
-MessageType
-
-Package raft sends and receives messages in Protocol Buffer format (defined
-in raftpb package). Each state (follower, candidate, leader) implements its
-own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
-advancing with the given raftpb.Message. Each step is determined by its
-raftpb.MessageType. Note that every step is checked by one common method
-'Step' that safety-checks the terms of the node and the incoming message to prevent
-stale log entries:
-
-	'MsgHup' is used for election. If a node is a follower or candidate, the
-	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
-	candidate has not received any heartbeat before the election timeout, it
-	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
-	start a new election.
-
-	'MsgBeat' is an internal type that signals the leader to send a heartbeat of
-	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
-	the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
-	send periodic 'MsgHeartbeat' messages to its followers.
-
-	'MsgProp' proposes to append data to its log entries. This is a special
-	type to redirect proposals to the leader. Therefore, the send method overwrites
-	raftpb.Message's term with its HardState's term to avoid attaching its
-	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
-	method, the leader first calls the 'appendEntry' method to append entries
-	to its log, and then calls 'bcastAppend' method to send those entries to
-	its peers. When passed to a candidate, 'MsgProp' is dropped. When passed to
-	a follower, 'MsgProp' is stored in the follower's mailbox (msgs) by the send
-	method. It is stored with sender's ID and later forwarded to leader by
-	rafthttp package.
-
-	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
-	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
-	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
-	back to follower, because it indicates that there is a valid leader sending
-	'MsgApp' messages. Candidate and follower respond to this message in
-	'MsgAppResp' type.
-
-	'MsgAppResp' is the response to a log replication request ('MsgApp'). When
-	'MsgApp' is passed to candidate or follower's Step method, it responds by
-	calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
-	mailbox.
-
-	'MsgVote' requests votes for election. When a node is a follower or
-	candidate and 'MsgHup' is passed to its Step method, then the node calls
-	'campaign' method to campaign itself to become a leader. Once 'campaign'
-	method is called, the node becomes candidate and sends 'MsgVote' to peers
-	in cluster to request votes. When passed to leader or candidate's Step
-	method and the message's Term is lower than leader's or candidate's,
-	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
-	If leader or candidate receives 'MsgVote' with higher term, it will revert
-	back to follower. When 'MsgVote' is passed to follower, it votes for the
-	sender only when sender's last term is greater than MsgVote's term or
-	sender's last term is equal to MsgVote's term but sender's last committed
-	index is greater than or equal to follower's.
-
-	'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
-	passed to candidate, the candidate calculates how many votes it has won. If
-	it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
-	If the candidate receives a majority of denial votes, it reverts back to
-	follower.
-
-	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
-	protocol. When Config.PreVote is true, a pre-election is carried out first
-	(using the same rules as a regular election), and no node increases its term
-	number unless the pre-election indicates that the campaigning node would win.
-	This minimizes disruption when a partitioned node rejoins the cluster.
-
-	'MsgSnap' requests the installation of a snapshot. When a node has just
-	become a leader or the leader receives 'MsgProp' message, it calls
-	'bcastAppend' method, which then calls 'sendAppend' method to each
-	follower. In 'sendAppend', if a leader fails to get term or entries,
-	the leader requests snapshot by sending 'MsgSnap' type message.
-
-	'MsgSnapStatus' tells the result of a snapshot install message. When a
-	follower rejects 'MsgSnap', it indicates that the snapshot request sent
-	with 'MsgSnap' failed, e.g. from network issues causing the network layer
-	to fail to send snapshots out to its followers. The leader then considers
-	the follower's progress as probe. When 'MsgSnap' is not rejected, it
-	indicates that the snapshot succeeded, and the leader sets the follower's
-	progress to probe and resumes its log replication.
-
-	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
-	to candidate and message's term is higher than candidate's, the candidate
-	reverts back to follower and updates its committed index from the one in
-	this heartbeat. And it sends the message to its mailbox. When
-	'MsgHeartbeat' is passed to follower's Step method and message's term is
-	higher than follower's, the follower updates its leaderID with the ID
-	from the message.
-
-	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
-	is passed to leader's Step method, the leader knows which follower
-	responded. And only when the leader's last committed index is greater than
-	the follower's Match index, the leader runs the 'sendAppend' method.
-
-	'MsgUnreachable' tells that a request (message) wasn't delivered. When
-	'MsgUnreachable' is passed to leader's Step method, the leader discovers
-	that the follower that sent this 'MsgUnreachable' is not reachable, often
-	indicating 'MsgApp' is lost. When follower's progress state is replicate,
-	the leader sets it back to probe.
-
-*/
-package raft
diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go
deleted file mode 100644
index c3036d3..0000000
--- a/vendor/github.com/coreos/etcd/raft/log.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-	"log"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type raftLog struct {
-	// storage contains all stable entries since the last snapshot.
-	storage Storage
-
-	// unstable contains all unstable entries and snapshot.
-	// they will be saved into storage.
-	unstable unstable
-
-	// committed is the highest log position that is known to be in
-	// stable storage on a quorum of nodes.
-	committed uint64
-	// applied is the highest log position that the application has
-	// been instructed to apply to its state machine.
-	// Invariant: applied <= committed
-	applied uint64
-
-	logger Logger
-}
-
-// newLog returns a log using the given storage. It recovers the log to the
-// state where it has just committed and applied the latest snapshot.
-func newLog(storage Storage, logger Logger) *raftLog {
-	if storage == nil {
-		log.Panic("storage must not be nil")
-	}
-	log := &raftLog{
-		storage: storage,
-		logger:  logger,
-	}
-	firstIndex, err := storage.FirstIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	lastIndex, err := storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	log.unstable.offset = lastIndex + 1
-	log.unstable.logger = logger
-	// Initialize our committed and applied pointers to the time of the last compaction.
-	log.committed = firstIndex - 1
-	log.applied = firstIndex - 1
-
-	return log
-}
-
-func (l *raftLog) String() string {
-	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
-}
-
-// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
-// it returns (last index of new entries, true).
-func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
-	if l.matchTerm(index, logTerm) {
-		lastnewi = index + uint64(len(ents))
-		ci := l.findConflict(ents)
-		switch {
-		case ci == 0:
-		case ci <= l.committed:
-			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
-		default:
-			offset := index + 1
-			l.append(ents[ci-offset:]...)
-		}
-		l.commitTo(min(committed, lastnewi))
-		return lastnewi, true
-	}
-	return 0, false
-}
-
-func (l *raftLog) append(ents ...pb.Entry) uint64 {
-	if len(ents) == 0 {
-		return l.lastIndex()
-	}
-	if after := ents[0].Index - 1; after < l.committed {
-		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
-	}
-	l.unstable.truncateAndAppend(ents)
-	return l.lastIndex()
-}
-
-// findConflict finds the index of the conflict.
-// It returns the first pair of conflicting entries between the existing
-// entries and the given entries, if there are any.
-// If there are no conflicting entries, and the existing entries contain
-// all the given entries, zero will be returned.
-// If there are no conflicting entries, but the given entries contain new
-// entries, the index of the first new entry will be returned.
-// An entry is considered to be conflicting if it has the same index but
-// a different term.
-// The first entry MUST have an index equal to the argument 'from'.
-// The index of the given entries MUST be continuously increasing.
-func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
-	for _, ne := range ents {
-		if !l.matchTerm(ne.Index, ne.Term) {
-			if ne.Index <= l.lastIndex() {
-				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
-					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
-			}
-			return ne.Index
-		}
-	}
-	return 0
-}
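The three findConflict outcomes are easy to exercise in isolation. Below is a self-contained sketch that re-implements the contract over a simplified entry type (plain Go; `entry` and this standalone `findConflict` are illustrative stand-ins, not the raftpb types):

package main

import "fmt"

type entry struct{ Index, Term uint64 }

// findConflict mirrors the contract above: 0 when the incoming entries are
// fully contained, the first new index when they extend the log, and the
// first mismatching index when a term disagrees.
func findConflict(existing, incoming []entry) uint64 {
	match := func(i, t uint64) bool {
		for _, e := range existing {
			if e.Index == i && e.Term == t {
				return true
			}
		}
		return false
	}
	for _, ne := range incoming {
		if !match(ne.Index, ne.Term) {
			return ne.Index
		}
	}
	return 0
}

func main() {
	log := []entry{{1, 1}, {2, 2}, {3, 2}}
	fmt.Println(findConflict(log, []entry{{2, 2}, {3, 2}})) // 0: already contained
	fmt.Println(findConflict(log, []entry{{3, 2}, {4, 2}})) // 4: first new entry
	fmt.Println(findConflict(log, []entry{{2, 2}, {3, 3}})) // 3: term conflict
}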
-
-func (l *raftLog) unstableEntries() []pb.Entry {
-	if len(l.unstable.entries) == 0 {
-		return nil
-	}
-	return l.unstable.entries
-}
-
-// nextEnts returns all the available entries for execution.
-// If applied is smaller than the index of the snapshot, it returns all
-// committed entries after the snapshot index.
-func (l *raftLog) nextEnts() (ents []pb.Entry) {
-	off := max(l.applied+1, l.firstIndex())
-	if l.committed+1 > off {
-		ents, err := l.slice(off, l.committed+1, noLimit)
-		if err != nil {
-			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
-		}
-		return ents
-	}
-	return nil
-}
-
-// hasNextEnts returns true if there are any available entries for execution.
-// This is a fast check without the heavy raftLog.slice() in raftLog.nextEnts().
-func (l *raftLog) hasNextEnts() bool {
-	off := max(l.applied+1, l.firstIndex())
-	return l.committed+1 > off
-}
-
-func (l *raftLog) snapshot() (pb.Snapshot, error) {
-	if l.unstable.snapshot != nil {
-		return *l.unstable.snapshot, nil
-	}
-	return l.storage.Snapshot()
-}
-
-func (l *raftLog) firstIndex() uint64 {
-	if i, ok := l.unstable.maybeFirstIndex(); ok {
-		return i
-	}
-	index, err := l.storage.FirstIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	return index
-}
-
-func (l *raftLog) lastIndex() uint64 {
-	if i, ok := l.unstable.maybeLastIndex(); ok {
-		return i
-	}
-	i, err := l.storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	return i
-}
-
-func (l *raftLog) commitTo(tocommit uint64) {
-	// never decrease commit
-	if l.committed < tocommit {
-		if l.lastIndex() < tocommit {
-			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
-		}
-		l.committed = tocommit
-	}
-}
-
-func (l *raftLog) appliedTo(i uint64) {
-	if i == 0 {
-		return
-	}
-	if l.committed < i || i < l.applied {
-		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
-	}
-	l.applied = i
-}
-
-func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
-
-func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
-
-func (l *raftLog) lastTerm() uint64 {
-	t, err := l.term(l.lastIndex())
-	if err != nil {
-		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
-	}
-	return t
-}
-
-func (l *raftLog) term(i uint64) (uint64, error) {
-	// the valid term range is [index of dummy entry, last index]
-	dummyIndex := l.firstIndex() - 1
-	if i < dummyIndex || i > l.lastIndex() {
-		// TODO: return an error instead?
-		return 0, nil
-	}
-
-	if t, ok := l.unstable.maybeTerm(i); ok {
-		return t, nil
-	}
-
-	t, err := l.storage.Term(i)
-	if err == nil {
-		return t, nil
-	}
-	if err == ErrCompacted || err == ErrUnavailable {
-		return 0, err
-	}
-	panic(err) // TODO(bdarnell)
-}
-
-func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
-	if i > l.lastIndex() {
-		return nil, nil
-	}
-	return l.slice(i, l.lastIndex()+1, maxsize)
-}
-
-// allEntries returns all entries in the log.
-func (l *raftLog) allEntries() []pb.Entry {
-	ents, err := l.entries(l.firstIndex(), noLimit)
-	if err == nil {
-		return ents
-	}
-	if err == ErrCompacted { // try again if there was a racing compaction
-		return l.allEntries()
-	}
-	// TODO (xiangli): handle error?
-	panic(err)
-}
-
-// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
-// by comparing the index and term of the last entries in the existing logs.
-// If the logs have last entries with different terms, then the log with the
-// later term is more up-to-date. If the logs end with the same term, then
-// whichever log has the larger lastIndex is more up-to-date. If the logs are
-// the same, the given log is up-to-date.
-func (l *raftLog) isUpToDate(lasti, term uint64) bool {
-	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
-}
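This is the voting restriction from the Raft paper (section 5.4.1): last term first, then log length. A couple of worked cases, reproduced standalone against a fixed local last entry of (index=5, term=3), purely for illustration:

package main

import "fmt"

func isUpToDate(lasti, term uint64) bool {
	const lastIndex, lastTerm = 5, 3
	return term > lastTerm || (term == lastTerm && lasti >= lastIndex)
}

func main() {
	fmt.Println(isUpToDate(4, 4)) // true: higher last term wins despite a shorter log
	fmt.Println(isUpToDate(7, 3)) // true: same term, candidate's log is at least as long
	fmt.Println(isUpToDate(9, 2)) // false: lower last term loses despite a longer log
}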
-
-func (l *raftLog) matchTerm(i, term uint64) bool {
-	t, err := l.term(i)
-	if err != nil {
-		return false
-	}
-	return t == term
-}
-
-func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
-	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
-		l.commitTo(maxIndex)
-		return true
-	}
-	return false
-}
-
-func (l *raftLog) restore(s pb.Snapshot) {
-	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
-	l.committed = s.Metadata.Index
-	l.unstable.restore(s)
-}
-
-// slice returns a slice of log entries from lo through hi-1, inclusive.
-func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
-	err := l.mustCheckOutOfBounds(lo, hi)
-	if err != nil {
-		return nil, err
-	}
-	if lo == hi {
-		return nil, nil
-	}
-	var ents []pb.Entry
-	if lo < l.unstable.offset {
-		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
-		if err == ErrCompacted {
-			return nil, err
-		} else if err == ErrUnavailable {
-			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
-		} else if err != nil {
-			panic(err) // TODO(bdarnell)
-		}
-
-		// check if ents has reached the size limitation
-		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
-			return storedEnts, nil
-		}
-
-		ents = storedEnts
-	}
-	if hi > l.unstable.offset {
-		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
-		if len(ents) > 0 {
-			ents = append([]pb.Entry{}, ents...)
-			ents = append(ents, unstable...)
-		} else {
-			ents = unstable
-		}
-	}
-	return limitSize(ents, maxSize), nil
-}
-
-// l.firstIndex() <= lo <= hi <= l.lastIndex() + 1
-func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
-	if lo > hi {
-		l.logger.Panicf("invalid slice %d > %d", lo, hi)
-	}
-	fi := l.firstIndex()
-	if lo < fi {
-		return ErrCompacted
-	}
-
-	length := l.lastIndex() + 1 - fi
-	if lo < fi || hi > fi+length {
-		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
-	}
-	return nil
-}
-
-func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
-	if err == nil {
-		return t
-	}
-	if err == ErrCompacted {
-		return 0
-	}
-	l.logger.Panicf("unexpected error (%v)", err)
-	return 0
-}
diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go
deleted file mode 100644
index 263af9c..0000000
--- a/vendor/github.com/coreos/etcd/raft/log_unstable.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "github.com/coreos/etcd/raft/raftpb"
-
-// unstable.entries[i] has raft log position i+unstable.offset.
-// Note that unstable.offset may be less than the highest log
-// position in storage; this means that the next write to storage
-// might need to truncate the log before persisting unstable.entries.
-type unstable struct {
-	// the incoming unstable snapshot, if any.
-	snapshot *pb.Snapshot
-	// all entries that have not yet been written to storage.
-	entries []pb.Entry
-	offset  uint64
-
-	logger Logger
-}
-
-// maybeFirstIndex returns the index of the first possible entry in entries
-// if it has a snapshot.
-func (u *unstable) maybeFirstIndex() (uint64, bool) {
-	if u.snapshot != nil {
-		return u.snapshot.Metadata.Index + 1, true
-	}
-	return 0, false
-}
-
-// maybeLastIndex returns the last index if it has at least one
-// unstable entry or snapshot.
-func (u *unstable) maybeLastIndex() (uint64, bool) {
-	if l := len(u.entries); l != 0 {
-		return u.offset + uint64(l) - 1, true
-	}
-	if u.snapshot != nil {
-		return u.snapshot.Metadata.Index, true
-	}
-	return 0, false
-}
-
-// maybeTerm returns the term of the entry at index i, if there
-// is any.
-func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
-	if i < u.offset {
-		if u.snapshot == nil {
-			return 0, false
-		}
-		if u.snapshot.Metadata.Index == i {
-			return u.snapshot.Metadata.Term, true
-		}
-		return 0, false
-	}
-
-	last, ok := u.maybeLastIndex()
-	if !ok {
-		return 0, false
-	}
-	if i > last {
-		return 0, false
-	}
-	return u.entries[i-u.offset].Term, true
-}
-
-func (u *unstable) stableTo(i, t uint64) {
-	gt, ok := u.maybeTerm(i)
-	if !ok {
-		return
-	}
-	// if i < offset, the term matched the snapshot;
-	// only update the unstable entries if the term matches
-	// an unstable entry.
-	if gt == t && i >= u.offset {
-		u.entries = u.entries[i+1-u.offset:]
-		u.offset = i + 1
-		u.shrinkEntriesArray()
-	}
-}
-
-// shrinkEntriesArray discards the underlying array used by the entries slice
-// if most of it isn't being used. This avoids holding references to a bunch of
-// potentially large entries that aren't needed anymore. Simply clearing the
-// entries wouldn't be safe because clients might still be using them.
-func (u *unstable) shrinkEntriesArray() {
-	// We replace the array if we're using less than half of the space in
-	// it. This number is fairly arbitrary, chosen as an attempt to balance
-	// memory usage vs number of allocations. It could probably be improved
-	// with some focused tuning.
-	const lenMultiple = 2
-	if len(u.entries) == 0 {
-		u.entries = nil
-	} else if len(u.entries)*lenMultiple < cap(u.entries) {
-		newEntries := make([]pb.Entry, len(u.entries))
-		copy(newEntries, u.entries)
-		u.entries = newEntries
-	}
-}
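The shrink-by-copy idiom above is worth seeing in isolation: a short window of a large slice pins the entire backing array until the live elements are copied out. A minimal sketch under the same lenMultiple heuristic (plain uint64 slice, not the etcd types):

package main

import "fmt"

func shrink(entries []uint64) []uint64 {
	const lenMultiple = 2
	if len(entries) != 0 && len(entries)*lenMultiple < cap(entries) {
		out := make([]uint64, len(entries))
		copy(out, entries)
		return out
	}
	return entries
}

func main() {
	backing := make([]uint64, 1000)
	window := backing[:10] // 10 live elements, but cap(window) == 1000
	window = shrink(window)
	fmt.Println(len(window), cap(window)) // 10 10: window no longer pins the big array
}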
-
-func (u *unstable) stableSnapTo(i uint64) {
-	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
-		u.snapshot = nil
-	}
-}
-
-func (u *unstable) restore(s pb.Snapshot) {
-	u.offset = s.Metadata.Index + 1
-	u.entries = nil
-	u.snapshot = &s
-}
-
-func (u *unstable) truncateAndAppend(ents []pb.Entry) {
-	after := ents[0].Index
-	switch {
-	case after == u.offset+uint64(len(u.entries)):
-		// after is the next index in the u.entries
-		// directly append
-		u.entries = append(u.entries, ents...)
-	case after <= u.offset:
-		u.logger.Infof("replace the unstable entries from index %d", after)
-		// The log is being truncated to before our current offset
-		// portion, so set the offset and replace the entries
-		u.offset = after
-		u.entries = ents
-	default:
-		// truncate to after and copy to u.entries
-		// then append
-		u.logger.Infof("truncate the unstable entries before index %d", after)
-		u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
-		u.entries = append(u.entries, ents...)
-	}
-}
-
-func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
-	u.mustCheckOutOfBounds(lo, hi)
-	return u.entries[lo-u.offset : hi-u.offset]
-}
-
-// u.offset <= lo <= hi <= u.offset+len(u.entries)
-func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
-	if lo > hi {
-		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
-	}
-	upper := u.offset + uint64(len(u.entries))
-	if lo < u.offset || hi > upper {
-		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
-	}
-}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
deleted file mode 100644
index 426a77d..0000000
--- a/vendor/github.com/coreos/etcd/raft/logger.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-)
-
-type Logger interface {
-	Debug(v ...interface{})
-	Debugf(format string, v ...interface{})
-
-	Error(v ...interface{})
-	Errorf(format string, v ...interface{})
-
-	Info(v ...interface{})
-	Infof(format string, v ...interface{})
-
-	Warning(v ...interface{})
-	Warningf(format string, v ...interface{})
-
-	Fatal(v ...interface{})
-	Fatalf(format string, v ...interface{})
-
-	Panic(v ...interface{})
-	Panicf(format string, v ...interface{})
-}
-
-func SetLogger(l Logger) { raftLogger = l }
-
-var (
-	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
-	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
-	raftLogger    = Logger(defaultLogger)
-)
-
-const (
-	calldepth = 2
-)
-
-// DefaultLogger is a default implementation of the Logger interface.
-type DefaultLogger struct {
-	*log.Logger
-	debug bool
-}
-
-func (l *DefaultLogger) EnableTimestamps() {
-	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
-}
-
-func (l *DefaultLogger) EnableDebug() {
-	l.debug = true
-}
-
-func (l *DefaultLogger) Debug(v ...interface{}) {
-	if l.debug {
-		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
-	}
-}
-
-func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
-	if l.debug {
-		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
-	}
-}
-
-func (l *DefaultLogger) Info(v ...interface{}) {
-	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Infof(format string, v ...interface{}) {
-	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Error(v ...interface{}) {
-	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
-	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Warning(v ...interface{}) {
-	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
-	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Fatal(v ...interface{}) {
-	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
-	os.Exit(1)
-}
-
-func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
-	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
-	os.Exit(1)
-}
-
-func (l *DefaultLogger) Panic(v ...interface{}) {
-	l.Logger.Panic(v...)
-}
-
-func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
-	l.Logger.Panicf(format, v...)
-}
-
-func header(lvl, msg string) string {
-	return fmt.Sprintf("%s: %s", lvl, msg)
-}
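Rather than implementing all twelve Logger methods by hand, a caller can reuse DefaultLogger with a different sink and install it via SetLogger. A minimal sketch, assuming the vendored import path below:

package main

import (
	"log"
	"os"

	raft "github.com/coreos/etcd/raft"
)

func main() {
	f, err := os.OpenFile("raft.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// DefaultLogger embeds *log.Logger, so any destination log.New accepts works.
	custom := &raft.DefaultLogger{Logger: log.New(f, "raft ", log.LstdFlags)}
	custom.EnableDebug() // also emit Debug/Debugf output
	raft.SetLogger(custom)
}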
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
deleted file mode 100644
index 33a9db8..0000000
--- a/vendor/github.com/coreos/etcd/raft/node.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"context"
-	"errors"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type SnapshotStatus int
-
-const (
-	SnapshotFinish  SnapshotStatus = 1
-	SnapshotFailure SnapshotStatus = 2
-)
-
-var (
-	emptyState = pb.HardState{}
-
-	// ErrStopped is returned by methods on Nodes that have been stopped.
-	ErrStopped = errors.New("raft: stopped")
-)
-
-// SoftState provides state that is useful for logging and debugging.
-// The state is volatile and does not need to be persisted to the WAL.
-type SoftState struct {
-	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
-	RaftState StateType
-}
-
-func (a *SoftState) equal(b *SoftState) bool {
-	return a.Lead == b.Lead && a.RaftState == b.RaftState
-}
-
-// Ready encapsulates the entries and messages that are ready to read,
-// be saved to stable storage, committed or sent to other peers.
-// All fields in Ready are read-only.
-type Ready struct {
-	// The current volatile state of a Node.
-	// SoftState will be nil if there is no update.
-	// It is not required to consume or store SoftState.
-	*SoftState
-
-	// The current state of a Node to be saved to stable storage BEFORE
-	// Messages are sent.
-	// HardState will be equal to empty state if there is no update.
-	pb.HardState
-
-	// ReadStates can be used by the node to serve linearizable read requests
-	// locally when its applied index is greater than the index in ReadState.
-	// Note that the readState will be returned when raft receives msgReadIndex.
-	// The returned readState is only valid for the request that requested to read.
-	ReadStates []ReadState
-
-	// Entries specifies entries to be saved to stable storage BEFORE
-	// Messages are sent.
-	Entries []pb.Entry
-
-	// Snapshot specifies the snapshot to be saved to stable storage.
-	Snapshot pb.Snapshot
-
-	// CommittedEntries specifies entries to be committed to a
-	// store/state-machine. These have previously been committed to stable
-	// store.
-	CommittedEntries []pb.Entry
-
-	// Messages specifies outbound messages to be sent AFTER Entries are
-	// committed to stable storage.
-	// If it contains a MsgSnap message, the application MUST report back to raft
-	// when the snapshot has been received or has failed by calling ReportSnapshot.
-	Messages []pb.Message
-
-	// MustSync indicates whether the HardState and Entries must be synchronously
-	// written to disk or if an asynchronous write is permissible.
-	MustSync bool
-}
-
-func isHardStateEqual(a, b pb.HardState) bool {
-	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
-}
-
-// IsEmptyHardState returns true if the given HardState is empty.
-func IsEmptyHardState(st pb.HardState) bool {
-	return isHardStateEqual(st, emptyState)
-}
-
-// IsEmptySnap returns true if the given Snapshot is empty.
-func IsEmptySnap(sp pb.Snapshot) bool {
-	return sp.Metadata.Index == 0
-}
-
-func (rd Ready) containsUpdates() bool {
-	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
-		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
-		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
-}
-
-// Node represents a node in a raft cluster.
-type Node interface {
-	// Tick increments the internal logical clock for the Node by a single tick. Election
-	// timeouts and heartbeat timeouts are in units of ticks.
-	Tick()
-	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
-	Campaign(ctx context.Context) error
-	// Propose proposes that data be appended to the log.
-	Propose(ctx context.Context, data []byte) error
-	// ProposeConfChange proposes a config change.
-	// At most one ConfChange can be in the process of going through consensus.
-	// The application needs to call ApplyConfChange when applying an EntryConfChange type entry.
-	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
-	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
-	Step(ctx context.Context, msg pb.Message) error
-
-	// Ready returns a channel that returns the current point-in-time state.
-	// Users of the Node must call Advance after retrieving the state returned by Ready.
-	//
-	// NOTE: No committed entries from the next Ready may be applied until all committed entries
-	// and snapshots from the previous one have finished.
-	Ready() <-chan Ready
-
-	// Advance notifies the Node that the application has saved progress up to the last Ready.
-	// It prepares the node to return the next available Ready.
-	//
-	// The application should generally call Advance after it applies the entries in the last Ready.
-	//
-	// However, as an optimization, the application may call Advance while it is applying the
-	// commands. For example, when the last Ready contains a snapshot, the application might take
-	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
-	// progress, it can call Advance before finishing applying the last Ready.
-	Advance()
-	// ApplyConfChange applies config change to the local node.
-	// Returns an opaque ConfState protobuf which must be recorded
-	// in snapshots. Will never return nil; it returns a pointer only
-	// to match MemoryStorage.Compact.
-	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
-
-	// TransferLeadership attempts to transfer leadership to the given transferee.
-	TransferLeadership(ctx context.Context, lead, transferee uint64)
-
-	// ReadIndex requests a read state. The read state will be set in the ready.
-	// Read state has a read index. Once the application advances further than the read
-	// index, any linearizable read requests issued before the read request can be
-	// processed safely. The read state will have the same rctx attached.
-	ReadIndex(ctx context.Context, rctx []byte) error
-
-	// Status returns the current status of the raft state machine.
-	Status() Status
-	// ReportUnreachable reports that the given node is not reachable for the last send.
-	ReportUnreachable(id uint64)
-	// ReportSnapshot reports the status of the sent snapshot.
-	ReportSnapshot(id uint64, status SnapshotStatus)
-	// Stop performs any necessary termination of the Node.
-	Stop()
-}
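The contract above implies a specific driver loop: persist, send, apply, then Advance. A condensed sketch of that loop follows (a fragment: n is a started Node, done is an application shutdown channel, and saveToStorage, send, process, and processSnapshot are application-supplied placeholders, not part of this package):

ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
for {
	select {
	case <-ticker.C:
		n.Tick()
	case rd := <-n.Ready():
		// Persist HardState and Entries BEFORE sending messages, per the
		// Ready field documentation above.
		saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
		send(rd.Messages)
		if !raft.IsEmptySnap(rd.Snapshot) {
			processSnapshot(rd.Snapshot)
		}
		for _, entry := range rd.CommittedEntries {
			process(entry)
			if entry.Type == raftpb.EntryConfChange {
				var cc raftpb.ConfChange
				if err := cc.Unmarshal(entry.Data); err == nil {
					n.ApplyConfChange(cc)
				}
			}
		}
		n.Advance()
	case <-done:
		return
	}
}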
-
-type Peer struct {
-	ID      uint64
-	Context []byte
-}
-
-// StartNode returns a new Node given configuration and a list of raft peers.
-// It appends a ConfChangeAddNode entry for each given peer to the initial log.
-func StartNode(c *Config, peers []Peer) Node {
-	r := newRaft(c)
-	// become the follower at term 1 and apply initial configuration
-	// entries of term 1
-	r.becomeFollower(1, None)
-	for _, peer := range peers {
-		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
-		d, err := cc.Marshal()
-		if err != nil {
-			panic("unexpected marshal error")
-		}
-		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
-		r.raftLog.append(e)
-	}
-	// Mark these initial entries as committed.
-	// TODO(bdarnell): These entries are still unstable; do we need to preserve
-	// the invariant that committed < unstable?
-	r.raftLog.committed = r.raftLog.lastIndex()
-	// Now apply them, mainly so that the application can call Campaign
-	// immediately after StartNode in tests. Note that these nodes will
-	// be added to raft twice: here and when the application's Ready
-	// loop calls ApplyConfChange. The calls to addNode must come after
-	// all calls to raftLog.append so progress.next is set after these
-	// bootstrapping entries (it is an error if we try to append these
-	// entries since they have already been committed).
-	// We do not set raftLog.applied so the application will be able
-	// to observe all conf changes via Ready.CommittedEntries.
-	for _, peer := range peers {
-		r.addNode(peer.ID)
-	}
-
-	n := newNode()
-	n.logger = c.Logger
-	go n.run(r)
-	return &n
-}
-
-// RestartNode is similar to StartNode but does not take a list of peers.
-// The current membership of the cluster will be restored from the Storage.
-// If the caller has an existing state machine, pass in the last log index that
-// has been applied to it; otherwise use zero.
-func RestartNode(c *Config) Node {
-	r := newRaft(c)
-
-	n := newNode()
-	n.logger = c.Logger
-	go n.run(r)
-	return &n
-}
-
-// node is the canonical implementation of the Node interface
-type node struct {
-	propc      chan pb.Message
-	recvc      chan pb.Message
-	confc      chan pb.ConfChange
-	confstatec chan pb.ConfState
-	readyc     chan Ready
-	advancec   chan struct{}
-	tickc      chan struct{}
-	done       chan struct{}
-	stop       chan struct{}
-	status     chan chan Status
-
-	logger Logger
-}
-
-func newNode() node {
-	return node{
-		propc:      make(chan pb.Message),
-		recvc:      make(chan pb.Message),
-		confc:      make(chan pb.ConfChange),
-		confstatec: make(chan pb.ConfState),
-		readyc:     make(chan Ready),
-		advancec:   make(chan struct{}),
-		// make tickc a buffered chan, so the raft node can buffer some ticks when the
-		// node is busy processing raft messages. The raft node will resume processing
-		// buffered ticks when it becomes idle.
-		tickc:  make(chan struct{}, 128),
-		done:   make(chan struct{}),
-		stop:   make(chan struct{}),
-		status: make(chan chan Status),
-	}
-}
-
-func (n *node) Stop() {
-	select {
-	case n.stop <- struct{}{}:
-		// Not already stopped, so trigger it
-	case <-n.done:
-		// Node has already been stopped - no need to do anything
-		return
-	}
-	// Block until the stop has been acknowledged by run()
-	<-n.done
-}
-
-func (n *node) run(r *raft) {
-	var propc chan pb.Message
-	var readyc chan Ready
-	var advancec chan struct{}
-	var prevLastUnstablei, prevLastUnstablet uint64
-	var havePrevLastUnstablei bool
-	var prevSnapi uint64
-	var rd Ready
-
-	lead := None
-	prevSoftSt := r.softState()
-	prevHardSt := emptyState
-
-	for {
-		if advancec != nil {
-			readyc = nil
-		} else {
-			rd = newReady(r, prevSoftSt, prevHardSt)
-			if rd.containsUpdates() {
-				readyc = n.readyc
-			} else {
-				readyc = nil
-			}
-		}
-
-		if lead != r.lead {
-			if r.hasLeader() {
-				if lead == None {
-					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
-				} else {
-					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
-				}
-				propc = n.propc
-			} else {
-				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
-				propc = nil
-			}
-			lead = r.lead
-		}
-
-		select {
-		// TODO: maybe buffer the config propose if there exists one (the way
-		// described in raft dissertation)
-		// Currently it is dropped in Step silently.
-		case m := <-propc:
-			m.From = r.id
-			r.Step(m)
-		case m := <-n.recvc:
-			// filter out response message from unknown From.
-			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
-				r.Step(m) // raft never returns an error
-			}
-		case cc := <-n.confc:
-			if cc.NodeID == None {
-				r.resetPendingConf()
-				select {
-				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
-				case <-n.done:
-				}
-				break
-			}
-			switch cc.Type {
-			case pb.ConfChangeAddNode:
-				r.addNode(cc.NodeID)
-			case pb.ConfChangeAddLearnerNode:
-				r.addLearner(cc.NodeID)
-			case pb.ConfChangeRemoveNode:
-				// block incoming proposal when local node is
-				// removed
-				if cc.NodeID == r.id {
-					propc = nil
-				}
-				r.removeNode(cc.NodeID)
-			case pb.ConfChangeUpdateNode:
-				r.resetPendingConf()
-			default:
-				panic("unexpected conf type")
-			}
-			select {
-			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
-			case <-n.done:
-			}
-		case <-n.tickc:
-			r.tick()
-		case readyc <- rd:
-			if rd.SoftState != nil {
-				prevSoftSt = rd.SoftState
-			}
-			if len(rd.Entries) > 0 {
-				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
-				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
-				havePrevLastUnstablei = true
-			}
-			if !IsEmptyHardState(rd.HardState) {
-				prevHardSt = rd.HardState
-			}
-			if !IsEmptySnap(rd.Snapshot) {
-				prevSnapi = rd.Snapshot.Metadata.Index
-			}
-
-			r.msgs = nil
-			r.readStates = nil
-			advancec = n.advancec
-		case <-advancec:
-			if prevHardSt.Commit != 0 {
-				r.raftLog.appliedTo(prevHardSt.Commit)
-			}
-			if havePrevLastUnstablei {
-				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
-				havePrevLastUnstablei = false
-			}
-			r.raftLog.stableSnapTo(prevSnapi)
-			advancec = nil
-		case c := <-n.status:
-			c <- getStatus(r)
-		case <-n.stop:
-			close(n.done)
-			return
-		}
-	}
-}
-
-// Tick increments the internal logical clock for this Node. Election timeouts
-// and heartbeat timeouts are in units of ticks.
-func (n *node) Tick() {
-	select {
-	case n.tickc <- struct{}{}:
-	case <-n.done:
-	default:
-		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
-	}
-}
-
-func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
-
-func (n *node) Propose(ctx context.Context, data []byte) error {
-	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
-}
-
-func (n *node) Step(ctx context.Context, m pb.Message) error {
-	// ignore unexpected local messages received over the network
-	if IsLocalMsg(m.Type) {
-		// TODO: return an error?
-		return nil
-	}
-	return n.step(ctx, m)
-}
-
-func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
-	data, err := cc.Marshal()
-	if err != nil {
-		return err
-	}
-	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
-}
-
-// step advances the state machine using the given message m. ctx.Err() will be
-// returned, if any.
-func (n *node) step(ctx context.Context, m pb.Message) error {
-	ch := n.recvc
-	if m.Type == pb.MsgProp {
-		ch = n.propc
-	}
-
-	select {
-	case ch <- m:
-		return nil
-	case <-ctx.Done():
-		return ctx.Err()
-	case <-n.done:
-		return ErrStopped
-	}
-}
-
-func (n *node) Ready() <-chan Ready { return n.readyc }
-
-func (n *node) Advance() {
-	select {
-	case n.advancec <- struct{}{}:
-	case <-n.done:
-	}
-}
-
-func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
-	var cs pb.ConfState
-	select {
-	case n.confc <- cc:
-	case <-n.done:
-	}
-	select {
-	case cs = <-n.confstatec:
-	case <-n.done:
-	}
-	return &cs
-}
-
-func (n *node) Status() Status {
-	c := make(chan Status)
-	select {
-	case n.status <- c:
-		return <-c
-	case <-n.done:
-		return Status{}
-	}
-}
-
-func (n *node) ReportUnreachable(id uint64) {
-	select {
-	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
-	case <-n.done:
-	}
-}
-
-func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
-	rej := status == SnapshotFailure
-
-	select {
-	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
-	case <-n.done:
-	}
-}
-
-func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
-	select {
-	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
-	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
-	case <-n.done:
-	case <-ctx.Done():
-	}
-}
-
-func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
-	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
-
-func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
-	rd := Ready{
-		Entries:          r.raftLog.unstableEntries(),
-		CommittedEntries: r.raftLog.nextEnts(),
-		Messages:         r.msgs,
-	}
-	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
-		rd.SoftState = softSt
-	}
-	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
-		rd.HardState = hardSt
-	}
-	if r.raftLog.unstable.snapshot != nil {
-		rd.Snapshot = *r.raftLog.unstable.snapshot
-	}
-	if len(r.readStates) != 0 {
-		rd.ReadStates = r.readStates
-	}
-	rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
-	return rd
-}
-
-// MustSync returns true if the hard state and count of Raft entries indicate
-// that a synchronous write to persistent storage is required.
-func MustSync(st, prevst pb.HardState, entsnum int) bool {
-	// Persistent state on all servers:
-	// (Updated on stable storage before responding to RPCs)
-	// currentTerm
-	// votedFor
-	// log entries[]
-	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
-}
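Concretely, only a term change, a vote change, or new entries force a synchronous write; advancing the commit index alone does not. A small illustration (a fragment using the raftpb types imported above as pb):

prev := pb.HardState{Term: 2, Vote: 1, Commit: 5}
cur := pb.HardState{Term: 2, Vote: 1, Commit: 7} // only the commit index moved
fmt.Println(MustSync(cur, prev, 0)) // false: the commit index may be written asynchronously
fmt.Println(MustSync(cur, prev, 3)) // true: three new entries must be durable before sending
cur.Term = 3
fmt.Println(MustSync(cur, prev, 0)) // true: a term change must hit disk before responding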
diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go
deleted file mode 100644
index ef3787d..0000000
--- a/vendor/github.com/coreos/etcd/raft/progress.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import "fmt"
-
-const (
-	ProgressStateProbe ProgressStateType = iota
-	ProgressStateReplicate
-	ProgressStateSnapshot
-)
-
-type ProgressStateType uint64
-
-var prstmap = [...]string{
-	"ProgressStateProbe",
-	"ProgressStateReplicate",
-	"ProgressStateSnapshot",
-}
-
-func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
-
-// Progress represents a follower’s progress in the view of the leader. The leader
-// maintains the progress of all followers, and sends entries to each follower
-// based on its progress.
-type Progress struct {
-	Match, Next uint64
-	// State defines how the leader should interact with the follower.
-	//
-	// When in ProgressStateProbe, leader sends at most one replication message
-	// per heartbeat interval. It also probes actual progress of the follower.
-	//
-	// When in ProgressStateReplicate, leader optimistically increases next
-	// to the latest entry sent after sending replication message. This is
-	// an optimized state for fast replicating log entries to the follower.
-	//
-	// When in ProgressStateSnapshot, the leader has already sent out a snapshot
-	// and stops sending any replication message.
-	State ProgressStateType
-
-	// Paused is used in ProgressStateProbe.
-	// When Paused is true, raft should pause sending replication message to this peer.
-	Paused bool
-	// PendingSnapshot is used in ProgressStateSnapshot.
-	// If there is a pending snapshot, the pendingSnapshot will be set to the
-	// index of the snapshot. If pendingSnapshot is set, the replication process of
-	// this Progress will be paused. raft will not resend snapshot until the pending one
-	// is reported to be failed.
-	PendingSnapshot uint64
-
-	// RecentActive is true if the progress is recently active. Receiving any messages
-	// from the corresponding follower indicates the progress is active.
-	// RecentActive can be reset to false after an election timeout.
-	RecentActive bool
-
-	// inflights is a sliding window for the inflight messages.
-	// Each inflight message contains one or more log entries.
-	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
-	// Thus inflight effectively limits both the number of inflight messages
-	// and the bandwidth each Progress can use.
-	// When inflights is full, no more message should be sent.
-	// When a leader sends out a message, the index of the last
-	// entry should be added to inflights. The index MUST be added
-	// into inflights in order.
-	// When a leader receives a reply, the previous inflights should
-	// be freed by calling inflights.freeTo with the index of the last
-	// received entry.
-	ins *inflights
-
-	// IsLearner is true if this progress is tracked for a learner.
-	IsLearner bool
-}
-
-func (pr *Progress) resetState(state ProgressStateType) {
-	pr.Paused = false
-	pr.PendingSnapshot = 0
-	pr.State = state
-	pr.ins.reset()
-}
-
-func (pr *Progress) becomeProbe() {
-	// If the original state is ProgressStateSnapshot, progress knows that
-	// the pending snapshot has been sent to this peer successfully, then
-	// probes from pendingSnapshot + 1.
-	if pr.State == ProgressStateSnapshot {
-		pendingSnapshot := pr.PendingSnapshot
-		pr.resetState(ProgressStateProbe)
-		pr.Next = max(pr.Match+1, pendingSnapshot+1)
-	} else {
-		pr.resetState(ProgressStateProbe)
-		pr.Next = pr.Match + 1
-	}
-}
-
-func (pr *Progress) becomeReplicate() {
-	pr.resetState(ProgressStateReplicate)
-	pr.Next = pr.Match + 1
-}
-
-func (pr *Progress) becomeSnapshot(snapshoti uint64) {
-	pr.resetState(ProgressStateSnapshot)
-	pr.PendingSnapshot = snapshoti
-}
-
-// maybeUpdate returns false if the given index n comes from an outdated message.
-// Otherwise it updates the progress and returns true.
-func (pr *Progress) maybeUpdate(n uint64) bool {
-	var updated bool
-	if pr.Match < n {
-		pr.Match = n
-		updated = true
-		pr.resume()
-	}
-	if pr.Next < n+1 {
-		pr.Next = n + 1
-	}
-	return updated
-}
-
-func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
-
-// maybeDecrTo returns false if the given to index comes from an out of order message.
-// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
-func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
-	if pr.State == ProgressStateReplicate {
-		// the rejection must be stale if the progress has matched and "rejected"
-		// is smaller than "match".
-		if rejected <= pr.Match {
-			return false
-		}
-		// directly decrease next to match + 1
-		pr.Next = pr.Match + 1
-		return true
-	}
-
-	// the rejection must be stale if "rejected" does not match next - 1
-	if pr.Next-1 != rejected {
-		return false
-	}
-
-	if pr.Next = min(rejected, last+1); pr.Next < 1 {
-		pr.Next = 1
-	}
-	pr.resume()
-	return true
-}
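The probe-state arithmetic is easiest to follow with numbers. The standalone sketch below replays that branch (an illustrative helper, not the package's own code path):

package main

import "fmt"

func minU64(a, b uint64) uint64 {
	if a < b {
		return a
	}
	return b
}

// decrTo replays the probe-state branch: act on a rejection only if it
// matches the last probe (next-1), then back off to just past the
// follower's reported last index.
func decrTo(next, rejected, last uint64) (uint64, bool) {
	if next-1 != rejected {
		return next, false // stale or out-of-order rejection
	}
	if next = minU64(rejected, last+1); next < 1 {
		next = 1
	}
	return next, true
}

func main() {
	fmt.Println(decrTo(11, 10, 4)) // 5 true: resume probing just past the follower's end
	fmt.Println(decrTo(11, 7, 4))  // 11 false: rejection is stale, Next already moved
}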
-
-func (pr *Progress) pause()  { pr.Paused = true }
-func (pr *Progress) resume() { pr.Paused = false }
-
-// IsPaused returns whether sending log entries to this node has been
-// paused. A node may be paused because it has rejected recent
-// MsgApps, is currently waiting for a snapshot, or has reached the
-// MaxInflightMsgs limit.
-func (pr *Progress) IsPaused() bool {
-	switch pr.State {
-	case ProgressStateProbe:
-		return pr.Paused
-	case ProgressStateReplicate:
-		return pr.ins.full()
-	case ProgressStateSnapshot:
-		return true
-	default:
-		panic("unexpected state")
-	}
-}
-
-func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
-
-// needSnapshotAbort returns true if snapshot progress's Match
-// is equal or higher than the pendingSnapshot.
-func (pr *Progress) needSnapshotAbort() bool {
-	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
-}
-
-func (pr *Progress) String() string {
-	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
-}
-
-type inflights struct {
-	// the starting index in the buffer
-	start int
-	// number of inflights in the buffer
-	count int
-
-	// the size of the buffer
-	size int
-
-	// buffer contains the index of the last entry
-	// inside one message.
-	buffer []uint64
-}
-
-func newInflights(size int) *inflights {
-	return &inflights{
-		size: size,
-	}
-}
-
-// add adds an inflight into inflights
-func (in *inflights) add(inflight uint64) {
-	if in.full() {
-		panic("cannot add into a full inflights")
-	}
-	next := in.start + in.count
-	size := in.size
-	if next >= size {
-		next -= size
-	}
-	if next >= len(in.buffer) {
-		in.growBuf()
-	}
-	in.buffer[next] = inflight
-	in.count++
-}
-
-// grow the inflight buffer by doubling up to inflights.size. We grow on demand
-// instead of preallocating to inflights.size to handle systems which have
-// thousands of Raft groups per process.
-func (in *inflights) growBuf() {
-	newSize := len(in.buffer) * 2
-	if newSize == 0 {
-		newSize = 1
-	} else if newSize > in.size {
-		newSize = in.size
-	}
-	newBuffer := make([]uint64, newSize)
-	copy(newBuffer, in.buffer)
-	in.buffer = newBuffer
-}
-
-// freeTo frees the inflights smaller than or equal to the given `to` flight.
-func (in *inflights) freeTo(to uint64) {
-	if in.count == 0 || to < in.buffer[in.start] {
-		// out of the left side of the window
-		return
-	}
-
-	idx := in.start
-	var i int
-	for i = 0; i < in.count; i++ {
-		if to < in.buffer[idx] { // found the first large inflight
-			break
-		}
-
-		// increase index and maybe rotate
-		size := in.size
-		if idx++; idx >= size {
-			idx -= size
-		}
-	}
-	// free i inflights and set new start index
-	in.count -= i
-	in.start = idx
-	if in.count == 0 {
-		// inflights is empty, reset the start index so that we don't grow the
-		// buffer unnecessarily.
-		in.start = 0
-	}
-}
-
-func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
-
-// full returns true if the inflights is full.
-func (in *inflights) full() bool {
-	return in.count == in.size
-}
-
-// reset frees all inflights.
-func (in *inflights) reset() {
-	in.count = 0
-	in.start = 0
-}
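Inside the package, the window is driven by two calls: add the last index of each outbound message, then freeTo once the follower acknowledges. A usage fragment (valid only within package raft, since the type is unexported):

in := newInflights(3)
in.add(5)  // message carrying entries through index 5
in.add(9)  // ... through 9
in.add(12) // ... through 12
fmt.Println(in.full()) // true: window exhausted, stop sending MsgApp
in.freeTo(9)           // follower acknowledged everything through index 9
fmt.Println(in.full()) // false: two slots freed, replication resumes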
diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go
deleted file mode 100644
index 22ff138..0000000
--- a/vendor/github.com/coreos/etcd/raft/raft.go
+++ /dev/null
@@ -1,1407 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"math"
-	"math/rand"
-	"sort"
-	"strings"
-	"sync"
-	"time"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
-
-// Possible values for StateType.
-const (
-	StateFollower StateType = iota
-	StateCandidate
-	StateLeader
-	StatePreCandidate
-	numStates
-)
-
-type ReadOnlyOption int
-
-const (
-	// ReadOnlySafe guarantees the linearizability of the read only request by
-	// communicating with the quorum. It is the default and suggested option.
-	ReadOnlySafe ReadOnlyOption = iota
-	// ReadOnlyLeaseBased ensures linearizability of the read only request by
-	// relying on the leader lease. It can be affected by clock drift.
-	// If the clock drift is unbounded, leader might keep the lease longer than it
-	// should (clock can move backward/pause without any bound). ReadIndex is not safe
-	// in that case.
-	ReadOnlyLeaseBased
-)
-
-// Possible values for CampaignType
-const (
-	// campaignPreElection represents the first phase of a normal election when
-	// Config.PreVote is true.
-	campaignPreElection CampaignType = "CampaignPreElection"
-	// campaignElection represents a normal (time-based) election (the second phase
-	// of the election when Config.PreVote is true).
-	campaignElection CampaignType = "CampaignElection"
-	// campaignTransfer represents the type of leader transfer
-	campaignTransfer CampaignType = "CampaignTransfer"
-)
-
-// lockedRand is a small wrapper around rand.Rand to provide
-// synchronization. Only the methods needed by the code are exposed
-// (e.g. Intn).
-type lockedRand struct {
-	mu   sync.Mutex
-	rand *rand.Rand
-}
-
-func (r *lockedRand) Intn(n int) int {
-	r.mu.Lock()
-	v := r.rand.Intn(n)
-	r.mu.Unlock()
-	return v
-}
-
-var globalRand = &lockedRand{
-	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
-}
-
-// CampaignType represents the type of campaigning;
-// we use a string type instead of uint64 because
-// it's simpler to compare and fill in raft entries.
-type CampaignType string
-
-// StateType represents the role of a node in a cluster.
-type StateType uint64
-
-var stmap = [...]string{
-	"StateFollower",
-	"StateCandidate",
-	"StateLeader",
-	"StatePreCandidate",
-}
-
-func (st StateType) String() string {
-	return stmap[uint64(st)]
-}
-
-// Config contains the parameters to start a raft.
-type Config struct {
-	// ID is the identity of the local raft. ID cannot be 0.
-	ID uint64
-
-	// peers contains the IDs of all nodes (including self) in the raft cluster. It
-	// should only be set when starting a new raft cluster. Restarting raft from
-	// previous configuration will panic if peers is set. peers is private and only
-	// used for testing right now.
-	peers []uint64
-
-	// learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
-	// Learners only receive entries from the leader node. They do not vote or promote themselves.
-	learners []uint64
-
-	// ElectionTick is the number of Node.Tick invocations that must pass between
-	// elections. That is, if a follower does not receive any message from the
-	// leader of current term before ElectionTick has elapsed, it will become
-	// candidate and start an election. ElectionTick must be greater than
-	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
-	// unnecessary leader switching.
-	ElectionTick int
-	// HeartbeatTick is the number of Node.Tick invocations that must pass between
-	// heartbeats. That is, a leader sends heartbeat messages to maintain its
-	// leadership every HeartbeatTick ticks.
-	HeartbeatTick int
-
-	// Storage is the storage for raft. raft generates entries and states to be
-	// stored in storage. raft reads the persisted entries and states out of
-	// Storage when it needs them, and reads the previous state and
-	// configuration out of storage when restarting.
-	Storage Storage
-	// Applied is the last applied index. It should only be set when restarting
-	// raft. raft will not return entries to the application smaller than or
-	// equal to Applied. If Applied is unset when restarting, raft might return
-	// previously applied entries. This is a very application-dependent configuration.
-	Applied uint64
-
-	// MaxSizePerMsg limits the max size of each append message. A smaller value
-	// lowers the raft recovery cost (initial probing and message loss during normal
-	// operation). On the other hand, it might affect the throughput during normal
-	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
-	// message.
-	MaxSizePerMsg uint64
-	// MaxInflightMsgs limits the max number of in-flight append messages during
-	// the optimistic replication phase. The application transportation layer usually
-	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
-	// overflowing that sending buffer. TODO (xiangli): feedback to application to
-	// limit the proposal rate?
-	MaxInflightMsgs int
-
-	// CheckQuorum specifies if the leader should check quorum activity. Leader
-	// steps down when quorum is not active for an electionTimeout.
-	CheckQuorum bool
-
-	// PreVote enables the Pre-Vote algorithm described in raft thesis section
-	// 9.6. This prevents disruption when a node that has been partitioned away
-	// rejoins the cluster.
-	PreVote bool
-
-	// ReadOnlyOption specifies how the read only request is processed.
-	//
-	// ReadOnlySafe guarantees the linearizability of the read only request by
-	// communicating with the quorum. It is the default and suggested option.
-	//
-	// ReadOnlyLeaseBased ensures linearizability of the read only request by
-	// relying on the leader lease. It can be affected by clock drift.
-	// If the clock drift is unbounded, leader might keep the lease longer than it
-	// should (clock can move backward/pause without any bound). ReadIndex is not safe
-	// in that case.
-	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
-	ReadOnlyOption ReadOnlyOption
-
-	// Logger is the logger used for the raft log. For multinode setups, which can
-	// host multiple raft groups, each raft group can have its own logger.
-	Logger Logger
-
-	// DisableProposalForwarding, when set to true, means that followers will drop
-	// proposals rather than forwarding them to the leader. One use case for
-	// this feature would be a situation where the Raft leader is used to
-	// compute the data of a proposal, for example, adding a timestamp from a
-	// hybrid logical clock to data in a monotonically increasing way. Forwarding
-	// should be disabled to prevent a follower with an inaccurate hybrid
-	// logical clock from assigning the timestamp and then forwarding the data
-	// to the leader.
-	DisableProposalForwarding bool
-}
-
-func (c *Config) validate() error {
-	if c.ID == None {
-		return errors.New("cannot use none as id")
-	}
-
-	if c.HeartbeatTick <= 0 {
-		return errors.New("heartbeat tick must be greater than 0")
-	}
-
-	if c.ElectionTick <= c.HeartbeatTick {
-		return errors.New("election tick must be greater than heartbeat tick")
-	}
-
-	if c.Storage == nil {
-		return errors.New("storage cannot be nil")
-	}
-
-	if c.MaxInflightMsgs <= 0 {
-		return errors.New("max inflight messages must be greater than 0")
-	}
-
-	if c.Logger == nil {
-		c.Logger = raftLogger
-	}
-
-	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
-		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
-	}
-
-	return nil
-}
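A configuration that passes validate() looks like the following (a sketch; raft.NewMemoryStorage, raft.StartNode, and raft.Peer come from elsewhere in this package):

storage := raft.NewMemoryStorage()
c := &raft.Config{
	ID:              0x01,
	ElectionTick:    10, // keep ElectionTick well above HeartbeatTick
	HeartbeatTick:   1,
	Storage:         storage,
	MaxSizePerMsg:   4096,
	MaxInflightMsgs: 256,
}
n := raft.StartNode(c, []raft.Peer{{ID: 0x01}})
defer n.Stop()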
-
-type raft struct {
-	id uint64
-
-	Term uint64
-	Vote uint64
-
-	readStates []ReadState
-
-	// the log
-	raftLog *raftLog
-
-	maxInflight int
-	maxMsgSize  uint64
-	prs         map[uint64]*Progress
-	learnerPrs  map[uint64]*Progress
-
-	state StateType
-
-	// isLearner is true if the local raft node is a learner.
-	isLearner bool
-
-	votes map[uint64]bool
-
-	msgs []pb.Message
-
-	// the leader id
-	lead uint64
-	// leadTransferee is the id of the leader transfer target when its value is not zero.
-	// Follow the procedure defined in raft thesis 3.10.
-	leadTransferee uint64
-	// A new configuration is ignored if there is an unapplied configuration.
-	pendingConf bool
-
-	readOnly *readOnly
-
-	// number of ticks since it reached last electionTimeout when it is leader
-	// or candidate.
-	// number of ticks since it reached last electionTimeout or received a
-	// valid message from current leader when it is a follower.
-	electionElapsed int
-
-	// number of ticks since it reached last heartbeatTimeout.
-	// only leader keeps heartbeatElapsed.
-	heartbeatElapsed int
-
-	checkQuorum bool
-	preVote     bool
-
-	heartbeatTimeout int
-	electionTimeout  int
-	// randomizedElectionTimeout is a random number between
-	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
-	// when raft changes its state to follower or candidate.
-	randomizedElectionTimeout int
-	disableProposalForwarding bool
-
-	tick func()
-	step stepFunc
-
-	logger Logger
-}
-
-func newRaft(c *Config) *raft {
-	if err := c.validate(); err != nil {
-		panic(err.Error())
-	}
-	raftlog := newLog(c.Storage, c.Logger)
-	hs, cs, err := c.Storage.InitialState()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	peers := c.peers
-	learners := c.learners
-	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
-		if len(peers) > 0 || len(learners) > 0 {
-			// TODO(bdarnell): the peers argument is always nil except in
-			// tests; the argument should be removed and these tests should be
-			// updated to specify their nodes through a snapshot.
-			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
-		}
-		peers = cs.Nodes
-		learners = cs.Learners
-	}
-	r := &raft{
-		id:                        c.ID,
-		lead:                      None,
-		isLearner:                 false,
-		raftLog:                   raftlog,
-		maxMsgSize:                c.MaxSizePerMsg,
-		maxInflight:               c.MaxInflightMsgs,
-		prs:                       make(map[uint64]*Progress),
-		learnerPrs:                make(map[uint64]*Progress),
-		electionTimeout:           c.ElectionTick,
-		heartbeatTimeout:          c.HeartbeatTick,
-		logger:                    c.Logger,
-		checkQuorum:               c.CheckQuorum,
-		preVote:                   c.PreVote,
-		readOnly:                  newReadOnly(c.ReadOnlyOption),
-		disableProposalForwarding: c.DisableProposalForwarding,
-	}
-	for _, p := range peers {
-		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
-	}
-	for _, p := range learners {
-		if _, ok := r.prs[p]; ok {
-			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
-		}
-		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
-		if r.id == p {
-			r.isLearner = true
-		}
-	}
-
-	if !isHardStateEqual(hs, emptyState) {
-		r.loadState(hs)
-	}
-	if c.Applied > 0 {
-		raftlog.appliedTo(c.Applied)
-	}
-	r.becomeFollower(r.Term, None)
-
-	var nodesStrs []string
-	for _, n := range r.nodes() {
-		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
-	}
-
-	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
-		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
-	return r
-}
-
-func (r *raft) hasLeader() bool { return r.lead != None }
-
-func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
-
-func (r *raft) hardState() pb.HardState {
-	return pb.HardState{
-		Term:   r.Term,
-		Vote:   r.Vote,
-		Commit: r.raftLog.committed,
-	}
-}
-
-func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
-
-func (r *raft) nodes() []uint64 {
-	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
-	for id := range r.prs {
-		nodes = append(nodes, id)
-	}
-	for id := range r.learnerPrs {
-		nodes = append(nodes, id)
-	}
-	sort.Sort(uint64Slice(nodes))
-	return nodes
-}
-
-// send persists state to stable storage and then sends to its mailbox.
-func (r *raft) send(m pb.Message) {
-	m.From = r.id
-	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
-		if m.Term == 0 {
-			// All {pre-,}campaign messages need to have the term set when
-			// sending.
-			// - MsgVote: m.Term is the term the node is campaigning for,
-			//   non-zero as we increment the term when campaigning.
-			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
-			//   granted, non-zero for the same reason MsgVote is
-			// - MsgPreVote: m.Term is the term the node will campaign,
-			//   non-zero as we use m.Term to indicate the next term we'll be
-			//   campaigning for
-			// - MsgPreVoteResp: m.Term is the term received in the original
-			//   MsgPreVote if the pre-vote was granted, non-zero for the
-			//   same reasons MsgPreVote is
-			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
-		}
-	} else {
-		if m.Term != 0 {
-			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
-		}
-		// do not attach term to MsgProp or MsgReadIndex:
-		// proposals are a way to forward to the leader and
-		// should be treated as local messages.
-		// MsgReadIndex is also forwarded to the leader.
-		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
-			m.Term = r.Term
-		}
-	}
-	r.msgs = append(r.msgs, m)
-}
-
-func (r *raft) getProgress(id uint64) *Progress {
-	if pr, ok := r.prs[id]; ok {
-		return pr
-	}
-
-	return r.learnerPrs[id]
-}
-
-// sendAppend sends an append RPC with entries to the given peer.
-func (r *raft) sendAppend(to uint64) {
-	pr := r.getProgress(to)
-	if pr.IsPaused() {
-		return
-	}
-	m := pb.Message{}
-	m.To = to
-
-	term, errt := r.raftLog.term(pr.Next - 1)
-	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
-
-	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
-		if !pr.RecentActive {
-			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
-			return
-		}
-
-		m.Type = pb.MsgSnap
-		snapshot, err := r.raftLog.snapshot()
-		if err != nil {
-			if err == ErrSnapshotTemporarilyUnavailable {
-				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
-				return
-			}
-			panic(err) // TODO(bdarnell)
-		}
-		if IsEmptySnap(snapshot) {
-			panic("need non-empty snapshot")
-		}
-		m.Snapshot = snapshot
-		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
-		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
-			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
-		pr.becomeSnapshot(sindex)
-		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
-	} else {
-		m.Type = pb.MsgApp
-		m.Index = pr.Next - 1
-		m.LogTerm = term
-		m.Entries = ents
-		m.Commit = r.raftLog.committed
-		if n := len(m.Entries); n != 0 {
-			switch pr.State {
-			// optimistically increase pr.Next when in ProgressStateReplicate
-			case ProgressStateReplicate:
-				last := m.Entries[n-1].Index
-				pr.optimisticUpdate(last)
-				pr.ins.add(last)
-			case ProgressStateProbe:
-				pr.pause()
-			default:
-				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
-			}
-		}
-	}
-	r.send(m)
-}
-
-// sendHeartbeat sends a MsgHeartbeat (a heartbeat RPC without entries) to the given peer.
-func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
-	// Attach the commit as min(to.matched, r.committed).
-	// When the leader sends out a heartbeat message,
-	// the receiver (follower) might not yet be matched with the leader,
-	// or it might not have all the committed entries.
-	// The leader MUST NOT forward the follower's commit to
-	// an unmatched index.
-	commit := min(r.getProgress(to).Match, r.raftLog.committed)
-	m := pb.Message{
-		To:      to,
-		Type:    pb.MsgHeartbeat,
-		Commit:  commit,
-		Context: ctx,
-	}
-
-	r.send(m)
-}
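-
-// heartbeatCommit is a minimal sketch of the clamp used above (an illustrative
-// helper, not part of the upstream file). A follower matched only up to
-// index 3 must not be told the leader's commit is 7, or commitTo would move
-// it past entries it does not yet have.
-func heartbeatCommit(followerMatch, leaderCommitted uint64) uint64 {
-	if followerMatch < leaderCommitted {
-		return followerMatch
-	}
-	return leaderCommitted
-}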
-
-func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
-	for id, pr := range r.prs {
-		f(id, pr)
-	}
-
-	for id, pr := range r.learnerPrs {
-		f(id, pr)
-	}
-}
-
-// bcastAppend sends an append RPC with entries to all peers that are not up to date
-// according to the progress recorded in r.prs.
-func (r *raft) bcastAppend() {
-	r.forEachProgress(func(id uint64, _ *Progress) {
-		if id == r.id {
-			return
-		}
-
-		r.sendAppend(id)
-	})
-}
-
-// bcastHeartbeat sends an entry-less heartbeat RPC to all peers.
-func (r *raft) bcastHeartbeat() {
-	lastCtx := r.readOnly.lastPendingRequestCtx()
-	if len(lastCtx) == 0 {
-		r.bcastHeartbeatWithCtx(nil)
-	} else {
-		r.bcastHeartbeatWithCtx([]byte(lastCtx))
-	}
-}
-
-func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
-	r.forEachProgress(func(id uint64, _ *Progress) {
-		if id == r.id {
-			return
-		}
-		r.sendHeartbeat(id, ctx)
-	})
-}
-
-// maybeCommit attempts to advance the commit index. Returns true if
-// the commit index changed (in which case the caller should call
-// r.bcastAppend).
-func (r *raft) maybeCommit() bool {
-	// TODO(bmizerany): optimize; the current implementation is naive.
-	mis := make(uint64Slice, 0, len(r.prs))
-	for _, p := range r.prs {
-		mis = append(mis, p.Match)
-	}
-	sort.Sort(sort.Reverse(mis))
-	mci := mis[r.quorum()-1]
-	return r.raftLog.maybeCommit(mci, r.Term)
-}
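-
-// commitIndexOf is a minimal sketch of the rule above (an illustrative helper,
-// not part of the upstream file; it reuses the package's uint64Slice sort
-// type). The commit index is the quorum-th highest Match: with matches
-// {5, 5, 4, 2, 1} and quorum 3, the third highest is 4, so entries up to
-// index 4 are committable once their term checks out.
-func commitIndexOf(matches []uint64, quorum int) uint64 {
-	sorted := append(uint64Slice(nil), matches...)
-	sort.Sort(sort.Reverse(sorted))
-	return sorted[quorum-1]
-}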
-
-func (r *raft) reset(term uint64) {
-	if r.Term != term {
-		r.Term = term
-		r.Vote = None
-	}
-	r.lead = None
-
-	r.electionElapsed = 0
-	r.heartbeatElapsed = 0
-	r.resetRandomizedElectionTimeout()
-
-	r.abortLeaderTransfer()
-
-	r.votes = make(map[uint64]bool)
-	r.forEachProgress(func(id uint64, pr *Progress) {
-		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
-		if id == r.id {
-			pr.Match = r.raftLog.lastIndex()
-		}
-	})
-
-	r.pendingConf = false
-	r.readOnly = newReadOnly(r.readOnly.option)
-}
-
-func (r *raft) appendEntry(es ...pb.Entry) {
-	li := r.raftLog.lastIndex()
-	for i := range es {
-		es[i].Term = r.Term
-		es[i].Index = li + 1 + uint64(i)
-	}
-	r.raftLog.append(es...)
-	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
-	// Regardless of maybeCommit's return, our caller will call bcastAppend.
-	r.maybeCommit()
-}
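-
-// Illustrative note (added for exposition, not part of the upstream file):
-// appendEntry stamps every proposed entry with the current term and
-// consecutive indexes. With lastIndex 7 at term 3, two new entries become
-// (term 3, index 8) and (term 3, index 9) before the local progress advances.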
-
-// tickElection is run by followers and candidates after r.electionTimeout.
-func (r *raft) tickElection() {
-	r.electionElapsed++
-
-	if r.promotable() && r.pastElectionTimeout() {
-		r.electionElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
-	}
-}
-
-// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
-func (r *raft) tickHeartbeat() {
-	r.heartbeatElapsed++
-	r.electionElapsed++
-
-	if r.electionElapsed >= r.electionTimeout {
-		r.electionElapsed = 0
-		if r.checkQuorum {
-			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
-		}
-		// If the current leader cannot finish a leadership transfer within one electionTimeout, it aborts the transfer and resumes normal leadership.
-		if r.state == StateLeader && r.leadTransferee != None {
-			r.abortLeaderTransfer()
-		}
-	}
-
-	if r.state != StateLeader {
-		return
-	}
-
-	if r.heartbeatElapsed >= r.heartbeatTimeout {
-		r.heartbeatElapsed = 0
-		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
-	}
-}
-
-func (r *raft) becomeFollower(term uint64, lead uint64) {
-	r.step = stepFollower
-	r.reset(term)
-	r.tick = r.tickElection
-	r.lead = lead
-	r.state = StateFollower
-	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeCandidate() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateLeader {
-		panic("invalid transition [leader -> candidate]")
-	}
-	r.step = stepCandidate
-	r.reset(r.Term + 1)
-	r.tick = r.tickElection
-	r.Vote = r.id
-	r.state = StateCandidate
-	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomePreCandidate() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateLeader {
-		panic("invalid transition [leader -> pre-candidate]")
-	}
-	// Becoming a pre-candidate changes our step functions and state,
-	// but doesn't change anything else. In particular it does not increase
-	// r.Term or change r.Vote.
-	r.step = stepCandidate
-	r.votes = make(map[uint64]bool)
-	r.tick = r.tickElection
-	r.lead = None
-	r.state = StatePreCandidate
-	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeLeader() {
-	// TODO(xiangli) remove the panic when the raft implementation is stable
-	if r.state == StateFollower {
-		panic("invalid transition [follower -> leader]")
-	}
-	r.step = stepLeader
-	r.reset(r.Term)
-	r.tick = r.tickHeartbeat
-	r.lead = r.id
-	r.state = StateLeader
-	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
-	if err != nil {
-		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
-	}
-
-	nconf := numOfPendingConf(ents)
-	if nconf > 1 {
-		panic("unexpected multiple uncommitted config entry")
-	}
-	if nconf == 1 {
-		r.pendingConf = true
-	}
-
-	r.appendEntry(pb.Entry{Data: nil})
-	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
-}
-
-func (r *raft) campaign(t CampaignType) {
-	var term uint64
-	var voteMsg pb.MessageType
-	if t == campaignPreElection {
-		r.becomePreCandidate()
-		voteMsg = pb.MsgPreVote
-		// PreVote RPCs are sent for the next term before we've incremented r.Term.
-		term = r.Term + 1
-	} else {
-		r.becomeCandidate()
-		voteMsg = pb.MsgVote
-		term = r.Term
-	}
-	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
-		// We won the election after voting for ourselves (which must mean that
-		// this is a single-node cluster). Advance to the next state.
-		if t == campaignPreElection {
-			r.campaign(campaignElection)
-		} else {
-			r.becomeLeader()
-		}
-		return
-	}
-	for id := range r.prs {
-		if id == r.id {
-			continue
-		}
-		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
-			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
-
-		var ctx []byte
-		if t == campaignTransfer {
-			ctx = []byte(t)
-		}
-		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
-	}
-}
-
-func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
-	if v {
-		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
-	} else {
-		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
-	}
-	if _, ok := r.votes[id]; !ok {
-		r.votes[id] = v
-	}
-	for _, vv := range r.votes {
-		if vv {
-			granted++
-		}
-	}
-	return granted
-}
-
-func (r *raft) Step(m pb.Message) error {
-	// Handle the message term, which may result in our stepping down to a follower.
-	switch {
-	case m.Term == 0:
-		// local message
-	case m.Term > r.Term:
-		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
-			force := bytes.Equal(m.Context, []byte(campaignTransfer))
-			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
-			if !force && inLease {
-				// If a server receives a RequestVote request within the minimum election timeout
-				// of hearing from a current leader, it does not update its term or grant its vote
-				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
-					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
-				return nil
-			}
-		}
-		switch {
-		case m.Type == pb.MsgPreVote:
-			// Never change our term in response to a PreVote
-		case m.Type == pb.MsgPreVoteResp && !m.Reject:
-			// We send pre-vote requests with a term in our future. If the
-			// pre-vote is granted, we will increment our term when we get a
-			// quorum. If it is not, the term comes from the node that
-			// rejected our vote so we should become a follower at the new
-			// term.
-		default:
-			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
-				r.id, r.Term, m.Type, m.From, m.Term)
-			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
-				r.becomeFollower(m.Term, m.From)
-			} else {
-				r.becomeFollower(m.Term, None)
-			}
-		}
-
-	case m.Term < r.Term:
-		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
-			// We have received messages from a leader at a lower term. It is possible
-			// that these messages were simply delayed in the network, but this could
-			// also mean that this node has advanced its term number during a network
-			// partition, and it is now unable to either win an election or to rejoin
-			// the majority on the old term. If checkQuorum is false, this will be
-			// handled by incrementing term numbers in response to MsgVote with a
-			// higher term, but if checkQuorum is true we may not advance the term on
-			// MsgVote and must generate other messages to advance the term. The net
-			// result of these two features is to minimize the disruption caused by
-			// nodes that have been removed from the cluster's configuration: a
-			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
-			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
-			// disruptive term increases.
-			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
-		} else {
-			// ignore other cases
-			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
-				r.id, r.Term, m.Type, m.From, m.Term)
-		}
-		return nil
-	}
-
-	switch m.Type {
-	case pb.MsgHup:
-		if r.state != StateLeader {
-			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
-			if err != nil {
-				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
-			}
-			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
-				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
-				return nil
-			}
-
-			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
-			if r.preVote {
-				r.campaign(campaignPreElection)
-			} else {
-				r.campaign(campaignElection)
-			}
-		} else {
-			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
-		}
-
-	case pb.MsgVote, pb.MsgPreVote:
-		if r.isLearner {
-			// TODO: learners may need to vote, in case a voter goes down during a confchange.
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			return nil
-		}
-		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
-		// always equal r.Term.
-		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			// When responding to Msg{Pre,}Vote messages we include the term
-			// from the message, not the local term. To see why consider the
-			// case where a single node was previously partitioned away and
-			// its local term is now out of date. If we include the local term
-			// (recall that for pre-votes we don't update the local term), the
-			// (pre-)campaigning node on the other end will proceed to ignore
-			// the message (it ignores all out of date messages).
-			// The term in the original message and current local term are the
-			// same in the case of regular votes, but different for pre-votes.
-			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
-			if m.Type == pb.MsgVote {
-				// Only record real votes.
-				r.electionElapsed = 0
-				r.Vote = m.From
-			}
-		} else {
-			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
-				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
-			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
-		}
-
-	default:
-		r.step(r, m)
-	}
-	return nil
-}
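-
-// Illustrative note (added for exposition, not part of the upstream file):
-// the term switch above means a message with m.Term == 0 is always local, a
-// higher term usually demotes this node to follower before the type switch
-// runs, and a lower term is dropped (or answered with an empty MsgAppResp
-// under checkQuorum so a deposed leader can learn about the newer term).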
-
-type stepFunc func(r *raft, m pb.Message)
-
-func stepLeader(r *raft, m pb.Message) {
-	// These message types do not require any progress for m.From.
-	switch m.Type {
-	case pb.MsgBeat:
-		r.bcastHeartbeat()
-		return
-	case pb.MsgCheckQuorum:
-		if !r.checkQuorumActive() {
-			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
-			r.becomeFollower(r.Term, None)
-		}
-		return
-	case pb.MsgProp:
-		if len(m.Entries) == 0 {
-			r.logger.Panicf("%x stepped empty MsgProp", r.id)
-		}
-		if _, ok := r.prs[r.id]; !ok {
-			// If we are not currently a member of the cluster (i.e. this node
-			// was removed from the configuration while serving as leader),
-			// drop any new proposals.
-			return
-		}
-		if r.leadTransferee != None {
-			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
-			return
-		}
-
-		for i, e := range m.Entries {
-			if e.Type == pb.EntryConfChange {
-				if r.pendingConf {
-					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
-					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
-				}
-				r.pendingConf = true
-			}
-		}
-		r.appendEntry(m.Entries...)
-		r.bcastAppend()
-		return
-	case pb.MsgReadIndex:
-		if r.quorum() > 1 {
-			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
-				// Reject the read-only request when this leader has not committed any log entry in its term.
-				return
-			}
-
-			// thinking: use an internally defined context instead of the user-given context.
-			// We can express this in terms of the term and index instead of a user-supplied value.
-			// This would allow multiple reads to piggyback on the same message.
-			switch r.readOnly.option {
-			case ReadOnlySafe:
-				r.readOnly.addRequest(r.raftLog.committed, m)
-				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
-			case ReadOnlyLeaseBased:
-				ri := r.raftLog.committed
-				if m.From == None || m.From == r.id { // from local member
-					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
-				} else {
-					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
-				}
-			}
-		} else {
-			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
-		}
-
-		return
-	}
-
-	// All other message types require a progress for m.From (pr).
-	pr := r.getProgress(m.From)
-	if pr == nil {
-		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
-		return
-	}
-	switch m.Type {
-	case pb.MsgAppResp:
-		pr.RecentActive = true
-
-		if m.Reject {
-			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
-				r.id, m.RejectHint, m.From, m.Index)
-			if pr.maybeDecrTo(m.Index, m.RejectHint) {
-				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
-				if pr.State == ProgressStateReplicate {
-					pr.becomeProbe()
-				}
-				r.sendAppend(m.From)
-			}
-		} else {
-			oldPaused := pr.IsPaused()
-			if pr.maybeUpdate(m.Index) {
-				switch {
-				case pr.State == ProgressStateProbe:
-					pr.becomeReplicate()
-				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
-					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-					pr.becomeProbe()
-				case pr.State == ProgressStateReplicate:
-					pr.ins.freeTo(m.Index)
-				}
-
-				if r.maybeCommit() {
-					r.bcastAppend()
-				} else if oldPaused {
-					// update() reset the wait state on this node. If we had delayed sending
-					// an update before, send it now.
-					r.sendAppend(m.From)
-				}
-				// Transfer leadership is in progress.
-				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
-					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
-					r.sendTimeoutNow(m.From)
-				}
-			}
-		}
-	case pb.MsgHeartbeatResp:
-		pr.RecentActive = true
-		pr.resume()
-
-		// Free one slot from the full inflights window to allow progress.
-		if pr.State == ProgressStateReplicate && pr.ins.full() {
-			pr.ins.freeFirstOne()
-		}
-		if pr.Match < r.raftLog.lastIndex() {
-			r.sendAppend(m.From)
-		}
-
-		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
-			return
-		}
-
-		ackCount := r.readOnly.recvAck(m)
-		if ackCount < r.quorum() {
-			return
-		}
-
-		rss := r.readOnly.advance(m)
-		for _, rs := range rss {
-			req := rs.req
-			if req.From == None || req.From == r.id { // from local member
-				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
-			} else {
-				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
-			}
-		}
-	case pb.MsgSnapStatus:
-		if pr.State != ProgressStateSnapshot {
-			return
-		}
-		if !m.Reject {
-			pr.becomeProbe()
-			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-		} else {
-			pr.snapshotFailure()
-			pr.becomeProbe()
-			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
-		}
-		// If the snapshot finished, wait for the MsgAppResp from the remote node before
-		// sending out the next MsgApp.
-		// If the snapshot failed, wait for a heartbeat interval before the next attempt.
-		pr.pause()
-	case pb.MsgUnreachable:
-		// During optimistic replication, if the remote becomes unreachable,
-		// it is very likely that a MsgApp was lost.
-		if pr.State == ProgressStateReplicate {
-			pr.becomeProbe()
-		}
-		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
-	case pb.MsgTransferLeader:
-		if pr.IsLearner {
-			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
-			return
-		}
-		leadTransferee := m.From
-		lastLeadTransferee := r.leadTransferee
-		if lastLeadTransferee != None {
-			if lastLeadTransferee == leadTransferee {
-				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
-					r.id, r.Term, leadTransferee, leadTransferee)
-				return
-			}
-			r.abortLeaderTransfer()
-			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
-		}
-		if leadTransferee == r.id {
-			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
-			return
-		}
-		// Transfer leadership to a third party.
-		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
-		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
-		r.electionElapsed = 0
-		r.leadTransferee = leadTransferee
-		if pr.Match == r.raftLog.lastIndex() {
-			r.sendTimeoutNow(leadTransferee)
-			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
-		} else {
-			r.sendAppend(leadTransferee)
-		}
-	}
-}
-
-// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
-// whether they respond to MsgVoteResp or MsgPreVoteResp.
-func stepCandidate(r *raft, m pb.Message) {
-	// Only handle vote responses corresponding to our candidacy (while in
-	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
-	// our pre-candidate state).
-	var myVoteRespType pb.MessageType
-	if r.state == StatePreCandidate {
-		myVoteRespType = pb.MsgPreVoteResp
-	} else {
-		myVoteRespType = pb.MsgVoteResp
-	}
-	switch m.Type {
-	case pb.MsgProp:
-		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
-		return
-	case pb.MsgApp:
-		r.becomeFollower(r.Term, m.From)
-		r.handleAppendEntries(m)
-	case pb.MsgHeartbeat:
-		r.becomeFollower(r.Term, m.From)
-		r.handleHeartbeat(m)
-	case pb.MsgSnap:
-		r.becomeFollower(m.Term, m.From)
-		r.handleSnapshot(m)
-	case myVoteRespType:
-		gr := r.poll(m.From, m.Type, !m.Reject)
-		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
-		switch r.quorum() {
-		case gr:
-			if r.state == StatePreCandidate {
-				r.campaign(campaignElection)
-			} else {
-				r.becomeLeader()
-				r.bcastAppend()
-			}
-		case len(r.votes) - gr:
-			r.becomeFollower(r.Term, None)
-		}
-	case pb.MsgTimeoutNow:
-		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
-	}
-}
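-
-// electionOutcome is a minimal sketch of the tally in stepCandidate above (an
-// illustrative helper, not part of the upstream file). With quorum 3, the
-// election is won once grants reach 3 and lost once rejections
-// (votes - granted) reach 3; anything else stays pending.
-func electionOutcome(granted, votes, quorum int) string {
-	switch {
-	case granted >= quorum:
-		return "won"
-	case votes-granted >= quorum:
-		return "lost"
-	}
-	return "pending"
-}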
-
-func stepFollower(r *raft, m pb.Message) {
-	switch m.Type {
-	case pb.MsgProp:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
-			return
-		} else if r.disableProposalForwarding {
-			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgApp:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleAppendEntries(m)
-	case pb.MsgHeartbeat:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleHeartbeat(m)
-	case pb.MsgSnap:
-		r.electionElapsed = 0
-		r.lead = m.From
-		r.handleSnapshot(m)
-	case pb.MsgTransferLeader:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgTimeoutNow:
-		if r.promotable() {
-			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
-			// Leadership transfers never use pre-vote even if r.preVote is true; we
-			// know we are not recovering from a partition so there is no need for the
-			// extra round trip.
-			r.campaign(campaignTransfer)
-		} else {
-			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
-		}
-	case pb.MsgReadIndex:
-		if r.lead == None {
-			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
-			return
-		}
-		m.To = r.lead
-		r.send(m)
-	case pb.MsgReadIndexResp:
-		if len(m.Entries) != 1 {
-			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
-			return
-		}
-		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
-	}
-}
-
-func (r *raft) handleAppendEntries(m pb.Message) {
-	if m.Index < r.raftLog.committed {
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
-		return
-	}
-
-	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
-	} else {
-		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
-			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
-	}
-}
-
-func (r *raft) handleHeartbeat(m pb.Message) {
-	r.raftLog.commitTo(m.Commit)
-	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
-}
-
-func (r *raft) handleSnapshot(m pb.Message) {
-	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
-	if r.restore(m.Snapshot) {
-		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, sindex, sterm)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
-	} else {
-		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, sindex, sterm)
-		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
-	}
-}
-
-// restore recovers the state machine from a snapshot. It restores the log and the
-// configuration of the state machine.
-func (r *raft) restore(s pb.Snapshot) bool {
-	if s.Metadata.Index <= r.raftLog.committed {
-		return false
-	}
-	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
-		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
-			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-		r.raftLog.commitTo(s.Metadata.Index)
-		return false
-	}
-
-	// A normal (voting) peer can't become a learner.
-	if !r.isLearner {
-		for _, id := range s.Metadata.ConfState.Learners {
-			if id == r.id {
-				r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
-				return false
-			}
-		}
-	}
-
-	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
-		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-
-	r.raftLog.restore(s)
-	r.prs = make(map[uint64]*Progress)
-	r.learnerPrs = make(map[uint64]*Progress)
-	r.restoreNode(s.Metadata.ConfState.Nodes, false)
-	r.restoreNode(s.Metadata.ConfState.Learners, true)
-	return true
-}
-
-func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
-	for _, n := range nodes {
-		match, next := uint64(0), r.raftLog.lastIndex()+1
-		if n == r.id {
-			match = next - 1
-			r.isLearner = isLearner
-		}
-		r.setProgress(n, match, next, isLearner)
-		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
-	}
-}
-
-// promotable indicates whether the state machine can be promoted to leader,
-// which is true when its own id is in the progress list.
-func (r *raft) promotable() bool {
-	_, ok := r.prs[r.id]
-	return ok
-}
-
-func (r *raft) addNode(id uint64) {
-	r.addNodeOrLearnerNode(id, false)
-}
-
-func (r *raft) addLearner(id uint64) {
-	r.addNodeOrLearnerNode(id, true)
-}
-
-func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
-	r.pendingConf = false
-	pr := r.getProgress(id)
-	if pr == nil {
-		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
-	} else {
-		if isLearner && !pr.IsLearner {
-			// can only change Learner to Voter
-			r.logger.Infof("%x ignored addLeaner: do not support changing %x from raft peer to learner.", r.id, id)
-			return
-		}
-
-		if isLearner == pr.IsLearner {
-			// Ignore any redundant addNode calls (which can happen because the
-			// initial bootstrapping entries are applied twice).
-			return
-		}
-
-		// change Learner to Voter, reusing the original Learner progress
-		delete(r.learnerPrs, id)
-		pr.IsLearner = false
-		r.prs[id] = pr
-	}
-
-	if r.id == id {
-		r.isLearner = isLearner
-	}
-
-	// When a node is first added, we should mark it as recently active.
-	// Otherwise, CheckQuorum may cause us to step down if it is invoked
-	// before the added node has a chance to communicate with us.
-	pr = r.getProgress(id)
-	pr.RecentActive = true
-}
-
-func (r *raft) removeNode(id uint64) {
-	r.delProgress(id)
-	r.pendingConf = false
-
-	// Do not try to commit or abort a transfer if there are no nodes left in the cluster.
-	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
-		return
-	}
-
-	// The quorum size is now smaller, so see if any pending entries can
-	// be committed.
-	if r.maybeCommit() {
-		r.bcastAppend()
-	}
-	// If the removed node is the leadTransferee, abort the leadership transfer.
-	if r.state == StateLeader && r.leadTransferee == id {
-		r.abortLeaderTransfer()
-	}
-}
-
-func (r *raft) resetPendingConf() { r.pendingConf = false }
-
-func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
-	if !isLearner {
-		delete(r.learnerPrs, id)
-		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
-		return
-	}
-
-	if _, ok := r.prs[id]; ok {
-		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
-	}
-	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
-}
-
-func (r *raft) delProgress(id uint64) {
-	delete(r.prs, id)
-	delete(r.learnerPrs, id)
-}
-
-func (r *raft) loadState(state pb.HardState) {
-	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
-		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
-	}
-	r.raftLog.committed = state.Commit
-	r.Term = state.Term
-	r.Vote = state.Vote
-}
-
-// pastElectionTimeout returns true iff r.electionElapsed is greater
-// than or equal to the randomized election timeout in
-// [electiontimeout, 2 * electiontimeout - 1].
-func (r *raft) pastElectionTimeout() bool {
-	return r.electionElapsed >= r.randomizedElectionTimeout
-}
-
-func (r *raft) resetRandomizedElectionTimeout() {
-	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
-}
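-
-// Illustrative note (added for exposition, not part of the upstream file):
-// with electionTimeout = 10 ticks, globalRand.Intn(10) is uniform over 0..9,
-// so the randomized timeout is uniform over [10, 19], matching the documented
-// range [electiontimeout, 2*electiontimeout - 1].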
-
-// checkQuorumActive returns true if the quorum is active from
-// the view of the local raft state machine. Otherwise, it returns
-// false.
-// checkQuorumActive also resets all RecentActive to false.
-func (r *raft) checkQuorumActive() bool {
-	var act int
-
-	r.forEachProgress(func(id uint64, pr *Progress) {
-		if id == r.id { // self is always active
-			act++
-			return
-		}
-
-		if pr.RecentActive && !pr.IsLearner {
-			act++
-		}
-
-		pr.RecentActive = false
-	})
-
-	return act >= r.quorum()
-}
-
-func (r *raft) sendTimeoutNow(to uint64) {
-	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
-}
-
-func (r *raft) abortLeaderTransfer() {
-	r.leadTransferee = None
-}
-
-func numOfPendingConf(ents []pb.Entry) int {
-	n := 0
-	for i := range ents {
-		if ents[i].Type == pb.EntryConfChange {
-			n++
-		}
-	}
-	return n
-}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
deleted file mode 100644
index 753bd84..0000000
--- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
+++ /dev/null
@@ -1,2365 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft.proto
-
-package raftpb
-
-import (
-	fmt "fmt"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type EntryType int32
-
-const (
-	EntryNormal     EntryType = 0
-	EntryConfChange EntryType = 1
-)
-
-var EntryType_name = map[int32]string{
-	0: "EntryNormal",
-	1: "EntryConfChange",
-}
-
-var EntryType_value = map[string]int32{
-	"EntryNormal":     0,
-	"EntryConfChange": 1,
-}
-
-func (x EntryType) Enum() *EntryType {
-	p := new(EntryType)
-	*p = x
-	return p
-}
-
-func (x EntryType) String() string {
-	return proto.EnumName(EntryType_name, int32(x))
-}
-
-func (x *EntryType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
-	if err != nil {
-		return err
-	}
-	*x = EntryType(value)
-	return nil
-}
-
-func (EntryType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{0}
-}
-
-type MessageType int32
-
-const (
-	MsgHup            MessageType = 0
-	MsgBeat           MessageType = 1
-	MsgProp           MessageType = 2
-	MsgApp            MessageType = 3
-	MsgAppResp        MessageType = 4
-	MsgVote           MessageType = 5
-	MsgVoteResp       MessageType = 6
-	MsgSnap           MessageType = 7
-	MsgHeartbeat      MessageType = 8
-	MsgHeartbeatResp  MessageType = 9
-	MsgUnreachable    MessageType = 10
-	MsgSnapStatus     MessageType = 11
-	MsgCheckQuorum    MessageType = 12
-	MsgTransferLeader MessageType = 13
-	MsgTimeoutNow     MessageType = 14
-	MsgReadIndex      MessageType = 15
-	MsgReadIndexResp  MessageType = 16
-	MsgPreVote        MessageType = 17
-	MsgPreVoteResp    MessageType = 18
-)
-
-var MessageType_name = map[int32]string{
-	0:  "MsgHup",
-	1:  "MsgBeat",
-	2:  "MsgProp",
-	3:  "MsgApp",
-	4:  "MsgAppResp",
-	5:  "MsgVote",
-	6:  "MsgVoteResp",
-	7:  "MsgSnap",
-	8:  "MsgHeartbeat",
-	9:  "MsgHeartbeatResp",
-	10: "MsgUnreachable",
-	11: "MsgSnapStatus",
-	12: "MsgCheckQuorum",
-	13: "MsgTransferLeader",
-	14: "MsgTimeoutNow",
-	15: "MsgReadIndex",
-	16: "MsgReadIndexResp",
-	17: "MsgPreVote",
-	18: "MsgPreVoteResp",
-}
-
-var MessageType_value = map[string]int32{
-	"MsgHup":            0,
-	"MsgBeat":           1,
-	"MsgProp":           2,
-	"MsgApp":            3,
-	"MsgAppResp":        4,
-	"MsgVote":           5,
-	"MsgVoteResp":       6,
-	"MsgSnap":           7,
-	"MsgHeartbeat":      8,
-	"MsgHeartbeatResp":  9,
-	"MsgUnreachable":    10,
-	"MsgSnapStatus":     11,
-	"MsgCheckQuorum":    12,
-	"MsgTransferLeader": 13,
-	"MsgTimeoutNow":     14,
-	"MsgReadIndex":      15,
-	"MsgReadIndexResp":  16,
-	"MsgPreVote":        17,
-	"MsgPreVoteResp":    18,
-}
-
-func (x MessageType) Enum() *MessageType {
-	p := new(MessageType)
-	*p = x
-	return p
-}
-
-func (x MessageType) String() string {
-	return proto.EnumName(MessageType_name, int32(x))
-}
-
-func (x *MessageType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
-	if err != nil {
-		return err
-	}
-	*x = MessageType(value)
-	return nil
-}
-
-func (MessageType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{1}
-}
-
-type ConfChangeType int32
-
-const (
-	ConfChangeAddNode        ConfChangeType = 0
-	ConfChangeRemoveNode     ConfChangeType = 1
-	ConfChangeUpdateNode     ConfChangeType = 2
-	ConfChangeAddLearnerNode ConfChangeType = 3
-)
-
-var ConfChangeType_name = map[int32]string{
-	0: "ConfChangeAddNode",
-	1: "ConfChangeRemoveNode",
-	2: "ConfChangeUpdateNode",
-	3: "ConfChangeAddLearnerNode",
-}
-
-var ConfChangeType_value = map[string]int32{
-	"ConfChangeAddNode":        0,
-	"ConfChangeRemoveNode":     1,
-	"ConfChangeUpdateNode":     2,
-	"ConfChangeAddLearnerNode": 3,
-}
-
-func (x ConfChangeType) Enum() *ConfChangeType {
-	p := new(ConfChangeType)
-	*p = x
-	return p
-}
-
-func (x ConfChangeType) String() string {
-	return proto.EnumName(ConfChangeType_name, int32(x))
-}
-
-func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
-	value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
-	if err != nil {
-		return err
-	}
-	*x = ConfChangeType(value)
-	return nil
-}
-
-func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{2}
-}
-
-type Entry struct {
-	Term                 uint64    `protobuf:"varint,2,opt,name=Term" json:"Term"`
-	Index                uint64    `protobuf:"varint,3,opt,name=Index" json:"Index"`
-	Type                 EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
-	Data                 []byte    `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *Entry) Reset()         { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage()    {}
-func (*Entry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{0}
-}
-func (m *Entry) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Entry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Entry.Merge(m, src)
-}
-func (m *Entry) XXX_Size() int {
-	return m.Size()
-}
-func (m *Entry) XXX_DiscardUnknown() {
-	xxx_messageInfo_Entry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Entry proto.InternalMessageInfo
-
-type SnapshotMetadata struct {
-	ConfState            ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
-	Index                uint64    `protobuf:"varint,2,opt,name=index" json:"index"`
-	Term                 uint64    `protobuf:"varint,3,opt,name=term" json:"term"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
-}
-
-func (m *SnapshotMetadata) Reset()         { *m = SnapshotMetadata{} }
-func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
-func (*SnapshotMetadata) ProtoMessage()    {}
-func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{1}
-}
-func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_SnapshotMetadata.Merge(m, src)
-}
-func (m *SnapshotMetadata) XXX_Size() int {
-	return m.Size()
-}
-func (m *SnapshotMetadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
-
-type Snapshot struct {
-	Data                 []byte           `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
-	Metadata             SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
-}
-
-func (m *Snapshot) Reset()         { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage()    {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{2}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Snapshot) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Snapshot.Merge(m, src)
-}
-func (m *Snapshot) XXX_Size() int {
-	return m.Size()
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
-	xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-type Message struct {
-	Type                 MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
-	To                   uint64      `protobuf:"varint,2,opt,name=to" json:"to"`
-	From                 uint64      `protobuf:"varint,3,opt,name=from" json:"from"`
-	Term                 uint64      `protobuf:"varint,4,opt,name=term" json:"term"`
-	LogTerm              uint64      `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
-	Index                uint64      `protobuf:"varint,6,opt,name=index" json:"index"`
-	Entries              []Entry     `protobuf:"bytes,7,rep,name=entries" json:"entries"`
-	Commit               uint64      `protobuf:"varint,8,opt,name=commit" json:"commit"`
-	Snapshot             Snapshot    `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
-	Reject               bool        `protobuf:"varint,10,opt,name=reject" json:"reject"`
-	RejectHint           uint64      `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
-	Context              []byte      `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
-	XXX_unrecognized     []byte      `json:"-"`
-	XXX_sizecache        int32       `json:"-"`
-}
-
-func (m *Message) Reset()         { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage()    {}
-func (*Message) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{3}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *Message) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Message.Merge(m, src)
-}
-func (m *Message) XXX_Size() int {
-	return m.Size()
-}
-func (m *Message) XXX_DiscardUnknown() {
-	xxx_messageInfo_Message.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-type HardState struct {
-	Term                 uint64   `protobuf:"varint,1,opt,name=term" json:"term"`
-	Vote                 uint64   `protobuf:"varint,2,opt,name=vote" json:"vote"`
-	Commit               uint64   `protobuf:"varint,3,opt,name=commit" json:"commit"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *HardState) Reset()         { *m = HardState{} }
-func (m *HardState) String() string { return proto.CompactTextString(m) }
-func (*HardState) ProtoMessage()    {}
-func (*HardState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{4}
-}
-func (m *HardState) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *HardState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_HardState.Merge(m, src)
-}
-func (m *HardState) XXX_Size() int {
-	return m.Size()
-}
-func (m *HardState) XXX_DiscardUnknown() {
-	xxx_messageInfo_HardState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HardState proto.InternalMessageInfo
-
-type ConfState struct {
-	Nodes                []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
-	Learners             []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
-}
-
-func (m *ConfState) Reset()         { *m = ConfState{} }
-func (m *ConfState) String() string { return proto.CompactTextString(m) }
-func (*ConfState) ProtoMessage()    {}
-func (*ConfState) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{5}
-}
-func (m *ConfState) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ConfState) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ConfState.Merge(m, src)
-}
-func (m *ConfState) XXX_Size() int {
-	return m.Size()
-}
-func (m *ConfState) XXX_DiscardUnknown() {
-	xxx_messageInfo_ConfState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfState proto.InternalMessageInfo
-
-type ConfChange struct {
-	ID                   uint64         `protobuf:"varint,1,opt,name=ID" json:"ID"`
-	Type                 ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
-	NodeID               uint64         `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
-	Context              []byte         `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
-	XXX_unrecognized     []byte         `json:"-"`
-	XXX_sizecache        int32          `json:"-"`
-}
-
-func (m *ConfChange) Reset()         { *m = ConfChange{} }
-func (m *ConfChange) String() string { return proto.CompactTextString(m) }
-func (*ConfChange) ProtoMessage()    {}
-func (*ConfChange) Descriptor() ([]byte, []int) {
-	return fileDescriptor_b042552c306ae59b, []int{6}
-}
-func (m *ConfChange) XXX_Unmarshal(b []byte) error {
-	return m.Unmarshal(b)
-}
-func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	if deterministic {
-		return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
-	} else {
-		b = b[:cap(b)]
-		n, err := m.MarshalToSizedBuffer(b)
-		if err != nil {
-			return nil, err
-		}
-		return b[:n], nil
-	}
-}
-func (m *ConfChange) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ConfChange.Merge(m, src)
-}
-func (m *ConfChange) XXX_Size() int {
-	return m.Size()
-}
-func (m *ConfChange) XXX_DiscardUnknown() {
-	xxx_messageInfo_ConfChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfChange proto.InternalMessageInfo
-
-func init() {
-	proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
-	proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
-	proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
-	proto.RegisterType((*Entry)(nil), "raftpb.Entry")
-	proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
-	proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
-	proto.RegisterType((*Message)(nil), "raftpb.Message")
-	proto.RegisterType((*HardState)(nil), "raftpb.HardState")
-	proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
-	proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
-}
-
-func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
-
-var fileDescriptor_b042552c306ae59b = []byte{
-	// 816 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
-	0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
-	0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
-	0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
-	0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
-	0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9,
-	0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
-	0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a,
-	0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68,
-	0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2,
-	0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77,
-	0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd,
-	0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0,
-	0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d,
-	0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7,
-	0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf,
-	0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1,
-	0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21,
-	0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac,
-	0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41,
-	0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a,
-	0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda,
-	0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72,
-	0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb,
-	0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f,
-	0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6,
-	0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac,
-	0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5,
-	0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72,
-	0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73,
-	0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4,
-	0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa,
-	0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b,
-	0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b,
-	0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36,
-	0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19,
-	0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6,
-	0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2,
-	0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8,
-	0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5,
-	0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7,
-	0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb,
-	0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24,
-	0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf,
-	0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3,
-	0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50,
-	0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78,
-	0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01,
-	0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f,
-	0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf,
-	0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00,
-}
-
-func (m *Entry) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Data != nil {
-		i -= len(m.Data)
-		copy(dAtA[i:], m.Data)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
-		i--
-		dAtA[i] = 0x22
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
-	i--
-	dAtA[i] = 0x10
-	{
-		size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintRaft(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0xa
-	return len(dAtA) - i, nil
-}
-
-func (m *Snapshot) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	{
-		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintRaft(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x12
-	if m.Data != nil {
-		i -= len(m.Data)
-		copy(dAtA[i:], m.Data)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
-		i--
-		dAtA[i] = 0xa
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *Message) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *Message) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Context != nil {
-		i -= len(m.Context)
-		copy(dAtA[i:], m.Context)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
-		i--
-		dAtA[i] = 0x62
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
-	i--
-	dAtA[i] = 0x58
-	i--
-	if m.Reject {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i--
-	dAtA[i] = 0x50
-	{
-		size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
-		if err != nil {
-			return 0, err
-		}
-		i -= size
-		i = encodeVarintRaft(dAtA, i, uint64(size))
-	}
-	i--
-	dAtA[i] = 0x4a
-	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
-	i--
-	dAtA[i] = 0x40
-	if len(m.Entries) > 0 {
-		for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
-			{
-				size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
-				if err != nil {
-					return 0, err
-				}
-				i -= size
-				i = encodeVarintRaft(dAtA, i, uint64(size))
-			}
-			i--
-			dAtA[i] = 0x3a
-		}
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
-	i--
-	dAtA[i] = 0x30
-	i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
-	i--
-	dAtA[i] = 0x28
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x20
-	i = encodeVarintRaft(dAtA, i, uint64(m.From))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.To))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *HardState) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func (m *ConfState) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if len(m.Learners) > 0 {
-		for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
-			i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
-			i--
-			dAtA[i] = 0x10
-		}
-	}
-	if len(m.Nodes) > 0 {
-		for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
-			i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx]))
-			i--
-			dAtA[i] = 0x8
-		}
-	}
-	return len(dAtA) - i, nil
-}
-
-func (m *ConfChange) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalToSizedBuffer(dAtA[:size])
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
-	size := m.Size()
-	return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
-	i := len(dAtA)
-	_ = i
-	var l int
-	_ = l
-	if m.XXX_unrecognized != nil {
-		i -= len(m.XXX_unrecognized)
-		copy(dAtA[i:], m.XXX_unrecognized)
-	}
-	if m.Context != nil {
-		i -= len(m.Context)
-		copy(dAtA[i:], m.Context)
-		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
-		i--
-		dAtA[i] = 0x22
-	}
-	i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
-	i--
-	dAtA[i] = 0x18
-	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
-	i--
-	dAtA[i] = 0x10
-	i = encodeVarintRaft(dAtA, i, uint64(m.ID))
-	i--
-	dAtA[i] = 0x8
-	return len(dAtA) - i, nil
-}
-
-func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
-	offset -= sovRaft(v)
-	base := offset
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return base
-}
-func (m *Entry) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.Type))
-	n += 1 + sovRaft(uint64(m.Term))
-	n += 1 + sovRaft(uint64(m.Index))
-	if m.Data != nil {
-		l = len(m.Data)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *SnapshotMetadata) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	l = m.ConfState.Size()
-	n += 1 + l + sovRaft(uint64(l))
-	n += 1 + sovRaft(uint64(m.Index))
-	n += 1 + sovRaft(uint64(m.Term))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Snapshot) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if m.Data != nil {
-		l = len(m.Data)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	l = m.Metadata.Size()
-	n += 1 + l + sovRaft(uint64(l))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *Message) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.Type))
-	n += 1 + sovRaft(uint64(m.To))
-	n += 1 + sovRaft(uint64(m.From))
-	n += 1 + sovRaft(uint64(m.Term))
-	n += 1 + sovRaft(uint64(m.LogTerm))
-	n += 1 + sovRaft(uint64(m.Index))
-	if len(m.Entries) > 0 {
-		for _, e := range m.Entries {
-			l = e.Size()
-			n += 1 + l + sovRaft(uint64(l))
-		}
-	}
-	n += 1 + sovRaft(uint64(m.Commit))
-	l = m.Snapshot.Size()
-	n += 1 + l + sovRaft(uint64(l))
-	n += 2
-	n += 1 + sovRaft(uint64(m.RejectHint))
-	if m.Context != nil {
-		l = len(m.Context)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *HardState) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.Term))
-	n += 1 + sovRaft(uint64(m.Vote))
-	n += 1 + sovRaft(uint64(m.Commit))
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *ConfState) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	if len(m.Nodes) > 0 {
-		for _, e := range m.Nodes {
-			n += 1 + sovRaft(uint64(e))
-		}
-	}
-	if len(m.Learners) > 0 {
-		for _, e := range m.Learners {
-			n += 1 + sovRaft(uint64(e))
-		}
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func (m *ConfChange) Size() (n int) {
-	if m == nil {
-		return 0
-	}
-	var l int
-	_ = l
-	n += 1 + sovRaft(uint64(m.ID))
-	n += 1 + sovRaft(uint64(m.Type))
-	n += 1 + sovRaft(uint64(m.NodeID))
-	if m.Context != nil {
-		l = len(m.Context)
-		n += 1 + l + sovRaft(uint64(l))
-	}
-	if m.XXX_unrecognized != nil {
-		n += len(m.XXX_unrecognized)
-	}
-	return n
-}
-
-func sovRaft(x uint64) (n int) {
-	return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaft(x uint64) (n int) {
-	return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Entry) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= EntryType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
-			}
-			m.Index = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Index |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
-			if m.Data == nil {
-				m.Data = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
-			}
-			m.Index = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Index |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Snapshot) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
-			if m.Data == nil {
-				m.Data = []byte{}
-			}
-			iNdEx = postIndex
-		case 2:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *Message) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: Message: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= MessageType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
-			}
-			m.To = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.To |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
-			}
-			m.From = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.From |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 5:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
-			}
-			m.LogTerm = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.LogTerm |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 6:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
-			}
-			m.Index = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Index |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 7:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Entries = append(m.Entries, Entry{})
-			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 8:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
-			}
-			m.Commit = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Commit |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 9:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
-			}
-			var msglen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				msglen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if msglen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + msglen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
-				return err
-			}
-			iNdEx = postIndex
-		case 10:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
-			}
-			var v int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				v |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			m.Reject = bool(v != 0)
-		case 11:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
-			}
-			m.RejectHint = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.RejectHint |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 12:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
-			if m.Context == nil {
-				m.Context = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *HardState) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: HardState: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
-			}
-			m.Term = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Term |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
-			}
-			m.Vote = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Vote |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
-			}
-			m.Commit = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Commit |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ConfState) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.Nodes = append(m.Nodes, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthRaft
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthRaft
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.Nodes) == 0 {
-					m.Nodes = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowRaft
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.Nodes = append(m.Nodes, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
-			}
-		case 2:
-			if wireType == 0 {
-				var v uint64
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					v |= uint64(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				m.Learners = append(m.Learners, v)
-			} else if wireType == 2 {
-				var packedLen int
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					packedLen |= int(b&0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				if packedLen < 0 {
-					return ErrInvalidLengthRaft
-				}
-				postIndex := iNdEx + packedLen
-				if postIndex < 0 {
-					return ErrInvalidLengthRaft
-				}
-				if postIndex > l {
-					return io.ErrUnexpectedEOF
-				}
-				var elementCount int
-				var count int
-				for _, integer := range dAtA[iNdEx:postIndex] {
-					if integer < 128 {
-						count++
-					}
-				}
-				elementCount = count
-				if elementCount != 0 && len(m.Learners) == 0 {
-					m.Learners = make([]uint64, 0, elementCount)
-				}
-				for iNdEx < postIndex {
-					var v uint64
-					for shift := uint(0); ; shift += 7 {
-						if shift >= 64 {
-							return ErrIntOverflowRaft
-						}
-						if iNdEx >= l {
-							return io.ErrUnexpectedEOF
-						}
-						b := dAtA[iNdEx]
-						iNdEx++
-						v |= uint64(b&0x7F) << shift
-						if b < 0x80 {
-							break
-						}
-					}
-					m.Learners = append(m.Learners, v)
-				}
-			} else {
-				return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
-			}
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func (m *ConfChange) Unmarshal(dAtA []byte) error {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		preIndex := iNdEx
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= uint64(b&0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		fieldNum := int32(wire >> 3)
-		wireType := int(wire & 0x7)
-		if wireType == 4 {
-			return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
-		}
-		if fieldNum <= 0 {
-			return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
-		}
-		switch fieldNum {
-		case 1:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
-			}
-			m.ID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.ID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 2:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
-			}
-			m.Type = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.Type |= ConfChangeType(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 3:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
-			}
-			m.NodeID = 0
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				m.NodeID |= uint64(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-		case 4:
-			if wireType != 2 {
-				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
-			}
-			var byteLen int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				byteLen |= int(b&0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if byteLen < 0 {
-				return ErrInvalidLengthRaft
-			}
-			postIndex := iNdEx + byteLen
-			if postIndex < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if postIndex > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
-			if m.Context == nil {
-				m.Context = []byte{}
-			}
-			iNdEx = postIndex
-		default:
-			iNdEx = preIndex
-			skippy, err := skipRaft(dAtA[iNdEx:])
-			if err != nil {
-				return err
-			}
-			if skippy < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) < 0 {
-				return ErrInvalidLengthRaft
-			}
-			if (iNdEx + skippy) > l {
-				return io.ErrUnexpectedEOF
-			}
-			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
-			iNdEx += skippy
-		}
-	}
-
-	if iNdEx > l {
-		return io.ErrUnexpectedEOF
-	}
-	return nil
-}
-func skipRaft(dAtA []byte) (n int, err error) {
-	l := len(dAtA)
-	iNdEx := 0
-	for iNdEx < l {
-		var wire uint64
-		for shift := uint(0); ; shift += 7 {
-			if shift >= 64 {
-				return 0, ErrIntOverflowRaft
-			}
-			if iNdEx >= l {
-				return 0, io.ErrUnexpectedEOF
-			}
-			b := dAtA[iNdEx]
-			iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-			return iNdEx, nil
-		case 1:
-			iNdEx += 8
-			return iNdEx, nil
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowRaft
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthRaft
-			}
-			iNdEx += length
-			if iNdEx < 0 {
-				return 0, ErrInvalidLengthRaft
-			}
-			return iNdEx, nil
-		case 3:
-			for {
-				var innerWire uint64
-				var start int = iNdEx
-				for shift := uint(0); ; shift += 7 {
-					if shift >= 64 {
-						return 0, ErrIntOverflowRaft
-					}
-					if iNdEx >= l {
-						return 0, io.ErrUnexpectedEOF
-					}
-					b := dAtA[iNdEx]
-					iNdEx++
-					innerWire |= (uint64(b) & 0x7F) << shift
-					if b < 0x80 {
-						break
-					}
-				}
-				innerWireType := int(innerWire & 0x7)
-				if innerWireType == 4 {
-					break
-				}
-				next, err := skipRaft(dAtA[start:])
-				if err != nil {
-					return 0, err
-				}
-				iNdEx = start + next
-				if iNdEx < 0 {
-					return 0, ErrInvalidLengthRaft
-				}
-			}
-			return iNdEx, nil
-		case 4:
-			return iNdEx, nil
-		case 5:
-			iNdEx += 4
-			return iNdEx, nil
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-	}
-	panic("unreachable")
-}
-
-var (
-	ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
-)
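
The generated marshallers above fill their buffers right to left: encodeVarintRaft writes base-128 varints backwards into the tail of the slice, and sovRaft derives the encoded width from the operand's bit length. Below is a minimal standalone sketch of that scheme, with a decoder shaped like the shift loops in the Unmarshal methods; the names are illustrative and not part of the vendored package.

package main

import (
	"fmt"
	"io"
	"math/bits"
)

// sov mirrors sovRaft: the number of bytes needed to encode x as a
// base-128 varint, at 7 payload bits per byte.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint mirrors encodeVarintRaft: write x ending at offset,
// filling the buffer right to left, and return the start position.
func encodeVarint(buf []byte, offset int, x uint64) int {
	offset -= sov(x)
	base := offset
	for x >= 1<<7 {
		buf[offset] = uint8(x&0x7f | 0x80) // low 7 bits, continuation bit set
		x >>= 7
		offset++
	}
	buf[offset] = uint8(x)
	return base
}

// decodeVarint mirrors the shift loop used throughout the Unmarshal
// methods: accumulate 7 bits per byte until the continuation bit clears.
func decodeVarint(buf []byte) (x uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("varint overflows uint64")
		}
		if n >= len(buf) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := buf[n]
		n++
		x |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return x, n, nil
		}
	}
}

func main() {
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("%% x\n", buf[start:]) // ac 02
	v, n, _ := decodeVarint(buf[start:])
	fmt.Println(v, n) // 300 2
}

Round-tripping 300 shows the layout: the first byte carries the low 7 bits with the continuation bit set, the second byte carries the remaining bits.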
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
deleted file mode 100644
index 644ce7b..0000000
--- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
+++ /dev/null
@@ -1,95 +0,0 @@
-syntax = "proto2";
-package raftpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-enum EntryType {
-	EntryNormal     = 0;
-	EntryConfChange = 1;
-}
-
-message Entry {
-	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
-	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
-	optional bytes      Data  = 4;
-}
-
-message SnapshotMetadata {
-	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
-	optional uint64    index      = 2 [(gogoproto.nullable) = false];
-	optional uint64    term       = 3 [(gogoproto.nullable) = false];
-}
-
-message Snapshot {
-	optional bytes            data     = 1;
-	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
-}
-
-enum MessageType {
-	MsgHup             = 0;
-	MsgBeat            = 1;
-	MsgProp            = 2;
-	MsgApp             = 3;
-	MsgAppResp         = 4;
-	MsgVote            = 5;
-	MsgVoteResp        = 6;
-	MsgSnap            = 7;
-	MsgHeartbeat       = 8;
-	MsgHeartbeatResp   = 9;
-	MsgUnreachable     = 10;
-	MsgSnapStatus      = 11;
-	MsgCheckQuorum     = 12;
-	MsgTransferLeader  = 13;
-	MsgTimeoutNow      = 14;
-	MsgReadIndex       = 15;
-	MsgReadIndexResp   = 16;
-	MsgPreVote         = 17;
-	MsgPreVoteResp     = 18;
-}
-
-message Message {
-	optional MessageType type        = 1  [(gogoproto.nullable) = false];
-	optional uint64      to          = 2  [(gogoproto.nullable) = false];
-	optional uint64      from        = 3  [(gogoproto.nullable) = false];
-	optional uint64      term        = 4  [(gogoproto.nullable) = false];
-	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
-	optional uint64      index       = 6  [(gogoproto.nullable) = false];
-	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
-	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
-	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
-	optional bool        reject      = 10 [(gogoproto.nullable) = false];
-	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
-	optional bytes       context     = 12;
-}
-
-message HardState {
-	optional uint64 term   = 1 [(gogoproto.nullable) = false];
-	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
-	optional uint64 commit = 3 [(gogoproto.nullable) = false];
-}
-
-message ConfState {
-	repeated uint64 nodes    = 1;
-	repeated uint64 learners = 2;
-}
-
-enum ConfChangeType {
-	ConfChangeAddNode        = 0;
-	ConfChangeRemoveNode     = 1;
-	ConfChangeUpdateNode     = 2;
-	ConfChangeAddLearnerNode = 3;
-}
-
-message ConfChange {
-	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
-	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
-	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
-	optional bytes           Context = 4;
-}
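
Every single-byte key hard-coded in the generated marshallers above (0x8, 0x10, 0x22 and so on) follows directly from these field numbers: a protobuf field key is (field_number << 3) | wire_type, where wire type 0 is varint and 2 is length-delimited. A small illustrative check; the key helper is hypothetical, not part of the package:

package main

import "fmt"

// key computes a one-byte protobuf field key from a field number and
// wire type (0 = varint, 2 = length-delimited bytes/messages).
func key(fieldNum, wireType int) byte {
	return byte(fieldNum<<3 | wireType)
}

func main() {
	// Entry.Type (field 1, varint), Entry.Term (field 2, varint) and
	// Entry.Data (field 4, bytes) yield the 0x8, 0x10 and 0x22 literals
	// seen in the generated Entry marshaller.
	fmt.Printf("%#x %#x %#x\n", key(1, 0), key(2, 0), key(4, 2))
}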
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
deleted file mode 100644
index 925cb85..0000000
--- a/vendor/github.com/coreos/etcd/raft/rawnode.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"errors"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// ErrStepLocalMsg is returned when trying to step a local raft message.
-var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
-
-// ErrStepPeerNotFound is returned when trying to step a response message
-// but no peer is found in raft.prs for that node.
-var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
-
-// RawNode is a thread-unsafe Node.
-// The methods of this struct correspond to the methods of Node and are described
-// more fully there.
-type RawNode struct {
-	raft       *raft
-	prevSoftSt *SoftState
-	prevHardSt pb.HardState
-}
-
-func (rn *RawNode) newReady() Ready {
-	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
-}
-
-func (rn *RawNode) commitReady(rd Ready) {
-	if rd.SoftState != nil {
-		rn.prevSoftSt = rd.SoftState
-	}
-	if !IsEmptyHardState(rd.HardState) {
-		rn.prevHardSt = rd.HardState
-	}
-	if rn.prevHardSt.Commit != 0 {
-		// In most cases, prevHardSt and rd.HardState will be the same
-		// because when there are new entries to apply we just sent a
-		// HardState with an updated Commit value. However, on initial
-		// startup the two are different because we don't send a HardState
-		// until something changes, but we do send any un-applied but
-		// committed entries (and previously-committed entries may be
-		// incorporated into the snapshot, even if rd.CommittedEntries is
-		// empty). Therefore we mark all committed entries as applied
-		// whether they were included in rd.HardState or not.
-		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
-	}
-	if len(rd.Entries) > 0 {
-		e := rd.Entries[len(rd.Entries)-1]
-		rn.raft.raftLog.stableTo(e.Index, e.Term)
-	}
-	if !IsEmptySnap(rd.Snapshot) {
-		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
-	}
-	if len(rd.ReadStates) != 0 {
-		rn.raft.readStates = nil
-	}
-}
-
-// NewRawNode returns a new RawNode given a configuration and a list of raft peers.
-func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
-	if config.ID == 0 {
-		panic("config.ID must not be zero")
-	}
-	r := newRaft(config)
-	rn := &RawNode{
-		raft: r,
-	}
-	lastIndex, err := config.Storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
-	// restoring an existing RawNode (like RestartNode).
-	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
-	// to be able to tell us when it expects the RawNode to exist.
-	if lastIndex == 0 {
-		r.becomeFollower(1, None)
-		ents := make([]pb.Entry, len(peers))
-		for i, peer := range peers {
-			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
-			data, err := cc.Marshal()
-			if err != nil {
-				panic("unexpected marshal error")
-			}
-
-			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
-		}
-		r.raftLog.append(ents...)
-		r.raftLog.committed = uint64(len(ents))
-		for _, peer := range peers {
-			r.addNode(peer.ID)
-		}
-	}
-
-	// Set the initial hard and soft states after performing all initialization.
-	rn.prevSoftSt = r.softState()
-	if lastIndex == 0 {
-		rn.prevHardSt = emptyState
-	} else {
-		rn.prevHardSt = r.hardState()
-	}
-
-	return rn, nil
-}
-
-// Tick advances the internal logical clock by a single tick.
-func (rn *RawNode) Tick() {
-	rn.raft.tick()
-}
-
-// TickQuiesced advances the internal logical clock by a single tick without
-// performing any other state machine processing. It allows the caller to avoid
-// periodic heartbeats and elections when all of the peers in a Raft group are
-// known to be at the same state. Expected usage is to periodically invoke Tick
-// or TickQuiesced depending on whether the group is "active" or "quiesced".
-//
-// WARNING: Be very careful about using this method as it subverts the Raft
-// state machine. You should probably be using Tick instead.
-func (rn *RawNode) TickQuiesced() {
-	rn.raft.electionElapsed++
-}
-
-// Campaign causes this RawNode to transition to candidate state.
-func (rn *RawNode) Campaign() error {
-	return rn.raft.Step(pb.Message{
-		Type: pb.MsgHup,
-	})
-}
-
-// Propose proposes data be appended to the raft log.
-func (rn *RawNode) Propose(data []byte) error {
-	return rn.raft.Step(pb.Message{
-		Type: pb.MsgProp,
-		From: rn.raft.id,
-		Entries: []pb.Entry{
-			{Data: data},
-		}})
-}
-
-// ProposeConfChange proposes a config change.
-func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
-	data, err := cc.Marshal()
-	if err != nil {
-		return err
-	}
-	return rn.raft.Step(pb.Message{
-		Type: pb.MsgProp,
-		Entries: []pb.Entry{
-			{Type: pb.EntryConfChange, Data: data},
-		},
-	})
-}
-
-// ApplyConfChange applies a config change to the local node.
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
-	if cc.NodeID == None {
-		rn.raft.resetPendingConf()
-		return &pb.ConfState{Nodes: rn.raft.nodes()}
-	}
-	switch cc.Type {
-	case pb.ConfChangeAddNode:
-		rn.raft.addNode(cc.NodeID)
-	case pb.ConfChangeAddLearnerNode:
-		rn.raft.addLearner(cc.NodeID)
-	case pb.ConfChangeRemoveNode:
-		rn.raft.removeNode(cc.NodeID)
-	case pb.ConfChangeUpdateNode:
-		rn.raft.resetPendingConf()
-	default:
-		panic("unexpected conf type")
-	}
-	return &pb.ConfState{Nodes: rn.raft.nodes()}
-}
-
-// Step advances the state machine using the given message.
-func (rn *RawNode) Step(m pb.Message) error {
-	// ignore unexpected local messages received over the network
-	if IsLocalMsg(m.Type) {
-		return ErrStepLocalMsg
-	}
-	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
-		return rn.raft.Step(m)
-	}
-	return ErrStepPeerNotFound
-}
-
-// Ready returns the current point-in-time state of this RawNode.
-func (rn *RawNode) Ready() Ready {
-	rd := rn.newReady()
-	rn.raft.msgs = nil
-	return rd
-}
-
-// HasReady is called when the RawNode user needs to check whether any Ready is pending.
-// The checking logic in this method should be consistent with Ready.containsUpdates().
-func (rn *RawNode) HasReady() bool {
-	r := rn.raft
-	if !r.softState().equal(rn.prevSoftSt) {
-		return true
-	}
-	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
-		return true
-	}
-	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
-		return true
-	}
-	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
-		return true
-	}
-	if len(r.readStates) != 0 {
-		return true
-	}
-	return false
-}
-
-// Advance notifies the RawNode that the application has applied and saved progress in the
-// last Ready results.
-func (rn *RawNode) Advance(rd Ready) {
-	rn.commitReady(rd)
-}
-
-// Status returns the current status of the given group.
-func (rn *RawNode) Status() *Status {
-	status := getStatus(rn.raft)
-	return &status
-}
-
-// ReportUnreachable reports the given node is not reachable for the last send.
-func (rn *RawNode) ReportUnreachable(id uint64) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
-}
-
-// ReportSnapshot reports the status of the sent snapshot.
-func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
-	rej := status == SnapshotFailure
-
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
-}
-
-// TransferLeader tries to transfer leadership to the given transferee.
-func (rn *RawNode) TransferLeader(transferee uint64) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
-}
-
-// ReadIndex requests a read state. The read state will be set in Ready.
-// A ReadState has a read index. Once the application advances further than the read
-// index, any linearizable read requests issued before the read request can be
-// processed safely. The read state will have the same rctx attached.
-func (rn *RawNode) ReadIndex(rctx []byte) {
-	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
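
The Ready/HasReady/Advance contract above leaves the driving loop to the application: persist the HardState and entries, send the messages, apply the committed entries, then call Advance. A minimal single-node sketch against this package's API as vendored here; send and apply are hypothetical application hooks:

package main

import (
	"time"

	"github.com/coreos/etcd/raft"
	"github.com/coreos/etcd/raft/raftpb"
)

func send(msgs []raftpb.Message) { /* deliver to peers over the transport */ }
func apply(ents []raftpb.Entry) { /* apply committed entries to the state machine */ }

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   4096,
		MaxInflightMsgs: 256,
	}
	rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
	if err != nil {
		panic(err)
	}

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for range ticker.C {
		rn.Tick()
		for rn.HasReady() {
			rd := rn.Ready()
			// Persist hard state and entries before acting on the Ready.
			if !raft.IsEmptyHardState(rd.HardState) {
				_ = storage.SetHardState(rd.HardState)
			}
			_ = storage.Append(rd.Entries)
			send(rd.Messages)
			apply(rd.CommittedEntries)
			rn.Advance(rd)
		}
	}
}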
diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go
deleted file mode 100644
index ae746fa..0000000
--- a/vendor/github.com/coreos/etcd/raft/read_only.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "github.com/coreos/etcd/raft/raftpb"
-
-// ReadState provides state for a read-only query.
-// It is the caller's responsibility to call ReadIndex before getting this state
-// from Ready; it is also the caller's duty to determine whether this state matches
-// its request through RequestCtx, e.g. by supplying a unique ID as the RequestCtx.
-type ReadState struct {
-	Index      uint64
-	RequestCtx []byte
-}
-
-type readIndexStatus struct {
-	req   pb.Message
-	index uint64
-	acks  map[uint64]struct{}
-}
-
-type readOnly struct {
-	option           ReadOnlyOption
-	pendingReadIndex map[string]*readIndexStatus
-	readIndexQueue   []string
-}
-
-func newReadOnly(option ReadOnlyOption) *readOnly {
-	return &readOnly{
-		option:           option,
-		pendingReadIndex: make(map[string]*readIndexStatus),
-	}
-}
-
-// addRequest adds a read-only request into the readonly struct.
-// `index` is the commit index of the raft state machine when it received
-// the read only request.
-// `m` is the original read only request message from the local or remote node.
-func (ro *readOnly) addRequest(index uint64, m pb.Message) {
-	ctx := string(m.Entries[0].Data)
-	if _, ok := ro.pendingReadIndex[ctx]; ok {
-		return
-	}
-	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
-	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
-}
-
-// recvAck notifies the readonly struct that the raft state machine received
-// an acknowledgment of the heartbeat attached to the read-only request
-// context.
-func (ro *readOnly) recvAck(m pb.Message) int {
-	rs, ok := ro.pendingReadIndex[string(m.Context)]
-	if !ok {
-		return 0
-	}
-
-	rs.acks[m.From] = struct{}{}
-	// add one to include an ack from the local node
-	return len(rs.acks) + 1
-}
-
-// advance advances the read only request queue kept by the readonly struct.
-// It dequeues the requests until it finds the read only request that has
-// the same context as the given `m`.
-func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
-	var (
-		i     int
-		found bool
-	)
-
-	ctx := string(m.Context)
-	rss := []*readIndexStatus{}
-
-	for _, okctx := range ro.readIndexQueue {
-		i++
-		rs, ok := ro.pendingReadIndex[okctx]
-		if !ok {
-			panic("cannot find corresponding read state from pending map")
-		}
-		rss = append(rss, rs)
-		if okctx == ctx {
-			found = true
-			break
-		}
-	}
-
-	if found {
-		ro.readIndexQueue = ro.readIndexQueue[i:]
-		for _, rs := range rss {
-			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
-		}
-		return rss
-	}
-
-	return nil
-}
-
-// lastPendingRequestCtx returns the context of the last pending read-only
-// request in the readonly struct.
-func (ro *readOnly) lastPendingRequestCtx() string {
-	if len(ro.readIndexQueue) == 0 {
-		return ""
-	}
-	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
-}
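
recvAck above returns the ack count including an implicit ack from the local node, and the raft state machine compares that count against the cluster quorum before releasing pending reads. A toy standalone illustration of the counting; readStatus is illustrative, not the package's readIndexStatus:

package main

import "fmt"

// readStatus holds acknowledgments from remote peers for one pending read.
type readStatus struct {
	acks map[uint64]struct{}
}

// recvAck records an ack and returns the total count, adding one for
// the local node's implicit acknowledgment, as recvAck does above.
func (rs *readStatus) recvAck(from uint64) int {
	rs.acks[from] = struct{}{}
	return len(rs.acks) + 1
}

func main() {
	rs := &readStatus{acks: map[uint64]struct{}{}}
	quorum := 3 // e.g. a five-node cluster
	for _, peer := range []uint64{2, 3} {
		if rs.recvAck(peer) >= quorum {
			fmt.Println("read index confirmed by quorum")
		}
	}
}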
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
deleted file mode 100644
index f4d3d86..0000000
--- a/vendor/github.com/coreos/etcd/raft/status.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type Status struct {
-	ID uint64
-
-	pb.HardState
-	SoftState
-
-	Applied  uint64
-	Progress map[uint64]Progress
-
-	LeadTransferee uint64
-}
-
-// getStatus gets a copy of the current raft status.
-func getStatus(r *raft) Status {
-	s := Status{
-		ID:             r.id,
-		LeadTransferee: r.leadTransferee,
-	}
-
-	s.HardState = r.hardState()
-	s.SoftState = *r.softState()
-
-	s.Applied = r.raftLog.applied
-
-	if s.RaftState == StateLeader {
-		s.Progress = make(map[uint64]Progress)
-		for id, p := range r.prs {
-			s.Progress[id] = *p
-		}
-
-		for id, p := range r.learnerPrs {
-			s.Progress[id] = *p
-		}
-	}
-
-	return s
-}
-
-// MarshalJSON translates the raft status into JSON.
-// TODO: try to simplify this by introducing ID type into raft
-func (s Status) MarshalJSON() ([]byte, error) {
-	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
-		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
-
-	if len(s.Progress) == 0 {
-		j += "},"
-	} else {
-		for k, v := range s.Progress {
-			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
-			j += subj
-		}
-		// remove the trailing ","
-		j = j[:len(j)-1] + "},"
-	}
-
-	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
-	return []byte(j), nil
-}
-
-func (s Status) String() string {
-	b, err := s.MarshalJSON()
-	if err != nil {
-		raftLogger.Panicf("unexpected error: %v", err)
-	}
-	return string(b)
-}
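
Status.MarshalJSON above assembles the progress object by string concatenation over a map and trims the trailing comma, so the emitted progress keys can reorder between calls because Go randomizes map iteration. A hedged sketch of the same trim technique with sorted IDs for stable output; the progress type here is a toy, not the package's Progress struct:

package main

import (
	"fmt"
	"sort"
	"strings"
)

type progress struct{ Match, Next uint64 }

// marshalProgress renders the map the way Status.MarshalJSON does, but
// iterates IDs in sorted order so the output is deterministic.
func marshalProgress(prs map[uint64]progress) string {
	ids := make([]uint64, 0, len(prs))
	for id := range prs {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })

	var b strings.Builder
	b.WriteString("{")
	for _, id := range ids {
		p := prs[id]
		fmt.Fprintf(&b, `"%x":{"match":%d,"next":%d},`, id, p.Match, p.Next)
	}
	j := b.String()
	if len(prs) > 0 {
		j = j[:len(j)-1] // remove the trailing ","
	}
	return j + "}"
}

func main() {
	fmt.Println(marshalProgress(map[uint64]progress{1: {5, 6}, 2: {4, 5}}))
}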
diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go
deleted file mode 100644
index 69c3a7d..0000000
--- a/vendor/github.com/coreos/etcd/raft/storage.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"errors"
-	"sync"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// ErrCompacted is returned by Storage.Entries/Compact when a requested
-// index is unavailable because it predates the last snapshot.
-var ErrCompacted = errors.New("requested index is unavailable due to compaction")
-
-// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
-// index is older than the existing snapshot.
-var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
-
-// ErrUnavailable is returned by Storage interface when the requested log entries
-// are unavailable.
-var ErrUnavailable = errors.New("requested entry at index is unavailable")
-
-// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
-// snapshot is temporarily unavailable.
-var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
-
-// Storage is an interface that may be implemented by the application
-// to retrieve log entries from storage.
-//
-// If any Storage method returns an error, the raft instance will
-// become inoperable and refuse to participate in elections; the
-// application is responsible for cleanup and recovery in this case.
-type Storage interface {
-	// InitialState returns the saved HardState and ConfState information.
-	InitialState() (pb.HardState, pb.ConfState, error)
-	// Entries returns a slice of log entries in the range [lo,hi).
-	// MaxSize limits the total size of the log entries returned, but
-	// Entries returns at least one entry if any.
-	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
-	// Term returns the term of entry i, which must be in the range
-	// [FirstIndex()-1, LastIndex()]. The term of the entry before
-	// FirstIndex is retained for matching purposes even though the
-	// rest of that entry may not be available.
-	Term(i uint64) (uint64, error)
-	// LastIndex returns the index of the last entry in the log.
-	LastIndex() (uint64, error)
-	// FirstIndex returns the index of the first log entry that is
-	// possibly available via Entries (older entries have been incorporated
-	// into the latest Snapshot; if storage only contains the dummy entry the
-	// first log entry is not available).
-	FirstIndex() (uint64, error)
-	// Snapshot returns the most recent snapshot.
-	// If the snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
-	// so the raft state machine knows that Storage needs some time to prepare
-	// the snapshot and will call Snapshot later.
-	Snapshot() (pb.Snapshot, error)
-}
-
-// MemoryStorage implements the Storage interface backed by an
-// in-memory array.
-type MemoryStorage struct {
-	// Protects access to all fields. Most methods of MemoryStorage are
-	// run on the raft goroutine, but Append() is run on an application
-	// goroutine.
-	sync.Mutex
-
-	hardState pb.HardState
-	snapshot  pb.Snapshot
-	// ents[i] has raft log position i+snapshot.Metadata.Index
-	ents []pb.Entry
-}
-
-// NewMemoryStorage creates an empty MemoryStorage.
-func NewMemoryStorage() *MemoryStorage {
-	return &MemoryStorage{
-		// When starting from scratch populate the list with a dummy entry at term zero.
-		ents: make([]pb.Entry, 1),
-	}
-}
-
-// InitialState implements the Storage interface.
-func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
-	return ms.hardState, ms.snapshot.Metadata.ConfState, nil
-}
-
-// SetHardState saves the current HardState.
-func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
-	ms.Lock()
-	defer ms.Unlock()
-	ms.hardState = st
-	return nil
-}
-
-// Entries implements the Storage interface.
-func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	offset := ms.ents[0].Index
-	if lo <= offset {
-		return nil, ErrCompacted
-	}
-	if hi > ms.lastIndex()+1 {
-		raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
-	}
-	// only contains dummy entries.
-	if len(ms.ents) == 1 {
-		return nil, ErrUnavailable
-	}
-
-	ents := ms.ents[lo-offset : hi-offset]
-	return limitSize(ents, maxSize), nil
-}
-
-// Term implements the Storage interface.
-func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	offset := ms.ents[0].Index
-	if i < offset {
-		return 0, ErrCompacted
-	}
-	if int(i-offset) >= len(ms.ents) {
-		return 0, ErrUnavailable
-	}
-	return ms.ents[i-offset].Term, nil
-}
-
-// LastIndex implements the Storage interface.
-func (ms *MemoryStorage) LastIndex() (uint64, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	return ms.lastIndex(), nil
-}
-
-func (ms *MemoryStorage) lastIndex() uint64 {
-	return ms.ents[0].Index + uint64(len(ms.ents)) - 1
-}
-
-// FirstIndex implements the Storage interface.
-func (ms *MemoryStorage) FirstIndex() (uint64, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	return ms.firstIndex(), nil
-}
-
-func (ms *MemoryStorage) firstIndex() uint64 {
-	return ms.ents[0].Index + 1
-}
-
-// Snapshot implements the Storage interface.
-func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	return ms.snapshot, nil
-}
-
-// ApplySnapshot overwrites the contents of this Storage object with
-// those of the given snapshot.
-func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
-	ms.Lock()
-	defer ms.Unlock()
-
-	// reject an attempt to apply an out-of-date snapshot
-	msIndex := ms.snapshot.Metadata.Index
-	snapIndex := snap.Metadata.Index
-	if msIndex >= snapIndex {
-		return ErrSnapOutOfDate
-	}
-
-	ms.snapshot = snap
-	ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
-	return nil
-}
-
-// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
-// can be used to reconstruct the state at that point.
-// If any configuration changes have been made since the last compaction,
-// the result of the last ApplyConfChange must be passed in.
-func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
-	ms.Lock()
-	defer ms.Unlock()
-	if i <= ms.snapshot.Metadata.Index {
-		return pb.Snapshot{}, ErrSnapOutOfDate
-	}
-
-	offset := ms.ents[0].Index
-	if i > ms.lastIndex() {
-		raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
-	}
-
-	ms.snapshot.Metadata.Index = i
-	ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
-	if cs != nil {
-		ms.snapshot.Metadata.ConfState = *cs
-	}
-	ms.snapshot.Data = data
-	return ms.snapshot, nil
-}
-
-// Compact discards all log entries prior to compactIndex.
-// It is the application's responsibility to not attempt to compact an index
-// greater than raftLog.applied.
-func (ms *MemoryStorage) Compact(compactIndex uint64) error {
-	ms.Lock()
-	defer ms.Unlock()
-	offset := ms.ents[0].Index
-	if compactIndex <= offset {
-		return ErrCompacted
-	}
-	if compactIndex > ms.lastIndex() {
-		raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
-	}
-
-	i := compactIndex - offset
-	ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
-	ents[0].Index = ms.ents[i].Index
-	ents[0].Term = ms.ents[i].Term
-	ents = append(ents, ms.ents[i+1:]...)
-	ms.ents = ents
-	return nil
-}
-
-// Append the new entries to storage.
-// TODO (xiangli): ensure the entries are continuous and
-// entries[0].Index > ms.entries[0].Index
-func (ms *MemoryStorage) Append(entries []pb.Entry) error {
-	if len(entries) == 0 {
-		return nil
-	}
-
-	ms.Lock()
-	defer ms.Unlock()
-
-	first := ms.firstIndex()
-	last := entries[0].Index + uint64(len(entries)) - 1
-
-	// shortcut if there is no new entry.
-	if last < first {
-		return nil
-	}
-	// truncate compacted entries
-	if first > entries[0].Index {
-		entries = entries[first-entries[0].Index:]
-	}
-
-	offset := entries[0].Index - ms.ents[0].Index
-	switch {
-	case uint64(len(ms.ents)) > offset:
-		ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
-		ms.ents = append(ms.ents, entries...)
-	case uint64(len(ms.ents)) == offset:
-		ms.ents = append(ms.ents, entries...)
-	default:
-		raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
-			ms.lastIndex(), entries[0].Index)
-	}
-	return nil
-}
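
As a quick illustration of the MemoryStorage contract spelled out above (a dummy entry at position 0, FirstIndex one past it, ErrCompacted once an index is compacted away), a minimal sketch using only the exported API from this file:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	ms := raft.NewMemoryStorage()

	// Append three entries; the dummy entry occupies index 0.
	if err := ms.Append([]pb.Entry{
		{Index: 1, Term: 1},
		{Index: 2, Term: 1},
		{Index: 3, Term: 2},
	}); err != nil {
		panic(err)
	}

	first, _ := ms.FirstIndex() // 1
	last, _ := ms.LastIndex()   // 3
	fmt.Println(first, last)

	// Discard everything below index 2; index 1 is now unreachable.
	if err := ms.Compact(2); err != nil {
		panic(err)
	}
	if _, err := ms.Entries(1, 3, 1<<20); err == raft.ErrCompacted {
		fmt.Println("index 1 has been compacted")
	}
}
```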
diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go
deleted file mode 100644
index f4141fe..0000000
--- a/vendor/github.com/coreos/etcd/raft/util.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"bytes"
-	"fmt"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-func (st StateType) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf("%q", st.String())), nil
-}
-
-// uint64Slice implements sort interface
-type uint64Slice []uint64
-
-func (p uint64Slice) Len() int           { return len(p) }
-func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
-
-func min(a, b uint64) uint64 {
-	if a > b {
-		return b
-	}
-	return a
-}
-
-func max(a, b uint64) uint64 {
-	if a > b {
-		return a
-	}
-	return b
-}
-
-func IsLocalMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
-		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
-}
-
-func IsResponseMsg(msgt pb.MessageType) bool {
-	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
-}
-
-// voteRespMsgType maps vote and prevote message types to their corresponding responses.
-func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
-	switch msgt {
-	case pb.MsgVote:
-		return pb.MsgVoteResp
-	case pb.MsgPreVote:
-		return pb.MsgPreVoteResp
-	default:
-		panic(fmt.Sprintf("not a vote message: %s", msgt))
-	}
-}
-
-// EntryFormatter can be implemented by the application to provide human-readable formatting
-// of entry data. Nil is a valid EntryFormatter and will use a default format.
-type EntryFormatter func([]byte) string
-
-// DescribeMessage returns a concise human-readable description of a
-// Message for debugging.
-func DescribeMessage(m pb.Message, f EntryFormatter) string {
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
-	if m.Reject {
-		fmt.Fprintf(&buf, " Rejected")
-		if m.RejectHint != 0 {
-			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
-		}
-	}
-	if m.Commit != 0 {
-		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
-	}
-	if len(m.Entries) > 0 {
-		fmt.Fprintf(&buf, " Entries:[")
-		for i, e := range m.Entries {
-			if i != 0 {
-				buf.WriteString(", ")
-			}
-			buf.WriteString(DescribeEntry(e, f))
-		}
-		fmt.Fprintf(&buf, "]")
-	}
-	if !IsEmptySnap(m.Snapshot) {
-		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
-	}
-	return buf.String()
-}
-
-// DescribeEntry returns a concise human-readable description of an
-// Entry for debugging.
-func DescribeEntry(e pb.Entry, f EntryFormatter) string {
-	var formatted string
-	if e.Type == pb.EntryNormal && f != nil {
-		formatted = f(e.Data)
-	} else {
-		formatted = fmt.Sprintf("%q", e.Data)
-	}
-	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
-}
-
-func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
-	if len(ents) == 0 {
-		return ents
-	}
-	size := ents[0].Size()
-	var limit int
-	for limit = 1; limit < len(ents); limit++ {
-		size += ents[limit].Size()
-		if uint64(size) > maxSize {
-			break
-		}
-	}
-	return ents[:limit]
-}
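
The debugging helpers above behave as follows; a small sketch (the entry contents are made up):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// A nil EntryFormatter falls back to quoting the raw data.
	e := pb.Entry{Term: 2, Index: 7, Type: pb.EntryNormal, Data: []byte("put k=v")}
	fmt.Println(raft.DescribeEntry(e, nil)) // 2/7 EntryNormal "put k=v"

	// A custom formatter is applied only to EntryNormal entries.
	short := func(data []byte) string { return fmt.Sprintf("%d bytes", len(data)) }
	fmt.Println(raft.DescribeEntry(e, short)) // 2/7 EntryNormal 7 bytes
}
```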
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
deleted file mode 100644
index aa96ffa..0000000
--- a/vendor/github.com/coreos/etcd/version/version.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package version implements etcd version parsing and contains latest version
-// information.
-package version
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/coreos/go-semver/semver"
-)
-
-var (
-	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
-	MinClusterVersion = "3.0.0"
-	Version           = "3.3.25"
-	APIVersion        = "unknown"
-
-	// Git SHA Value will be set during build
-	GitSHA = "Not provided (use ./build instead of go build)"
-)
-
-func init() {
-	ver, err := semver.NewVersion(Version)
-	if err == nil {
-		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
-	}
-}
-
-type Versions struct {
-	Server  string `json:"etcdserver"`
-	Cluster string `json:"etcdcluster"`
-	// TODO: raft state machine version
-}
-
-// Cluster only keeps the major.minor.
-func Cluster(v string) string {
-	vs := strings.Split(v, ".")
-	if len(vs) <= 2 {
-		return v
-	}
-	return fmt.Sprintf("%s.%s", vs[0], vs[1])
-}
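
These version helpers are small but easy to misread: Cluster truncates to major.minor only when there are more than two components, and init derives APIVersion from Version at startup. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	fmt.Println(version.Cluster("3.3.25")) // "3.3"
	fmt.Println(version.Cluster("3.0"))    // "3.0" (two components pass through)
	fmt.Println(version.APIVersion)        // "3.3", computed from Version in init()
}
```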
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
index 76cf485..eb9fb7f 100644
--- a/vendor/github.com/coreos/go-semver/semver/semver.go
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -85,7 +85,7 @@
 		return fmt.Errorf("failed to validate metadata: %v", err)
 	}
 
-	parsed := make([]int64, 3, 3)
+	parsed := make([]int64, 3)
 
 	for i, v := range dotParts[:3] {
 		val, err := strconv.ParseInt(v, 10, 64)
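
This hunk is behavior-preserving: for a slice, make with a single length argument already sets the capacity equal to the length, so the explicit third argument was redundant. A quick check:

```go
package main

import "fmt"

func main() {
	a := make([]int64, 3, 3)
	b := make([]int64, 3)
	fmt.Println(len(a), cap(a)) // 3 3
	fmt.Println(len(b), cap(b)) // 3 3
}
```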
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
deleted file mode 100644
index a0f4837..0000000
--- a/vendor/github.com/coreos/go-systemd/journal/journal.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package journal provides write bindings to the local systemd journal.
-// It is implemented in pure Go and connects to the journal directly over its
-// unix socket.
-//
-// To read from the journal, see the "sdjournal" package, which wraps
-// sd-journal, a C API.
-//
-// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
-package journal
-
-import (
-	"bytes"
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"os"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"syscall"
-	"unsafe"
-)
-
-// Priority of a journal message
-type Priority int
-
-const (
-	PriEmerg Priority = iota
-	PriAlert
-	PriCrit
-	PriErr
-	PriWarning
-	PriNotice
-	PriInfo
-	PriDebug
-)
-
-var (
-	// This can be overridden at build-time:
-	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
-	journalSocket = "/run/systemd/journal/socket"
-
-	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
-	// Concrete safe pointer type: *net.UnixConn
-	unixConnPtr unsafe.Pointer
-	// onceConn ensures that unixConnPtr is initialized exactly once.
-	onceConn sync.Once
-)
-
-func init() {
-	onceConn.Do(initConn)
-}
-
-// Enabled checks whether the local systemd journal is available for logging.
-func Enabled() bool {
-	onceConn.Do(initConn)
-
-	if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
-		return false
-	}
-
-	if _, err := net.Dial("unixgram", journalSocket); err != nil {
-		return false
-	}
-
-	return true
-}
-
-// Send a message to the local systemd journal. vars is a map of journald
-// fields to values.  Fields must be composed of uppercase letters, numbers,
-// and underscores, but must not start with an underscore. Within these
-// restrictions, any arbitrary field name may be used.  Some names have special
-// significance: see the journalctl documentation
-// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
-// for more details.  vars may be nil.
-func Send(message string, priority Priority, vars map[string]string) error {
-	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
-	if conn == nil {
-		return errors.New("could not initialize socket to journald")
-	}
-
-	socketAddr := &net.UnixAddr{
-		Name: journalSocket,
-		Net:  "unixgram",
-	}
-
-	data := new(bytes.Buffer)
-	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
-	appendVariable(data, "MESSAGE", message)
-	for k, v := range vars {
-		appendVariable(data, k, v)
-	}
-
-	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
-	if err == nil {
-		return nil
-	}
-	if !isSocketSpaceError(err) {
-		return err
-	}
-
-	// Large log entry, send it via tempfile and ancillary-fd.
-	file, err := tempFd()
-	if err != nil {
-		return err
-	}
-	defer file.Close()
-	_, err = io.Copy(file, data)
-	if err != nil {
-		return err
-	}
-	rights := syscall.UnixRights(int(file.Fd()))
-	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Print prints a message to the local systemd journal using Send().
-func Print(priority Priority, format string, a ...interface{}) error {
-	return Send(fmt.Sprintf(format, a...), priority, nil)
-}
-
-func appendVariable(w io.Writer, name, value string) {
-	if err := validVarName(name); err != nil {
-		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
-	}
-	if strings.ContainsRune(value, '\n') {
-		/* When the value contains a newline, we write:
-		 * - the variable name, followed by a newline
-		 * - the size (in 64bit little endian format)
-		 * - the data, followed by a newline
-		 */
-		fmt.Fprintln(w, name)
-		binary.Write(w, binary.LittleEndian, uint64(len(value)))
-		fmt.Fprintln(w, value)
-	} else {
-		/* just write the variable and value all on one line */
-		fmt.Fprintf(w, "%s=%s\n", name, value)
-	}
-}
-
-// validVarName validates a variable name to make sure journald will accept it.
-// The variable name must be in uppercase and consist only of characters,
-// numbers and underscores, and may not begin with an underscore:
-// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
-func validVarName(name string) error {
-	if name == "" {
-		return errors.New("Empty variable name")
-	} else if name[0] == '_' {
-		return errors.New("Variable name begins with an underscore")
-	}
-
-	for _, c := range name {
-		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
-			return errors.New("Variable name contains invalid characters")
-		}
-	}
-	return nil
-}
-
-// isSocketSpaceError checks whether the error is signaling
-// an "overlarge message" condition.
-func isSocketSpaceError(err error) bool {
-	opErr, ok := err.(*net.OpError)
-	if !ok || opErr == nil {
-		return false
-	}
-
-	sysErr, ok := opErr.Err.(*os.SyscallError)
-	if !ok || sysErr == nil {
-		return false
-	}
-
-	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
-}
-
-// tempFd creates a temporary, unlinked file under `/dev/shm`.
-func tempFd() (*os.File, error) {
-	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
-	if err != nil {
-		return nil, err
-	}
-	err = syscall.Unlink(file.Name())
-	if err != nil {
-		return nil, err
-	}
-	return file, nil
-}
-
-// initConn initializes the global `unixConnPtr` socket.
-// It is meant to be called exactly once, at program startup.
-func initConn() {
-	autobind, err := net.ResolveUnixAddr("unixgram", "")
-	if err != nil {
-		return
-	}
-
-	sock, err := net.ListenUnixgram("unixgram", autobind)
-	if err != nil {
-		return
-	}
-
-	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
-}
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/go-systemd/LICENSE
rename to vendor/github.com/coreos/go-systemd/v22/LICENSE
diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE
similarity index 100%
rename from vendor/github.com/coreos/go-systemd/NOTICE
rename to vendor/github.com/coreos/go-systemd/v22/NOTICE
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
new file mode 100644
index 0000000..ac24c77
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
@@ -0,0 +1,46 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps
+// sd-journal, a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"fmt"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+	PriEmerg Priority = iota
+	PriAlert
+	PriCrit
+	PriErr
+	PriWarning
+	PriNotice
+	PriInfo
+	PriDebug
+)
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+	return Send(fmt.Sprintf(format, a...), priority, nil)
+}
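
Print is a thin convenience wrapper: it formats with fmt.Sprintf and forwards to the platform-specific Send defined in journal_unix.go and journal_windows.go. A minimal sketch:

```go
package main

import "github.com/coreos/go-systemd/v22/journal"

func main() {
	// On platforms without journald, Enabled simply reports false.
	if journal.Enabled() {
		journal.Print(journal.PriInfo, "service started on port %d", 8080)
	}
}
```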
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
new file mode 100644
index 0000000..c5b23a8
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
@@ -0,0 +1,267 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps
+// sd-journal, a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	// This can be overridden at build-time:
+	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
+	journalSocket = "/run/systemd/journal/socket"
+
+	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
+	// Concrete safe pointer type: *net.UnixConn
+	unixConnPtr unsafe.Pointer
+	// onceConn ensures that unixConnPtr is initialized exactly once.
+	onceConn sync.Once
+)
+
+// Enabled checks whether the local systemd journal is available for logging.
+func Enabled() bool {
+	if c := getOrInitConn(); c == nil {
+		return false
+	}
+
+	conn, err := net.Dial("unixgram", journalSocket)
+	if err != nil {
+		return false
+	}
+	defer conn.Close()
+
+	return true
+}
+
+// StderrIsJournalStream returns whether the process stderr is connected
+// to the Journal's stream transport.
+//
+// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stderr's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
+func StderrIsJournalStream() (bool, error) {
+	return fdIsJournalStream(syscall.Stderr)
+}
+
+// StdoutIsJournalStream returns whether the process stdout is connected
+// to the Journal's stream transport.
+//
+// Returns true if JOURNAL_STREAM environment variable is present,
+// and stdout's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// Most users should probably use [StderrIsJournalStream].
+func StdoutIsJournalStream() (bool, error) {
+	return fdIsJournalStream(syscall.Stdout)
+}
+
+func fdIsJournalStream(fd int) (bool, error) {
+	journalStream := os.Getenv("JOURNAL_STREAM")
+	if journalStream == "" {
+		return false, nil
+	}
+
+	var expectedStat syscall.Stat_t
+	_, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
+	if err != nil {
+		return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
+	}
+
+	var stat syscall.Stat_t
+	err = syscall.Fstat(fd, &stat)
+	if err != nil {
+		return false, err
+	}
+
+	match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
+	return match, nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values.  Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used.  Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details.  vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+	conn := getOrInitConn()
+	if conn == nil {
+		return errors.New("could not initialize socket to journald")
+	}
+
+	socketAddr := &net.UnixAddr{
+		Name: journalSocket,
+		Net:  "unixgram",
+	}
+
+	data := new(bytes.Buffer)
+	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+	appendVariable(data, "MESSAGE", message)
+	for k, v := range vars {
+		appendVariable(data, k, v)
+	}
+
+	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+	if err == nil {
+		return nil
+	}
+	if !isSocketSpaceError(err) {
+		return err
+	}
+
+	// Large log entry, send it via tempfile and ancillary-fd.
+	file, err := tempFd()
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	_, err = io.Copy(file, data)
+	if err != nil {
+		return err
+	}
+	rights := syscall.UnixRights(int(file.Fd()))
+	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
+func getOrInitConn() *net.UnixConn {
+	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+	if conn != nil {
+		return conn
+	}
+	onceConn.Do(initConn)
+	return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+}
+
+func appendVariable(w io.Writer, name, value string) {
+	if err := validVarName(name); err != nil {
+		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
+	}
+	if strings.ContainsRune(value, '\n') {
+		/* When the value contains a newline, we write:
+		 * - the variable name, followed by a newline
+		 * - the size (in 64bit little endian format)
+		 * - the data, followed by a newline
+		 */
+		fmt.Fprintln(w, name)
+		binary.Write(w, binary.LittleEndian, uint64(len(value)))
+		fmt.Fprintln(w, value)
+	} else {
+		/* just write the variable and value all on one line */
+		fmt.Fprintf(w, "%s=%s\n", name, value)
+	}
+}
+
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must be in uppercase and consist only of characters,
+// numbers and underscores, and may not begin with an underscore:
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
+func validVarName(name string) error {
+	if name == "" {
+		return errors.New("Empty variable name")
+	} else if name[0] == '_' {
+		return errors.New("Variable name begins with an underscore")
+	}
+
+	for _, c := range name {
+		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+			return errors.New("Variable name contains invalid characters")
+		}
+	}
+	return nil
+}
+
+// isSocketSpaceError checks whether the error is signaling
+// an "overlarge message" condition.
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok || opErr == nil {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(*os.SyscallError)
+	if !ok || sysErr == nil {
+		return false
+	}
+
+	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
+}
+
+// tempFd creates a temporary, unlinked file under `/dev/shm`.
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+// initConn initializes the global `unixConnPtr` socket.
+// It is automatically called when needed.
+func initConn() {
+	autobind, err := net.ResolveUnixAddr("unixgram", "")
+	if err != nil {
+		return
+	}
+
+	sock, err := net.ListenUnixgram("unixgram", autobind)
+	if err != nil {
+		return
+	}
+
+	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
+}
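
Putting the pieces above together: Send with custom journald fields (which must satisfy validVarName: uppercase letters, digits, and underscores, with no leading underscore) plus the JOURNAL_STREAM detection. A minimal sketch with made-up field names and values:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/v22/journal"
)

func main() {
	// Field names here are illustrative; keys must satisfy validVarName.
	err := journal.Send("cache rebuild finished", journal.PriNotice, map[string]string{
		"SYSLOG_IDENTIFIER": "mydaemon",
		"REBUILD_MS":        "412",
	})
	if err != nil {
		fmt.Println("journald unavailable:", err)
	}

	// Detect whether stderr already points at the journal's stream transport.
	if ok, err := journal.StderrIsJournalStream(); err == nil && ok {
		fmt.Println("stderr is connected to the journal")
	}
}
```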
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
new file mode 100644
index 0000000..322e41e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
@@ -0,0 +1,43 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps
+// sd-journal, a C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"errors"
+)
+
+func Enabled() bool {
+	return false
+}
+
+func Send(message string, priority Priority, vars map[string]string) error {
+	return errors.New("could not initialize socket to journald")
+}
+
+func StderrIsJournalStream() (bool, error) {
+	return false, nil
+}
+
+func StdoutIsJournalStream() (bool, error) {
+	return false, nil
+}
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
deleted file mode 100644
index e06d208..0000000
--- a/vendor/github.com/coreos/pkg/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/github.com/coreos/pkg/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
deleted file mode 100644
index f79dbfc..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# capnslog, the CoreOS logging package
-
-There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or APIs that are just a pain to use (lack of `Fatalln()`?).
-capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
-
-### Design Principles
-
-##### `package main` is the place where logging gets turned on and routed
-
-A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
-
-##### All log options are runtime-configurable. 
-
-It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but it does so explicitly.
-
-##### There is one log object per package. It is registered under its repository and package name.
-
-`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
-
-##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
-
-Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
-
-Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
-
-##### Log objects are an interface
-
-An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
-
-##### Log levels have specific meanings:
-
-  * Critical: Unrecoverable. Must fail.
-  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
-  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
-  * Notice: Normal, but important (uncommon) log information.
-  * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
-  * Debug: Everything is still fine, but even common operations may be logged, and less helpful but more quantity of notices.
-  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
-
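
Concretely, the pattern this README describes looks like the following; a minimal sketch, assuming the PackageLogger API (NewPackageLogger, Infof) from the rest of this package, with a made-up repository path:

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// Libraries only create loggers; they never configure output themselves.
var plog = capnslog.NewPackageLogger("github.com/example/myrepo", "main")

func main() {
	// main owns routing: one output stream, one formatter, runtime-set levels.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)
	plog.Infof("listening on %s", ":8080")
}
```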
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
deleted file mode 100644
index b305a84..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/formatters.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"log"
-	"runtime"
-	"strings"
-	"time"
-)
-
-type Formatter interface {
-	Format(pkg string, level LogLevel, depth int, entries ...interface{})
-	Flush()
-}
-
-func NewStringFormatter(w io.Writer) Formatter {
-	return &StringFormatter{
-		w: bufio.NewWriter(w),
-	}
-}
-
-type StringFormatter struct {
-	w *bufio.Writer
-}
-
-func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
-	now := time.Now().UTC()
-	s.w.WriteString(now.Format(time.RFC3339))
-	s.w.WriteByte(' ')
-	writeEntries(s.w, pkg, l, i, entries...)
-	s.Flush()
-}
-
-func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
-	if pkg != "" {
-		w.WriteString(pkg + ": ")
-	}
-	str := fmt.Sprint(entries...)
-	endsInNL := strings.HasSuffix(str, "\n")
-	w.WriteString(str)
-	if !endsInNL {
-		w.WriteString("\n")
-	}
-}
-
-func (s *StringFormatter) Flush() {
-	s.w.Flush()
-}
-
-func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
-	return &PrettyFormatter{
-		w:     bufio.NewWriter(w),
-		debug: debug,
-	}
-}
-
-type PrettyFormatter struct {
-	w     *bufio.Writer
-	debug bool
-}
-
-func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
-	now := time.Now()
-	ts := now.Format("2006-01-02 15:04:05")
-	c.w.WriteString(ts)
-	ms := now.Nanosecond() / 1000
-	c.w.WriteString(fmt.Sprintf(".%06d", ms))
-	if c.debug {
-		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-		if !ok {
-			file = "???"
-			line = 1
-		} else {
-			slash := strings.LastIndex(file, "/")
-			if slash >= 0 {
-				file = file[slash+1:]
-			}
-		}
-		if line < 0 {
-			line = 0 // not a real line number
-		}
-		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
-	}
-	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
-	writeEntries(c.w, pkg, l, depth, entries...)
-	c.Flush()
-}
-
-func (c *PrettyFormatter) Flush() {
-	c.w.Flush()
-}
-
-// LogFormatter emulates the form of the traditional built-in logger.
-type LogFormatter struct {
-	logger *log.Logger
-	prefix string
-}
-
-// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
-// golang log package to actually do the logging work so that logs look similar.
-func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
-	return &LogFormatter{
-		logger: log.New(w, "", flag), // don't use prefix here
-		prefix: prefix,               // save it instead
-	}
-}
-
-// Format builds a log message for the LogFormatter. The LogLevel is ignored.
-func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
-	str := fmt.Sprint(entries...)
-	prefix := lf.prefix
-	if pkg != "" {
-		prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
-	}
-	lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (lf *LogFormatter) Flush() {
-	// noop
-}
-
-// NilFormatter is a no-op log formatter that does nothing.
-type NilFormatter struct {
-}
-
-// NewNilFormatter is a helper to produce a new NilFormatter struct. It logs no
-// messages so that you can cause part of your logging to be silent.
-func NewNilFormatter() Formatter {
-	return &NilFormatter{}
-}
-
-// Format does nothing.
-func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
-	// noop
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (_ *NilFormatter) Flush() {
-	// noop
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
deleted file mode 100644
index 426603e..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"os"
-	"runtime"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var pid = os.Getpid()
-
-type GlogFormatter struct {
-	StringFormatter
-}
-
-func NewGlogFormatter(w io.Writer) *GlogFormatter {
-	g := &GlogFormatter{}
-	g.w = bufio.NewWriter(w)
-	return g
-}
-
-func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
-	g.w.Write(GlogHeader(level, depth+1))
-	g.StringFormatter.Format(pkg, level, depth+1, entries...)
-}
-
-func GlogHeader(level LogLevel, depth int) []byte {
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	now := time.Now().UTC()
-	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-	if line < 0 {
-		line = 0 // not a real line number
-	}
-	buf := &bytes.Buffer{}
-	buf.Grow(30)
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	buf.WriteString(level.Char())
-	twoDigits(buf, int(month))
-	twoDigits(buf, day)
-	buf.WriteByte(' ')
-	twoDigits(buf, hour)
-	buf.WriteByte(':')
-	twoDigits(buf, minute)
-	buf.WriteByte(':')
-	twoDigits(buf, second)
-	buf.WriteByte('.')
-	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
-	buf.WriteByte('Z')
-	buf.WriteByte(' ')
-	buf.WriteString(strconv.Itoa(pid))
-	buf.WriteByte(' ')
-	buf.WriteString(file)
-	buf.WriteByte(':')
-	buf.WriteString(strconv.Itoa(line))
-	buf.WriteByte(']')
-	buf.WriteByte(' ')
-	return buf.Bytes()
-}
-
-const digits = "0123456789"
-
-func twoDigits(b *bytes.Buffer, d int) {
-	c2 := digits[d%10]
-	d /= 10
-	c1 := digits[d%10]
-	b.WriteByte(c1)
-	b.WriteByte(c2)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
deleted file mode 100644
index 38ce6d2..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/init.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"io"
-	"os"
-	"syscall"
-)
-
-// Here's where the opinionation comes in. We need some sensible defaults,
-// especially after taking over the log package. Your project (whatever it may
-// be) may see things differently. That's okay; there should be no defaults in
-// the main package that cannot be controlled or overridden programmatically;
-// otherwise it's a bug. The way to do that is to create your own init_log.go file
-// like this one.
-
-func init() {
-	initHijack()
-
-	// Go `log` package uses os.Stderr.
-	SetFormatter(NewDefaultFormatter(os.Stderr))
-	SetGlobalLogLevel(INFO)
-}
-
-func NewDefaultFormatter(out io.Writer) Formatter {
-	if syscall.Getppid() == 1 {
-		// We're running under init, which may be systemd.
-		f, err := NewJournaldFormatter()
-		if err == nil {
-			return f
-		}
-	}
-	return NewPrettyFormatter(out, false)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
deleted file mode 100644
index 4553050..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/init_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import "os"
-
-func init() {
-	initHijack()
-
-	// Go `log` package uses os.Stderr.
-	SetFormatter(NewPrettyFormatter(os.Stderr, false))
-	SetGlobalLogLevel(INFO)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
deleted file mode 100644
index 72e0520..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/coreos/go-systemd/journal"
-)
-
-func NewJournaldFormatter() (Formatter, error) {
-	if !journal.Enabled() {
-		return nil, errors.New("No systemd detected")
-	}
-	return &journaldFormatter{}, nil
-}
-
-type journaldFormatter struct{}
-
-func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-	var pri journal.Priority
-	switch l {
-	case CRITICAL:
-		pri = journal.PriCrit
-	case ERROR:
-		pri = journal.PriErr
-	case WARNING:
-		pri = journal.PriWarning
-	case NOTICE:
-		pri = journal.PriNotice
-	case INFO:
-		pri = journal.PriInfo
-	case DEBUG:
-		pri = journal.PriDebug
-	case TRACE:
-		pri = journal.PriDebug
-	default:
-		panic("Unhandled loglevel")
-	}
-	msg := fmt.Sprint(entries...)
-	tags := map[string]string{
-		"PACKAGE":           pkg,
-		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
-	}
-	err := journal.Send(msg, pri, tags)
-	if err != nil {
-		fmt.Fprintln(os.Stderr, err)
-	}
-}
-
-func (j *journaldFormatter) Flush() {}
diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
deleted file mode 100644
index 970086b..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"log"
-)
-
-func initHijack() {
-	pkg := NewPackageLogger("log", "")
-	w := packageWriter{pkg}
-	log.SetFlags(0)
-	log.SetPrefix("")
-	log.SetOutput(w)
-}
-
-type packageWriter struct {
-	pl *PackageLogger
-}
-
-func (p packageWriter) Write(b []byte) (int, error) {
-	if p.pl.level < INFO {
-		return 0, nil
-	}
-	p.pl.internalLog(calldepth+2, INFO, string(b))
-	return len(b), nil
-}
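
The removed log_hijack.go shows the standard trick for capturing the stdlib `log` package: strip its formatting and install a custom `io.Writer`. A self-contained sketch of the same pattern follows, using nothing beyond the standard library; `levelWriter` is a hypothetical stand-in for capnslog's `packageWriter`.

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// levelWriter is a stand-in for capnslog's packageWriter: it receives
// everything the stdlib logger prints and can filter or re-route it.
type levelWriter struct {
	minLevel int
	level    int
}

func (w levelWriter) Write(b []byte) (int, error) {
	if w.level < w.minLevel {
		return 0, nil // drop messages below the configured level
	}
	fmt.Fprintf(os.Stderr, "[hijacked] %s", b)
	return len(b), nil
}

func main() {
	// Strip the stdlib logger's own formatting so the writer sees raw text.
	log.SetFlags(0)
	log.SetPrefix("")
	log.SetOutput(levelWriter{minLevel: 0, level: 1})

	log.Println("this now flows through levelWriter")
}
```
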
diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go
deleted file mode 100644
index 226b60c..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/logmap.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"errors"
-	"strings"
-	"sync"
-)
-
-// LogLevel is the set of all log levels.
-type LogLevel int8
-
-const (
-	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
-	CRITICAL LogLevel = iota - 1
-	// ERROR is for errors that are not fatal but lead to troubling behavior.
-	ERROR
-	// WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
-	WARNING
-	// NOTICE is for normal but significant conditions.
-	NOTICE
-	// INFO is a log level for common, everyday log updates.
-	INFO
-	// DEBUG is the default hidden level for more verbose updates about internal processes.
-	DEBUG
-	// TRACE is for (potentially) call by call tracing of programs.
-	TRACE
-)
-
-// Char returns a single-character representation of the log level.
-func (l LogLevel) Char() string {
-	switch l {
-	case CRITICAL:
-		return "C"
-	case ERROR:
-		return "E"
-	case WARNING:
-		return "W"
-	case NOTICE:
-		return "N"
-	case INFO:
-		return "I"
-	case DEBUG:
-		return "D"
-	case TRACE:
-		return "T"
-	default:
-		panic("Unhandled loglevel")
-	}
-}
-
-// String returns a multi-character representation of the log level.
-func (l LogLevel) String() string {
-	switch l {
-	case CRITICAL:
-		return "CRITICAL"
-	case ERROR:
-		return "ERROR"
-	case WARNING:
-		return "WARNING"
-	case NOTICE:
-		return "NOTICE"
-	case INFO:
-		return "INFO"
-	case DEBUG:
-		return "DEBUG"
-	case TRACE:
-		return "TRACE"
-	default:
-		panic("Unhandled loglevel")
-	}
-}
-
-// Update using the given string value. Fulfills the flag.Value interface.
-func (l *LogLevel) Set(s string) error {
-	value, err := ParseLevel(s)
-	if err != nil {
-		return err
-	}
-
-	*l = value
-	return nil
-}
-
-// Returns an empty string, only here to fulfill the pflag.Value interface.
-func (l *LogLevel) Type() string {
-	return ""
-}
-
-// ParseLevel translates some potential loglevel strings into their corresponding levels.
-func ParseLevel(s string) (LogLevel, error) {
-	switch s {
-	case "CRITICAL", "C":
-		return CRITICAL, nil
-	case "ERROR", "0", "E":
-		return ERROR, nil
-	case "WARNING", "1", "W":
-		return WARNING, nil
-	case "NOTICE", "2", "N":
-		return NOTICE, nil
-	case "INFO", "3", "I":
-		return INFO, nil
-	case "DEBUG", "4", "D":
-		return DEBUG, nil
-	case "TRACE", "5", "T":
-		return TRACE, nil
-	}
-	return CRITICAL, errors.New("couldn't parse log level " + s)
-}
-
-type RepoLogger map[string]*PackageLogger
-
-type loggerStruct struct {
-	sync.Mutex
-	repoMap   map[string]RepoLogger
-	formatter Formatter
-}
-
-// logger is the global logger
-var logger = new(loggerStruct)
-
-// SetGlobalLogLevel sets the log level for all packages in all repositories
-// registered with capnslog.
-func SetGlobalLogLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	for _, r := range logger.repoMap {
-		r.setRepoLogLevelInternal(l)
-	}
-}
-
-// GetRepoLogger may return the handle to the repository's set of packages' loggers.
-func GetRepoLogger(repo string) (RepoLogger, error) {
-	logger.Lock()
-	defer logger.Unlock()
-	r, ok := logger.repoMap[repo]
-	if !ok {
-		return nil, errors.New("no packages registered for repo " + repo)
-	}
-	return r, nil
-}
-
-// MustRepoLogger returns the handle to the repository's packages' loggers.
-func MustRepoLogger(repo string) RepoLogger {
-	r, err := GetRepoLogger(repo)
-	if err != nil {
-		panic(err)
-	}
-	return r
-}
-
-// SetRepoLogLevel sets the log level for all packages in the repository.
-func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	r.setRepoLogLevelInternal(l)
-}
-
-func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
-	for _, v := range r {
-		v.level = l
-	}
-}
-
-// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
-// order, and returns a map of the results, for use in SetLogLevel.
-func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
-	setlist := strings.Split(conf, ",")
-	out := make(map[string]LogLevel)
-	for _, setstring := range setlist {
-		setting := strings.Split(setstring, "=")
-		if len(setting) != 2 {
-			return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
-		}
-		l, err := ParseLevel(setting[1])
-		if err != nil {
-			return nil, err
-		}
-		out[setting[0]] = l
-	}
-	return out, nil
-}
-
-// SetLogLevel takes a map of package names within a repository to their desired
-// loglevel, and sets the levels appropriately. Unknown packages are ignored.
-// "*" is a special package name that corresponds to all packages, and will be
-// processed first.
-func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	if l, ok := m["*"]; ok {
-		r.setRepoLogLevelInternal(l)
-	}
-	for k, v := range m {
-		l, ok := r[k]
-		if !ok {
-			continue
-		}
-		l.level = v
-	}
-}
-
-// SetFormatter sets the formatting function for all logs.
-func SetFormatter(f Formatter) {
-	logger.Lock()
-	defer logger.Unlock()
-	logger.formatter = f
-}
-
-// NewPackageLogger creates a package logger object.
-// This should be defined as a global var in your package, referencing your repo.
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
-	logger.Lock()
-	defer logger.Unlock()
-	if logger.repoMap == nil {
-		logger.repoMap = make(map[string]RepoLogger)
-	}
-	r, rok := logger.repoMap[repo]
-	if !rok {
-		logger.repoMap[repo] = make(RepoLogger)
-		r = logger.repoMap[repo]
-	}
-	p, pok := r[pkg]
-	if !pok {
-		r[pkg] = &PackageLogger{
-			pkg:   pkg,
-			level: INFO,
-		}
-		p = r[pkg]
-	}
-	return
-}
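
For reference, the `pkg=level` configuration syntax that the removed `ParseLogLevelConfig` accepted can be sketched standalone. This simplification keeps levels as raw strings instead of calling the deleted `ParseLevel`; it is not the original code.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseLogLevelConfig mirrors the removed RepoLogger.ParseLogLevelConfig:
// it splits a comma-separated "pkg=LEVEL" string into a map. The special
// "*" package name applied a level to every package in the repo.
func parseLogLevelConfig(conf string) (map[string]string, error) {
	out := make(map[string]string)
	for _, setting := range strings.Split(conf, ",") {
		parts := strings.Split(setting, "=")
		if len(parts) != 2 {
			return nil, errors.New("oddly structured `pkg=level` option: " + setting)
		}
		out[parts[0]] = parts[1]
	}
	return out, nil
}

func main() {
	m, err := parseLogLevelConfig("etcdserver=DEBUG,*=INFO")
	if err != nil {
		panic(err)
	}
	fmt.Println(m) // map[*:INFO etcdserver:DEBUG]
}
```
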
diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
deleted file mode 100644
index 00ff371..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
-	"fmt"
-	"os"
-)
-
-type PackageLogger struct {
-	pkg   string
-	level LogLevel
-}
-
-const calldepth = 2
-
-func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
-	logger.Lock()
-	defer logger.Unlock()
-	if inLevel != CRITICAL && p.level < inLevel {
-		return
-	}
-	if logger.formatter != nil {
-		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
-	}
-}
-
-// SetLevel allows users to change the current logging level.
-func (p *PackageLogger) SetLevel(l LogLevel) {
-	logger.Lock()
-	defer logger.Unlock()
-	p.level = l
-}
-
-// LevelAt checks if the given log level will be outputted under current setting.
-func (p *PackageLogger) LevelAt(l LogLevel) bool {
-	logger.Lock()
-	defer logger.Unlock()
-	return p.level >= l
-}
-
-// Log a formatted string at any level between ERROR and TRACE
-func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
-	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
-}
-
-// Log a message at any level between ERROR and TRACE
-func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
-	p.internalLog(calldepth, l, fmt.Sprint(args...))
-}
-
-// log stdlib compatibility
-
-func (p *PackageLogger) Println(args ...interface{}) {
-	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
-}
-
-func (p *PackageLogger) Printf(format string, args ...interface{}) {
-	p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Print(args ...interface{}) {
-	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
-}
-
-// Panic and fatal
-
-func (p *PackageLogger) Panicf(format string, args ...interface{}) {
-	s := fmt.Sprintf(format, args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Panic(args ...interface{}) {
-	s := fmt.Sprint(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Panicln(args ...interface{}) {
-	s := fmt.Sprintln(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	panic(s)
-}
-
-func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
-	p.Logf(CRITICAL, format, args...)
-	os.Exit(1)
-}
-
-func (p *PackageLogger) Fatal(args ...interface{}) {
-	s := fmt.Sprint(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	os.Exit(1)
-}
-
-func (p *PackageLogger) Fatalln(args ...interface{}) {
-	s := fmt.Sprintln(args...)
-	p.internalLog(calldepth, CRITICAL, s)
-	os.Exit(1)
-}
-
-// Error Functions
-
-func (p *PackageLogger) Errorf(format string, args ...interface{}) {
-	p.Logf(ERROR, format, args...)
-}
-
-func (p *PackageLogger) Error(entries ...interface{}) {
-	p.internalLog(calldepth, ERROR, entries...)
-}
-
-// Warning Functions
-
-func (p *PackageLogger) Warningf(format string, args ...interface{}) {
-	p.Logf(WARNING, format, args...)
-}
-
-func (p *PackageLogger) Warning(entries ...interface{}) {
-	p.internalLog(calldepth, WARNING, entries...)
-}
-
-// Notice Functions
-
-func (p *PackageLogger) Noticef(format string, args ...interface{}) {
-	p.Logf(NOTICE, format, args...)
-}
-
-func (p *PackageLogger) Notice(entries ...interface{}) {
-	p.internalLog(calldepth, NOTICE, entries...)
-}
-
-// Info Functions
-
-func (p *PackageLogger) Infof(format string, args ...interface{}) {
-	p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Info(entries ...interface{}) {
-	p.internalLog(calldepth, INFO, entries...)
-}
-
-// Debug Functions
-
-func (p *PackageLogger) Debugf(format string, args ...interface{}) {
-	if p.level < DEBUG {
-		return
-	}
-	p.Logf(DEBUG, format, args...)
-}
-
-func (p *PackageLogger) Debug(entries ...interface{}) {
-	if p.level < DEBUG {
-		return
-	}
-	p.internalLog(calldepth, DEBUG, entries...)
-}
-
-// Trace Functions
-
-func (p *PackageLogger) Tracef(format string, args ...interface{}) {
-	if p.level < TRACE {
-		return
-	}
-	p.Logf(TRACE, format, args...)
-}
-
-func (p *PackageLogger) Trace(entries ...interface{}) {
-	if p.level < TRACE {
-		return
-	}
-	p.internalLog(calldepth, TRACE, entries...)
-}
-
-func (p *PackageLogger) Flush() {
-	logger.Lock()
-	defer logger.Unlock()
-	logger.formatter.Flush()
-}
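
Note the guard in the removed `Debugf`/`Tracef`: the level check happens before any formatting, so disabled debug logging costs almost nothing on the hot path. A minimal sketch of that pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"os"
)

type level int

const (
	infoLevel level = iota
	debugLevel
)

type pkgLogger struct{ level level }

// Debugf checks the level *before* formatting, so callers on the hot path
// never pay for fmt.Sprintf when debug logging is disabled -- the same
// guard the removed PackageLogger.Debugf used.
func (p *pkgLogger) Debugf(format string, args ...interface{}) {
	if p.level < debugLevel {
		return
	}
	fmt.Fprintf(os.Stderr, "D: "+format+"\n", args...)
}

func main() {
	quiet := &pkgLogger{level: infoLevel}
	quiet.Debugf("expensive %v", []int{1, 2, 3}) // no-op, nothing formatted

	verbose := &pkgLogger{level: debugLevel}
	verbose.Debugf("expensive %v", []int{1, 2, 3})
}
```
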
diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
deleted file mode 100644
index 4be5a1f..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"fmt"
-	"log/syslog"
-)
-
-func NewSyslogFormatter(w *syslog.Writer) Formatter {
-	return &syslogFormatter{w}
-}
-
-func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
-	w, err := syslog.New(syslog.LOG_DEBUG, tag)
-	if err != nil {
-		return nil, err
-	}
-	return NewSyslogFormatter(w), nil
-}
-
-type syslogFormatter struct {
-	w *syslog.Writer
-}
-
-func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-	for _, entry := range entries {
-		str := fmt.Sprint(entry)
-		switch l {
-		case CRITICAL:
-			s.w.Crit(str)
-		case ERROR:
-			s.w.Err(str)
-		case WARNING:
-			s.w.Warning(str)
-		case NOTICE:
-			s.w.Notice(str)
-		case INFO:
-			s.w.Info(str)
-		case DEBUG:
-			s.w.Debug(str)
-		case TRACE:
-			s.w.Debug(str)
-		default:
-			panic("Unhandled loglevel")
-		}
-	}
-}
-
-func (s *syslogFormatter) Flush() {
-}
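
The removed syslog formatter is a thin wrapper over the standard library's `log/syslog`, and the same wiring can be done directly. A short sketch, assuming a Unix-like host with a running syslog daemon (`log/syslog` does not build on Windows):

```go
//go:build !windows

package main

import (
	"log"
	"log/syslog"
)

func main() {
	// NewDefaultSyslogFormatter above does essentially this: open a syslog
	// writer at a given priority with an identifying tag.
	w, err := syslog.New(syslog.LOG_DEBUG|syslog.LOG_DAEMON, "myapp")
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	w.Warning("disk space low")
	w.Info("routine status update")
}
```
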
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
index 2d1b3d9..76f5007 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/README.md
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -1,7 +1,7 @@
 circuit-breaker
 ===============
 
-[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency)
+[![Golang CI](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml/badge.svg)](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml)
 [![GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker?status.svg)](https://godoc.org/github.com/eapache/go-resiliency/breaker)
 [![Code of Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html)
 
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
index f88ca72..9214386 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -12,10 +12,13 @@
 // because the breaker is currently open.
 var ErrBreakerOpen = errors.New("circuit breaker is open")
 
+// State is a type representing the possible states of a circuit breaker.
+type State uint32
+
 const (
-	closed uint32 = iota
-	open
-	halfOpen
+	Closed State = iota
+	Open
+	HalfOpen
 )
 
 // Breaker implements the circuit-breaker resiliency pattern
@@ -24,7 +27,7 @@
 	timeout                          time.Duration
 
 	lock              sync.Mutex
-	state             uint32
+	state             State
 	errors, successes int
 	lastError         time.Time
 }
@@ -46,9 +49,9 @@
 // already open, or it will run the given function and pass along its return
 // value. It is safe to call Run concurrently on the same Breaker.
 func (b *Breaker) Run(work func() error) error {
-	state := atomic.LoadUint32(&b.state)
+	state := b.GetState()
 
-	if state == open {
+	if state == Open {
 		return ErrBreakerOpen
 	}
 
@@ -61,9 +64,9 @@
 // the return value of the function. It is safe to call Go concurrently on the
 // same Breaker.
 func (b *Breaker) Go(work func() error) error {
-	state := atomic.LoadUint32(&b.state)
+	state := b.GetState()
 
-	if state == open {
+	if state == Open {
 		return ErrBreakerOpen
 	}
 
@@ -75,7 +78,13 @@
 	return nil
 }
 
-func (b *Breaker) doWork(state uint32, work func() error) error {
+// GetState returns the current State of the circuit-breaker at the moment
+// that it is called.
+func (b *Breaker) GetState() State {
+	return (State)(atomic.LoadUint32((*uint32)(&b.state)))
+}
+
+func (b *Breaker) doWork(state State, work func() error) error {
 	var panicValue interface{}
 
 	result := func() error {
@@ -85,7 +94,7 @@
 		return work()
 	}()
 
-	if result == nil && panicValue == nil && state == closed {
+	if result == nil && panicValue == nil && state == Closed {
 		// short-circuit the normal, success path without contending
 		// on the lock
 		return nil
@@ -108,7 +117,7 @@
 	defer b.lock.Unlock()
 
 	if result == nil && panicValue == nil {
-		if b.state == halfOpen {
+		if b.state == HalfOpen {
 			b.successes++
 			if b.successes == b.successThreshold {
 				b.closeBreaker()
@@ -123,26 +132,26 @@
 		}
 
 		switch b.state {
-		case closed:
+		case Closed:
 			b.errors++
 			if b.errors == b.errorThreshold {
 				b.openBreaker()
 			} else {
 				b.lastError = time.Now()
 			}
-		case halfOpen:
+		case HalfOpen:
 			b.openBreaker()
 		}
 	}
 }
 
 func (b *Breaker) openBreaker() {
-	b.changeState(open)
+	b.changeState(Open)
 	go b.timer()
 }
 
 func (b *Breaker) closeBreaker() {
-	b.changeState(closed)
+	b.changeState(Closed)
 }
 
 func (b *Breaker) timer() {
@@ -151,11 +160,11 @@
 	b.lock.Lock()
 	defer b.lock.Unlock()
 
-	b.changeState(halfOpen)
+	b.changeState(HalfOpen)
 }
 
-func (b *Breaker) changeState(newState uint32) {
+func (b *Breaker) changeState(newState State) {
 	b.errors = 0
 	b.successes = 0
-	atomic.StoreUint32(&b.state, newState)
+	atomic.StoreUint32((*uint32)(&b.state), (uint32)(newState))
 }
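
With the state constants now exported, typical breaker use looks like the following hedged sketch, assuming the post-change API of `github.com/eapache/go-resiliency/breaker`; the thresholds are illustrative.

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// Trip after 3 consecutive errors, close again after 1 success,
	// and wait 5 seconds in the open state before probing (half-open).
	b := breaker.New(3, 1, 5*time.Second)

	err := b.Run(func() error {
		return errors.New("downstream unavailable")
	})
	switch {
	case err == breaker.ErrBreakerOpen:
		fmt.Println("request short-circuited; breaker is open")
	case err != nil:
		fmt.Println("work failed:", err)
	}

	// GetState (added in this change) exposes Closed/Open/HalfOpen
	// without racing against concurrent Run/Go calls.
	fmt.Println("breaker state:", b.GetState())
}
```
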
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
deleted file mode 100644
index 6a46f47..0000000
--- a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build gofuzz
-
-package snappy
-
-func Fuzz(data []byte) int {
-	decode, err := Decode(data)
-	if decode == nil && err == nil {
-		panic("nil error with nil result")
-	}
-
-	if err != nil {
-		return 0
-	}
-
-	return 1
-}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
index ea8f7af..c2eb205 100644
--- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -25,10 +25,10 @@
 )
 
 func min(x, y int) int {
-    if x < y {
-        return x
-    }
-    return y
+	if x < y {
+		return x
+	}
+	return y
 }
 
 // Encode encodes data as snappy with no framing header.
@@ -48,14 +48,14 @@
 
 	// Snappy encode in blocks of maximum 32KB
 	var (
-		max = len(src)
+		max       = len(src)
 		blockSize = 32 * 1024
-		pos   = 0
-		chunk []byte
+		pos       = 0
+		chunk     []byte
 	)
 
 	for pos < max {
-		newPos := min(pos + blockSize, max)
+		newPos := min(pos+blockSize, max)
 		chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
 
 		// First encode the compressed size (big-endian)
@@ -83,13 +83,23 @@
 // for use by this function. If `dst` is nil *or* insufficiently large to hold
 // the decoded `src`, new space will be allocated.
 func DecodeInto(dst, src []byte) ([]byte, error) {
+	if len(src) < 8 || !bytes.Equal(src[:8], xerialHeader) {
+		dst, err := master.Decode(dst[:cap(dst)], src)
+		if err != nil && len(src) < len(xerialHeader) {
+			// Keep compatibility and return ErrMalformed when there is a
+			// short or truncated header.
+			return nil, ErrMalformed
+		}
+		return dst, err
+	}
+
 	var max = len(src)
 	if max < len(xerialHeader) {
 		return nil, ErrMalformed
 	}
 
-	if !bytes.Equal(src[:8], xerialHeader) {
-		return master.Decode(dst[:cap(dst)], src)
+	if max == sizeOffset {
+		return []byte{}, nil
 	}
 
 	if max < sizeOffset+sizeBytes {
@@ -104,7 +114,7 @@
 	var (
 		pos   = sizeOffset
 		chunk []byte
-		err       error
+		err   error
 	)
 
 	for pos+sizeBytes <= max {
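
To see the framing distinction the patched `DecodeInto` handles: raw snappy blocks (from `Encode`) carry no xerial header, while Kafka's xerial format prepends the 8-byte magic plus per-chunk sizes. A small round-trip sketch against the package's public API; the import alias is illustrative.

```go
package main

import (
	"bytes"
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	src := bytes.Repeat([]byte("kafka message payload "), 100)

	// Encode produces a raw snappy block (no xerial framing header);
	// Decode transparently handles both raw blocks and xerial-framed
	// input, which is why the patched DecodeInto sniffs the 8-byte header.
	enc := snappy.Encode(src)
	dec, err := snappy.Decode(enc)
	if err != nil {
		panic(err)
	}
	fmt.Println("round-trip ok:", bytes.Equal(src, dec))
}
```
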
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
index 1e8d238..de51455 100644
--- a/vendor/github.com/go-redis/redis/v8/.golangci.yml
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -2,20 +2,3 @@
   concurrency: 8
   deadline: 5m
   tests: false
-linters:
-  enable-all: true
-  disable:
-    - funlen
-    - gochecknoglobals
-    - gochecknoinits
-    - gocognit
-    - goconst
-    - godox
-    - gosec
-    - maligned
-    - wsl
-    - gomnd
-    - goerr113
-    - exhaustive
-    - nestif
-    - nlreturn
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.prettierrc
rename to vendor/github.com/go-redis/redis/v8/.prettierrc.yml
diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml
deleted file mode 100644
index 1bf578d..0000000
--- a/vendor/github.com/go-redis/redis/v8/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-dist: xenial
-language: go
-
-services:
-  - redis-server
-
-go:
-  - 1.14.x
-  - 1.15.x
-  - tip
-
-matrix:
-  allow_failures:
-    - go: tip
-
-go_import_path: github.com/go-redis/redis
-
-before_install:
-  - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
-    -b $(go env GOPATH)/bin v1.31.0
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
index 8392d54..195e519 100644
--- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -1,5 +1,177 @@
-# Changelog
+## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
 
-> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
 
-See https://redis.uptrace.dev/changelog/
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
+
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Support for more Redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+  single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+  decision:
+
+  - Traces become smaller and less noisy.
+  - It may be costly to process those 3 extra spans for each query.
+  - go-redis no longer depends on OpenTelemetry.
+
+  Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+  Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+  `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
+
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+  you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
+  struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+  [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define global package variable
+  `var ctx = context.TODO()` and use it when `ctx` is required.
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTelemetryHook` that adds
+  [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
+  existing keys to a new location or they will be inaccessible / lost. To use the old hashing scheme:
+
+```go
+import "github.com/golang/groupcache/consistenthash"
+
+ring := redis.NewRing(&redis.RingOptions{
+    NewConsistentHash: func() {
+        return consistenthash.New(100, crc32.ChecksumIEEE)
+    },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
+## v7.3
+
+- New option `Options.Username`, which causes the client to use `AuthACL`. Be aware of this if your
+  connection URL contains a username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+  interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+  transactional pipeline.
+- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
+- WithContext now can not be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+  detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+  the time.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## v6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a
+  timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal
+  Redis servers that don't have cluster mode enabled. See
+  https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
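
The v8.11.4 entry above ("add SetVal method for each command") pairs with the command.go changes later in this diff. A hedged sketch of how `SetVal` helps when stubbing results in tests; no server is involved and the command is never executed.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// SetVal (added in v8.11.4, visible in the command.go hunks below)
	// lets test code fabricate a command result without a live server --
	// the mechanism redismock builds on.
	cmd := redis.NewStringCmd(ctx, "get", "key")
	cmd.SetVal("stubbed value")

	fmt.Println(cmd.Val()) // stubbed value
}
```
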
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
index 49e4c96..a4cfe05 100644
--- a/vendor/github.com/go-redis/redis/v8/Makefile
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -1,10 +1,11 @@
-all: testdeps
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
 	go test ./...
 	go test ./... -short -race
 	go test ./... -run=NONE -bench=. -benchmem
 	env GOOS=linux GOARCH=386 go test ./...
 	go vet
-	golangci-lint run
 
 testdeps: testdata/redis/src/redis-server
 
@@ -15,7 +16,20 @@
 
 testdata/redis:
 	mkdir -p $@
-	wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
+	wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
 
 testdata/redis/src/redis-server: testdata/redis
 	cd $< && make all
+
+fmt:
+	gofmt -w -s ./
+	goimports -w  -local github.com/go-redis/redis ./
+
+go_mod_tidy:
+	go get -u && go mod tidy
+	set -e; for dir in $(PACKAGE_DIRS); do \
+	  echo "go mod tidy in $${dir}"; \
+	  (cd "$${dir}" && \
+	    go get -u && \
+	    go mod tidy); \
+	done
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
index da5d0fb..f3b6a01 100644
--- a/vendor/github.com/go-redis/redis/v8/README.md
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -1,23 +1,32 @@
-# Redis client for Golang
+# Redis client for Go
 
-[![Build Status](https://travis-ci.org/go-redis/redis.png?branch=master)](https://travis-ci.org/go-redis/redis)
+![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
 [![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
 [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
-[![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj)
 
-> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
+OpenTelemetry and ClickHouse. Give it a star as well!
 
-- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+## Resources
+
+- [Discussions](https://github.com/go-redis/redis/discussions)
 - [Documentation](https://redis.uptrace.dev)
 - [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
 - [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
 - [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
 
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
 ## Ecosystem
 
-- [Distributed Locks](https://github.com/bsm/redislock).
-- [Redis Cache](https://github.com/go-redis/cache).
-- [Rate limiting](https://github.com/go-redis/redis_rate).
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
 
 ## Features
 
@@ -26,16 +35,16 @@
   [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
 - [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
 - [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
-  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
+  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
 - [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
 - [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
 - [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
 - [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
   without using cluster mode and Redis Sentinel.
 - [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
 
 ## Installation
 
@@ -47,7 +56,7 @@
 go mod init github.com/my/repo
 ```
 
-And then install go-redis (note _v8_ in the import; omitting it is a popular mistake):
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
 
 ```shell
 go get github.com/go-redis/redis/v8
@@ -59,6 +68,7 @@
 import (
     "context"
     "github.com/go-redis/redis/v8"
+    "fmt"
 )
 
 var ctx = context.Background()
@@ -129,9 +139,37 @@
 res, err := rdb.Do(ctx, "set", "key", "value").Result()
 ```
 
-## See also
+## Run the tests
 
-- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
-- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
-- [Golang msgpack](https://github.com/vmihailenco/msgpack)
-- [Golang message task queue](https://github.com/vmihailenco/taskq)
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
+
+```go
+var (
+	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to your redis-server binary under `testdata/redis/src/` and copy the config file to `testdata/redis/`:
+
+```shell
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```shell
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/go-redis/redis/graphs/contributors">
+  <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+</a>
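
For completeness, a minimal quickstart matching the README's v8 API; the server address is an assumption (a redis-server on localhost:6379).

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

var ctx = context.Background()

func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr: "localhost:6379", // assumes a local redis-server
	})
	defer rdb.Close()

	// Every command takes a context as its first argument in v8.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		panic(err)
	}
	val, err := rdb.Get(ctx, "key").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key =", val)
}
```
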
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run the `release.sh` script, which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
index a6ce5c5..a54f2f3 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -68,6 +68,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	// PoolSize applies per cluster node and not for the whole cluster.
 	PoolSize           int
 	MinIdleConns       int
@@ -86,12 +89,12 @@
 		opt.MaxRedirects = 3
 	}
 
-	if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+	if opt.RouteByLatency || opt.RouteRandomly {
 		opt.ReadOnly = true
 	}
 
 	if opt.PoolSize == 0 {
-		opt.PoolSize = 5 * runtime.NumCPU()
+		opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
 	}
 
 	switch opt.ReadTimeout {
@@ -146,6 +149,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		MinIdleConns:       opt.MinIdleConns,
 		MaxConnAge:         opt.MaxConnAge,
@@ -153,9 +157,13 @@
 		IdleTimeout:        opt.IdleTimeout,
 		IdleCheckFrequency: disableIdleCheck,
 
-		readOnly: opt.ReadOnly,
-
 		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config).  This means we cannot execute the
+		// READONLY command against such a node -- setting readOnly to false in
+		// that case, as done below, prevents that from happening.
+		readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
 	}
 }
 
@@ -291,8 +299,9 @@
 
 func (c *clusterNodes) Addrs() ([]string, error) {
 	var addrs []string
+
 	c.mu.RLock()
-	closed := c.closed
+	closed := c.closed //nolint:ifshort
 	if !closed {
 		if len(c.activeAddrs) > 0 {
 			addrs = c.activeAddrs
@@ -343,7 +352,7 @@
 	}
 }
 
-func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
 	node, err := c.get(addr)
 	if err != nil {
 		return nil, err
@@ -407,7 +416,7 @@
 	}
 
 	n := rand.Intn(len(addrs))
-	return c.Get(addrs[n])
+	return c.GetOrCreate(addrs[n])
 }
 
 //------------------------------------------------------------------------------
@@ -465,7 +474,7 @@
 				addr = replaceLoopbackHost(addr, originHost)
 			}
 
-			node, err := c.nodes.Get(addr)
+			node, err := c.nodes.GetOrCreate(addr)
 			if err != nil {
 				return nil, err
 			}
@@ -586,8 +595,16 @@
 	if len(nodes) == 0 {
 		return c.nodes.Random()
 	}
-	n := rand.Intn(len(nodes))
-	return nodes[n], nil
+	if len(nodes) == 1 {
+		return nodes[0], nil
+	}
+	randomNodes := rand.Perm(len(nodes))
+	for _, idx := range randomNodes {
+		if node := nodes[idx]; !node.Failing() {
+			return node, nil
+		}
+	}
+	return nodes[randomNodes[0]], nil
 }
 
 func (c *clusterState) slotNodes(slot int) []*clusterNode {
@@ -628,14 +645,14 @@
 	return state, nil
 }
 
-func (c *clusterStateHolder) LazyReload(ctx context.Context) {
+func (c *clusterStateHolder) LazyReload() {
 	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
 		return
 	}
 	go func() {
 		defer atomic.StoreUint32(&c.reloading, 0)
 
-		_, err := c.Reload(ctx)
+		_, err := c.Reload(context.Background())
 		if err != nil {
 			return
 		}
@@ -645,14 +662,15 @@
 
 func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
 	v := c.state.Load()
-	if v != nil {
-		state := v.(*clusterState)
-		if time.Since(state.createdAt) > 10*time.Second {
-			c.LazyReload(ctx)
-		}
-		return state, nil
+	if v == nil {
+		return c.Reload(ctx)
 	}
-	return c.Reload(ctx)
+
+	state := v.(*clusterState)
+	if time.Since(state.createdAt) > 10*time.Second {
+		c.LazyReload()
+	}
+	return state, nil
 }
 
 func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
@@ -728,7 +746,7 @@
 // ReloadState reloads cluster state. If available it calls ClusterSlots func
 // to get cluster slots information.
 func (c *ClusterClient) ReloadState(ctx context.Context) {
-	c.state.LazyReload(ctx)
+	c.state.LazyReload()
 }
 
 // Close closes the cluster client, releasing any open resources.
@@ -789,7 +807,7 @@
 		}
 		if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
 			if isReadOnly {
-				c.state.LazyReload(ctx)
+				c.state.LazyReload()
 			}
 			node = nil
 			continue
@@ -806,8 +824,10 @@
 		var addr string
 		moved, ask, addr = isMovedError(lastErr)
 		if moved || ask {
+			c.state.LazyReload()
+
 			var err error
-			node, err = c.nodes.Get(addr)
+			node, err = c.nodes.GetOrCreate(addr)
 			if err != nil {
 				return err
 			}
@@ -1004,7 +1024,7 @@
 	for _, idx := range rand.Perm(len(addrs)) {
 		addr := addrs[idx]
 
-		node, err := c.nodes.Get(addr)
+		node, err := c.nodes.GetOrCreate(addr)
 		if err != nil {
 			if firstErr == nil {
 				firstErr = err
@@ -1218,13 +1238,13 @@
 		return false
 	}
 
-	node, err := c.nodes.Get(addr)
+	node, err := c.nodes.GetOrCreate(addr)
 	if err != nil {
 		return false
 	}
 
 	if moved {
-		c.state.LazyReload(ctx)
+		c.state.LazyReload()
 		failedCmds.Add(node, cmd)
 		return true
 	}
@@ -1252,10 +1272,13 @@
 }
 
 func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
-	return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+	return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
 }
 
 func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	// Trim multi .. exec.
+	cmds = cmds[1 : len(cmds)-1]
+
 	state, err := c.state.Get(ctx)
 	if err != nil {
 		setCmdsErr(cmds, err)
@@ -1291,6 +1314,7 @@
 					if err == nil {
 						return
 					}
+
 					if attempt < c.opt.MaxRedirects {
 						if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
 							setCmdsErr(cmds, err)
@@ -1400,13 +1424,13 @@
 	addr string,
 	failedCmds *cmdsMap,
 ) error {
-	node, err := c.nodes.Get(addr)
+	node, err := c.nodes.GetOrCreate(addr)
 	if err != nil {
 		return err
 	}
 
 	if moved {
-		c.state.LazyReload(ctx)
+		c.state.LazyReload()
 		for _, cmd := range cmds {
 			failedCmds.Add(node, cmd)
 		}
@@ -1455,7 +1479,7 @@
 
 		moved, ask, addr := isMovedError(err)
 		if moved || ask {
-			node, err = c.nodes.Get(addr)
+			node, err = c.nodes.GetOrCreate(addr)
 			if err != nil {
 				return err
 			}
@@ -1464,7 +1488,7 @@
 
 		if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
 			if isReadOnly {
-				c.state.LazyReload(ctx)
+				c.state.LazyReload()
 			}
 			node, err = c.slotMasterNode(ctx, slot)
 			if err != nil {
@@ -1567,7 +1591,7 @@
 	for _, idx := range perm {
 		addr := addrs[idx]
 
-		node, err := c.nodes.Get(addr)
+		node, err := c.nodes.GetOrCreate(addr)
 		if err != nil {
 			if firstErr == nil {
 				firstErr = err
@@ -1631,7 +1655,7 @@
 		return nil, err
 	}
 
-	if (c.opt.RouteByLatency || c.opt.RouteRandomly) && cmdInfo != nil && cmdInfo.ReadOnly {
+	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
 		return c.slotReadOnlyNode(state, slot)
 	}
 	return state.slotMasterNode(slot)
@@ -1655,6 +1679,35 @@
 	return state.slotMasterNode(slot)
 }
 
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful if we want to run a particular Lua script that
+// contains only read-only commands on a replica.
+// It is needed because ordinary read-only Redis commands carry a flag marking
+// them as such, and are routed to replica nodes automatically when
+// ClusterOptions.ReadOnly is set to true, whereas scripts are not.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	slot := hashtag.Slot(key)
+	node, err := c.slotReadOnlyNode(state, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+	slot := hashtag.Slot(key)
+	node, err := c.slotMasterNode(ctx, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
 func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
 	for _, n := range nodes {
 		if n == node {
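
The new `SlaveForKey`/`MasterForKey` helpers added above can be exercised as in the sketch below; the cluster addresses are assumptions, and error handling is trimmed to panics for brevity.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs:    []string{"localhost:7000", "localhost:7001"}, // assumed addresses
		ReadOnly: true, // route read-only commands to replicas
	})
	defer rdb.Close()

	// SlaveForKey / MasterForKey pin a connection to the node serving a
	// key's hash slot -- useful for read-only Lua scripts that the
	// automatic read-only routing cannot classify.
	replica, err := rdb.SlaveForKey(ctx, "user:42")
	if err != nil {
		panic(err)
	}
	fmt.Println(replica.Ping(ctx).Val())

	master, err := rdb.MasterForKey(ctx, "user:42")
	if err != nil {
		panic(err)
	}
	fmt.Println(master.Ping(ctx).Val())
}
```
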
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
index 1f0bae0..085bce8 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -2,24 +2,108 @@
 
 import (
 	"context"
+	"sync"
 	"sync/atomic"
 )
 
 func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
 	cmd := NewIntCmd(ctx, "dbsize")
-	var size int64
-	err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
-		n, err := master.DBSize(ctx).Result()
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		var size int64
+		err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+			n, err := master.DBSize(ctx).Result()
+			if err != nil {
+				return err
+			}
+			atomic.AddInt64(&size, n)
+			return nil
+		})
 		if err != nil {
-			return err
+			cmd.SetErr(err)
+		} else {
+			cmd.val = size
 		}
-		atomic.AddInt64(&size, n)
 		return nil
 	})
-	if err != nil {
-		cmd.SetErr(err)
-		return cmd
+	return cmd
+}
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+	cmd := NewStringCmd(ctx, "script", "load", script)
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptLoad(ctx, script).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			if cmd.Val() == "" {
+				cmd.val = val
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "flush")
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			return shard.ScriptFlush(ctx).Err()
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
 	}
-	cmd.val = size
+	cmd := NewBoolSliceCmd(ctx, args...)
+
+	result := make([]bool, len(hashes))
+	for i := range result {
+		result[i] = true
+	}
+
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptExists(ctx, hashes...).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			for i, v := range val {
+				result[i] = result[i] && v
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		} else {
+			cmd.val = result
+		}
+		return nil
+	})
 	return cmd
 }
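
The rewritten script commands above fan out per shard: `ScriptLoad` stores the script on every shard and returns one SHA, while `ScriptExists` starts from all-true and ANDs each shard's reply, reporting true only if every shard has the script. A usage sketch under those semantics; the address is an assumption.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{"localhost:7000"}, // assumed address
	})
	defer rdb.Close()

	// The returned SHA is usable wherever EvalSha lands, because the
	// script was loaded on every shard.
	sha, err := rdb.ScriptLoad(ctx, "return 1").Result()
	if err != nil {
		panic(err)
	}

	ok, err := rdb.ScriptExists(ctx, sha).Result()
	if err != nil {
		panic(err)
	}
	fmt.Println(sha, ok) // e.g. e0e1f9... [true]

	n, err := rdb.EvalSha(ctx, sha, []string{"k"}).Int()
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 1
}
```
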
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
index 5dd5533..4bb12a8 100644
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -8,6 +8,7 @@
 	"time"
 
 	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hscan"
 	"github.com/go-redis/redis/v8/internal/proto"
 	"github.com/go-redis/redis/v8/internal/util"
 )
@@ -19,7 +20,7 @@
 	String() string
 	stringArg(int) string
 	firstKeyPos() int8
-	setFirstKeyPos(int8)
+	SetFirstKeyPos(int8)
 
 	readTimeout() *time.Duration
 	readReply(rd *proto.Reader) error
@@ -150,15 +151,21 @@
 	if pos < 0 || pos >= len(cmd.args) {
 		return ""
 	}
-	s, _ := cmd.args[pos].(string)
-	return s
+	arg := cmd.args[pos]
+	switch v := arg.(type) {
+	case string:
+		return v
+	default:
+		// TODO: consider using appendArg
+		return fmt.Sprint(v)
+	}
 }
 
 func (cmd *baseCmd) firstKeyPos() int8 {
 	return cmd.keyPos
 }
 
-func (cmd *baseCmd) setFirstKeyPos(keyPos int8) {
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
 	cmd.keyPos = keyPos
 }
 
@@ -199,6 +206,10 @@
 	return cmdString(cmd, cmd.val)
 }
 
+func (cmd *Cmd) SetVal(val interface{}) {
+	cmd.val = val
+}
+
 func (cmd *Cmd) Val() interface{} {
 	return cmd.val
 }
@@ -211,7 +222,11 @@
 	if cmd.err != nil {
 		return "", cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+	switch val := val.(type) {
 	case string:
 		return val, nil
 	default:
@@ -239,7 +254,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+	switch val := val.(type) {
 	case int64:
 		return val, nil
 	case string:
@@ -254,7 +273,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+	switch val := val.(type) {
 	case int64:
 		return uint64(val), nil
 	case string:
@@ -269,7 +292,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+	switch val := val.(type) {
 	case int64:
 		return float32(val), nil
 	case string:
@@ -288,7 +315,11 @@
 	if cmd.err != nil {
 		return 0, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+	switch val := val.(type) {
 	case int64:
 		return float64(val), nil
 	case string:
@@ -303,7 +334,11 @@
 	if cmd.err != nil {
 		return false, cmd.err
 	}
-	switch val := cmd.val.(type) {
+	return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+	switch val := val.(type) {
 	case int64:
 		return val != 0, nil
 	case string:
@@ -314,6 +349,120 @@
 	}
 }
 
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+	if cmd.err != nil {
+		return nil, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case []interface{}:
+		return val, nil
+	default:
+		return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+	}
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := make([]string, len(slice))
+	for i, iface := range slice {
+		val, err := toString(iface)
+		if err != nil {
+			return nil, err
+		}
+		ss[i] = val
+	}
+	return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]int64, len(slice))
+	for i, iface := range slice {
+		val, err := toInt64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]uint64, len(slice))
+	for i, iface := range slice {
+		val, err := toUint64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float32, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat32(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float64, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat64(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	bools := make([]bool, len(slice))
+	for i, iface := range slice {
+		val, err := toBool(iface)
+		if err != nil {
+			return nil, err
+		}
+		bools[i] = val
+	}
+	return bools, nil
+}
+
 func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
 	cmd.val, err = rd.ReadReply(sliceParser)
 	return err
@@ -359,6 +508,10 @@
 	}
 }
 
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+	cmd.val = val
+}
+
 func (cmd *SliceCmd) Val() []interface{} {
 	return cmd.val
 }
@@ -371,6 +524,26 @@
 	return cmdString(cmd, cmd.val)
 }
 
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the destination struct's fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	// Pass the list of keys and values.
+	// Skip the first two args for: HMGET key
+	var args []interface{}
+	if cmd.args[0] == "hmget" {
+		args = cmd.args[2:]
+	} else {
+		// Otherwise, it's: MGET field field ...
+		args = cmd.args[1:]
+	}
+
+	return hscan.Scan(dst, args, cmd.val)
+}
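+
+// exampleSliceScan is an illustrative sketch (not part of the upstream
+// change): it scans an HMGET reply into a struct via the Scan method above.
+// The key "user:1" and the field names are arbitrary.
+func exampleSliceScan(ctx context.Context, rdb *Client) error {
+	var dest struct {
+		Name string `redis:"name"`
+		Age  int    `redis:"age"`
+	}
+	// HMGET user:1 name age, scanned into dest by the `redis` tags.
+	return rdb.HMGet(ctx, "user:1", "name", "age").Scan(&dest)
+}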
+
 func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
 	v, err := rd.ReadArrayReply(sliceParser)
 	if err != nil {
@@ -399,6 +572,10 @@
 	}
 }
 
+func (cmd *StatusCmd) SetVal(val string) {
+	cmd.val = val
+}
+
 func (cmd *StatusCmd) Val() string {
 	return cmd.val
 }
@@ -435,6 +612,10 @@
 	}
 }
 
+func (cmd *IntCmd) SetVal(val int64) {
+	cmd.val = val
+}
+
 func (cmd *IntCmd) Val() int64 {
 	return cmd.val
 }
@@ -475,6 +656,10 @@
 	}
 }
 
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+	cmd.val = val
+}
+
 func (cmd *IntSliceCmd) Val() []int64 {
 	return cmd.val
 }
@@ -523,6 +708,10 @@
 	}
 }
 
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+	cmd.val = val
+}
+
 func (cmd *DurationCmd) Val() time.Duration {
 	return cmd.val
 }
@@ -570,6 +759,10 @@
 	}
 }
 
+func (cmd *TimeCmd) SetVal(val time.Time) {
+	cmd.val = val
+}
+
 func (cmd *TimeCmd) Val() time.Time {
 	return cmd.val
 }
@@ -623,6 +816,10 @@
 	}
 }
 
+func (cmd *BoolCmd) SetVal(val bool) {
+	cmd.val = val
+}
+
 func (cmd *BoolCmd) Val() bool {
 	return cmd.val
 }
@@ -677,6 +874,10 @@
 	}
 }
 
+func (cmd *StringCmd) SetVal(val string) {
+	cmd.val = val
+}
+
 func (cmd *StringCmd) Val() string {
 	return cmd.val
 }
@@ -689,6 +890,13 @@
 	return util.StringToBytes(cmd.val), cmd.err
 }
 
+func (cmd *StringCmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	return strconv.ParseBool(cmd.val)
+}
+
 func (cmd *StringCmd) Int() (int, error) {
 	if cmd.err != nil {
 		return 0, cmd.err
@@ -770,6 +978,10 @@
 	}
 }
 
+func (cmd *FloatCmd) SetVal(val float64) {
+	cmd.val = val
+}
+
 func (cmd *FloatCmd) Val() float64 {
 	return cmd.val
 }
@@ -789,6 +1001,59 @@
 
 //------------------------------------------------------------------------------
 
+type FloatSliceCmd struct {
+	baseCmd
+
+	val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+	return &FloatSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+	cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+	return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]float64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch num, err := rd.ReadFloatReply(); {
+			case err == Nil:
+				cmd.val[i] = 0
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = num
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
 type StringSliceCmd struct {
 	baseCmd
 
@@ -806,6 +1071,10 @@
 	}
 }
 
+func (cmd *StringSliceCmd) SetVal(val []string) {
+	cmd.val = val
+}
+
 func (cmd *StringSliceCmd) Val() []string {
 	return cmd.val
 }
@@ -859,6 +1128,10 @@
 	}
 }
 
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+	cmd.val = val
+}
+
 func (cmd *BoolSliceCmd) Val() []bool {
 	return cmd.val
 }
@@ -905,6 +1178,10 @@
 	}
 }
 
+func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
+	cmd.val = val
+}
+
 func (cmd *StringStringMapCmd) Val() map[string]string {
 	return cmd.val
 }
@@ -917,6 +1194,27 @@
 	return cmdString(cmd, cmd.val)
 }
 
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched to the struct fields by the `redis:"field"` tag.
+func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	strct, err := hscan.Struct(dest)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range cmd.val {
+		if err := strct.Scan(k, v); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
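+
+// exampleHGetAllScan is an illustrative sketch (not part of the upstream
+// change): HGETALL returns a StringStringMapCmd, so every key/value pair of
+// the hash is scanned through the Scan method above. Names are arbitrary.
+func exampleHGetAllScan(ctx context.Context, rdb *Client) error {
+	var dest struct {
+		Name string `redis:"name"`
+		Age  int    `redis:"age"`
+	}
+	return rdb.HGetAll(ctx, "user:1").Scan(&dest)
+}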
+
 func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
 	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
 		cmd.val = make(map[string]string, n/2)
@@ -957,6 +1255,10 @@
 	}
 }
 
+func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
+	cmd.val = val
+}
+
 func (cmd *StringIntMapCmd) Val() map[string]int64 {
 	return cmd.val
 }
@@ -1009,6 +1311,10 @@
 	}
 }
 
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+	cmd.val = val
+}
+
 func (cmd *StringStructMapCmd) Val() map[string]struct{} {
 	return cmd.val
 }
@@ -1060,6 +1366,10 @@
 	}
 }
 
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+	cmd.val = val
+}
+
 func (cmd *XMessageSliceCmd) Val() []XMessage {
 	return cmd.val
 }
@@ -1169,6 +1479,10 @@
 	}
 }
 
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+	cmd.val = val
+}
+
 func (cmd *XStreamSliceCmd) Val() []XStream {
 	return cmd.val
 }
@@ -1241,6 +1555,10 @@
 	}
 }
 
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+	cmd.val = val
+}
+
 func (cmd *XPendingCmd) Val() *XPending {
 	return cmd.val
 }
@@ -1343,6 +1661,10 @@
 	}
 }
 
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+	cmd.val = val
+}
+
 func (cmd *XPendingExtCmd) Val() []XPendingExt {
 	return cmd.val
 }
@@ -1403,6 +1725,233 @@
 
 //------------------------------------------------------------------------------
 
+type XAutoClaimCmd struct {
+	baseCmd
+
+	start string
+	val   []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+	return &XAutoClaimCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val, err = readXMessageSlice(rd)
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+	baseCmd
+
+	start string
+	val   []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+	return &XAutoClaimJustIDCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val = make([]string, nn)
+		for i := 0; i < nn; i++ {
+			cmd.val[i], err = rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+	baseCmd
+	val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+	Name    string
+	Pending int64
+	Idle    int64
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+	return &XInfoConsumersCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "consumers", stream, group},
+		},
+	}
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+	cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+	return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]XInfoConsumer, n)
+
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = readXConsumerInfo(rd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
+	var consumer XInfoConsumer
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return consumer, err
+	}
+	if n != 6 {
+		return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
+	}
+
+	for i := 0; i < 3; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		val, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		switch key {
+		case "name":
+			consumer.Name = val
+		case "pending":
+			consumer.Pending, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		case "idle":
+			consumer.Idle, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		default:
+			return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+		}
+	}
+
+	return consumer, nil
+}
+
+//------------------------------------------------------------------------------
+
 type XInfoGroupsCmd struct {
 	baseCmd
 	val []XInfoGroup
@@ -1426,6 +1975,10 @@
 	}
 }
 
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+	cmd.val = val
+}
+
 func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
 	return cmd.val
 }
@@ -1529,6 +2082,10 @@
 	}
 }
 
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+	cmd.val = val
+}
+
 func (cmd *XInfoStreamCmd) Val() *XInfoStream {
 	return cmd.val
 }
@@ -1574,8 +2131,14 @@
 			info.LastGeneratedID, err = rd.ReadString()
 		case "first-entry":
 			info.FirstEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
 		case "last-entry":
 			info.LastEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
 		default:
 			return nil, fmt.Errorf("redis: unexpected content %s "+
 				"in XINFO STREAM reply", key)
@@ -1589,6 +2152,306 @@
 
 //------------------------------------------------------------------------------
 
+type XInfoStreamFullCmd struct {
+	baseCmd
+	val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+	Length          int64
+	RadixTreeKeys   int64
+	RadixTreeNodes  int64
+	LastGeneratedID string
+	Entries         []XMessage
+	Groups          []XInfoStreamGroup
+}
+
+type XInfoStreamGroup struct {
+	Name            string
+	LastDeliveredID string
+	PelCount        int64
+	Pending         []XInfoStreamGroupPending
+	Consumers       []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+	ID            string
+	Consumer      string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+	Name     string
+	SeenTime time.Time
+	PelCount int64
+	Pending  []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+	ID            string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+	return &XInfoStreamFullCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+	cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+	return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	if n != 12 {
+		return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+			"wanted 12", n)
+	}
+
+	cmd.val = &XInfoStreamFull{}
+
+	for i := 0; i < 6; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return err
+		}
+
+		switch key {
+		case "length":
+			cmd.val.Length, err = rd.ReadIntReply()
+		case "radix-tree-keys":
+			cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
+		case "radix-tree-nodes":
+			cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
+		case "last-generated-id":
+			cmd.val.LastGeneratedID, err = rd.ReadString()
+		case "entries":
+			cmd.val.Entries, err = readXMessageSlice(rd)
+		case "groups":
+			cmd.val.Groups, err = readStreamGroups(rd)
+		default:
+			return fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO STREAM reply", key)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+	groups := make([]XInfoStreamGroup, 0, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 10 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 10", nn)
+		}
+
+		group := XInfoStreamGroup{}
+
+		for f := 0; f < 5; f++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch key {
+			case "name":
+				group.Name, err = rd.ReadString()
+			case "last-delivered-id":
+				group.LastDeliveredID, err = rd.ReadString()
+			case "pel-count":
+				group.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				group.Pending, err = readXInfoStreamGroupPending(rd)
+			case "consumers":
+				group.Consumers, err = readXInfoStreamConsumers(rd)
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", key)
+			}
+
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		groups = append(groups, group)
+	}
+
+	return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	pending := make([]XInfoStreamGroupPending, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 4 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 4", nn)
+		}
+
+		p := XInfoStreamGroupPending{}
+
+		p.ID, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		p.Consumer, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		delivery, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+		p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+		p.DeliveryCount, err = rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		pending = append(pending, p)
+	}
+
+	return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	consumers := make([]XInfoStreamConsumer, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 8 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 8", nn)
+		}
+
+		c := XInfoStreamConsumer{}
+
+		for f := 0; f < 4; f++ {
+			cKey, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch cKey {
+			case "name":
+				c.Name, err = rd.ReadString()
+			case "seen-time":
+				seen, err := rd.ReadIntReply()
+				if err != nil {
+					return nil, err
+				}
+				c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
+			case "pel-count":
+				c.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				pendingNumber, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+
+				c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+				for pn := 0; pn < pendingNumber; pn++ {
+					nn, err := rd.ReadArrayLen()
+					if err != nil {
+						return nil, err
+					}
+					if nn != 3 {
+						return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+							"wanted 3", nn)
+					}
+
+					p := XInfoStreamConsumerPending{}
+
+					p.ID, err = rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					delivery, err := rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+					p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+					p.DeliveryCount, err = rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+
+					c.Pending = append(c.Pending, p)
+				}
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", cKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+		consumers = append(consumers, c)
+	}
+
+	return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
 type ZSliceCmd struct {
 	baseCmd
 
@@ -1606,6 +2469,10 @@
 	}
 }
 
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+	cmd.val = val
+}
+
 func (cmd *ZSliceCmd) Val() []Z {
 	return cmd.val
 }
@@ -1661,6 +2528,10 @@
 	}
 }
 
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+	cmd.val = val
+}
+
 func (cmd *ZWithKeyCmd) Val() *ZWithKey {
 	return cmd.val
 }
@@ -1725,6 +2596,11 @@
 	}
 }
 
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+	cmd.page = page
+	cmd.cursor = cursor
+}
+
 func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
 	return cmd.page, cmd.cursor
 }
@@ -1779,6 +2655,10 @@
 	}
 }
 
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+	cmd.val = val
+}
+
 func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
 	return cmd.val
 }
@@ -1933,6 +2813,10 @@
 	return args
 }
 
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+	cmd.locations = locations
+}
+
 func (cmd *GeoLocationCmd) Val() []GeoLocation {
 	return cmd.locations
 }
@@ -2024,6 +2908,192 @@
 
 //------------------------------------------------------------------------------
 
+// GeoSearchQuery is used for GEOSEARCH/GEOSEARCHSTORE command queries.
+type GeoSearchQuery struct {
+	Member string
+
+	// Longitude and Latitude when using the FromLonLat option.
+	Longitude float64
+	Latitude  float64
+
+	// Distance and unit when using ByRadius option.
+	// Can use m, km, ft, or mi. Default is km.
+	Radius     float64
+	RadiusUnit string
+
+	// Width, height and unit when using the ByBox option.
+	// Can be m, km, ft, or mi. Default is km.
+	BoxWidth  float64
+	BoxHeight float64
+	BoxUnit   string
+
+	// Can be ASC or DESC. Default is no sort order.
+	Sort     string
+	Count    int
+	CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+	GeoSearchQuery
+
+	WithCoord bool
+	WithDist  bool
+	WithHash  bool
+}
+
+type GeoSearchStoreQuery struct {
+	GeoSearchQuery
+
+	// When using the StoreDist option, the command stores the items in a
+	// sorted set populated with their distance from the center of the circle or box,
+	// as a floating-point number, in the same unit specified for that shape.
+	StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithHash {
+		args = append(args, "withhash")
+	}
+
+	return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+	if q.Member != "" {
+		args = append(args, "frommember", q.Member)
+	} else {
+		args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+	}
+
+	if q.Radius > 0 {
+		if q.RadiusUnit == "" {
+			q.RadiusUnit = "km"
+		}
+		args = append(args, "byradius", q.Radius, q.RadiusUnit)
+	} else {
+		if q.BoxUnit == "" {
+			q.BoxUnit = "km"
+		}
+		args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+	}
+
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+		if q.CountAny {
+			args = append(args, "any")
+		}
+	}
+
+	return args
+}
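+
+// exampleGeoSearchArgs is an illustrative sketch (not part of the upstream
+// change): it shows the argument list the builders above produce for a
+// radius search around a member. All values are arbitrary.
+func exampleGeoSearchArgs() []interface{} {
+	q := &GeoSearchLocationQuery{
+		GeoSearchQuery: GeoSearchQuery{
+			Member:     "Palermo",
+			Radius:     200,
+			RadiusUnit: "km",
+			Sort:       "ASC",
+		},
+		WithCoord: true,
+		WithDist:  true,
+	}
+	// -> geosearch Sicily frommember Palermo byradius 200 km ASC withcoord withdist
+	return geoSearchLocationArgs(q, []interface{}{"geosearch", "Sicily"})
+}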
+
+type GeoSearchLocationCmd struct {
+	baseCmd
+
+	opt *GeoSearchLocationQuery
+	val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+	ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+	return &GeoSearchLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		opt: opt,
+	}
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+	cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+	return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]GeoLocation, n)
+	for i := 0; i < n; i++ {
+		_, err = rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+
+		var loc GeoLocation
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return err
+		}
+		if cmd.opt.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithCoord {
+			nn, err := rd.ReadArrayLen()
+			if err != nil {
+				return err
+			}
+			if nn != 2 {
+				return fmt.Errorf("got %d coordinates, expected 2", nn)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+
+		cmd.val[i] = loc
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
 type GeoPos struct {
 	Longitude, Latitude float64
 }
@@ -2045,6 +3115,10 @@
 	}
 }
 
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+	cmd.val = val
+}
+
 func (cmd *GeoPosCmd) Val() []*GeoPos {
 	return cmd.val
 }
@@ -2122,6 +3196,10 @@
 	}
 }
 
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+	cmd.val = val
+}
+
 func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
 	return cmd.val
 }
@@ -2309,6 +3387,10 @@
 	}
 }
 
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+	cmd.val = val
+}
+
 func (cmd *SlowLogCmd) Val() []SlowLog {
 	return cmd.val
 }
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
index 79698ba..bbfe089 100644
--- a/vendor/github.com/go-redis/redis/v8/commands.go
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -9,7 +9,8 @@
 	"github.com/go-redis/redis/v8/internal"
 )
 
-// KeepTTL is an option for Set command to keep key's existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive the error:
+// (error) ERR syntax error.
 // For example:
 //
 //    rdb.Set(ctx, key, value, redis.KeepTTL)
@@ -67,6 +68,11 @@
 			dst = append(dst, k, v)
 		}
 		return dst
+	case map[string]string:
+		for k, v := range arg {
+			dst = append(dst, k, v)
+		}
+		return dst
 	default:
 		return append(dst, arg)
 	}
@@ -90,6 +96,10 @@
 	Exists(ctx context.Context, keys ...string) *IntCmd
 	Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
 	ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+	ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
 	Keys(ctx context.Context, pattern string) *StringSliceCmd
 	Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
 	Move(ctx context.Context, key string, db int) *BoolCmd
@@ -117,6 +127,8 @@
 	Get(ctx context.Context, key string) *StringCmd
 	GetRange(ctx context.Context, key string, start, end int64) *StringCmd
 	GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+	GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+	GetDel(ctx context.Context, key string) *StringCmd
 	Incr(ctx context.Context, key string) *IntCmd
 	IncrBy(ctx context.Context, key string, value int64) *IntCmd
 	IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
@@ -124,11 +136,14 @@
 	MSet(ctx context.Context, values ...interface{}) *StatusCmd
 	MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
 	Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+	// TODO: rename to SetEx
 	SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
 	SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
 	SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
 	SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
 	StrLen(ctx context.Context, key string) *IntCmd
+	Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
 
 	GetBit(ctx context.Context, key string, offset int64) *IntCmd
 	SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
@@ -141,6 +156,7 @@
 	BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
 
 	Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+	ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
 	SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
 	HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
 	ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
@@ -158,6 +174,7 @@
 	HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
 	HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
 	HVals(ctx context.Context, key string) *StringSliceCmd
+	HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
 
 	BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
 	BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
@@ -168,6 +185,7 @@
 	LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
 	LLen(ctx context.Context, key string) *IntCmd
 	LPop(ctx context.Context, key string) *StringCmd
+	LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
 	LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
 	LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
 	LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
@@ -177,9 +195,12 @@
 	LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
 	LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
 	RPop(ctx context.Context, key string) *StringCmd
+	RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
 	RPopLPush(ctx context.Context, source, destination string) *StringCmd
 	RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
 	RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+	BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
 
 	SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
 	SCard(ctx context.Context, key string) *IntCmd
@@ -188,6 +209,7 @@
 	SInter(ctx context.Context, keys ...string) *StringSliceCmd
 	SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
 	SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+	SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
 	SMembers(ctx context.Context, key string) *StringSliceCmd
 	SMembersMap(ctx context.Context, key string) *StringStructMapCmd
 	SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
@@ -212,6 +234,7 @@
 	XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
 	XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
 	XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+	XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
 	XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
 	XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
 	XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
@@ -219,19 +242,42 @@
 	XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
 	XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
 	XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+	XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+	XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+
+	// TODO: remove XTrim and XTrimApprox in v9.
 	XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
 	XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+	XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+	XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
 	XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
 	XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+	XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+	XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
 
 	BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
 	BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+
+	// TODO: remove
+	//		ZAddCh
+	//		ZIncr
+	//		ZAddNXCh
+	//		ZAddXXCh
+	//		ZIncrNX
+	//		ZIncrXX
+	// 	in v9.
+	// 	use ZAddArgs and ZAddArgsIncr.
+
 	ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
 	ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+	ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
 	ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
 	ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
 	ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
@@ -239,7 +285,10 @@
 	ZCount(ctx context.Context, key, min, max string) *IntCmd
 	ZLexCount(ctx context.Context, key, min, max string) *IntCmd
 	ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+	ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+	ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
 	ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+	ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
 	ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
 	ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
 	ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
@@ -247,6 +296,9 @@
 	ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
 	ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
 	ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+	ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+	ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+	ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
 	ZRank(ctx context.Context, key, member string) *IntCmd
 	ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
 	ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
@@ -260,6 +312,12 @@
 	ZRevRank(ctx context.Context, key, member string) *IntCmd
 	ZScore(ctx context.Context, key, member string) *FloatCmd
 	ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+	ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+	ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+	ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
+	ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+	ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+	ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
 
 	PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
 	PFCount(ctx context.Context, keys ...string) *IntCmd
@@ -332,6 +390,9 @@
 	GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
 	GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
 	GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+	GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+	GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
 	GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
 	GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
 }
@@ -364,7 +425,7 @@
 	return cmd
 }
 
-// Perform an AUTH command, using the given user and pass.
+// AuthACL performs an AUTH command using the given user and pass.
 // Should be used to authenticate the current connection with one of the connections defined in the ACL list
 // when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
 func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
@@ -375,6 +436,7 @@
 
 func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
 	cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+	cmd.setReadTimeout(timeout)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -425,7 +487,7 @@
 	return cmd
 }
 
-func (c cmdable) Quit(ctx context.Context) *StatusCmd {
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
 	panic("not implemented")
 }
 
@@ -469,7 +531,37 @@
 }
 
 func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
-	cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration))
+	return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+	ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+	args := make([]interface{}, 3, 4)
+	args[0] = "expire"
+	args[1] = key
+	args[2] = formatSec(ctx, expiration)
+	if mode != "" {
+		args = append(args, mode)
+	}
+
+	cmd := NewBoolCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
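+
+// exampleExpireGT is an illustrative sketch (not part of the upstream
+// change): the NX/XX/GT/LT variants map onto the optional mode argument
+// built above and require redis-server >= 7.0. The key name is arbitrary.
+func exampleExpireGT(ctx context.Context, rdb *Client) (bool, error) {
+	// EXPIRE mykey 600 GT: only lengthens the TTL, never shortens it.
+	return rdb.ExpireGT(ctx, "mykey", 10*time.Minute).Result()
+}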
@@ -688,7 +780,7 @@
 	return cmd
 }
 
-// Redis `GET key` command. It returns redis.Nil error when key does not exist.
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
 func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
 	cmd := NewStringCmd(ctx, "get", key)
 	_ = c(ctx, cmd)
@@ -707,6 +799,33 @@
 	return cmd
 }
 
+// GetEx gets the value of key and optionally sets its expiration. An
+// expiration of zero removes the TTL associated with the key (i.e. GETEX key PERSIST).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+	args := make([]interface{}, 0, 4)
+	args = append(args, "getex", key)
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == 0 {
+		args = append(args, "persist")
+	}
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GetDel gets the value of key and deletes the key. Requires redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "getdel", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
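+
+// exampleGetEx is an illustrative sketch (not part of the upstream change):
+// GETEX reads a key while adjusting its TTL; passing 0 maps to PERSIST as
+// built above. Requires redis-server >= 6.2. The key name is arbitrary.
+func exampleGetEx(ctx context.Context, rdb *Client) (string, error) {
+	// GETEX mykey ex 60
+	return rdb.GetEx(ctx, "mykey", time.Minute).Result()
+}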
+
 func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
 	cmd := NewIntCmd(ctx, "incr", key)
 	_ = c(ctx, cmd)
@@ -762,11 +881,12 @@
 	return cmd
 }
 
-// Redis `SET key value [expiration]` command.
+// Set Redis `SET key value [expiration]` command.
 // Use expiration for `SETEX`-like behavior.
 //
 // Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive the error:
+// (error) ERR syntax error.
 func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
 	args := make([]interface{}, 3, 5)
 	args[0] = "set"
@@ -787,17 +907,69 @@
 	return cmd
 }
 
-// Redis `SETEX key expiration value` command.
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+	// Mode can be `NX` or `XX` or empty.
+	Mode string
+
+	// Zero `TTL` or `ExpireAt` means that the key has no expiration time.
+	TTL      time.Duration
+	ExpireAt time.Time
+
+	// When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+	Get bool
+
+	// KeepTTL is a Redis KEEPTTL option to keep existing TTL, it requires your redis-server version >= 6.0,
+	// otherwise you will receive an error: (error) ERR syntax error.
+	KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if a.KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	if !a.ExpireAt.IsZero() {
+		args = append(args, "exat", a.ExpireAt.Unix())
+	}
+	if a.TTL > 0 {
+		if usePrecise(a.TTL) {
+			args = append(args, "px", formatMs(ctx, a.TTL))
+		} else {
+			args = append(args, "ex", formatSec(ctx, a.TTL))
+		}
+	}
+
+	if a.Mode != "" {
+		args = append(args, a.Mode)
+	}
+
+	if a.Get {
+		args = append(args, "get")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
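+
+// exampleSetArgs is an illustrative sketch (not part of the upstream change):
+// it shows how the SetArgs fields map onto the SET options assembled above.
+// The key and value are arbitrary.
+func exampleSetArgs(ctx context.Context, rdb *Client) (string, error) {
+	// SET session v1 ex 60 nx
+	return rdb.SetArgs(ctx, "session", "v1", SetArgs{
+		Mode: "NX",
+		TTL:  time.Minute,
+	}).Result()
+}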
+
+// SetEX Redis `SETEX key expiration value` command.
 func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
 	cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SET key value [expiration] NX` command.
+// SetNX Redis `SET key value [expiration] NX` command.
 //
 // Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive the error:
+// (error) ERR syntax error.
 func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
 	var cmd *BoolCmd
 	switch expiration {
@@ -818,10 +990,11 @@
 	return cmd
 }
 
-// Redis `SET key value [expiration] XX` command.
+// SetXX Redis `SET key value [expiration] XX` command.
 //
 // Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires
+// redis-server version >= 6.0; otherwise you will receive the error:
+// (error) ERR syntax error.
 func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
 	var cmd *BoolCmd
 	switch expiration {
@@ -853,6 +1026,16 @@
 	return cmd
 }
 
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+	args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+	if replace {
+		args = append(args, "REPLACE")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
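+
+// exampleCopy is an illustrative sketch (not part of the upstream change):
+// COPY duplicates src into dst in database 0, overwriting dst because
+// replace is true. Requires redis-server >= 6.2. Key names are arbitrary.
+func exampleCopy(ctx context.Context, rdb *Client) (int64, error) {
+	// COPY src dst DB 0 REPLACE; returns 1 on success, 0 otherwise.
+	return rdb.Copy(ctx, "src", "dst", 0, true).Result()
+}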
+
 //------------------------------------------------------------------------------
 
 func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
@@ -965,6 +1148,22 @@
 	return cmd
 }
 
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	if keyType != "" {
+		args = append(args, "type", keyType)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
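+
+// exampleScanType is an illustrative sketch (not part of the upstream
+// change): SCAN ... TYPE filters keys by their type on the server side.
+// Requires redis-server >= 6.0. The match pattern is arbitrary.
+func exampleScanType(ctx context.Context, rdb *Client) ([]string, uint64, error) {
+	// SCAN 0 match user:* count 100 type hash
+	return rdb.ScanType(ctx, 0, "user:*", 100, "hash").Result()
+}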
+
 func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
 	args := []interface{}{"sscan", key, cursor}
 	if match != "" {
@@ -1113,6 +1312,21 @@
 	return cmd
 }
 
+// HRandField returns count random fields from the hash stored at key.
+// Requires redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "hrandfield", key, count)
+	if withValues {
+		args = append(args, "withvalues")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
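+
+// exampleHRandField is an illustrative sketch (not part of the upstream
+// change): with withValues the reply alternates field, value. The key name
+// is arbitrary.
+func exampleHRandField(ctx context.Context, rdb *Client) ([]string, error) {
+	// HRANDFIELD user:1 2 withvalues
+	return rdb.HRandField(ctx, "user:1", 2, true).Result()
+}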
+
 //------------------------------------------------------------------------------
 
 func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
@@ -1190,6 +1404,12 @@
 	return cmd
 }
 
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 type LPosArgs struct {
 	Rank, MaxLen int64
 }
@@ -1283,6 +1503,12 @@
 	return cmd
 }
 
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
 	cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
 	_ = c(ctx, cmd)
@@ -1309,6 +1535,21 @@
 	return cmd
 }
 
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+	cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BLMove(
+	ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+	cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
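+
+// exampleLMove is an illustrative sketch (not part of the upstream change):
+// LMOVE atomically pops from one side of source and pushes to one side of
+// destination, replacing RPOPLPUSH. Key names are arbitrary.
+func exampleLMove(ctx context.Context, rdb *Client) (string, error) {
+	// LMOVE src dst LEFT RIGHT
+	return rdb.LMove(ctx, "src", "dst", "LEFT", "RIGHT").Result()
+}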
+
 //------------------------------------------------------------------------------
 
 func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
@@ -1379,14 +1620,25 @@
 	return cmd
 }
 
-// Redis `SMEMBERS key` command output as a slice.
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "smismember"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewBoolSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
 func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
 	cmd := NewStringSliceCmd(ctx, "smembers", key)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SMEMBERS key` command output as a map.
+// SMembersMap Redis `SMEMBERS key` command output as a map.
 func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
 	cmd := NewStringStructMapCmd(ctx, "smembers", key)
 	_ = c(ctx, cmd)
@@ -1399,28 +1651,28 @@
 	return cmd
 }
 
-// Redis `SPOP key` command.
+// SPop Redis `SPOP key` command.
 func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
 	cmd := NewStringCmd(ctx, "spop", key)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SPOP key count` command.
+// SPopN Redis `SPOP key count` command.
 func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
 	cmd := NewStringSliceCmd(ctx, "spop", key, count)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SRANDMEMBER key` command.
+// SRandMember Redis `SRANDMEMBER key` command.
 func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
 	cmd := NewStringCmd(ctx, "srandmember", key)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `SRANDMEMBER key count` command.
+// SRandMemberN Redis `SRANDMEMBER key count` command.
 func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
 	cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
 	_ = c(ctx, cmd)
@@ -1468,22 +1720,50 @@
 //   - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
 //
 // Note that map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
 type XAddArgs struct {
-	Stream       string
-	MaxLen       int64 // MAXLEN N
+	Stream     string
+	NoMkStream bool
+	MaxLen     int64 // MAXLEN N
+
+	// Deprecated: use MaxLen+Approx; will be removed in v9.
 	MaxLenApprox int64 // MAXLEN ~ N
-	ID           string
-	Values       interface{}
+
+	MinID string
+	// Approx causes MaxLen and MinID to use the "~" matcher (instead of "=").
+	Approx bool
+	Limit  int64
+	ID     string
+	Values interface{}
 }
 
+// XAdd adds an entry to a stream. Note: a.Limit is affected by a known
+// redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
 func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
-	args := make([]interface{}, 0, 8)
-	args = append(args, "xadd")
-	args = append(args, a.Stream)
-	if a.MaxLen > 0 {
-		args = append(args, "maxlen", a.MaxLen)
-	} else if a.MaxLenApprox > 0 {
+	args := make([]interface{}, 0, 11)
+	args = append(args, "xadd", a.Stream)
+	if a.NoMkStream {
+		args = append(args, "nomkstream")
+	}
+	switch {
+	case a.MaxLen > 0:
+		if a.Approx {
+			args = append(args, "maxlen", "~", a.MaxLen)
+		} else {
+			args = append(args, "maxlen", a.MaxLen)
+		}
+	case a.MaxLenApprox > 0:
+		// TODO remove in v9.
 		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	case a.MinID != "":
+		if a.Approx {
+			args = append(args, "minid", "~", a.MinID)
+		} else {
+			args = append(args, "minid", a.MinID)
+		}
+	}
+	if a.Limit > 0 {
+		args = append(args, "limit", a.Limit)
 	}
 	if a.ID != "" {
 		args = append(args, a.ID)
@@ -1544,7 +1824,7 @@
 }
 
 func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
-	args := make([]interface{}, 0, 5+len(a.Streams))
+	args := make([]interface{}, 0, 6+len(a.Streams))
 	args = append(args, "xread")
 
 	keyPos := int8(1)
@@ -1568,7 +1848,7 @@
 	if a.Block >= 0 {
 		cmd.setReadTimeout(a.Block)
 	}
-	cmd.setFirstKeyPos(keyPos)
+	cmd.SetFirstKeyPos(keyPos)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1604,6 +1884,12 @@
 	return cmd
 }
 
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
 	cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
 	_ = c(ctx, cmd)
@@ -1620,10 +1906,10 @@
 }
 
 func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
-	args := make([]interface{}, 0, 8+len(a.Streams))
+	args := make([]interface{}, 0, 10+len(a.Streams))
 	args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
 
-	keyPos := int8(1)
+	keyPos := int8(4)
 	if a.Count > 0 {
 		args = append(args, "count", a.Count)
 		keyPos += 2
@@ -1646,7 +1932,7 @@
 	if a.Block >= 0 {
 		cmd.setReadTimeout(a.Block)
 	}
-	cmd.setFirstKeyPos(keyPos)
+	cmd.SetFirstKeyPos(keyPos)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1670,6 +1956,7 @@
 type XPendingExtArgs struct {
 	Stream   string
 	Group    string
+	Idle     time.Duration
 	Start    string
 	End      string
 	Count    int64
@@ -1677,8 +1964,12 @@
 }
 
 func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
-	args := make([]interface{}, 0, 7)
-	args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+	args := make([]interface{}, 0, 9)
+	args = append(args, "xpending", a.Stream, a.Group)
+	if a.Idle != 0 {
+		args = append(args, "idle", formatMs(ctx, a.Idle))
+	}
+	args = append(args, a.Start, a.End, a.Count)
 	if a.Consumer != "" {
 		args = append(args, a.Consumer)
 	}
@@ -1687,6 +1978,39 @@
 	return cmd
 }
 
+type XAutoClaimArgs struct {
+	Stream   string
+	Group    string
+	MinIdle  time.Duration
+	Start    string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+	args := xAutoClaimArgs(ctx, a)
+	cmd := NewXAutoClaimCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+	args := xAutoClaimArgs(ctx, a)
+	args = append(args, "justid")
+	cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 8)
+	args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	return args
+}
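+
+// exampleXAutoClaim is an illustrative sketch (not part of the upstream
+// change): it claims up to 10 entries idle for at least 30s, starting the
+// scan at 0-0. Stream, group and consumer names are arbitrary.
+func exampleXAutoClaim(ctx context.Context, rdb *Client) ([]XMessage, string, error) {
+	// XAUTOCLAIM mystream mygroup consumer1 30000 0-0 count 10
+	return rdb.XAutoClaim(ctx, &XAutoClaimArgs{
+		Stream:   "mystream",
+		Group:    "mygroup",
+		Consumer: "consumer1",
+		MinIdle:  30 * time.Second,
+		Start:    "0-0",
+		Count:    10,
+	}).Result()
+}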
+
 type XClaimArgs struct {
 	Stream   string
 	Group    string
@@ -1711,7 +2035,7 @@
 }
 
 func xClaimArgs(a *XClaimArgs) []interface{} {
-	args := make([]interface{}, 0, 4+len(a.Messages))
+	args := make([]interface{}, 0, 5+len(a.Messages))
 	args = append(args,
 		"xclaim",
 		a.Stream,
@@ -1723,14 +2047,67 @@
 	return args
 }
 
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
-	cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+// xTrim builds an XTRIM command. If approx is true, the "~" modifier is added;
+// otherwise the default exact "=" trimming is used. For example:
+//		XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//		XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+// If the redis-server version is lower than 6.2, set limit to 0.
+func (c cmdable) xTrim(
+	ctx context.Context, key, strategy string,
+	approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xtrim", key, strategy)
+	if approx {
+		args = append(args, "~")
+	}
+	args = append(args, threshold)
+	if limit > 0 {
+		args = append(args, "limit", limit)
+	}
+	cmd := NewIntCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
+// Deprecated: use XTrimMaxLen; will be removed in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox; will be removed in v9.
 func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
-	cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen trims exactly: no `~` modifier is used, and `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox trims approximately. Note: LIMIT is affected by a known
+// redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID trims exactly: no `~` modifier is used, and `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+	return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox trims approximately. Note: LIMIT is affected by a known
+// redis-server bug; confirm the behavior before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
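+
+// exampleXTrim is an illustrative sketch (not part of the upstream change):
+// approximate trimming lets the server trim in whole radix-tree nodes, which
+// is much cheaper than exact trimming. limit=0 avoids the LIMIT bug noted
+// above. The stream name is arbitrary.
+func exampleXTrim(ctx context.Context, rdb *Client) (int64, error) {
+	// XTRIM mystream MAXLEN ~ 1000
+	return rdb.XTrimMaxLenApprox(ctx, "mystream", 1000, 0).Result()
+}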
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+	cmd := NewXInfoConsumersCmd(ctx, key, group)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1747,6 +2124,19 @@
 	return cmd
 }
 
+// XInfoStreamFull XINFO STREAM key FULL [COUNT count]
+// Requires redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+	args := make([]interface{}, 0, 6)
+	args = append(args, "xinfo", "stream", key, "full")
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewXInfoStreamFullCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 //------------------------------------------------------------------------------
 
 // Z represents sorted set member.
@@ -1761,7 +2151,7 @@
 	Key string
 }
 
-// ZStore is used as an arg to ZInterStore and ZUnionStore.
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
 type ZStore struct {
 	Keys    []string
 	Weights []float64
@@ -1769,7 +2159,34 @@
 	Aggregate string
 }
 
-// Redis `BZPOPMAX key [key ...] timeout` command.
+func (z ZStore) len() (n int) {
+	n = len(z.Keys)
+	if len(z.Weights) > 0 {
+		n += 1 + len(z.Weights)
+	}
+	if z.Aggregate != "" {
+		n += 2
+	}
+	return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+	for _, key := range z.Keys {
+		args = append(args, key)
+	}
+	if len(z.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weight := range z.Weights {
+			args = append(args, weight)
+		}
+	}
+	if z.Aggregate != "" {
+		args = append(args, "aggregate", z.Aggregate)
+	}
+	return args
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
 func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
 	args := make([]interface{}, 1+len(keys)+1)
 	args[0] = "bzpopmax"
@@ -1783,7 +2200,7 @@
 	return cmd
 }
 
-// Redis `BZPOPMIN key [key ...] timeout` command.
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
 func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
 	args := make([]interface{}, 1+len(keys)+1)
 	args[0] = "bzpopmin"
@@ -1797,96 +2214,169 @@
 	return cmd
 }
 
-func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
-	for i, m := range members {
-		a[n+2*i] = m.Score
-		a[n+2*i+1] = m.Member
+// ZAddArgs specifies the arguments of the ZADD command. WARN: the GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+	NX      bool
+	XX      bool
+	LT      bool
+	GT      bool
+	Ch      bool
+	Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+	a := make([]interface{}, 0, 6+2*len(args.Members))
+	a = append(a, "zadd", key)
+
+	// The GT, LT and NX options are mutually exclusive.
+	if args.NX {
+		a = append(a, "nx")
+	} else {
+		if args.XX {
+			a = append(a, "xx")
+		}
+		if args.GT {
+			a = append(a, "gt")
+		} else if args.LT {
+			a = append(a, "lt")
+		}
 	}
-	cmd := NewIntCmd(ctx, a...)
+	if args.Ch {
+		a = append(a, "ch")
+	}
+	if incr {
+		a = append(a, "incr")
+	}
+	for _, m := range args.Members {
+		a = append(a, m.Score)
+		a = append(a, m.Member)
+	}
+	return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
-// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+	cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
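+// Illustrative usage sketch for ZAddArgs (not upstream code; assumes a
+// *redis.Client named rdb, a context.Context named ctx, and the package
+// imported as redis):
+//
+//	// Only move scores upward (GT) and count changed members (CH).
+//	changed, err := rdb.ZAddArgs(ctx, "leaderboard", redis.ZAddArgs{
+//		GT:      true,
+//		Ch:      true,
+//		Members: []redis.Z{{Score: 42, Member: "alice"}},
+//	}).Result()
+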
+// TODO: kept for compatibility with the v8 API; will be removed in v9.
+func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
+	args.Members = make([]Z, len(members))
+	for i, m := range members {
+		args.Members[i] = *m
+	}
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
 func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 2
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1] = "zadd", key
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{}, members...)
 }
 
-// Redis `ZADD key NX score member [score member ...]` command.
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
 func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 3
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2] = "zadd", key, "nx"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+	}, members...)
 }
 
-// Redis `ZADD key XX score member [score member ...]` command.
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
 func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 3
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2] = "zadd", key, "xx"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+	}, members...)
 }
 
-// Redis `ZADD key CH score member [score member ...]` command.
+// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, key, ZAddArgs{
+//			Ch:      true,
+//			Members: []Z{...},
+//		})
+//	instead; will be removed in v9.
 func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 3
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2] = "zadd", key, "ch"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		Ch: true,
+	}, members...)
 }
 
-// Redis `ZADD key NX CH score member [score member ...]` command.
+// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, key, ZAddArgs{
+//			NX:      true,
+//			Ch:      true,
+//			Members: []Z{...},
+//		})
+//	instead; will be removed in v9.
 func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 4
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+		Ch: true,
+	}, members...)
 }
 
-// Redis `ZADD key XX CH score member [score member ...]` command.
+// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, key, ZAddArgs{
+//			XX:      true,
+//			Ch:      true,
+//			Members: []Z{...},
+//		})
+//	instead; will be removed in v9.
 func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
-	const n = 4
-	a := make([]interface{}, n+2*len(members))
-	a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
-	return c.zAdd(ctx, a, n, members...)
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+		Ch: true,
+	}, members...)
 }
 
-func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
-	for i, m := range members {
-		a[n+2*i] = m.Score
-		a[n+2*i+1] = m.Member
-	}
-	cmd := NewFloatCmd(ctx, a...)
-	_ = c(ctx, cmd)
-	return cmd
-}
-
-// Redis `ZADD key INCR score member` command.
+// ZIncr Redis `ZADD key INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, key, ZAddArgs{
+//			Members: []Z{...},
+//		})
+//	instead; will be removed in v9.
 func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
-	const n = 3
-	a := make([]interface{}, n+2)
-	a[0], a[1], a[2] = "zadd", key, "incr"
-	return c.zIncr(ctx, a, n, member)
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		Members: []Z{*member},
+	})
 }
 
-// Redis `ZADD key NX INCR score member` command.
+// ZIncrNX Redis `ZADD key NX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, key, ZAddArgs{
+//			NX:      true,
+//			Members: []Z{...},
+//		})
+//	instead; will be removed in v9.
 func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
-	const n = 4
-	a := make([]interface{}, n+2)
-	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
-	return c.zIncr(ctx, a, n, member)
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		NX:      true,
+		Members: []Z{*member},
+	})
 }
 
-// Redis `ZADD key XX INCR score member` command.
+// ZIncrXX Redis `ZADD key XX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, key, ZAddArgs{
+//			XX:      true,
+//			Members: []Z{...},
+//		})
+//	instead; will be removed in v9.
 func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
-	const n = 4
-	a := make([]interface{}, n+2)
-	a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
-	return c.zIncr(ctx, a, n, member)
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		XX:      true,
+		Members: []Z{*member},
+	})
 }
 
 func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
@@ -1914,24 +2404,44 @@
 }
 
 func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
-	args := make([]interface{}, 3+len(store.Keys))
-	args[0] = "zinterstore"
-	args[1] = destination
-	args[2] = len(store.Keys)
-	for i, key := range store.Keys {
-		args[3+i] = key
-	}
-	if len(store.Weights) > 0 {
-		args = append(args, "weights")
-		for _, weight := range store.Weights {
-			args = append(args, weight)
-		}
-	}
-	if store.Aggregate != "" {
-		args = append(args, "aggregate", store.Aggregate)
-	}
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinterstore", destination, len(store.Keys))
+	args = store.appendArgs(args)
 	cmd := NewIntCmd(ctx, args...)
-	cmd.setFirstKeyPos(3)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
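+// Illustrative usage sketch for ZStore (not upstream code; assumes a
+// *redis.Client named rdb, a context.Context named ctx, and the package
+// imported as redis):
+//
+//	stored, err := rdb.ZInterStore(ctx, "dest", &redis.ZStore{
+//		Keys:      []string{"zset1", "zset2"},
+//		Weights:   []float64{2, 3},
+//		Aggregate: "max",
+//	}).Result()
+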
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "zmscore"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewFloatSliceCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -1976,29 +2486,112 @@
 	return cmd
 }
 
-func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
-	args := []interface{}{
-		"zrange",
-		key,
-		start,
-		stop,
+// ZRangeArgs is all the options of the ZRange command.
+// With redis-server >= 6.2.0, it can replace the following commands:
+//		ZREVRANGE,
+//		ZRANGEBYSCORE,
+//		ZREVRANGEBYSCORE,
+//		ZRANGEBYLEX,
+//		ZREVRANGEBYLEX.
+// Please check your redis-server version before using these options.
+//
+// The Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 or higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, open (exclusive) intervals can be set.
+	// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+	// It is similar to the ZRangeByScore command, which is deprecated as of 6.2.0.
+	// For example:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"(3",
+	//	 		Stop: 				8,
+	//			ByScore:			true,
+	//	 	}
+	// 	 	cmd: "ZRange example-key (3 8 ByScore"  (3 < score <= 8).
+	//
+	// The ByLex option is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
+	// You can set the <Start> and <Stop> options as follows:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"[abc",
+	//	 		Stop: 				"(def",
+	//			ByLex:				true,
+	//	 	}
+	//		cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+	// You can read the documentation for more information: https://redis.io/commands/zrange
+	Start interface{}
+	Stop  interface{}
+
+	// The ByScore and ByLex options are mutually exclusive.
+	ByScore bool
+	ByLex   bool
+
+	Rev bool
+
+	// limit offset count.
+	Offset int64
+	Count  int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+	if z.Rev && (z.ByScore || z.ByLex) {
+		args = append(args, z.Key, z.Stop, z.Start)
+	} else {
+		args = append(args, z.Key, z.Start, z.Stop)
 	}
-	if withScores {
-		args = append(args, "withscores")
+
+	if z.ByScore {
+		args = append(args, "byscore")
+	} else if z.ByLex {
+		args = append(args, "bylex")
 	}
+	if z.Rev {
+		args = append(args, "rev")
+	}
+	if z.Offset != 0 || z.Count != 0 {
+		args = append(args, "limit", z.Offset, z.Count)
+	}
+	return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
 	cmd := NewStringSliceCmd(ctx, args...)
 	_ = c(ctx, cmd)
 	return cmd
 }
 
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
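+// Illustrative usage sketch for ZRangeArgs (not upstream code; assumes a
+// *redis.Client named rdb, a context.Context named ctx, and the package
+// imported as redis):
+//
+//	// All members with 3 < score <= 8, returning at most 10 results.
+//	vals, err := rdb.ZRangeArgs(ctx, redis.ZRangeArgs{
+//		Key:     "example-key",
+//		Start:   "(3",
+//		Stop:    8,
+//		ByScore: true,
+//		Offset:  0,
+//		Count:   10,
+//	}).Result()
+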
 func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
-	return c.zRange(ctx, key, start, stop, false)
+	return c.ZRangeArgs(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
 }
 
 func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
-	cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
-	_ = c(ctx, cmd)
-	return cmd
+	return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
 }
 
 type ZRangeBy struct {
@@ -2047,6 +2640,15 @@
 	return cmd
 }
 
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrangestore", dst)
+	args = z.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
 	cmd := NewIntCmd(ctx, "zrank", key, member)
 	_ = c(ctx, cmd)
@@ -2149,26 +2751,91 @@
 	return cmd
 }
 
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
 func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
-	args := make([]interface{}, 3+len(store.Keys))
-	args[0] = "zunionstore"
-	args[1] = dest
-	args[2] = len(store.Keys)
-	for i, key := range store.Keys {
-		args[3+i] = key
-	}
-	if len(store.Weights) > 0 {
-		args = append(args, "weights")
-		for _, weight := range store.Weights {
-			args = append(args, weight)
-		}
-	}
-	if store.Aggregate != "" {
-		args = append(args, "aggregate", store.Aggregate)
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunionstore", dest, len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRandMember requires redis-server >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "zrandmember", key, count)
+	if withScores {
+		args = append(args, "withscores")
 	}
 
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
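+// Illustrative usage sketch (not upstream code; assumes a *redis.Client
+// named rdb and a context.Context named ctx):
+//
+//	// Two distinct random members; a negative count would allow duplicates.
+//	members, err := rdb.ZRandMember(ctx, "example-key", 2, false).Result()
+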
+// ZDiff requires redis-server >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffWithScores requires redis-server >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+	args[len(keys)+2] = "withscores"
+
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffStore requires redis-server >= 6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 0, 3+len(keys))
+	args = append(args, "zdiffstore", destination, len(keys))
+	for _, key := range keys {
+		args = append(args, key)
+	}
 	cmd := NewIntCmd(ctx, args...)
-	cmd.setFirstKeyPos(3)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2395,7 +3062,7 @@
 	return cmd
 }
 
-func (c cmdable) Sync(ctx context.Context) {
+func (c cmdable) Sync(_ context.Context) {
 	panic("not implemented")
 }
 
@@ -2432,6 +3099,7 @@
 		args = append(args, "SAMPLES", samples[0])
 	}
 	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2448,6 +3116,7 @@
 	}
 	cmdArgs = appendArgs(cmdArgs, args)
 	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2462,6 +3131,7 @@
 	}
 	cmdArgs = appendArgs(cmdArgs, args)
 	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
 	_ = c(ctx, cmd)
 	return cmd
 }
@@ -2710,7 +3380,7 @@
 	return cmd
 }
 
-// GeoRadius is a read-only GEORADIUSBYMEMBER_RO command.
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
 func (c cmdable) GeoRadiusByMember(
 	ctx context.Context, key, member string, query *GeoRadiusQuery,
 ) *GeoLocationCmd {
@@ -2737,6 +3407,38 @@
 	return cmd
 }
 
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+	args := make([]interface{}, 0, 13)
+	args = append(args, "geosearch", key)
+	args = geoSearchArgs(q, args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+	ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+	args := make([]interface{}, 0, 16)
+	args = append(args, "geosearch", key)
+	args = geoSearchLocationArgs(q, args)
+	cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+	args := make([]interface{}, 0, 15)
+	args = append(args, "geosearchstore", store, key)
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+	if q.StoreDist {
+		args = append(args, "storedist")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
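+// Illustrative usage sketch for GeoSearch (not upstream code; assumes a
+// *redis.Client named rdb, a context.Context named ctx, redis-server >= 6.2,
+// and the GeoSearchQuery field names declared elsewhere in this package):
+//
+//	names, err := rdb.GeoSearch(ctx, "points", &redis.GeoSearchQuery{
+//		Longitude:  15.0,
+//		Latitude:   37.0,
+//		Radius:     200,
+//		RadiusUnit: "km",
+//	}).Result()
+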
 func (c cmdable) GeoDist(
 	ctx context.Context, key string, member1, member2, unit string,
 ) *FloatCmd {
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
index 9fe1376..521594b 100644
--- a/vendor/github.com/go-redis/redis/v8/error.go
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -10,6 +10,7 @@
 	"github.com/go-redis/redis/v8/internal/proto"
 )
 
+// ErrClosed is returned when any operation is performed on a closed client.
 var ErrClosed = pool.ErrClosed
 
 type Error interface {
@@ -64,15 +65,28 @@
 	return ok
 }
 
-func isBadConn(err error, allowTimeout bool) bool {
-	if err == nil {
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+	switch err {
+	case nil:
 		return false
+	case context.Canceled, context.DeadlineExceeded:
+		return true
 	}
 
 	if isRedisError(err) {
-		// Close connections in read only state in case domain addr is used
-		// and domain resolves to a different Redis Server. See #790.
-		return isReadOnlyError(err)
+		switch {
+		case isReadOnlyError(err):
+			// Close connections in read only state in case domain addr is used
+			// and domain resolves to a different Redis Server. See #790.
+			return true
+		case isMovedSameConnAddr(err, addr):
+			// Close connections when we are asked to move to the same addr
+			// of the connection. Force a DNS resolution when all connections
+			// of the pool are recycled
+			return true
+		default:
+			return false
+		}
 	}
 
 	if allowTimeout {
@@ -115,6 +129,14 @@
 	return strings.HasPrefix(err.Error(), "READONLY ")
 }
 
+func isMovedSameConnAddr(err error, addr string) bool {
+	redisError := err.Error()
+	if !strings.HasPrefix(redisError, "MOVED ") {
+		return false
+	}
+	return strings.HasSuffix(redisError, " "+addr)
+}
+
 //------------------------------------------------------------------------------
 
 type timeoutError interface {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
index 2fc74ad..b3a4f21 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -60,7 +60,7 @@
 	return rand.Intn(slotNumber)
 }
 
-// hashSlot returns a consistent slot number between 0 and 16383
+// Slot returns a consistent slot number between 0 and 16383
 // for any given string key.
 func Slot(key string) int {
 	if key == "" {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
new file mode 100644
index 0000000..852c8bd
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
@@ -0,0 +1,201 @@
+package hscan
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+	// List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+	decoders = []decoderFunc{
+		reflect.Bool:          decodeBool,
+		reflect.Int:           decodeInt,
+		reflect.Int8:          decodeInt8,
+		reflect.Int16:         decodeInt16,
+		reflect.Int32:         decodeInt32,
+		reflect.Int64:         decodeInt64,
+		reflect.Uint:          decodeUint,
+		reflect.Uint8:         decodeUint8,
+		reflect.Uint16:        decodeUint16,
+		reflect.Uint32:        decodeUint32,
+		reflect.Uint64:        decodeUint64,
+		reflect.Float32:       decodeFloat32,
+		reflect.Float64:       decodeFloat64,
+		reflect.Complex64:     decodeUnsupported,
+		reflect.Complex128:    decodeUnsupported,
+		reflect.Array:         decodeUnsupported,
+		reflect.Chan:          decodeUnsupported,
+		reflect.Func:          decodeUnsupported,
+		reflect.Interface:     decodeUnsupported,
+		reflect.Map:           decodeUnsupported,
+		reflect.Ptr:           decodeUnsupported,
+		reflect.Slice:         decodeSlice,
+		reflect.String:        decodeString,
+		reflect.Struct:        decodeUnsupported,
+		reflect.UnsafePointer: decodeUnsupported,
+	}
+
+	// Global map of struct field specs that is populated once for every new
+	// struct type that is scanned. This caches the field types and the corresponding
+	// decoder functions to avoid iterating through struct fields on subsequent scans.
+	globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+	v := reflect.ValueOf(dst)
+
+	// The destination to scan into should be a struct pointer.
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+	}
+
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+	}
+
+	return StructValue{
+		spec:  globalStructMap.get(v.Type()),
+		value: v,
+	}, nil
+}
+
+// Scan scans the results from a key-value Redis map result set into a destination struct.
+// The Redis keys are matched to the struct's fields by their `redis` tags.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+	if len(keys) != len(vals) {
+		return errors.New("args should have the same number of keys and vals")
+	}
+
+	strct, err := Struct(dst)
+	if err != nil {
+		return err
+	}
+
+	// Iterate through the (key, value) sequence.
+	for i := 0; i < len(vals); i++ {
+		key, ok := keys[i].(string)
+		if !ok {
+			continue
+		}
+
+		val, ok := vals[i].(string)
+		if !ok {
+			continue
+		}
+
+		if err := strct.Scan(key, val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
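+// Illustrative usage sketch (not upstream code; the struct and values below
+// are hypothetical, and this internal package is normally driven by the
+// command Scan helpers):
+//
+//	type User struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//	var u User
+//	err := Scan(&u,
+//		[]interface{}{"name", "age"},
+//		[]interface{}{"alice", "33"})
+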
+func decodeBool(f reflect.Value, s string) error {
+	b, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	f.SetBool(b)
+	return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetInt(v)
+	return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetUint(v)
+	return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+// Although 64-bit is the default precision for ParseFloat, we define it explicitly.
+func decodeFloat64(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+	f.SetString(s)
+	return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+	// []byte slice ([]uint8).
+	if f.Type().Elem().Kind() == reflect.Uint8 {
+		f.SetBytes([]byte(s))
+	}
+	return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
new file mode 100644
index 0000000..6839412
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
@@ -0,0 +1,93 @@
+package hscan
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+	m sync.Map
+}
+
+func newStructMap() *structMap {
+	return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+	if v, ok := s.m.Load(t); ok {
+		return v.(*structSpec)
+	}
+
+	spec := newStructSpec(t, "redis")
+	s.m.Store(t, spec)
+	return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+	m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+	s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+	numField := t.NumField()
+	out := &structSpec{
+		m: make(map[string]*structField, numField),
+	}
+
+	for i := 0; i < numField; i++ {
+		f := t.Field(i)
+
+		tag := f.Tag.Get(fieldTag)
+		if tag == "" || tag == "-" {
+			continue
+		}
+
+		tag = strings.Split(tag, ",")[0]
+		if tag == "" {
+			continue
+		}
+
+		// Use the built-in decoder.
+		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+	}
+
+	return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+	index int
+	fn    decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+	spec  *structSpec
+	value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+	field, ok := s.spec.m[key]
+	if !ok {
+		return nil
+	}
+	if err := field.fn(s.value.Field(field.index), value); err != nil {
+		t := s.value.Type()
+		return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+			value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/instruments.go b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
deleted file mode 100644
index e837526..0000000
--- a/vendor/github.com/go-redis/redis/v8/internal/instruments.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package internal
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/api/global"
-	"go.opentelemetry.io/otel/api/metric"
-)
-
-var (
-	// WritesCounter is a count of write commands performed.
-	WritesCounter metric.Int64Counter
-	// NewConnectionsCounter is a count of new connections.
-	NewConnectionsCounter metric.Int64Counter
-)
-
-func init() {
-	defer func() {
-		if r := recover(); r != nil {
-			Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments", r)
-		}
-	}()
-
-	meter := metric.Must(global.Meter("github.com/go-redis/redis"))
-
-	WritesCounter = meter.NewInt64Counter("redis.writes",
-		metric.WithDescription("the number of writes initiated"),
-	)
-
-	NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
-		metric.WithDescription("the number of connections created"),
-	)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
index 3810f9e..c8b9213 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/log.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -19,6 +19,8 @@
 	_ = l.log.Output(2, fmt.Sprintf(format, v...))
 }
 
+// Logger calls Output to print to stderr.
+// Arguments are handled in the manner of fmt.Printf.
 var Logger Logging = &logger{
 	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
 }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
index 08a2071..5661659 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -7,9 +7,7 @@
 	"sync/atomic"
 	"time"
 
-	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/proto"
-	"go.opentelemetry.io/otel/api/trace"
 )
 
 var noDeadline = time.Time{}
@@ -66,41 +64,28 @@
 }
 
 func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
-	return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
-		if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
-			return internal.RecordError(ctx, err)
-		}
-		if err := fn(cn.rd); err != nil {
-			return internal.RecordError(ctx, err)
-		}
-		return nil
-	})
+	if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
+	return fn(cn.rd)
 }
 
 func (cn *Conn) WithWriter(
 	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
 ) error {
-	return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
-		if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
-			return internal.RecordError(ctx, err)
-		}
+	if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
 
-		if cn.bw.Buffered() > 0 {
-			cn.bw.Reset(cn.netConn)
-		}
+	if cn.bw.Buffered() > 0 {
+		cn.bw.Reset(cn.netConn)
+	}
 
-		if err := fn(cn.wr); err != nil {
-			return internal.RecordError(ctx, err)
-		}
+	if err := fn(cn.wr); err != nil {
+		return err
+	}
 
-		if err := cn.bw.Flush(); err != nil {
-			return internal.RecordError(ctx, err)
-		}
-
-		internal.WritesCounter.Add(ctx, 1)
-
-		return nil
-	})
+	return cn.bw.Flush()
 }
 
 func (cn *Conn) Close() error {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
index 355742b..44a4e77 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -12,7 +12,10 @@
 )
 
 var (
-	ErrClosed      = errors.New("redis: client is closed")
+	// ErrClosed is returned when any operation is performed on a closed client.
+	ErrClosed = errors.New("redis: client is closed")
+
+	// ErrPoolTimeout is returned when the pool times out waiting for a free connection.
 	ErrPoolTimeout = errors.New("redis: connection pool timeout")
 )
 
@@ -54,6 +57,7 @@
 	Dialer  func(context.Context) (net.Conn, error)
 	OnClose func(*Conn) error
 
+	PoolFIFO           bool
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -117,9 +121,10 @@
 	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
 		p.poolSize++
 		p.idleConnsLen++
+
 		go func() {
 			err := p.addIdleConn()
-			if err != nil {
+			if err != nil && err != ErrClosed {
 				p.connsMu.Lock()
 				p.poolSize--
 				p.idleConnsLen--
@@ -136,9 +141,16 @@
 	}
 
 	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// New connections must not be added to a closed pool.
+	if p.closed() {
+		_ = cn.Close()
+		return ErrClosed
+	}
+
 	p.conns = append(p.conns, cn)
 	p.idleConns = append(p.idleConns, cn)
-	p.connsMu.Unlock()
 	return nil
 }
 
@@ -153,6 +165,14 @@
 	}
 
 	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// New connections must not be added to a closed pool.
+	if p.closed() {
+		_ = cn.Close()
+		return nil, ErrClosed
+	}
+
 	p.conns = append(p.conns, cn)
 	if pooled {
 		// If pool is full remove the cn on next Put.
@@ -162,7 +182,6 @@
 			p.poolSize++
 		}
 	}
-	p.connsMu.Unlock()
 
 	return cn, nil
 }
@@ -185,7 +204,6 @@
 		return nil, err
 	}
 
-	internal.NewConnectionsCounter.Add(ctx, 1)
 	cn := NewConn(netConn)
 	cn.pooled = pooled
 	return cn, nil
@@ -228,16 +246,19 @@
 		return nil, ErrClosed
 	}
 
-	err := p.waitTurn(ctx)
-	if err != nil {
+	if err := p.waitTurn(ctx); err != nil {
 		return nil, err
 	}
 
 	for {
 		p.connsMu.Lock()
-		cn := p.popIdle()
+		cn, err := p.popIdle()
 		p.connsMu.Unlock()
 
+		if err != nil {
+			return nil, err
+		}
+
 		if cn == nil {
 			break
 		}
@@ -306,17 +327,28 @@
 	<-p.queue
 }
 
-func (p *ConnPool) popIdle() *Conn {
-	if len(p.idleConns) == 0 {
-		return nil
+func (p *ConnPool) popIdle() (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+	n := len(p.idleConns)
+	if n == 0 {
+		return nil, nil
 	}
 
-	idx := len(p.idleConns) - 1
-	cn := p.idleConns[idx]
-	p.idleConns = p.idleConns[:idx]
+	var cn *Conn
+	if p.opt.PoolFIFO {
+		cn = p.idleConns[0]
+		copy(p.idleConns, p.idleConns[1:])
+		p.idleConns = p.idleConns[:n-1]
+	} else {
+		idx := n - 1
+		cn = p.idleConns[idx]
+		p.idleConns = p.idleConns[:idx]
+	}
 	p.idleConnsLen--
 	p.checkMinIdleConns()
-	return cn
+	return cn, nil
 }
 
 func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
@@ -477,6 +509,7 @@
 		p.connsMu.Lock()
 		cn := p.reapStaleConn()
 		p.connsMu.Unlock()
+
 		p.freeTurn()
 
 		if cn != nil {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
index c3e7e7c..3adb99b 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -172,8 +172,7 @@
 
 func (p *StickyConnPool) badConnError() error {
 	if v := p._badConnError.Load(); v != nil {
-		err := v.(BadConnError)
-		if err.wrapped != nil {
+		if err := v.(BadConnError); err.wrapped != nil {
 			return err
 		}
 	}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
index 0fbc51e..0e6ca77 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -8,6 +8,7 @@
 	"github.com/go-redis/redis/v8/internal/util"
 )
 
+// Redis RESP protocol data types.
 const (
 	ErrorReply  = '-'
 	StatusReply = '+'
@@ -18,7 +19,7 @@
 
 //------------------------------------------------------------------------------
 
-const Nil = RedisError("redis: nil")
+const Nil = RedisError("redis: nil") // nolint:errname
 
 type RedisError string
 
@@ -83,7 +84,7 @@
 			return nil, err
 		}
 
-		full = append(full, b...)
+		full = append(full, b...) //nolint:makezero
 		b = full
 	}
 	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
index 08d18d3..0e99476 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -10,7 +10,7 @@
 )
 
 // Scan parses bytes `b` to `v` with appropriate type.
-// nolint: gocyclo
+//nolint:gocyclo
 func Scan(b []byte, v interface{}) error {
 	switch v := v.(type) {
 	case nil:
@@ -106,6 +106,13 @@
 		var err error
 		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
 		return err
+	case *time.Duration:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = time.Duration(n)
+		return nil
 	case encoding.BinaryUnmarshaler:
 		return v.UnmarshalBinary(b)
 	default:
@@ -131,7 +138,7 @@
 	for i, s := range data {
 		elem := next()
 		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
-			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
 			return err
 		}
 	}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
index 81b09b8..c426098 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -98,6 +98,8 @@
 	case time.Time:
 		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
 		return w.bytes(w.numBuf)
+	case time.Duration:
+		return w.int(v.Nanoseconds())
 	case encoding.BinaryMarshaler:
 		b, err := v.MarshalBinary()
 		if err != nil {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
index 40676f3..2edccba 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -43,3 +43,8 @@
 	s.src.Seed(seed)
 	s.mu.Unlock()
 }
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
index 862ff0e..fd2f434 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 package internal
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
index 4bc7970..9f2e418 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
 // +build !appengine
 
 package internal
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
index 894382b..e34a7f0 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -4,24 +4,19 @@
 	"context"
 	"time"
 
-	"github.com/go-redis/redis/v8/internal/proto"
 	"github.com/go-redis/redis/v8/internal/util"
-	"go.opentelemetry.io/otel/api/global"
-	"go.opentelemetry.io/otel/api/trace"
 )
 
 func Sleep(ctx context.Context, dur time.Duration) error {
-	return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
-		t := time.NewTimer(dur)
-		defer t.Stop()
+	t := time.NewTimer(dur)
+	defer t.Stop()
 
-		select {
-		case <-t.C:
-			return nil
-		case <-ctx.Done():
-			return ctx.Err()
-		}
-	})
+	select {
+	case <-t.C:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
 }
 
 func ToLower(s string) string {
@@ -49,33 +44,3 @@
 	}
 	return true
 }
-
-func Unwrap(err error) error {
-	u, ok := err.(interface {
-		Unwrap() error
-	})
-	if !ok {
-		return nil
-	}
-	return u.Unwrap()
-}
-
-//------------------------------------------------------------------------------
-
-func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
-	if span := trace.SpanFromContext(ctx); !span.IsRecording() {
-		return fn(ctx, span)
-	}
-
-	ctx, span := global.Tracer("github.com/go-redis/redis").Start(ctx, name)
-	defer span.End()
-
-	return fn(ctx, span)
-}
-
-func RecordError(ctx context.Context, err error) error {
-	if err != proto.Nil {
-		trace.SpanFromContext(ctx).RecordError(ctx, err)
-	}
-	return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
index 1b3060e..2130711 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
 // +build appengine
 
 package util
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
index c9868aa..daa8d76 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
 // +build !appengine
 
 package util
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
index f2c16c5..a4abe32 100644
--- a/vendor/github.com/go-redis/redis/v8/options.go
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -8,14 +8,12 @@
 	"net"
 	"net/url"
 	"runtime"
+	"sort"
 	"strconv"
 	"strings"
 	"time"
 
-	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/pool"
-	"go.opentelemetry.io/otel/api/trace"
-	"go.opentelemetry.io/otel/label"
 )
 
 // Limiter is the interface of a rate limiter or a circuit breaker.
@@ -58,7 +56,7 @@
 	DB int
 
 	// Maximum number of retries before giving up.
-	// Default is 3 retries.
+	// Default is 3 retries; -1 (not 0) disables retries.
 	MaxRetries int
 	// Minimum backoff between each retry.
 	// Default is 8 milliseconds; -1 disables backoff.
@@ -79,8 +77,12 @@
 	// Default is ReadTimeout.
 	WriteTimeout time.Duration
 
+	// Type of connection pool.
+	// true for FIFO pool, false for LIFO pool.
+	// Note that FIFO has higher overhead compared to LIFO.
+	PoolFIFO bool
 	// Maximum number of socket connections.
-	// Default is 10 connections per every CPU as reported by runtime.NumCPU.
+	// Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
 	PoolSize int
 	// Minimum number of idle connections which is useful when establishing
 	// new connection is slow.
@@ -139,7 +141,7 @@
 		}
 	}
 	if opt.PoolSize == 0 {
-		opt.PoolSize = 10 * runtime.NumCPU()
+		opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
 	}
 	switch opt.ReadTimeout {
 	case -1:
@@ -191,9 +193,32 @@
 // Scheme is required.
 // There are two connection types: by tcp socket and by unix socket.
 // Tcp connection:
-// 		redis://<user>:<password>@<host>:<port>/<db_number>
+//		redis://<user>:<password>@<host>:<port>/<db_number>
 // Unix connection:
 //		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+//	- field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+//	- only scalar type fields are supported (bool, int, time.Duration)
+//	- for time.Duration fields, values must be a valid input for time.ParseDuration();
+//	  additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+//	- to disable a duration field, use value less than or equal to 0; to use the default
+//	  value, leave the value blank or remove the parameter
+//	- only the last value is interpreted if a parameter is given multiple times
+//	- fields "network", "addr", "username" and "password" can only be set using other
+//	  URL attributes (scheme, host, userinfo, resp.), query parameters using these
+//	  names will be treated as unknown parameters
+//	- unknown parameter names will result in an error
+// Examples:
+//		redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+//		is equivalent to:
+//		&Options{
+//			Network:     "tcp",
+//			Addr:        "localhost:6789",
+//			DB:          1,               // path "/3" was overridden by "&db=1"
+//			DialTimeout: 3 * time.Second, // no time unit = seconds
+//			ReadTimeout: 6 * time.Second,
+//			MaxRetries:  2,
+//		}
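+//
+// Usage sketch (assumes the package is imported as redis; the URL shown is
+// illustrative):
+//		opt, err := redis.ParseURL("redis://localhost:6379/0?pool_fifo=true&dial_timeout=3")
+//		if err != nil {
+//			panic(err)
+//		}
+//		rdb := redis.NewClient(opt)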
 func ParseURL(redisURL string) (*Options, error) {
 	u, err := url.Parse(redisURL)
 	if err != nil {
@@ -215,10 +240,6 @@
 
 	o.Username, o.Password = getUserPassword(u)
 
-	if len(u.Query()) > 0 {
-		return nil, errors.New("redis: no options supported")
-	}
-
 	h, p, err := net.SplitHostPort(u.Host)
 	if err != nil {
 		h = u.Host
@@ -249,7 +270,7 @@
 		o.TLSConfig = &tls.Config{ServerName: h}
 	}
 
-	return o, nil
+	return setupConnParams(u, o)
 }
 
 func setupUnixConn(u *url.URL) (*Options, error) {
@@ -261,19 +282,122 @@
 		return nil, errors.New("redis: empty unix socket path")
 	}
 	o.Addr = u.Path
-
 	o.Username, o.Password = getUserPassword(u)
+	return setupConnParams(u, o)
+}
 
-	dbStr := u.Query().Get("db")
-	if dbStr == "" {
-		return o, nil // if database is not set, connect to 0 db.
+type queryOptions struct {
+	q   url.Values
+	err error
+}
+
+func (o *queryOptions) string(name string) string {
+	vs := o.q[name]
+	if len(vs) == 0 {
+		return ""
+	}
+	delete(o.q, name) // enable detection of unknown parameters
+	return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	i, err := strconv.Atoi(s)
+	if err == nil {
+		return i
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	// try plain number first
+	if i, err := strconv.Atoi(s); err == nil {
+		if i <= 0 {
+			// disable timeouts
+			return -1
+		}
+		return time.Duration(i) * time.Second
+	}
+	dur, err := time.ParseDuration(s)
+	if err == nil {
+		return dur
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+	switch s := o.string(name); s {
+	case "true", "1":
+		return true
+	case "false", "0", "":
+		return false
+	default:
+		if o.err == nil {
+			o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+		}
+		return false
+	}
+}
+
+func (o *queryOptions) remaining() []string {
+	if len(o.q) == 0 {
+		return nil
+	}
+	keys := make([]string, 0, len(o.q))
+	for k := range o.q {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+	q := queryOptions{q: u.Query()}
+
+	// compat: a future major release may use q.int("db")
+	if tmp := q.string("db"); tmp != "" {
+		db, err := strconv.Atoi(tmp)
+		if err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %w", err)
+		}
+		o.DB = db
 	}
 
-	db, err := strconv.Atoi(dbStr)
-	if err != nil {
-		return nil, fmt.Errorf("redis: invalid database number: %s", err)
+	o.MaxRetries = q.int("max_retries")
+	o.MinRetryBackoff = q.duration("min_retry_backoff")
+	o.MaxRetryBackoff = q.duration("max_retry_backoff")
+	o.DialTimeout = q.duration("dial_timeout")
+	o.ReadTimeout = q.duration("read_timeout")
+	o.WriteTimeout = q.duration("write_timeout")
+	o.PoolFIFO = q.bool("pool_fifo")
+	o.PoolSize = q.int("pool_size")
+	o.MinIdleConns = q.int("min_idle_conns")
+	o.MaxConnAge = q.duration("max_conn_age")
+	o.PoolTimeout = q.duration("pool_timeout")
+	o.IdleTimeout = q.duration("idle_timeout")
+	o.IdleCheckFrequency = q.duration("idle_check_frequency")
+	if q.err != nil {
+		return nil, q.err
 	}
-	o.DB = db
+
+	// any parameters left?
+	if r := q.remaining(); len(r) > 0 {
+		return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+	}
 
 	return o, nil
 }
@@ -292,21 +416,9 @@
 func newConnPool(opt *Options) *pool.ConnPool {
 	return pool.NewConnPool(&pool.Options{
 		Dialer: func(ctx context.Context) (net.Conn, error) {
-			var conn net.Conn
-			err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
-				span.SetAttributes(
-					label.String("db.connection_string", opt.Addr),
-				)
-
-				var err error
-				conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
-				if err != nil {
-					_ = internal.RecordError(ctx, err)
-				}
-				return err
-			})
-			return conn, err
+			return opt.Dialer(ctx, opt.Network, opt.Addr)
 		},
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		MinIdleConns:       opt.MinIdleConns,
 		MaxConnAge:         opt.MaxConnAge,
diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json
new file mode 100644
index 0000000..e4ea4bb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/package.json
@@ -0,0 +1,8 @@
+{
+  "name": "redis",
+  "version": "8.11.5",
+  "main": "index.js",
+  "repository": "git@github.com:go-redis/redis.git",
+  "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+  "license": "BSD-2-clause"
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
index c6ec340..31bab97 100644
--- a/vendor/github.com/go-redis/redis/v8/pipeline.go
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -24,6 +24,7 @@
 // depends of your batch size and/or use TxPipeline.
 type Pipeliner interface {
 	StatefulCmdable
+	Len() int
 	Do(ctx context.Context, args ...interface{}) *Cmd
 	Process(ctx context.Context, cmd Cmder) error
 	Close() error
@@ -53,6 +54,15 @@
 	c.statefulCmdable = c.Process
 }
 
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+	c.mu.Lock()
+	ln := len(c.cmds)
+	c.mu.Unlock()
+	return ln
+}
+
+// Do queues the custom command for later execution.
 func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
 	cmd := NewCmd(ctx, args...)
 	_ = c.Process(ctx, cmd)
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
index c56270b..efc2354 100644
--- a/vendor/github.com/go-redis/redis/v8/pubsub.go
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -2,7 +2,6 @@
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"strings"
 	"sync"
@@ -13,13 +12,6 @@
 	"github.com/go-redis/redis/v8/internal/proto"
 )
 
-const (
-	pingTimeout     = time.Second
-	chanSendTimeout = time.Minute
-)
-
-var errPingTimeout = errors.New("redis: ping timeout")
-
 // PubSub implements Pub/Sub commands as described in
 // http://redis.io/topics/pubsub. Message receiving is NOT safe
 // for concurrent use by multiple goroutines.
@@ -43,9 +35,12 @@
 	cmd *Cmd
 
 	chOnce sync.Once
-	msgCh  chan *Message
-	allCh  chan interface{}
-	ping   chan struct{}
+	msgCh  *channel
+	allCh  *channel
+}
+
+func (c *PubSub) init() {
+	c.exit = make(chan struct{})
 }
 
 func (c *PubSub) String() string {
@@ -54,10 +49,6 @@
 	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
 }
 
-func (c *PubSub) init() {
-	c.exit = make(chan struct{})
-}
-
 func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
 	c.mu.Lock()
 	cn, err := c.conn(ctx, nil)
@@ -150,7 +141,7 @@
 	if c.cn != cn {
 		return
 	}
-	if isBadConn(err, allowTimeout) {
+	if isBadConn(err, allowTimeout, c.opt.Addr) {
 		c.reconnect(ctx, err)
 	}
 }
@@ -261,13 +252,16 @@
 	}
 	cmd := NewCmd(ctx, args...)
 
-	cn, err := c.connWithLock(ctx)
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	cn, err := c.conn(ctx, nil)
 	if err != nil {
 		return err
 	}
 
 	err = c.writeCmd(ctx, cn, cmd)
-	c.releaseConnWithLock(ctx, cn, err, false)
+	c.releaseConn(ctx, cn, err, false)
 	return err
 }
 
@@ -370,6 +364,8 @@
 		c.cmd = NewCmd(ctx)
 	}
 
+	// Don't hold the lock to allow subscriptions and pings.
+
 	cn, err := c.connWithLock(ctx)
 	if err != nil {
 		return nil, err
@@ -380,6 +376,7 @@
 	})
 
 	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
 	if err != nil {
 		return nil, err
 	}
@@ -418,56 +415,6 @@
 	}
 }
 
-// Channel returns a Go channel for concurrently receiving messages.
-// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
-// Receive* APIs can not be used after channel is created.
-//
-// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
-func (c *PubSub) Channel() <-chan *Message {
-	return c.ChannelSize(100)
-}
-
-// ChannelSize is like Channel, but creates a Go channel
-// with specified buffer size.
-func (c *PubSub) ChannelSize(size int) <-chan *Message {
-	c.chOnce.Do(func() {
-		c.initPing()
-		c.initMsgChan(size)
-	})
-	if c.msgCh == nil {
-		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
-		panic(err)
-	}
-	if cap(c.msgCh) != size {
-		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
-		panic(err)
-	}
-	return c.msgCh
-}
-
-// ChannelWithSubscriptions is like Channel, but message type can be either
-// *Subscription or *Message. Subscription messages can be used to detect
-// reconnections.
-//
-// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
-	c.chOnce.Do(func() {
-		c.initPing()
-		c.initAllChan(size)
-	})
-	if c.allCh == nil {
-		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
-		panic(err)
-	}
-	if cap(c.allCh) != size {
-		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
-		panic(err)
-	}
-	return c.allCh
-}
-
 func (c *PubSub) getContext() context.Context {
 	if c.cmd != nil {
 		return c.cmd.ctx
@@ -475,36 +422,135 @@
 	return context.Background()
 }
 
-func (c *PubSub) initPing() {
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is blocked full for the send timeout (one minute by default) the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping cannot be received within the health check interval.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.msgCh = newChannel(c, opts...)
+		c.msgCh.initMsgChan()
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	return c.msgCh.msgCh
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+	return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+	c.chOnce.Do(func() {
+		c.allCh = newChannel(c, WithChannelSize(size))
+		c.allCh.initAllChan()
+	})
+	if c.allCh == nil {
+		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+		panic(err)
+	}
+	return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+	return func(c *channel) {
+		c.chanSize = size
+	}
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping Redis Server if it does not receive any messages within the interval.
+// To disable health check, use zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.checkInterval = d
+	}
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.chanSendTimeout = d
+	}
+}
+
+type channel struct {
+	pubSub *PubSub
+
+	msgCh chan *Message
+	allCh chan interface{}
+	ping  chan struct{}
+
+	chanSize        int
+	chanSendTimeout time.Duration
+	checkInterval   time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+	c := &channel{
+		pubSub: pubSub,
+
+		chanSize:        100,
+		chanSendTimeout: time.Minute,
+		checkInterval:   3 * time.Second,
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.checkInterval > 0 {
+		c.initHealthCheck()
+	}
+	return c
+}
+
+func (c *channel) initHealthCheck() {
 	ctx := context.TODO()
 	c.ping = make(chan struct{}, 1)
+
 	go func() {
 		timer := time.NewTimer(time.Minute)
 		timer.Stop()
 
-		healthy := true
 		for {
-			timer.Reset(pingTimeout)
+			timer.Reset(c.checkInterval)
 			select {
 			case <-c.ping:
-				healthy = true
 				if !timer.Stop() {
 					<-timer.C
 				}
 			case <-timer.C:
-				pingErr := c.Ping(ctx)
-				if healthy {
-					healthy = false
-				} else {
-					if pingErr == nil {
-						pingErr = errPingTimeout
-					}
-					c.mu.Lock()
-					c.reconnect(ctx, pingErr)
-					healthy = true
-					c.mu.Unlock()
+				if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+					c.pubSub.mu.Lock()
+					c.pubSub.reconnect(ctx, pingErr)
+					c.pubSub.mu.Unlock()
 				}
-			case <-c.exit:
+			case <-c.pubSub.exit:
 				return
 			}
 		}
@@ -512,16 +558,17 @@
 }
 
 // initMsgChan must be in sync with initAllChan.
-func (c *PubSub) initMsgChan(size int) {
+func (c *channel) initMsgChan() {
 	ctx := context.TODO()
-	c.msgCh = make(chan *Message, size)
+	c.msgCh = make(chan *Message, c.chanSize)
+
 	go func() {
 		timer := time.NewTimer(time.Minute)
 		timer.Stop()
 
 		var errCount int
 		for {
-			msg, err := c.Receive(ctx)
+			msg, err := c.pubSub.Receive(ctx)
 			if err != nil {
 				if err == pool.ErrClosed {
 					close(c.msgCh)
@@ -548,7 +595,7 @@
 			case *Pong:
 				// Ignore.
 			case *Message:
-				timer.Reset(chanSendTimeout)
+				timer.Reset(c.chanSendTimeout)
 				select {
 				case c.msgCh <- msg:
 					if !timer.Stop() {
@@ -556,30 +603,28 @@
 					}
 				case <-timer.C:
 					internal.Logger.Printf(
-						c.getContext(),
-						"redis: %s channel is full for %s (message is dropped)",
-						c,
-						chanSendTimeout,
-					)
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
 				}
 			default:
-				internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
 			}
 		}
 	}()
 }
 
 // initAllChan must be in sync with initMsgChan.
-func (c *PubSub) initAllChan(size int) {
+func (c *channel) initAllChan() {
 	ctx := context.TODO()
-	c.allCh = make(chan interface{}, size)
+	c.allCh = make(chan interface{}, c.chanSize)
+
 	go func() {
-		timer := time.NewTimer(pingTimeout)
+		timer := time.NewTimer(time.Minute)
 		timer.Stop()
 
 		var errCount int
 		for {
-			msg, err := c.Receive(ctx)
+			msg, err := c.pubSub.Receive(ctx)
 			if err != nil {
 				if err == pool.ErrClosed {
 					close(c.allCh)
@@ -601,29 +646,23 @@
 			}
 
 			switch msg := msg.(type) {
-			case *Subscription:
-				c.sendMessage(msg, timer)
 			case *Pong:
 				// Ignore.
-			case *Message:
-				c.sendMessage(msg, timer)
+			case *Subscription, *Message:
+				timer.Reset(c.chanSendTimeout)
+				select {
+				case c.allCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
+				}
 			default:
-				internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
 			}
 		}
 	}()
 }
-
-func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
-	timer.Reset(pingTimeout)
-	select {
-	case c.allCh <- msg:
-		if !timer.Stop() {
-			<-timer.C
-		}
-	case <-timer.C:
-		internal.Logger.Printf(
-			c.getContext(),
-			"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
-	}
-}
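A minimal usage sketch of the new functional-option API introduced above; the client address and channel name are placeholders, not part of this change:

    package main

    import (
    	"context"
    	"fmt"
    	"time"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	ctx := context.Background()
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address

    	pubsub := rdb.Subscribe(ctx, "mychannel") // placeholder channel
    	defer pubsub.Close()

    	// The options replace the deprecated ChannelSize(size) call.
    	ch := pubsub.Channel(
    		redis.WithChannelSize(500),                          // buffer 500 messages (default 100)
    		redis.WithChannelHealthCheckInterval(5*time.Second), // ping after 5s of silence (default 3s)
    		redis.WithChannelSendTimeout(10*time.Second),        // drop a message after 10s (default 60s)
    	)

    	for msg := range ch {
    		fmt.Println(msg.Channel, msg.Payload)
    	}
    }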
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
index efad7f1..bcf8a2a 100644
--- a/vendor/github.com/go-redis/redis/v8/redis.go
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -2,14 +2,14 @@
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"sync/atomic"
 	"time"
 
 	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/pool"
 	"github.com/go-redis/redis/v8/internal/proto"
-	"go.opentelemetry.io/otel/api/trace"
-	"go.opentelemetry.io/otel/label"
 )
 
 // Nil reply returned by Redis when key does not exist.
@@ -51,9 +51,7 @@
 	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
 ) error {
 	if len(hs.hooks) == 0 {
-		err := hs.withContext(ctx, func() error {
-			return fn(ctx, cmd)
-		})
+		err := fn(ctx, cmd)
 		cmd.SetErr(err)
 		return err
 	}
@@ -69,9 +67,7 @@
 	}
 
 	if retErr == nil {
-		retErr = hs.withContext(ctx, func() error {
-			return fn(ctx, cmd)
-		})
+		retErr = fn(ctx, cmd)
 		cmd.SetErr(retErr)
 	}
 
@@ -89,9 +85,7 @@
 	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
 ) error {
 	if len(hs.hooks) == 0 {
-		err := hs.withContext(ctx, func() error {
-			return fn(ctx, cmds)
-		})
+		err := fn(ctx, cmds)
 		return err
 	}
 
@@ -106,9 +100,7 @@
 	}
 
 	if retErr == nil {
-		retErr = hs.withContext(ctx, func() error {
-			return fn(ctx, cmds)
-		})
+		retErr = fn(ctx, cmds)
 	}
 
 	for hookIndex--; hookIndex >= 0; hookIndex-- {
@@ -128,23 +120,6 @@
 	return hs.processPipeline(ctx, cmds, fn)
 }
 
-func (hs hooks) withContext(ctx context.Context, fn func() error) error {
-	done := ctx.Done()
-	if done == nil {
-		return fn()
-	}
-
-	errc := make(chan error, 1)
-	go func() { errc <- fn() }()
-
-	select {
-	case <-done:
-		return ctx.Err()
-	case err := <-errc:
-		return err
-	}
-}
-
 //------------------------------------------------------------------------------
 
 type baseClient struct {
@@ -225,12 +200,9 @@
 		return cn, nil
 	}
 
-	err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
-		return c.initConn(ctx, cn)
-	})
-	if err != nil {
+	if err := c.initConn(ctx, cn); err != nil {
 		c.connPool.Remove(ctx, cn, err)
-		if err := internal.Unwrap(err); err != nil {
+		if err := errors.Unwrap(err); err != nil {
 			return nil, err
 		}
 		return nil, err
@@ -289,7 +261,7 @@
 		c.opt.Limiter.ReportResult(err)
 	}
 
-	if isBadConn(err, false) {
+	if isBadConn(err, false, c.opt.Addr) {
 		c.connPool.Remove(ctx, cn, err)
 	} else {
 		c.connPool.Put(ctx, cn)
@@ -299,25 +271,36 @@
 func (c *baseClient) withConn(
 	ctx context.Context, fn func(context.Context, *pool.Conn) error,
 ) error {
-	return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
-		cn, err := c.getConn(ctx)
-		if err != nil {
-			return err
-		}
+	cn, err := c.getConn(ctx)
+	if err != nil {
+		return err
+	}
 
-		if span.IsRecording() {
-			if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
-				span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
-			}
-		}
+	defer func() {
+		c.releaseConn(ctx, cn, err)
+	}()
 
-		defer func() {
-			c.releaseConn(ctx, cn, err)
-		}()
+	done := ctx.Done() //nolint:ifshort
 
+	if done == nil {
 		err = fn(ctx, cn)
 		return err
-	})
+	}
+
+	errc := make(chan error, 1)
+	go func() { errc <- fn(ctx, cn) }()
+
+	select {
+	case <-done:
+		_ = cn.Close()
+		// Wait for the goroutine to finish and send something.
+		<-errc
+
+		err = ctx.Err()
+		return err
+	case err = <-errc:
+		return err
+	}
 }
 
 func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
@@ -325,45 +308,50 @@
 	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
 		attempt := attempt
 
-		var retry bool
-		err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
-			if attempt > 0 {
-				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
-					return err
-				}
-			}
-
-			retryTimeout := true
-			err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
-				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
-					return writeCmd(wr, cmd)
-				})
-				if err != nil {
-					return err
-				}
-
-				err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
-				if err != nil {
-					retryTimeout = cmd.readTimeout() == nil
-					return err
-				}
-
-				return nil
-			})
-			if err == nil {
-				return nil
-			}
-			retry = shouldRetry(err, retryTimeout)
-			return err
-		})
+		retry, err := c._process(ctx, cmd, attempt)
 		if err == nil || !retry {
 			return err
 		}
+
 		lastErr = err
 	}
 	return lastErr
 }
 
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+	if attempt > 0 {
+		if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+			return false, err
+		}
+	}
+
+	retryTimeout := uint32(1)
+	err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+		err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+			return writeCmd(wr, cmd)
+		})
+		if err != nil {
+			return err
+		}
+
+		err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+		if err != nil {
+			if cmd.readTimeout() == nil {
+				atomic.StoreUint32(&retryTimeout, 1)
+			} else {
+				atomic.StoreUint32(&retryTimeout, 0)
+			}
+			return err
+		}
+
+		return nil
+	})
+	if err == nil {
+		return false, nil
+	}
+
+	retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+	return retry, err
+}
+
 func (c *baseClient) retryBackoff(attempt int) time.Duration {
 	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
 }
@@ -722,7 +710,9 @@
 	hooks // TODO: inherit hooks
 }
 
-// Conn is like Client, but its pool contains single connection.
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
 type Conn struct {
 	*conn
 	ctx context.Context
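The context handling that hooks.withContext used to provide now lives in withConn, together with closing the connection so the worker goroutine cannot leak. A standalone sketch of the same pattern, assuming only that the closeFn callback unblocks fn:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"time"
    )

    // runWithCancel runs fn while honoring ctx cancellation. closeFn unblocks
    // fn (in withConn this is cn.Close), and the goroutine is always waited
    // on so it cannot leak.
    func runWithCancel(ctx context.Context, fn func() error, closeFn func() error) error {
    	done := ctx.Done()
    	if done == nil {
    		return fn() // no deadline or cancel: run inline, no goroutine needed
    	}

    	errc := make(chan error, 1)
    	go func() { errc <- fn() }()

    	select {
    	case <-done:
    		_ = closeFn()
    		<-errc // wait for the goroutine to finish and send something
    		return ctx.Err()
    	case err := <-errc:
    		return err
    	}
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    	defer cancel()

    	stop := make(chan struct{})
    	err := runWithCancel(ctx,
    		func() error { <-stop; return errors.New("stopped") },
    		func() error { close(stop); return nil },
    	)
    	fmt.Println(err) // context deadline exceeded
    }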
diff --git a/vendor/github.com/go-redis/redis/v8/renovate.json b/vendor/github.com/go-redis/redis/v8/renovate.json
deleted file mode 100644
index f45d8f1..0000000
--- a/vendor/github.com/go-redis/redis/v8/renovate.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
-  "extends": [
-    "config:base"
-  ]
-}
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
index 34d05f3..4df00fc 100644
--- a/vendor/github.com/go-redis/redis/v8/ring.go
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -12,7 +12,8 @@
 	"time"
 
 	"github.com/cespare/xxhash/v2"
-	"github.com/dgryski/go-rendezvous"
+	rendezvous "github.com/dgryski/go-rendezvous" //nolint
+
 	"github.com/go-redis/redis/v8/internal"
 	"github.com/go-redis/redis/v8/internal/hashtag"
 	"github.com/go-redis/redis/v8/internal/pool"
@@ -78,6 +79,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -138,6 +142,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		MinIdleConns:       opt.MinIdleConns,
 		MaxConnAge:         opt.MaxConnAge,
@@ -571,7 +576,7 @@
 	}
 	info := cmdsInfo[name]
 	if info == nil {
-		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+		internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
 	}
 	return info
 }
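A minimal sketch of a Ring client exercising the new PoolFIFO option; the shard addresses are placeholders:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	rdb := redis.NewRing(&redis.RingOptions{
    		Addrs: map[string]string{ // placeholder shard addresses
    			"shard1": "localhost:6379",
    			"shard2": "localhost:6380",
    		},
    		// FIFO reuses the oldest idle connection first; the default
    		// LIFO favors the most recently used one.
    		PoolFIFO: true,
    	})
    	defer rdb.Close()

    	fmt.Println(rdb.Ping(context.Background()).Err())
    }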
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
index 07ed482..5cab18d 100644
--- a/vendor/github.com/go-redis/redis/v8/script.go
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -8,7 +8,7 @@
 	"strings"
 )
 
-type scripter interface {
+type Scripter interface {
 	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
 	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
 	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
@@ -16,9 +16,9 @@
 }
 
 var (
-	_ scripter = (*Client)(nil)
-	_ scripter = (*Ring)(nil)
-	_ scripter = (*ClusterClient)(nil)
+	_ Scripter = (*Client)(nil)
+	_ Scripter = (*Ring)(nil)
+	_ Scripter = (*ClusterClient)(nil)
 )
 
 type Script struct {
@@ -38,25 +38,25 @@
 	return s.hash
 }
 
-func (s *Script) Load(ctx context.Context, c scripter) *StringCmd {
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
 	return c.ScriptLoad(ctx, s.src)
 }
 
-func (s *Script) Exists(ctx context.Context, c scripter) *BoolSliceCmd {
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
 	return c.ScriptExists(ctx, s.hash)
 }
 
-func (s *Script) Eval(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
 	return c.Eval(ctx, s.src, keys, args...)
 }
 
-func (s *Script) EvalSha(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
 	return c.EvalSha(ctx, s.hash, keys, args...)
 }
 
 // Run optimistically uses EVALSHA to run the script. If script does not exist
 // it is retried using EVAL.
-func (s *Script) Run(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
 	r := s.EvalSha(ctx, c, keys, args...)
 	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
 		return s.Eval(ctx, c, keys, args...)
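With scripter exported as Scripter, callers can now write helpers that accept a Client, Ring, or ClusterClient interchangeably. A small sketch (key name and address are placeholders):

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    var incrBy = redis.NewScript(`
    return redis.call("INCRBY", KEYS[1], ARGV[1])
    `)

    // bump works against any client type that satisfies the newly
    // exported Scripter interface.
    func bump(ctx context.Context, c redis.Scripter, key string, n int) (int64, error) {
    	return incrBy.Run(ctx, c, []string{key}, n).Int64()
    }

    func main() {
    	ctx := context.Background()
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder
    	v, err := bump(ctx, rdb, "counter", 2)
    	fmt.Println(v, err)
    }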
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
index 7db9843..ec6221d 100644
--- a/vendor/github.com/go-redis/redis/v8/sentinel.go
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -23,7 +23,13 @@
 	MasterName string
 	// A seed list of host:port addresses of sentinel nodes.
 	SentinelAddrs []string
-	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
+
+	// If specified with SentinelPassword, enables ACL-based authentication (via
+	// AUTH <user> <pass>).
+	SentinelUsername string
+	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
+	// authentication.
 	SentinelPassword string
 
 	// Allows routing read-only commands to the closest master or slave node.
@@ -36,6 +42,10 @@
 	// Route all commands to slave read-only nodes.
 	SlaveOnly bool
 
+	// UseDisconnectedSlaves allows using slaves that are disconnected from
+	// the master when no connected slaves are available.
+	// Currently this option only works for the RandomSlaveAddr function.
+	UseDisconnectedSlaves bool
+
 	// Following options are copied from Options struct.
 
 	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
@@ -53,6 +63,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -82,6 +95,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		PoolTimeout:        opt.PoolTimeout,
 		IdleTimeout:        opt.IdleTimeout,
@@ -101,6 +115,7 @@
 		OnConnect: opt.OnConnect,
 
 		DB:       0,
+		Username: opt.SentinelUsername,
 		Password: opt.SentinelPassword,
 
 		MaxRetries:      opt.MaxRetries,
@@ -111,6 +126,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		PoolTimeout:        opt.PoolTimeout,
 		IdleTimeout:        opt.IdleTimeout,
@@ -142,6 +158,7 @@
 		ReadTimeout:  opt.ReadTimeout,
 		WriteTimeout: opt.WriteTimeout,
 
+		PoolFIFO:           opt.PoolFIFO,
 		PoolSize:           opt.PoolSize,
 		PoolTimeout:        opt.PoolTimeout,
 		IdleTimeout:        opt.IdleTimeout,
@@ -167,6 +184,10 @@
 	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
 	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
 
+	rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+		sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+	})
+
 	failover := &sentinelFailover{
 		opt:           failoverOpt,
 		sentinelAddrs: sentinelAddrs,
@@ -177,11 +198,14 @@
 	opt.init()
 
 	connPool := newConnPool(opt)
+
+	failover.mu.Lock()
 	failover.onFailover = func(ctx context.Context, addr string) {
 		_ = connPool.Filter(func(cn *pool.Conn) bool {
 			return cn.RemoteAddr().String() != addr
 		})
 	}
+	failover.mu.Unlock()
 
 	c := Client{
 		baseClient: newBaseClient(opt, connPool),
@@ -208,14 +232,21 @@
 				failover.trySwitchMaster(ctx, addr)
 			}
 		}
-
 		if err != nil {
 			return nil, err
 		}
 		if failover.opt.Dialer != nil {
 			return failover.opt.Dialer(ctx, network, addr)
 		}
-		return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
+
+		netDialer := &net.Dialer{
+			Timeout:   failover.opt.DialTimeout,
+			KeepAlive: 5 * time.Minute,
+		}
+		if failover.opt.TLSConfig == nil {
+			return netDialer.DialContext(ctx, network, addr)
+		}
+		return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
 	}
 }
 
@@ -430,10 +461,22 @@
 }
 
 func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
-	addresses, err := c.slaveAddrs(ctx)
+	if c.opt == nil {
+		return "", errors.New("opt is nil")
+	}
+
+	addresses, err := c.slaveAddrs(ctx, false)
 	if err != nil {
 		return "", err
 	}
+
+	if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
+		addresses, err = c.slaveAddrs(ctx, true)
+		if err != nil {
+			return "", err
+		}
+	}
+
 	if len(addresses) == 0 {
 		return c.MasterAddr(ctx)
 	}
@@ -482,10 +525,10 @@
 		return addr, nil
 	}
 
-	return "", errors.New("redis: all sentinels are unreachable")
+	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
 }
 
-func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
+func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
 	c.mu.RLock()
 	sentinel := c.sentinel
 	c.mu.RUnlock()
@@ -508,6 +551,8 @@
 		_ = c.closeSentinel()
 	}
 
+	var sentinelReachable bool
+
 	for i, sentinelAddr := range c.sentinelAddrs {
 		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
 
@@ -518,16 +563,22 @@
 			_ = sentinel.Close()
 			continue
 		}
-
+		sentinelReachable = true
+		addrs := parseSlaveAddrs(slaves, useDisconnected)
+		if len(addrs) == 0 {
+			continue
+		}
 		// Push working sentinel to the top.
 		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
 		c.setSentinel(ctx, sentinel)
 
-		addrs := parseSlaveAddrs(slaves)
 		return addrs, nil
 	}
 
-	return []string{}, errors.New("redis: all sentinels are unreachable")
+	if sentinelReachable {
+		return []string{}, nil
+	}
+	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
 }
 
 func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
@@ -547,12 +598,11 @@
 			c.opt.MasterName, err)
 		return []string{}
 	}
-	return parseSlaveAddrs(addrs)
+	return parseSlaveAddrs(addrs, false)
 }
 
-func parseSlaveAddrs(addrs []interface{}) []string {
+func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
 	nodes := make([]string, 0, len(addrs))
-
 	for _, node := range addrs {
 		ip := ""
 		port := ""
@@ -574,8 +624,12 @@
 
 		for _, flag := range flags {
 			switch flag {
-			case "s_down", "o_down", "disconnected":
+			case "s_down", "o_down":
 				isDown = true
+			case "disconnected":
+				if !keepDisconnected {
+					isDown = true
+				}
 			}
 		}
 
@@ -589,7 +643,7 @@
 
 func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
 	c.mu.RLock()
-	currentAddr := c._masterAddr
+	currentAddr := c._masterAddr //nolint:ifshort
 	c.mu.RUnlock()
 
 	if addr == currentAddr {
@@ -630,15 +684,22 @@
 	}
 	for _, sentinel := range sentinels {
 		vals := sentinel.([]interface{})
+		var ip, port string
 		for i := 0; i < len(vals); i += 2 {
 			key := vals[i].(string)
-			if key == "name" {
-				sentinelAddr := vals[i+1].(string)
-				if !contains(c.sentinelAddrs, sentinelAddr) {
-					internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
-						sentinelAddr, c.opt.MasterName)
-					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
-				}
+			switch key {
+			case "ip":
+				ip = vals[i+1].(string)
+			case "port":
+				port = vals[i+1].(string)
+			}
+		}
+		if ip != "" && port != "" {
+			sentinelAddr := net.JoinHostPort(ip, port)
+			if !contains(c.sentinelAddrs, sentinelAddr) {
+				internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+					sentinelAddr, c.opt.MasterName)
+				c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
 			}
 		}
 	}
@@ -646,6 +707,7 @@
 
 func (c *sentinelFailover) listen(pubsub *PubSub) {
 	ctx := context.TODO()
+
 	if c.onUpdate != nil {
 		c.onUpdate(ctx)
 	}
@@ -701,7 +763,7 @@
 			Addr: masterAddr,
 		}}
 
-		slaveAddrs, err := failover.slaveAddrs(ctx)
+		slaveAddrs, err := failover.slaveAddrs(ctx, false)
 		if err != nil {
 			return nil, err
 		}
@@ -723,9 +785,12 @@
 	}
 
 	c := NewClusterClient(opt)
+
+	failover.mu.Lock()
 	failover.onUpdate = func(ctx context.Context) {
 		c.ReloadState(ctx)
 	}
+	failover.mu.Unlock()
 
 	return c
 }
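A sketch of a failover client exercising the new sentinel options; addresses and credentials are placeholders:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
    		MasterName:    "mymaster",
    		SentinelAddrs: []string{"s1:26379", "s2:26379", "s3:26379"}, // placeholders

    		// New in this version: ACL authentication against the sentinels
    		// themselves (AUTH <user> <pass>) and falling back to slaves
    		// flagged as disconnected.
    		SentinelUsername:      "sentinel-user", // placeholder
    		SentinelPassword:      "sentinel-pass", // placeholder
    		UseDisconnectedSlaves: true,
    		SlaveOnly:             true,
    	})
    	defer rdb.Close()

    	fmt.Println(rdb.Ping(context.Background()).Err())
    }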
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
index ad825c6..8c9d872 100644
--- a/vendor/github.com/go-redis/redis/v8/tx.go
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -13,7 +13,8 @@
 // Tx implements Redis transactions as described in
 // http://redis.io/topics/transactions. It's NOT safe for concurrent use
 // by multiple goroutines, because Exec resets the list of watched keys.
-// If you don't need WATCH it is better to use Pipeline.
+//
+// If you don't need WATCH, use Pipeline instead.
 type Tx struct {
 	baseClient
 	cmdable
@@ -65,16 +66,13 @@
 // The transaction is automatically closed when fn exits.
 func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
 	tx := c.newTx(ctx)
+	defer tx.Close(ctx)
 	if len(keys) > 0 {
 		if err := tx.Watch(ctx, keys...).Err(); err != nil {
-			_ = tx.Close(ctx)
 			return err
 		}
 	}
-
-	err := fn(tx)
-	_ = tx.Close(ctx)
-	return err
+	return fn(tx)
 }
 
 // Close closes the transaction, releasing any open resources.
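The refactored Watch is unchanged from the caller's point of view. A standard optimistic-locking sketch (key name and address are placeholders):

    package main

    import (
    	"context"
    	"fmt"
    	"strconv"

    	"github.com/go-redis/redis/v8"
    )

    // increment atomically increments key using WATCH/MULTI/EXEC,
    // retrying if another client modifies the key mid-transaction.
    func increment(ctx context.Context, rdb *redis.Client, key string) error {
    	txf := func(tx *redis.Tx) error {
    		n, err := tx.Get(ctx, key).Int()
    		if err != nil && err != redis.Nil {
    			return err
    		}
    		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
    			pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
    			return nil
    		})
    		return err
    	}

    	for i := 0; i < 10; i++ {
    		err := rdb.Watch(ctx, txf, key)
    		if err != redis.TxFailedErr {
    			return err // nil on success, or a real error
    		}
    		// Optimistic lock lost; retry.
    	}
    	return fmt.Errorf("increment reached max retries")
    }

    func main() {
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder
    	fmt.Println(increment(context.Background(), rdb, "counter"))
    }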
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
index 5f0e1e3..c89b3e5 100644
--- a/vendor/github.com/go-redis/redis/v8/universal.go
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -25,6 +25,7 @@
 
 	Username         string
 	Password         string
+	SentinelUsername string
 	SentinelPassword string
 
 	MaxRetries      int
@@ -35,6 +36,9 @@
 	ReadTimeout  time.Duration
 	WriteTimeout time.Duration
 
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
 	PoolSize           int
 	MinIdleConns       int
 	MaxConnAge         time.Duration
@@ -53,6 +57,7 @@
 
 	// The sentinel master name.
 	// Only failover clients.
 	MasterName string
 }
 
@@ -82,6 +87,7 @@
 		DialTimeout:        o.DialTimeout,
 		ReadTimeout:        o.ReadTimeout,
 		WriteTimeout:       o.WriteTimeout,
+		PoolFIFO:           o.PoolFIFO,
 		PoolSize:           o.PoolSize,
 		MinIdleConns:       o.MinIdleConns,
 		MaxConnAge:         o.MaxConnAge,
@@ -109,6 +115,7 @@
 		DB:               o.DB,
 		Username:         o.Username,
 		Password:         o.Password,
+		SentinelUsername: o.SentinelUsername,
 		SentinelPassword: o.SentinelPassword,
 
 		MaxRetries:      o.MaxRetries,
@@ -119,6 +126,7 @@
 		ReadTimeout:  o.ReadTimeout,
 		WriteTimeout: o.WriteTimeout,
 
+		PoolFIFO:           o.PoolFIFO,
 		PoolSize:           o.PoolSize,
 		MinIdleConns:       o.MinIdleConns,
 		MaxConnAge:         o.MaxConnAge,
@@ -154,6 +162,7 @@
 		ReadTimeout:  o.ReadTimeout,
 		WriteTimeout: o.WriteTimeout,
 
+		PoolFIFO:           o.PoolFIFO,
 		PoolSize:           o.PoolSize,
 		MinIdleConns:       o.MinIdleConns,
 		MaxConnAge:         o.MaxConnAge,
@@ -168,9 +177,9 @@
 // --------------------------------------------------------------------
 
 // UniversalClient is an abstract client which - based on the provided options -
-// can connect to either clusters, or sentinel-backed failover instances
-// or simple single-instance servers. This can be useful for testing
-// cluster-specific applications locally.
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally, or for
+// using different client types in different environments.
 type UniversalClient interface {
 	Cmdable
 	Context() context.Context
@@ -190,12 +199,12 @@
 	_ UniversalClient = (*Ring)(nil)
 )
 
-// NewUniversalClient returns a new multi client. The type of client returned depends
-// on the following three conditions:
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
 //
-// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned
-// 2. if the number of Addrs is two or more, a ClusterClient will be returned
-// 3. otherwise, a single-node redis Client will be returned.
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
 func NewUniversalClient(opts *UniversalOptions) UniversalClient {
 	if opts.MasterName != "" {
 		return NewFailoverClient(opts.Failover())
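A sketch showing how a single UniversalOptions value selects the client type at runtime, following the three rules above; the address is a placeholder:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	// With MasterName set, this returns a FailoverClient; with two or
    	// more Addrs (and no MasterName), a ClusterClient; otherwise a
    	// single-node Client.
    	rdb := redis.NewUniversalClient(&redis.UniversalOptions{
    		Addrs:    []string{"localhost:6379"}, // placeholder
    		PoolFIFO: true,                       // propagated to all three client types
    	})
    	defer rdb.Close()

    	fmt.Println(rdb.Ping(context.Background()).Err())
    }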
diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go
new file mode 100644
index 0000000..112c9a2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+	return "8.11.5"
+}
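The new Version helper can be logged at startup, for example; a trivial sketch:

    package main

    import (
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	fmt.Println("go-redis", redis.Version()) // prints "go-redis 8.11.5"
    }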
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
deleted file mode 100644
index a5a1386..0000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
-
-package descriptor
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/descriptor.proto.
-
-type Edition = descriptorpb.Edition
-
-const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN
-const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2
-const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3
-const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023
-const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024
-const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY
-const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY
-const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY
-const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY
-const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY
-const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX
-
-var Edition_name = descriptorpb.Edition_name
-var Edition_value = descriptorpb.Edition_value
-
-type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState
-
-const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION
-const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED
-
-var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name
-var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value
-
-type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
-
-const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
-const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
-const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
-const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
-const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
-const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
-const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
-const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
-const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
-const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
-const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
-const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
-const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
-const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
-const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
-const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
-const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
-const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
-
-var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
-var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
-
-type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
-
-const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
-const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
-const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
-
-var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
-var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
-
-type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
-
-const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
-const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
-const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
-
-var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
-var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
-
-type FieldOptions_CType = descriptorpb.FieldOptions_CType
-
-const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
-const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
-const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
-
-var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
-var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
-
-type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
-
-const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
-const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
-const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
-
-var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
-var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
-
-type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention
-
-const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN
-const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME
-const FieldOptions_RETENTION_SOURCE = descriptorpb.FieldOptions_RETENTION_SOURCE
-
-var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name
-var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value
-
-type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType
-
-const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN
-const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE
-const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE
-const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE
-const FieldOptions_TARGET_TYPE_FIELD = descriptorpb.FieldOptions_TARGET_TYPE_FIELD
-const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF
-const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM
-const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY
-const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE
-const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD
-
-var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name
-var FieldOptions_OptionTargetType_value = descriptorpb.FieldOptions_OptionTargetType_value
-
-type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
-
-const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
-const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
-const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
-
-var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
-var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
-
-type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence
-
-const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN
-const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT
-const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT
-const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED
-
-var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name
-var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value
-
-type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType
-
-const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN
-const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN
-const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED
-
-var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name
-var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value
-
-type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding
-
-const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
-const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED
-const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED
-
-var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name
-var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value
-
-type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation
-
-const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN
-const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY
-const FeatureSet_NONE = descriptorpb.FeatureSet_NONE
-
-var FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name
-var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value
-
-type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding
-
-const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN
-const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED
-const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED
-
-var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name
-var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value
-
-type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat
-
-const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN
-const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW
-const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT
-
-var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name
-var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value
-
-type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic
-
-const GeneratedCodeInfo_Annotation_NONE = descriptorpb.GeneratedCodeInfo_Annotation_NONE
-const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET
-const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS
-
-var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name
-var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value
-
-type FileDescriptorSet = descriptorpb.FileDescriptorSet
-type FileDescriptorProto = descriptorpb.FileDescriptorProto
-type DescriptorProto = descriptorpb.DescriptorProto
-type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
-
-const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification
-
-type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
-type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
-type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
-type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
-type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
-type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
-
-const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
-const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
-
-type FileOptions = descriptorpb.FileOptions
-
-const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
-const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
-const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
-const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
-const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
-const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
-const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
-const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
-
-type MessageOptions = descriptorpb.MessageOptions
-
-const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
-const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
-const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
-
-type FieldOptions = descriptorpb.FieldOptions
-
-const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
-const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
-const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
-const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy
-const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
-const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
-const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact
-
-type OneofOptions = descriptorpb.OneofOptions
-type EnumOptions = descriptorpb.EnumOptions
-
-const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
-
-type EnumValueOptions = descriptorpb.EnumValueOptions
-
-const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
-const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact
-
-type ServiceOptions = descriptorpb.ServiceOptions
-
-const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
-
-type MethodOptions = descriptorpb.MethodOptions
-
-const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
-const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
-
-type UninterpretedOption = descriptorpb.UninterpretedOption
-type FeatureSet = descriptorpb.FeatureSet
-type FeatureSetDefaults = descriptorpb.FeatureSetDefaults
-type SourceCodeInfo = descriptorpb.SourceCodeInfo
-type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
-type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
-type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
-type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration
-type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
-type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault
-type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
-type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
-type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
-type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
-
-var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
-	0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
-	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
-	0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
-	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x32,
-}
-
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
-func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
-	if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
-	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
-	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
-	file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
deleted file mode 100644
index fdff3fd..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/golang/protobuf/proto"
-	"google.golang.org/protobuf/reflect/protoreflect"
-	"google.golang.org/protobuf/reflect/protoregistry"
-
-	anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-const urlPrefix = "type.googleapis.com/"
-
-// AnyMessageName returns the message name contained in an anypb.Any message.
-// Most type assertions should use the Is function instead.
-//
-// Deprecated: Call the any.MessageName method instead.
-func AnyMessageName(any *anypb.Any) (string, error) {
-	name, err := anyMessageName(any)
-	return string(name), err
-}
-func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
-	if any == nil {
-		return "", fmt.Errorf("message is nil")
-	}
-	name := protoreflect.FullName(any.TypeUrl)
-	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
-		name = name[i+len("/"):]
-	}
-	if !name.IsValid() {
-		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
-	}
-	return name, nil
-}
-
-// MarshalAny marshals the given message m into an anypb.Any message.
-//
-// Deprecated: Call the anypb.New function instead.
-func MarshalAny(m proto.Message) (*anypb.Any, error) {
-	switch dm := m.(type) {
-	case DynamicAny:
-		m = dm.Message
-	case *DynamicAny:
-		if dm == nil {
-			return nil, proto.ErrNil
-		}
-		m = dm.Message
-	}
-	b, err := proto.Marshal(m)
-	if err != nil {
-		return nil, err
-	}
-	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
-}
-
-// Empty returns a new message of the type specified in an anypb.Any message.
-// It returns protoregistry.NotFound if the corresponding message type could not
-// be resolved in the global registry.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
-// to resolve the message name and create a new instance of it.
-func Empty(any *anypb.Any) (proto.Message, error) {
-	name, err := anyMessageName(any)
-	if err != nil {
-		return nil, err
-	}
-	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
-	if err != nil {
-		return nil, err
-	}
-	return proto.MessageV1(mt.New().Interface()), nil
-}
-
-// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
-// into the provided message m. It returns an error if the target message
-// does not match the type in the Any message or if an unmarshal error occurs.
-//
-// The target message m may be a *DynamicAny message. If the underlying message
-// type could not be resolved, then this returns protoregistry.NotFound.
-//
-// Deprecated: Call the any.UnmarshalTo method instead.
-func UnmarshalAny(any *anypb.Any, m proto.Message) error {
-	if dm, ok := m.(*DynamicAny); ok {
-		if dm.Message == nil {
-			var err error
-			dm.Message, err = Empty(any)
-			if err != nil {
-				return err
-			}
-		}
-		m = dm.Message
-	}
-
-	anyName, err := AnyMessageName(any)
-	if err != nil {
-		return err
-	}
-	msgName := proto.MessageName(m)
-	if anyName != msgName {
-		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
-	}
-	return proto.Unmarshal(any.Value, m)
-}
-
-// Is reports whether the Any message contains a message of the specified type.
-//
-// Deprecated: Call the any.MessageIs method instead.
-func Is(any *anypb.Any, m proto.Message) bool {
-	if any == nil || m == nil {
-		return false
-	}
-	name := proto.MessageName(m)
-	if !strings.HasSuffix(any.TypeUrl, name) {
-		return false
-	}
-	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in an anypb.Any message.
-// The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-//	var x ptypes.DynamicAny
-//	if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-//	fmt.Printf("unmarshaled message: %v", x.Message)
-//
-// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
-// the any message contents into a new instance of the underlying message.
-type DynamicAny struct{ proto.Message }
-
-func (m DynamicAny) String() string {
-	if m.Message == nil {
-		return "<nil>"
-	}
-	return m.Message.String()
-}
-func (m DynamicAny) Reset() {
-	if m.Message == nil {
-		return
-	}
-	m.Message.Reset()
-}
-func (m DynamicAny) ProtoMessage() {
-	return
-}
-func (m DynamicAny) ProtoReflect() protoreflect.Message {
-	if m.Message == nil {
-		return nil
-	}
-	return dynamicAny{proto.MessageReflect(m.Message)}
-}
-
-type dynamicAny struct{ protoreflect.Message }
-
-func (m dynamicAny) Type() protoreflect.MessageType {
-	return dynamicAnyType{m.Message.Type()}
-}
-func (m dynamicAny) New() protoreflect.Message {
-	return dynamicAnyType{m.Message.Type()}.New()
-}
-func (m dynamicAny) Interface() protoreflect.ProtoMessage {
-	return DynamicAny{proto.MessageV1(m.Message.Interface())}
-}
-
-type dynamicAnyType struct{ protoreflect.MessageType }
-
-func (t dynamicAnyType) New() protoreflect.Message {
-	return dynamicAny{t.MessageType.New()}
-}
-func (t dynamicAnyType) Zero() protoreflect.Message {
-	return dynamicAny{t.MessageType.Zero()}
-}
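Every helper in the any.go file deleted above carries a deprecation notice pointing at the anypb package itself; a sketch of the suggested replacements (the Duration payload is just an arbitrary message for illustration):

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/types/known/anypb"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	// Old: ptypes.MarshalAny(m)   New: anypb.New(m)
    	a, err := anypb.New(durationpb.New(0))
    	if err != nil {
    		panic(err)
    	}

    	// Old: ptypes.UnmarshalAny(a, m) / DynamicAny   New: a.UnmarshalNew()
    	m, err := a.UnmarshalNew()
    	if err != nil {
    		panic(err)
    	}

    	// Old: ptypes.Is(a, m)   New: a.MessageIs(m)
    	fmt.Println(a.MessageIs(m), m.ProtoReflect().Descriptor().FullName())
    }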
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
deleted file mode 100644
index d3c3325..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ptypes provides functionality for interacting with well-known types.
-//
-// Deprecated: Well-known types have specialized functionality directly
-// injected into the generated packages for each message type.
-// See the deprecation notice for each function for the suggested alternative.
-package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
deleted file mode 100644
index b2b55dd..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	durationpb "github.com/golang/protobuf/ptypes/duration"
-)
-
-// Range of google.protobuf.Duration as specified in duration.proto.
-// This is about 10,000 years in seconds.
-const (
-	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
-	minSeconds = -maxSeconds
-)
-
-// Duration converts a durationpb.Duration to a time.Duration.
-// Duration returns an error if dur is invalid or overflows a time.Duration.
-//
-// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
-func Duration(dur *durationpb.Duration) (time.Duration, error) {
-	if err := validateDuration(dur); err != nil {
-		return 0, err
-	}
-	d := time.Duration(dur.Seconds) * time.Second
-	if int64(d/time.Second) != dur.Seconds {
-		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
-	}
-	if dur.Nanos != 0 {
-		d += time.Duration(dur.Nanos) * time.Nanosecond
-		if (d < 0) != (dur.Nanos < 0) {
-			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
-		}
-	}
-	return d, nil
-}
-
-// DurationProto converts a time.Duration to a durationpb.Duration.
-//
-// Deprecated: Call the durationpb.New function instead.
-func DurationProto(d time.Duration) *durationpb.Duration {
-	nanos := d.Nanoseconds()
-	secs := nanos / 1e9
-	nanos -= secs * 1e9
-	return &durationpb.Duration{
-		Seconds: int64(secs),
-		Nanos:   int32(nanos),
-	}
-}
-
-// validateDuration determines whether the durationpb.Duration is valid
-// according to the definition in google/protobuf/duration.proto.
-// A valid durpb.Duration may still be too large to fit into a time.Duration
-// Note that the range of durationpb.Duration is about 10,000 years,
-// while the range of time.Duration is about 290 years.
-func validateDuration(dur *durationpb.Duration) error {
-	if dur == nil {
-		return errors.New("duration: nil Duration")
-	}
-	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
-		return fmt.Errorf("duration: %v: seconds out of range", dur)
-	}
-	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
-		return fmt.Errorf("duration: %v: nanos out of range", dur)
-	}
-	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
-	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
-		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
-	}
-	return nil
-}
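Likewise, the deleted Duration helpers are deprecated in favor of the durationpb package; a sketch of the corresponding migration:

    package main

    import (
    	"fmt"
    	"time"

    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	// Old: ptypes.DurationProto(d)   New: durationpb.New(d)
    	pb := durationpb.New(90 * time.Second)

    	// Old: ptypes.Duration(pb)   New: pb.CheckValid() + pb.AsDuration()
    	if err := pb.CheckValid(); err != nil {
    		panic(err)
    	}
    	fmt.Println(pb.AsDuration()) // 1m30s
    }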
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
deleted file mode 100644
index d0079ee..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-
-package duration
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	durationpb "google.golang.org/protobuf/types/known/durationpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/duration.proto.
-
-type Duration = durationpb.Duration
-
-var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
-	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
-	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
-func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
deleted file mode 100644
index 8d82abe..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/struct/struct.proto
-
-package structpb
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	structpb "google.golang.org/protobuf/types/known/structpb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/struct.proto.
-
-type NullValue = structpb.NullValue
-
-const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE
-
-var NullValue_name = structpb.NullValue_name
-var NullValue_value = structpb.NullValue_value
-
-type Struct = structpb.Struct
-type Value = structpb.Value
-type Value_NullValue = structpb.Value_NullValue
-type Value_NumberValue = structpb.Value_NumberValue
-type Value_StringValue = structpb.Value_StringValue
-type Value_BoolValue = structpb.Value_BoolValue
-type Value_StructValue = structpb.Value_StructValue
-type Value_ListValue = structpb.Value_ListValue
-type ListValue = structpb.ListValue
-
-var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{
-	0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
-	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
-	0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
-	0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() }
-func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File
-	file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
-	"errors"
-	"fmt"
-	"time"
-
-	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Duration as specified in timestamp.proto.
-const (
-	// Seconds field of the earliest valid Timestamp.
-	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	minValidSeconds = -62135596800
-	// Seconds field just after the latest valid Timestamp.
-	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
-	maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
-	// Don't return the zero value on error, because corresponds to a valid
-	// timestamp. Instead return whatever time.Unix gives us.
-	var t time.Time
-	if ts == nil {
-		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
-	} else {
-		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
-	}
-	return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
-	ts, err := TimestampProto(time.Now())
-	if err != nil {
-		panic("ptypes: time.Now() out of Timestamp range")
-	}
-	return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
-	ts := &timestamppb.Timestamp{
-		Seconds: t.Unix(),
-		Nanos:   int32(t.Nanosecond()),
-	}
-	if err := validateTimestamp(ts); err != nil {
-		return nil, err
-	}
-	return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
-	t, err := Timestamp(ts)
-	if err != nil {
-		return fmt.Sprintf("(%v)", err)
-	}
-	return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error {
-	if ts == nil {
-		return errors.New("timestamp: nil Timestamp")
-	}
-	if ts.Seconds < minValidSeconds {
-		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
-	}
-	if ts.Seconds >= maxValidSeconds {
-		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
-	}
-	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
-		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
-	}
-	return nil
-}
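
The timestamp helpers deleted here have the same shape of replacement in `timestamppb`. A minimal sketch of the equivalent calls (illustrative only; the date is made up):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.New and timestamppb.Now replace ptypes.TimestampProto
	// and ptypes.TimestampNow.
	ts := timestamppb.New(time.Date(2024, 1, 16, 0, 0, 0, 0, time.UTC))

	// CheckValid plus AsTime replace ptypes.Timestamp, which combined
	// validation and conversion in a single call.
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}

	// AsTime followed by Format replaces ptypes.TimestampString.
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano))
}
```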
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
deleted file mode 100644
index cc40f27..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
-
-package wrappers
-
-import (
-	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
-	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
-	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
-	reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/wrappers.proto.
-
-type DoubleValue = wrapperspb.DoubleValue
-type FloatValue = wrapperspb.FloatValue
-type Int64Value = wrapperspb.Int64Value
-type UInt64Value = wrapperspb.UInt64Value
-type Int32Value = wrapperspb.Int32Value
-type UInt32Value = wrapperspb.UInt32Value
-type BoolValue = wrapperspb.BoolValue
-type StringValue = wrapperspb.StringValue
-type BytesValue = wrapperspb.BytesValue
-
-var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
-	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
-	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
-	0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
-	0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
-	0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
-	0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
-	0, // [0:0] is the sub-list for method output_type
-	0, // [0:0] is the sub-list for method input_type
-	0, // [0:0] is the sub-list for extension type_name
-	0, // [0:0] is the sub-list for extension extendee
-	0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
-func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
-	if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
-		return
-	}
-	type x struct{}
-	out := protoimpl.TypeBuilder{
-		File: protoimpl.DescBuilder{
-			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
-			RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
-			NumEnums:      0,
-			NumMessages:   0,
-			NumExtensions: 0,
-			NumServices:   0,
-		},
-		GoTypes:           file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
-		DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
-	}.Build()
-	File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
-	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
-	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
-	file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
-}
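
The deleted `wrappers` package was only type aliases forwarding to `wrapperspb`, so callers can import `wrapperspb` directly. A small sketch of the constructor helpers (values are placeholders):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// The constructor helpers replace hand-built wrapper structs.
	s := wrapperspb.String("hello")
	n := wrapperspb.Int64(42)
	b := wrapperspb.Bool(true)

	// GetValue is nil-safe, returning the zero value for a nil wrapper.
	fmt.Println(s.GetValue(), n.GetValue(), b.GetValue())
}
```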
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a6..0000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
-  - 1.4.3
-  - 1.5.3
-  - tip
-
-script:
-  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09..a502fdc 100644
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
 
 We definitely welcome patches and contribution to this project!
 
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
 ### Legal requirements
 
 In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46..3e9a618 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+# uuid
 The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
 and DCE 1.1: Authentication and Security Services. 
 
 This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@
 change is the ability to represent an invalid UUID (vs a NIL UUID).
 
 ###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
 
 ###### Documentation 
-[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid)
 
 Full `go doc` style documentation for the package can be viewed online without
 installing this package by using the GoDoc site here: 
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4b..dc60082 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@
 	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
 	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
 	Nil           UUID // empty UUID, all zeros
+
+	// The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+	Max = UUID{
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+	}
 )
 
 // NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78ed..b2a0bc8 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
 package uuid
 
 // getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
 // Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
 func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06c..c351129 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@
 }
 
 // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid.  The time is only defined for version 1 and 2 UUIDs.
+// uuid.  The time is only defined for version 1, 2, 6 and 7 UUIDs.
 func (uuid UUID) Time() Time {
-	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
-	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
-	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
-	return Time(time)
+	var t Time
+	switch uuid.Version() {
+	case 6:
+		time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+		t = Time(time)
+	case 7:
+		time := binary.BigEndian.Uint64(uuid[:8])
+		t = Time((time>>16)*10000 + g1582ns100)
+	default: // forward compatible
+		time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+		time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+		time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+		t = Time(time)
+	}
+	return t
 }
 
 // ClockSequence returns the clock sequence encoded in uuid.
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index a57207a..5232b48 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -56,11 +56,15 @@
 	return ok
 }
 
-// Parse decodes s into a UUID or returns an error.  Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed.  Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded.  In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g.  {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}.  Only the middle 36 bytes are
+// examined in the latter case.  Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
 func Parse(s string) (UUID, error) {
 	var uuid UUID
 	switch len(s) {
@@ -69,7 +73,7 @@
 
 	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 	case 36 + 9:
-		if strings.ToLower(s[:9]) != "urn:uuid:" {
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
 			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
 		}
 		s = s[9:]
@@ -101,7 +105,8 @@
 		9, 11,
 		14, 16,
 		19, 21,
-		24, 26, 28, 30, 32, 34} {
+		24, 26, 28, 30, 32, 34,
+	} {
 		v, ok := xtob(s[x], s[x+1])
 		if !ok {
 			return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@
 	switch len(b) {
 	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
-		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+		if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
 			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
 		}
 		b = b[9:]
@@ -145,7 +150,8 @@
 		9, 11,
 		14, 16,
 		19, 21,
-		24, 26, 28, 30, 32, 34} {
+		24, 26, 28, 30, 32, 34,
+	} {
 		v, ok := xtob(b[x], b[x+1])
 		if !ok {
 			return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@
 	return uuid
 }
 
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+//   xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+//   xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+//   {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// If the format is valid, Validate returns nil.
+func Validate(s string) error {
+	switch len(s) {
+	// Standard UUID format
+	case 36:
+
+	// UUID with "urn:uuid:" prefix
+	case 36 + 9:
+		if !strings.EqualFold(s[:9], "urn:uuid:") {
+			return fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// UUID enclosed in braces
+	case 36 + 2:
+		if s[0] != '{' || s[len(s)-1] != '}' {
+			return errors.New("invalid bracketed UUID format")
+		}
+		s = s[1 : len(s)-1]
+
+	// UUID without hyphens
+	case 32:
+		for i := 0; i < len(s); i += 2 {
+			_, ok := xtob(s[i], s[i+1])
+			if !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+
+	default:
+		return invalidLengthError{len(s)}
+	}
+
+	// Check for standard UUID format
+	if len(s) == 36 {
+		if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+			return errors.New("invalid UUID format")
+		}
+		for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+			if _, ok := xtob(s[x], s[x+1]); !ok {
+				return errors.New("invalid UUID format")
+			}
+		}
+	}
+
+	return nil
+}
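
Per the updated `Parse` doc comment above, `Parse` deliberately accepts non-standard encodings, so this new `Validate` is the right call for pure format checking. A brief usage sketch (the inputs are made up):

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	inputs := []string{
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8", // accepted form
		"not-a-uuid", // rejected with a length error
	}
	for _, s := range inputs {
		if err := uuid.Validate(s); err != nil {
			fmt.Printf("%q: %v\n", s, err)
			continue
		}
		fmt.Printf("%q: ok\n", s)
	}
}
```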
+
 // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
 // , or "" if uuid is invalid.
 func (uuid UUID) String() string {
@@ -292,3 +351,15 @@
 	poolMu.Lock()
 	poolPos = randPoolSize
 }
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+	var uuidStrs = make([]string, len(uuids))
+	for i, uuid := range uuids {
+		uuidStrs[i] = uuid.String()
+	}
+	return uuidStrs
+}
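
A one-line usage sketch for the new slice type, assuming only the exported API added above:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	ids := uuid.UUIDs{uuid.New(), uuid.New()}
	// Strings converts every element, e.g. for logging or joining into a query.
	fmt.Println(ids.Strings())
}
```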
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to a randomly generated NodeID. If the clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewV6 returns Nil and an error.
+func NewV6() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	/*
+	    0                   1                   2                   3
+	    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                           time_high                           |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |           time_mid            |      time_low_and_version     |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |clk_seq_hi_res |  clk_seq_low  |         node (0-1)            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	   |                         node (2-5)                            |
+	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+
+	binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	uuid[6] = 0x60 | (uuid[6] & 0x0F)
+	uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
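
A short sketch of calling the new constructor; `Version` and the extended `Time` accessor from the earlier hunk apply to the result:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV6()
	if err != nil {
		panic(err) // GetTime failed; rare in practice
	}
	// Version reports 6; Time is defined for v6 per the updated Time() doc.
	fmt.Println(id, id.Version(), id.Time())
}
```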
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded,
+// as well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+	uuid, err := NewRandom()
+	if err != nil {
+		return uuid, err
+	}
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+	uuid, err := NewRandomFromReader(r)
+	if err != nil {
+		return uuid, err
+	}
+
+	makeV7(uuid[:])
+	return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time field (uuid[0] - uuid[5]) and sets version b0111 (uuid[6]).
+// uuid[8] already has the right variant bits set (Variant is 10).
+// See the NewV7 and NewV7FromReader functions.
+func makeV7(uuid []byte) {
+	/*
+		 0                   1                   2                   3
+		 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                           unix_ts_ms                          |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|          unix_ts_ms           |  ver  |  rand_a (12 bit seq)  |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|var|                        rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+		|                            rand_b                             |
+		+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+	*/
+	_ = uuid[15] // bounds check
+
+	t, s := getV7Time()
+
+	uuid[0] = byte(t >> 40)
+	uuid[1] = byte(t >> 32)
+	uuid[2] = byte(t >> 24)
+	uuid[3] = byte(t >> 16)
+	uuid[4] = byte(t >> 8)
+	uuid[5] = byte(t)
+
+	uuid[6] = 0x70 | (0x0F & byte(s>>8))
+	uuid[7] = byte(s)
+}
+
+// lastV7time is the last time value we returned, stored as:
+//
+//	52 bits of time in milliseconds since epoch
+//	12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+	timeMu.Lock()
+	defer timeMu.Unlock()
+
+	nano := timeNow().UnixNano()
+	milli = nano / nanoPerMilli
+	// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+	seq = (nano - milli*nanoPerMilli) >> 8
+	now := milli<<12 + seq
+	if now <= lastV7time {
+		now = lastV7time + 1
+		milli = now >> 12
+		seq = now & 0xfff
+	}
+	lastV7time = now
+	return milli, seq
+}
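
Because `getV7Time` returns strictly increasing values, two v7 UUIDs created back-to-back in one process sort in creation order by their string form. A small sketch demonstrating that, plus the v7 branch of `Time()` from the earlier hunk:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, _ := uuid.NewV7()
	b, _ := uuid.NewV7()

	// The leading 48-bit timestamp (plus the 12-bit sequence) is monotonic,
	// so lexicographic order matches creation order within this process.
	fmt.Println(a.String() < b.String()) // true

	// Time() decodes the embedded timestamp for version 7 UUIDs.
	sec, nsec := a.Time().UnixTime()
	fmt.Println(sec, nsec)
}
```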
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
deleted file mode 100644
index fc198d8..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-sudo: false
-language: go
-go:
-  - 1.13.x
-  - 1.14.x
-  - 1.15.x
-
-env:
-  global:
-    - GO111MODULE=on
-
-script:
- - make test
-
-after_success:
-  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
deleted file mode 100644
index 6eeb7e2..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
-
-Types of changes:
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities.
-
-## [Unreleased]
-
-### Added
-
-- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)
-
-## [v1.1.0] - 2019-09-12
-### Added
-- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
-- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2  - [kush-patel-hs](https://github.com/kush-patel-hs)
-- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
-- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
-- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
-- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
-- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled
-
-### Deprecated
-- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
-- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of <godoc.org>.
-
-### Fixed
-- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
-- Numerious documentation fixes.
-
-## v1.0.0 - 2018-05-08
-### Added
-- grpc_auth 
-- grpc_ctxtags
-- grpc_zap
-- grpc_logrus
-- grpc_opentracing
-- grpc_retry
-- grpc_validator
-- grpc_recovery
-
-[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD 
-[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
index 814e155..a12b409 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
@@ -7,16 +7,23 @@
 [![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
 [![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
 [![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status)
-[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
+[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://gophers.slack.com/archives/CNJL30P4P)
 
 [gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
 
+## ⚠️  Status
+
+Version [v2](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2) is about to be released, along with a migration guide, and will replace v1. Try v2 and give us feedback!
+
+Version v1 is currently in deprecation mode, which means only critical and safety bug fixes will be merged.
+
+
 ## Middleware
 
 [gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
-Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) 
+Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
 that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement
-common patterns: auth, logging, message, validation, retries or monitoring.
+common patterns: auth, logging, message validation, retries, or monitoring.
 
 These are generic building blocks that make it easy to build multiple microservices.
 The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
@@ -29,57 +36,57 @@
 
 myServer := grpc.NewServer(
     grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
-        grpc_recovery.StreamServerInterceptor(),
         grpc_ctxtags.StreamServerInterceptor(),
         grpc_opentracing.StreamServerInterceptor(),
         grpc_prometheus.StreamServerInterceptor,
         grpc_zap.StreamServerInterceptor(zapLogger),
         grpc_auth.StreamServerInterceptor(myAuthFunction),
+        grpc_recovery.StreamServerInterceptor(),
     )),
     grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
-        grpc_recovery.UnaryServerInterceptor(),
         grpc_ctxtags.UnaryServerInterceptor(),
         grpc_opentracing.UnaryServerInterceptor(),
         grpc_prometheus.UnaryServerInterceptor,
         grpc_zap.UnaryServerInterceptor(zapLogger),
         grpc_auth.UnaryServerInterceptor(myAuthFunction),
+        grpc_recovery.UnaryServerInterceptor(),
     )),
 )
 ```
 
 ## Interceptors
 
-*Please send a PR to add new interceptors or middleware to this list*
+_Please send a PR to add new interceptors or middleware to this list_
 
 #### Auth
-   * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware 
+
+- [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
 
 #### Logging
-   * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
-   * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
-   * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
-   * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
-   * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe).
+
+- [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
+- [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
+- [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
+- [`grpc_kit`](logging/kit/) - integration of [go-kit/log](https://github.com/go-kit/log) logging library into gRPC handlers.
+- [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows replacing loggers at runtime (thread-safe).
 
 #### Monitoring
-   * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
-   * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
-   * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+
+- [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
+- [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
+- [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+- [`otelgrpc`](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation/google.golang.org/grpc/otelgrpc) - [OpenTelemetry](https://opentelemetry.io/) client-side and server-side interceptors
 
 #### Client
-   * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
+
+- [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
 
 #### Server
-   * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
-   * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
-   * [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
 
+- [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
+- [`grpc_recovery`](recovery/) - turn panics into gRPC errors
+- [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
 
-## Status
-
-This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
-
-Additional tooling will be added, and contributions are welcome.
 
 ## License
 
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
index ea3738b..407d933 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
@@ -16,22 +16,41 @@
 // Execution is done in left-to-right order, including passing of context.
 // For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
 // will see context changes of one and two.
+//
+// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainUnaryInterceptor directly.
 func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
 	n := len(interceptors)
 
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+			return handler(ctx, req)
+		}
+	}
+
+	// The degenerate case, just return the single wrapped interceptor directly.
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	// Return a function which satisfies the interceptor interface, and which is
+	// a closure over the given list of interceptors to be chained.
 	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
-		chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
-			return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
-				return currentInter(currentCtx, currentReq, info, currentHandler)
+		currHandler := handler
+		// Iterate backwards through all interceptors except the first (outermost).
+		// Wrap each one in a function which satisfies the handler interface, but
+		// is also a closure over the `info` and `handler` parameters. Then pass
+		// each pseudo-handler to the next outer interceptor as the handler to be called.
+		for i := n - 1; i > 0; i-- {
+			// Rebind to loop-local vars so they can be closed over.
+			innerHandler, i := currHandler, i
+			currHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+				return interceptors[i](currentCtx, currentReq, info, innerHandler)
 			}
 		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(ctx, req)
+		// Finally return the result of calling the outermost interceptor with the
+		// outermost pseudo-handler created above as its handler.
+		return interceptors[0](ctx, req, info, currHandler)
 	}
 }
 
@@ -40,22 +59,31 @@
 // Execution is done in left-to-right order, including passing of context.
 // For example ChainUnaryServer(one, two, three) will execute one before two before three.
 // If you want to pass context between interceptors, use WrapServerStream.
+//
+// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainStreamInterceptor directly.
 func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
 	n := len(interceptors)
 
-	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
-		chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
-			return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
-				return currentInter(currentSrv, currentStream, info, currentHandler)
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+			return handler(srv, stream)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
+	return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		currHandler := handler
+		for i := n - 1; i > 0; i-- {
+			innerHandler, i := currHandler, i
+			currHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+				return interceptors[i](currentSrv, currentStream, info, innerHandler)
 			}
 		}
-
-		chainedHandler := handler
-		for i := n - 1; i >= 0; i-- {
-			chainedHandler = chainer(interceptors[i], chainedHandler)
-		}
-
-		return chainedHandler(srv, ss)
+		return interceptors[0](srv, stream, info, currHandler)
 	}
 }
 
@@ -66,19 +94,26 @@
 func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
 	n := len(interceptors)
 
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+			return invoker(ctx, method, req, reply, cc, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
 	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
-			return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
-				return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
+		currInvoker := invoker
+		for i := n - 1; i > 0; i-- {
+			innerInvoker, i := currInvoker, i
+			currInvoker = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+				return interceptors[i](currentCtx, currentMethod, currentReq, currentRepl, currentConn, innerInvoker, currentOpts...)
 			}
 		}
-
-		chainedInvoker := invoker
-		for i := n - 1; i >= 0; i-- {
-			chainedInvoker = chainer(interceptors[i], chainedInvoker)
-		}
-
-		return chainedInvoker(ctx, method, req, reply, cc, opts...)
+		return interceptors[0](ctx, method, req, reply, cc, currInvoker, opts...)
 	}
 }
 
@@ -89,19 +124,26 @@
 func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
 	n := len(interceptors)
 
+	// Dummy interceptor maintained for backward compatibility to avoid returning nil.
+	if n == 0 {
+		return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+			return streamer(ctx, desc, cc, method, opts...)
+		}
+	}
+
+	if n == 1 {
+		return interceptors[0]
+	}
+
 	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
-			return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
-				return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
+		currStreamer := streamer
+		for i := n - 1; i > 0; i-- {
+			innerStreamer, i := currStreamer, i
+			currStreamer = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+				return interceptors[i](currentCtx, currentDesc, currentConn, currentMethod, innerStreamer, currentOpts...)
 			}
 		}
-
-		chainedStreamer := streamer
-		for i := n - 1; i >= 0; i-- {
-			chainedStreamer = chainer(interceptors[i], chainedStreamer)
-		}
-
-		return chainedStreamer(ctx, desc, cc, method, opts...)
+		return interceptors[0](ctx, desc, cc, method, currStreamer, opts...)
 	}
 }
 
@@ -109,12 +151,16 @@
 //
 // WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
 // Basically syntactic sugar.
+//
+// Deprecated: use google.golang.org/grpc.ChainUnaryInterceptor instead.
 func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
-	return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
+	return grpc.ChainUnaryInterceptor(interceptors...)
 }
 
 // WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
 // Basically syntactic sugar.
+//
+// Deprecated: use google.golang.org/grpc.ChainStreamInterceptor instead.
 func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
-	return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
+	return grpc.ChainStreamInterceptor(interceptors...)
 }
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
index afd924a..f8ba719 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
@@ -18,7 +18,7 @@
 linear backoff with 10% jitter.
 
 For chained interceptors, the retry interceptor will call every interceptor that follows it
-whenever when a retry happens.
+whenever a retry happens.
 
 Please see examples for more advanced use.
 */
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
index 62d8312..003bbd9 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
@@ -5,8 +5,8 @@
 
 import (
 	"context"
-	"fmt"
 	"io"
+	"strconv"
 	"sync"
 	"time"
 
@@ -136,7 +136,6 @@
 type serverStreamingRetryingStream struct {
 	grpc.ClientStream
 	bufferedSends []interface{} // single message that the client can sen
-	receivedGood  bool          // indicates whether any prior receives were successful
 	wasClosedSend bool          // indicates that CloseSend was closed
 	parentCtx     context.Context
 	callOpts      *options
@@ -209,17 +208,8 @@
 }
 
 func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
-	s.mu.RLock()
-	wasGood := s.receivedGood
-	s.mu.RUnlock()
 	err := s.getStream().RecvMsg(m)
 	if err == nil || err == io.EOF {
-		s.mu.Lock()
-		s.receivedGood = true
-		s.mu.Unlock()
-		return false, err
-	} else if wasGood {
-		// previous RecvMsg in the stream succeeded, no retry logic should interfere
 		return false, err
 	}
 	if isContextError(err) {
@@ -303,7 +293,7 @@
 		ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout)
 	}
 	if attempt > 0 && callOpts.includeHeader {
-		mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt))
+		mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10))
 		ctx = mdClone.ToOutgoing(ctx)
 	}
 	return ctx
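
For context, a hedged sketch of wiring the retry interceptor on a client, using option helpers this package exports; the attempt count set in the hunk above reaches the server via the `AttemptMetadataKey` header, and the per-retry timeout option drives the per-call timeout branch. The address and retry codes below are placeholders:

```go
package main

import (
	"time"

	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	opts := []grpc_retry.CallOption{
		grpc_retry.WithMax(3),
		grpc_retry.WithBackoff(grpc_retry.BackoffLinear(100 * time.Millisecond)),
		grpc_retry.WithCodes(codes.Unavailable, codes.ResourceExhausted),
		// Sets the per-retry deadline applied in the hunk above.
		grpc_retry.WithPerRetryTimeout(time.Second),
	}
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(grpc_retry.UnaryClientInterceptor(opts...)),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```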
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
index 1c60585..15225d7 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
@@ -10,7 +10,7 @@
 	"google.golang.org/grpc/metadata"
 )
 
-// NiceMD is a convenience wrapper definiting extra functions on the metadata.
+// NiceMD is a convenience wrapper defining extra functions on the metadata.
 type NiceMD metadata.MD
 
 // ExtractIncoming extracts an inbound metadata from the server-side context.
@@ -39,7 +39,7 @@
 
 // Clone performs a *deep* copy of the metadata.MD.
 //
-// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted
+// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed
 // all keys get copied.
 func (m NiceMD) Clone(copiedKeys ...string) NiceMD {
 	newMd := NiceMD(metadata.Pairs())
@@ -61,7 +61,7 @@
 		newMd[k] = make([]string, len(vv))
 		copy(newMd[k], vv)
 	}
-	return NiceMD(newMd)
+	return newMd
 }
 
 // ToOutgoing sets the given NiceMD as a client-side context for dispatching.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
new file mode 100644
index 0000000..2233cff
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.gitignore
@@ -0,0 +1,201 @@
+#vendor
+vendor/
+
+# Created by .ignore support plugin (hsz.mobi)
+coverage.txt
+### Go template
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+### Windows template
+# Windows image file caches
+Thumbs.db
+ehthumbs.db
+
+# Folder config file
+Desktop.ini
+
+# Recycle Bin used on file shares
+$RECYCLE.BIN/
+
+# Windows Installer files
+*.cab
+*.msi
+*.msm
+*.msp
+
+# Windows shortcuts
+*.lnk
+### Kate template
+# Swap Files #
+.*.kate-swp
+.swp.*
+### SublimeText template
+# cache files for sublime text
+*.tmlanguage.cache
+*.tmPreferences.cache
+*.stTheme.cache
+
+# workspace files are user-specific
+*.sublime-workspace
+
+# project files should be checked into the repository, unless a significant
+# proportion of contributors will probably not be using SublimeText
+# *.sublime-project
+
+# sftp configuration file
+sftp-config.json
+### Linux template
+*~
+
+# temporary files which can be created if a process still has a handle open of a deleted file
+.fuse_hidden*
+
+# KDE directory preferences
+.directory
+
+# Linux trash folder which might appear on any partition or disk
+.Trash-*
+### JetBrains template
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff:
+.idea
+.idea/tasks.xml
+.idea/dictionaries
+.idea/vcs.xml
+.idea/jsLibraryMappings.xml
+
+# Sensitive or high-churn files:
+.idea/dataSources.ids
+.idea/dataSources.xml
+.idea/dataSources.local.xml
+.idea/sqlDataSources.xml
+.idea/dynamic.xml
+.idea/uiDesigner.xml
+
+# Gradle:
+.idea/gradle.xml
+.idea/libraries
+
+# Mongo Explorer plugin:
+.idea/mongoSettings.xml
+
+## File-based project format:
+*.iws
+
+## Plugin-specific files:
+
+# IntelliJ
+/out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+### Xcode template
+# Xcode
+#
+# gitignore contributors: remember to update Global/Xcode.gitignore, Objective-C.gitignore & Swift.gitignore
+
+## Build generated
+build/
+DerivedData/
+
+## Various settings
+*.pbxuser
+!default.pbxuser
+*.mode1v3
+!default.mode1v3
+*.mode2v3
+!default.mode2v3
+*.perspectivev3
+!default.perspectivev3
+xcuserdata/
+
+## Other
+*.moved-aside
+*.xccheckout
+*.xcscmblueprint
+### Eclipse template
+
+.metadata
+bin/
+tmp/
+*.tmp
+*.bak
+*.swp
+*~.nib
+local.properties
+.settings/
+.loadpath
+.recommenders
+
+# Eclipse Core
+.project
+
+# External tool builders
+.externalToolBuilders/
+
+# Locally stored "Eclipse launch configurations"
+*.launch
+
+# PyDev specific (Python IDE for Eclipse)
+*.pydevproject
+
+# CDT-specific (C/C++ Development Tooling)
+.cproject
+
+# JDT-specific (Eclipse Java Development Tools)
+.classpath
+
+# Java annotation processor (APT)
+.factorypath
+
+# PDT-specific (PHP Development Tools)
+.buildpath
+
+# sbteclipse plugin
+.target
+
+# Tern plugin
+.tern-project
+
+# TeXlipse plugin
+.texlipse
+
+# STS (Spring Tool Suite)
+.springBeans
+
+# Code Recommenders
+.recommenders/
+
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
new file mode 100644
index 0000000..2a845b9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/.travis.yml
@@ -0,0 +1,25 @@
+sudo: false
+language: go
+# * github.com/grpc/grpc-go still supports go1.6
+#   - When we drop support for go1.6 we can remove golang.org/x/net/context
+#     below as it is part of the Go std library since go1.7
+# * github.com/prometheus/client_golang already requires at least go1.7 since
+#   September 2017
+go:
+  - 1.6.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - master
+
+install:
+  - go get github.com/prometheus/client_golang/prometheus
+  - go get google.golang.org/grpc
+  - go get golang.org/x/net/context
+  - go get github.com/stretchr/testify
+script:
+  - make test
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
new file mode 100644
index 0000000..19a8059
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md
@@ -0,0 +1,24 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04
+
+### Added
+
+* Provide metrics object as `prometheus.Collector`, for conventional metric registration.
+* Support non-default/global Prometheus registry.
+* Allow configuring counters with `prometheus.CounterOpts`.
+
+### Changed
+
+* Remove usage of deprecated `grpc.Code()`.
+* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`.
+
+---
+
+This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases).
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
new file mode 100644
index 0000000..b2b0650
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE
@@ -0,0 +1,201 @@
+                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
new file mode 100644
index 0000000..499c583
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -0,0 +1,247 @@
+# Go gRPC Interceptors for Prometheus monitoring 
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed on to the user's application logic. It is a perfect way to implement
+common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    // Initialize your gRPC server's interceptor.
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+    )
+    // Register your gRPC service implementations.
+    myservice.RegisterMyServiceServer(s.server, &myServiceImpl{})
+    // After all your registrations, make sure all of the Prometheus metrics are initialized.
+    grpc_prometheus.Register(myServer)
+    // Register Prometheus metrics handler.    
+    http.Handle("/metrics", promhttp.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+   clientConn, err = grpc.Dial(
+       address,
+       grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+       grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+   )
+   client = pb_testproto.NewTestServiceClient(clientConn)
+   resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
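+
+Since `v1.2.0` the metrics objects also implement `prometheus.Collector`, so they can be registered on a
+non-default registry instead of relying on the package-level defaults. A minimal sketch of that wiring
+(the `reg`, `myServer` and `myServiceImpl` names are illustrative, mirroring the server-side example above):
+
+```go
+    // Create a dedicated registry and metrics instance instead of using
+    // the package-level defaults and their init()-time registration.
+    reg := prometheus.NewRegistry()
+    grpcMetrics := grpc_prometheus.NewServerMetrics()
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpcMetrics.StreamServerInterceptor()),
+        grpc.UnaryInterceptor(grpcMetrics.UnaryServerInterceptor()),
+    )
+    myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
+    // Pre-initialize all label combinations to 0, then register the
+    // ServerMetrics as a prometheus.Collector on the dedicated registry.
+    grpcMetrics.InitializeMetrics(myServer)
+    reg.MustRegister(grpcMetrics)
+    http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
+```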
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as the Prometheus subsystem name, and all client-side
+metrics start with `grpc_client`. The two mirror each other's concepts, and all metrics carry the same rich labels:
+
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of the protobuf `package` and
+    the `service` section name. E.g. for `package = mwitkow.testproto` and
+    `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between the types is important, especially for latency measurements.
+
+     - `unary` is a single-request, single-response RPC
+     - `client_stream` is a multi-request, single-response RPC
+     - `server_stream` is a single-request, multi-response RPC
+     - `bidi_stream` is a multi-request, multi-response RPC
+
+
+Additionally, for completed RPCs, the following labels are used:
+
+  * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+    The list of all statuses is too long, but here are some common ones (a short sketch of the mapping follows this list):
+
+      - `OK` - means the RPC was successful
+      - `InvalidArgument` - the RPC contained bad values
+      - `Internal` - a server-side error not disclosed to the clients
+      
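+A self-contained sketch of how a Go error maps to that `grpc_code` label value (the `codeLabel` helper is
+hypothetical, but `status.FromError` is exactly the call the interceptors in this package use):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// codeLabel derives the grpc_code label the way the interceptors do:
+// status.FromError maps a nil error to codes.OK and a plain non-status
+// error to codes.Unknown.
+func codeLabel(err error) string {
+	st, _ := status.FromError(err)
+	return st.Code().String()
+}
+
+func main() {
+	fmt.Println(codeLabel(nil))                                        // OK
+	fmt.Println(codeLabel(status.Error(codes.InvalidArgument, "bad"))) // InvalidArgument
+}
+```
+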
+## Counters
+
+The counters, and their up-to-date documentation, live in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
+their values are exposed via the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss the `grpc_server` metrics; the `grpc_client` ones mirror them.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call, it will increment
+`grpc_server_started_total` and start the handling-time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request 
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on 
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go)) 
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default. To enable them, call the following in your
+server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
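+
+If the default buckets don't fit your latency profile, they can be overridden when enabling the
+histogram (a sketch; the bucket boundaries below are illustrative only):
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram(
+    grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.5, 1, 5}),
+)
+```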
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method 
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for 
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide raw metrics to the monitoring system and let
+the aggregations be handled there. The verbosity of the above metrics makes that
+flexibility possible. Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the 
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
+this is a combination of the two above examples. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"` what is the `grpc_service`-wide `10m` average of messages returned for all `
+server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. allows 
+you to track when clients started to send "wide" queries that ret
+Note the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99th-percentile latency of unary requests
+```jsoniq
+histogram_quantile(0.99, 
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns an 99%-tile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Please note the `5m` rate, this means that the quantile
+estimation will take samples in a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system 
+(e.g. impact of caching).
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ / 
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"` calculate the by-`grpc_service` fraction of slow requests that took longer than `0.25` 
+seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
+buckets, meaning that counting "fast" requests fractions is easier. However, simple maths helps.
+This is an example of a query you would like to alert on in your system for SLA violations, 
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring of *production* gRPC microservices at [Improbable](https://improbable.io).
+
+## License
+
+`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
new file mode 100644
index 0000000..751a4c7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
@@ -0,0 +1,39 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for client-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// DefaultClientMetrics is the default instance of ClientMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultClientMetrics = NewClientMetrics()
+
+	// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
+
+	// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
+	prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of
+// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
+// query. This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
+	prom.Register(DefaultClientMetrics.clientHandledHistogram)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
new file mode 100644
index 0000000..9b476f9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go
@@ -0,0 +1,170 @@
+package grpc_prometheus
+
+import (
+	"io"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ClientMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC client.
+type ClientMetrics struct {
+	clientStartedCounter          *prom.CounterVec
+	clientHandledCounter          *prom.CounterVec
+	clientStreamMsgReceived       *prom.CounterVec
+	clientStreamMsgSent           *prom.CounterVec
+	clientHandledHistogramEnabled bool
+	clientHandledHistogramOpts    prom.HistogramOpts
+	clientHandledHistogram        *prom.HistogramVec
+}
+
+// NewClientMetrics returns a ClientMetrics object. Use a new instance of
+// ClientMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
+func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
+	opts := counterOptions(counterOpts)
+	return &ClientMetrics{
+		clientStartedCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_started_total",
+				Help: "Total number of RPCs started on the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientHandledCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_handled_total",
+				Help: "Total number of RPCs completed by the client, regardless of success or failure.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+
+		clientStreamMsgReceived: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_msg_received_total",
+				Help: "Total number of RPC stream messages received by the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientStreamMsgSent: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_client_msg_sent_total",
+				Help: "Total number of gRPC stream messages sent by the client.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+		clientHandledHistogramEnabled: false,
+		clientHandledHistogramOpts: prom.HistogramOpts{
+			Name:    "grpc_client_handling_seconds",
+			Help:    "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
+			Buckets: prom.DefBuckets,
+		},
+		clientHandledHistogram: nil,
+	}
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
+	m.clientStartedCounter.Describe(ch)
+	m.clientHandledCounter.Describe(ch)
+	m.clientStreamMsgReceived.Describe(ch)
+	m.clientStreamMsgSent.Describe(ch)
+	if m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram.Describe(ch)
+	}
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
+	m.clientStartedCounter.Collect(ch)
+	m.clientHandledCounter.Collect(ch)
+	m.clientStreamMsgReceived.Collect(ch)
+	m.clientStreamMsgSent.Collect(ch)
+	if m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram.Collect(ch)
+	}
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&m.clientHandledHistogramOpts)
+	}
+	if !m.clientHandledHistogramEnabled {
+		m.clientHandledHistogram = prom.NewHistogramVec(
+			m.clientHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+	m.clientHandledHistogramEnabled = true
+}
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		monitor := newClientReporter(m, Unary, method)
+		monitor.SentMessage()
+		err := invoker(ctx, method, req, reply, cc, opts...)
+		// A unary client receives the reply message only on success.
+		if err == nil {
+			monitor.ReceivedMessage()
+		}
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		monitor := newClientReporter(m, clientStreamType(desc), method)
+		clientStream, err := streamer(ctx, desc, cc, method, opts...)
+		if err != nil {
+			st, _ := status.FromError(err)
+			monitor.Handled(st.Code())
+			return nil, err
+		}
+		return &monitoredClientStream{clientStream, monitor}, nil
+	}
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+	grpc.ClientStream
+	monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+	err := s.ClientStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+	err := s.ClientStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	} else if err == io.EOF {
+		s.monitor.Handled(codes.OK)
+	} else {
+		st, _ := status.FromError(err)
+		s.monitor.Handled(st.Code())
+	}
+	return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
new file mode 100644
index 0000000..cbf1532
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
@@ -0,0 +1,46 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type clientReporter struct {
+	metrics     *ClientMetrics
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
+	r := &clientReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.clientHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *clientReporter) ReceivedMessage() {
+	r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) SentMessage() {
+	r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *clientReporter) Handled(code codes.Code) {
+	r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.clientHandledHistogramEnabled {
+		r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
new file mode 100644
index 0000000..74c0842
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile
@@ -0,0 +1,16 @@
+SHELL = /bin/bash
+
+GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/)
+
+all: vet fmt test
+
+fmt:
+	go fmt $(GOFILES_NOVENDOR)
+
+vet:
+	go vet $(GOFILES_NOVENDOR)
+
+test: vet
+	./scripts/test_all.sh
+
+.PHONY: all vet fmt test
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
new file mode 100644
index 0000000..9d51aec
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go
@@ -0,0 +1,41 @@
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+// A CounterOption lets you add options to Counter metrics using With* funcs.
+type CounterOption func(*prom.CounterOpts)
+
+type counterOptions []CounterOption
+
+func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
+	for _, f := range co {
+		f(&o)
+	}
+	return o
+}
+
+// WithConstLabels allows you to add ConstLabels to Counter metrics.
+func WithConstLabels(labels prom.Labels) CounterOption {
+	return func(o *prom.CounterOpts) {
+		o.ConstLabels = labels
+	}
+}
+
+// A HistogramOption lets you add options to Histogram metrics using With*
+// funcs.
+type HistogramOption func(*prom.HistogramOpts)
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+	return func(o *prom.HistogramOpts) { o.Buckets = buckets }
+}
+
+// WithHistogramConstLabels allows you to add custom ConstLabels to
+// histograms metrics.
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
+	return func(o *prom.HistogramOpts) {
+		o.ConstLabels = labels
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
new file mode 100644
index 0000000..322f990
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for server-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+var (
+	// DefaultServerMetrics is the default instance of ServerMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultServerMetrics = NewServerMetrics()
+
+	// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
+
+	// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
+	prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
+}
+
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics), and should
+// be called *after* all services have been registered with the server. This
+// function acts on the DefaultServerMetrics variable.
+func Register(server *grpc.Server) {
+	DefaultServerMetrics.InitializeMetrics(server)
+}
+
+// EnableHandlingTimeHistogram turns on recording of handling time
+// of RPCs. Histogram metrics can be very expensive for Prometheus
+// to retain and query. This function acts on the DefaultServerMetrics
+// variable and the default Prometheus metrics registry.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
+	prom.Register(DefaultServerMetrics.serverHandledHistogram)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
new file mode 100644
index 0000000..5b1467e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
@@ -0,0 +1,185 @@
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/status"
+)
+
+// ServerMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC server.
+type ServerMetrics struct {
+	serverStartedCounter          *prom.CounterVec
+	serverHandledCounter          *prom.CounterVec
+	serverStreamMsgReceived       *prom.CounterVec
+	serverStreamMsgSent           *prom.CounterVec
+	serverHandledHistogramEnabled bool
+	serverHandledHistogramOpts    prom.HistogramOpts
+	serverHandledHistogram        *prom.HistogramVec
+}
+
+// NewServerMetrics returns a ServerMetrics object. Use a new instance of
+// ServerMetrics when not using the default Prometheus metrics registry, for
+// example when wanting to control which metrics are added to a registry as
+// opposed to automatically adding metrics via init functions.
+func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
+	opts := counterOptions(counterOpts)
+	return &ServerMetrics{
+		serverStartedCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_started_total",
+				Help: "Total number of RPCs started on the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverHandledCounter: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_handled_total",
+				Help: "Total number of RPCs completed on the server, regardless of success or failure.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+		serverStreamMsgReceived: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_msg_received_total",
+				Help: "Total number of RPC stream messages received on the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverStreamMsgSent: prom.NewCounterVec(
+			opts.apply(prom.CounterOpts{
+				Name: "grpc_server_msg_sent_total",
+				Help: "Total number of gRPC stream messages sent by the server.",
+			}), []string{"grpc_type", "grpc_service", "grpc_method"}),
+		serverHandledHistogramEnabled: false,
+		serverHandledHistogramOpts: prom.HistogramOpts{
+			Name:    "grpc_server_handling_seconds",
+			Help:    "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+			Buckets: prom.DefBuckets,
+		},
+		serverHandledHistogram: nil,
+	}
+}
+
+// EnableHandlingTimeHistogram enables histograms being registered when
+// registering the ServerMetrics on a Prometheus registry. Histograms can be
+// expensive on Prometheus servers. It takes options to configure histogram
+// options such as the defined buckets.
+func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	for _, o := range opts {
+		o(&m.serverHandledHistogramOpts)
+	}
+	if !m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram = prom.NewHistogramVec(
+			m.serverHandledHistogramOpts,
+			[]string{"grpc_type", "grpc_service", "grpc_method"},
+		)
+	}
+	m.serverHandledHistogramEnabled = true
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
+	m.serverStartedCounter.Describe(ch)
+	m.serverHandledCounter.Describe(ch)
+	m.serverStreamMsgReceived.Describe(ch)
+	m.serverStreamMsgSent.Describe(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Describe(ch)
+	}
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
+	m.serverStartedCounter.Collect(ch)
+	m.serverHandledCounter.Collect(ch)
+	m.serverStreamMsgReceived.Collect(ch)
+	m.serverStreamMsgSent.Collect(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Collect(ch)
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		monitor := newServerReporter(m, Unary, info.FullMethod)
+		monitor.ReceivedMessage()
+		resp, err := handler(ctx, req)
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		if err == nil {
+			monitor.SentMessage()
+		}
+		return resp, err
+	}
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
+		err := handler(srv, &monitoredServerStream{ss, monitor})
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// InitializeMetrics initializes all metrics, with their appropriate null
+// value, for all gRPC methods registered on a gRPC server. This is useful to
+// ensure that all metrics exist when collecting and querying.
+func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(m, serviceName, &mInfo)
+		}
+	}
+}
+
+func streamRPCType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
+func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
+	methodName := mInfo.Name
+	methodType := string(typeFromMethodInfo(mInfo))
+	// These are just references (no increments), as just referencing will create the labels but not set values.
+	metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	if metrics.serverHandledHistogramEnabled {
+		metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+	}
+	for _, code := range allCodes {
+		metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
new file mode 100644
index 0000000..aa9db54
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go
@@ -0,0 +1,46 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"time"
+
+	"google.golang.org/grpc/codes"
+)
+
+type serverReporter struct {
+	metrics     *ServerMetrics
+	rpcType     grpcType
+	serviceName string
+	methodName  string
+	startTime   time.Time
+}
+
+func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
+	r := &serverReporter{
+		metrics: m,
+		rpcType: rpcType,
+	}
+	if r.metrics.serverHandledHistogramEnabled {
+		r.startTime = time.Now()
+	}
+	r.serviceName, r.methodName = splitMethodName(fullMethod)
+	r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+	return r
+}
+
+func (r *serverReporter) ReceivedMessage() {
+	r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) SentMessage() {
+	r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
+}
+
+func (r *serverReporter) Handled(code codes.Code) {
+	r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
+	if r.metrics.serverHandledHistogramEnabled {
+		r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
+	}
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
new file mode 100644
index 0000000..7987de3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+package grpc_prometheus
+
+import (
+	"strings"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+type grpcType string
+
+const (
+	Unary        grpcType = "unary"
+	ClientStream grpcType = "client_stream"
+	ServerStream grpcType = "server_stream"
+	BidiStream   grpcType = "bidi_stream"
+)
+
+var (
+	allCodes = []codes.Code{
+		codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
+		codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
+		codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
+		codes.Unavailable, codes.DataLoss,
+	}
+)
+
+func splitMethodName(fullMethodName string) (string, string) {
+	fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
+	if i := strings.Index(fullMethodName, "/"); i >= 0 {
+		return fullMethodName[:i], fullMethodName[i+1:]
+	}
+	return "unknown", "unknown"
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+	if !mInfo.IsClientStream && !mInfo.IsServerStream {
+		return Unary
+	}
+	if mInfo.IsClientStream && !mInfo.IsServerStream {
+		return ClientStream
+	}
+	if !mInfo.IsClientStream && mInfo.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
new file mode 100644
index 0000000..3645162
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
new file mode 100644
index 0000000..d71991e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
@@ -0,0 +1,44 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+    name = "options_proto_files",
+    srcs = [
+        "annotations.proto",
+        "openapiv2.proto",
+    ],
+)
+
+go_library(
+    name = "options",
+    embed = [":options_go_proto"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+)
+
+proto_library(
+    name = "options_proto",
+    srcs = [
+        "annotations.proto",
+        "openapiv2.proto",
+    ],
+    deps = [
+        "@com_google_protobuf//:descriptor_proto",
+        "@com_google_protobuf//:struct_proto",
+    ],
+)
+
+go_proto_library(
+    name = "options_go_proto",
+    compilers = ["//:go_apiv2"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+    proto = ":options_proto",
+)
+
+alias(
+    name = "go_default_library",
+    actual = ":options",
+    visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
new file mode 100644
index 0000000..738c975
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: (*Swagger)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+		Tag:           "bytes,1042,opt,name=openapiv2_swagger",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
+		ExtensionType: (*Operation)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+		Tag:           "bytes,1042,opt,name=openapiv2_operation",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*Schema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+		Tag:           "bytes,1042,opt,name=openapiv2_schema",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.EnumOptions)(nil),
+		ExtensionType: (*EnumSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+		Tag:           "bytes,1042,opt,name=openapiv2_enum",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
+		ExtensionType: (*Tag)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+		Tag:           "bytes,1042,opt,name=openapiv2_tag",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*JSONSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+		Tag:           "bytes,1042,opt,name=openapiv2_field",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+	E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+	E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+	E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+	E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+	E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+	E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+	0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+	0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+	0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+	0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+	0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+	0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+	0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+	0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+	(*descriptorpb.FileOptions)(nil),    // 0: google.protobuf.FileOptions
+	(*descriptorpb.MethodOptions)(nil),  // 1: google.protobuf.MethodOptions
+	(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+	(*descriptorpb.EnumOptions)(nil),    // 3: google.protobuf.EnumOptions
+	(*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+	(*descriptorpb.FieldOptions)(nil),   // 5: google.protobuf.FieldOptions
+	(*Swagger)(nil),                     // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                   // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Schema)(nil),                      // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                  // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*Tag)(nil),                         // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*JSONSchema)(nil),                  // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+	0,  // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+	1,  // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+	2,  // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+	3,  // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+	4,  // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+	5,  // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+	6,  // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	7,  // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+	8,  // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	9,  // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	12, // [12:12] is the sub-list for method output_type
+	12, // [12:12] is the sub-list for method input_type
+	6,  // [6:12] is the sub-list for extension type_name
+	0,  // [0:6] is the sub-list for extension extendee
+	0,  // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+	if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+		return
+	}
+	file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 6,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+		ExtensionInfos:    file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_annotations_proto = out.File
+	file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
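As a sketch of how the `E_Openapiv2*` extension descriptors above are typically consumed (not part of the vendored file; the hand-built `MethodOptions` stands in for what a protoc plugin would read from a `MethodDescriptorProto`), an OpenAPI generator can pull the operation option off a method with `proto.GetExtension`:

```go
package main

import (
	"fmt"

	options "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

// operationSummary returns the OpenAPI summary attached to a method through
// the openapiv2_operation extension, if the option is present.
func operationSummary(opts *descriptorpb.MethodOptions) (string, bool) {
	if opts == nil || !proto.HasExtension(opts, options.E_Openapiv2Operation) {
		return "", false
	}
	op := proto.GetExtension(opts, options.E_Openapiv2Operation).(*options.Operation)
	return op.GetSummary(), true
}

func main() {
	// Hand-built options for demonstration; a real plugin reads these from the
	// CodeGeneratorRequest it receives on stdin.
	opts := &descriptorpb.MethodOptions{}
	proto.SetExtension(opts, options.E_Openapiv2Operation,
		options.Operation_builder{Summary: "Get a message."}.Build())

	if summary, ok := operationSummary(opts); ok {
		fmt.Println(summary) // Get a message.
	}
}
```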
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
new file mode 100644
index 0000000..aecc5e7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/descriptor.proto";
+import "protoc-gen-openapiv2/options/openapiv2.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+extend google.protobuf.FileOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Swagger openapiv2_swagger = 1042;
+}
+extend google.protobuf.MethodOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Operation openapiv2_operation = 1042;
+}
+extend google.protobuf.MessageOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Schema openapiv2_schema = 1042;
+}
+extend google.protobuf.EnumOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  EnumSchema openapiv2_enum = 1042;
+}
+extend google.protobuf.ServiceOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  Tag openapiv2_tag = 1042;
+}
+extend google.protobuf.FieldOptions {
+  // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+  //
+  // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+  // different descriptor messages.
+  JSONSchema openapiv2_field = 1042;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
new file mode 100644
index 0000000..b570167
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FileOptions)(nil),
+		ExtensionType: (*Swagger)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+		Tag:           "bytes,1042,opt,name=openapiv2_swagger",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MethodOptions)(nil),
+		ExtensionType: (*Operation)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+		Tag:           "bytes,1042,opt,name=openapiv2_operation",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.MessageOptions)(nil),
+		ExtensionType: (*Schema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+		Tag:           "bytes,1042,opt,name=openapiv2_schema",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.EnumOptions)(nil),
+		ExtensionType: (*EnumSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+		Tag:           "bytes,1042,opt,name=openapiv2_enum",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
+		ExtensionType: (*Tag)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+		Tag:           "bytes,1042,opt,name=openapiv2_tag",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*JSONSchema)(nil),
+		Field:         1042,
+		Name:          "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+		Tag:           "bytes,1042,opt,name=openapiv2_field",
+		Filename:      "protoc-gen-openapiv2/options/annotations.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+	E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+	E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+	E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+	E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+	E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+	//
+	// All IDs are the same, as assigned. It is okay that they are the same, as they extend
+	// different descriptor messages.
+	//
+	// optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+	E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+	0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+	0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+	0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+	0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+	0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+	0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+	0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+	0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+	0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+	(*descriptorpb.FileOptions)(nil),    // 0: google.protobuf.FileOptions
+	(*descriptorpb.MethodOptions)(nil),  // 1: google.protobuf.MethodOptions
+	(*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+	(*descriptorpb.EnumOptions)(nil),    // 3: google.protobuf.EnumOptions
+	(*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+	(*descriptorpb.FieldOptions)(nil),   // 5: google.protobuf.FieldOptions
+	(*Swagger)(nil),                     // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                   // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Schema)(nil),                      // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                  // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*Tag)(nil),                         // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*JSONSchema)(nil),                  // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+	0,  // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+	1,  // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+	2,  // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+	3,  // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+	4,  // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+	5,  // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+	6,  // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	7,  // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+	8,  // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	9,  // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	12, // [12:12] is the sub-list for method output_type
+	12, // [12:12] is the sub-list for method input_type
+	6,  // [6:12] is the sub-list for extension type_name
+	0,  // [0:6] is the sub-list for extension extendee
+	0,  // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+	if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+		return
+	}
+	file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 6,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+		ExtensionInfos:    file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_annotations_proto = out.File
+	file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
new file mode 100644
index 0000000..07dfb95
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
@@ -0,0 +1,7 @@
+version: v2
+plugins:
+  - remote: buf.build/protocolbuffers/go:v1.36.0
+    out: .
+    opt:
+      - paths=source_relative
+      - default_api_level=API_HYBRID
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
new file mode 100644
index 0000000..3a34e66
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
@@ -0,0 +1,4263 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+	Scheme_UNKNOWN Scheme = 0
+	Scheme_HTTP    Scheme = 1
+	Scheme_HTTPS   Scheme = 2
+	Scheme_WS      Scheme = 3
+	Scheme_WSS     Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+	Scheme_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "HTTP",
+		2: "HTTPS",
+		3: "WS",
+		4: "WSS",
+	}
+	Scheme_value = map[string]int32{
+		"UNKNOWN": 0,
+		"HTTP":    1,
+		"HTTPS":   2,
+		"WS":      3,
+		"WSS":     4,
+	}
+)
+
+func (x Scheme) Enum() *Scheme {
+	p := new(Scheme)
+	*p = x
+	return p
+}
+
+func (x Scheme) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+	HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+	HeaderParameter_STRING  HeaderParameter_Type = 1
+	HeaderParameter_NUMBER  HeaderParameter_Type = 2
+	HeaderParameter_INTEGER HeaderParameter_Type = 3
+	HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+	HeaderParameter_Type_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "STRING",
+		2: "NUMBER",
+		3: "INTEGER",
+		4: "BOOLEAN",
+	}
+	HeaderParameter_Type_value = map[string]int32{
+		"UNKNOWN": 0,
+		"STRING":  1,
+		"NUMBER":  2,
+		"INTEGER": 3,
+		"BOOLEAN": 4,
+	}
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+	p := new(HeaderParameter_Type)
+	*p = x
+	return p
+}
+
+func (x HeaderParameter_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+	JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+	JSONSchema_ARRAY   JSONSchema_JSONSchemaSimpleTypes = 1
+	JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+	JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+	JSONSchema_NULL    JSONSchema_JSONSchemaSimpleTypes = 4
+	JSONSchema_NUMBER  JSONSchema_JSONSchemaSimpleTypes = 5
+	JSONSchema_OBJECT  JSONSchema_JSONSchemaSimpleTypes = 6
+	JSONSchema_STRING  JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+	JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "ARRAY",
+		2: "BOOLEAN",
+		3: "INTEGER",
+		4: "NULL",
+		5: "NUMBER",
+		6: "OBJECT",
+		7: "STRING",
+	}
+	JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+		"UNKNOWN": 0,
+		"ARRAY":   1,
+		"BOOLEAN": 2,
+		"INTEGER": 3,
+		"NULL":    4,
+		"NUMBER":  5,
+		"OBJECT":  6,
+		"STRING":  7,
+	}
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+	p := new(JSONSchema_JSONSchemaSimpleTypes)
+	*p = x
+	return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+	SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+	SecurityScheme_TYPE_BASIC   SecurityScheme_Type = 1
+	SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+	SecurityScheme_TYPE_OAUTH2  SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+	SecurityScheme_Type_name = map[int32]string{
+		0: "TYPE_INVALID",
+		1: "TYPE_BASIC",
+		2: "TYPE_API_KEY",
+		3: "TYPE_OAUTH2",
+	}
+	SecurityScheme_Type_value = map[string]int32{
+		"TYPE_INVALID": 0,
+		"TYPE_BASIC":   1,
+		"TYPE_API_KEY": 2,
+		"TYPE_OAUTH2":  3,
+	}
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+	p := new(SecurityScheme_Type)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+	SecurityScheme_IN_INVALID SecurityScheme_In = 0
+	SecurityScheme_IN_QUERY   SecurityScheme_In = 1
+	SecurityScheme_IN_HEADER  SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+	SecurityScheme_In_name = map[int32]string{
+		0: "IN_INVALID",
+		1: "IN_QUERY",
+		2: "IN_HEADER",
+	}
+	SecurityScheme_In_value = map[string]int32{
+		"IN_INVALID": 0,
+		"IN_QUERY":   1,
+		"IN_HEADER":  2,
+	}
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+	p := new(SecurityScheme_In)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_In) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+	SecurityScheme_FLOW_INVALID     SecurityScheme_Flow = 0
+	SecurityScheme_FLOW_IMPLICIT    SecurityScheme_Flow = 1
+	SecurityScheme_FLOW_PASSWORD    SecurityScheme_Flow = 2
+	SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+	SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+	SecurityScheme_Flow_name = map[int32]string{
+		0: "FLOW_INVALID",
+		1: "FLOW_IMPLICIT",
+		2: "FLOW_PASSWORD",
+		3: "FLOW_APPLICATION",
+		4: "FLOW_ACCESS_CODE",
+	}
+	SecurityScheme_Flow_value = map[string]int32{
+		"FLOW_INVALID":     0,
+		"FLOW_IMPLICIT":    1,
+		"FLOW_PASSWORD":    2,
+		"FLOW_APPLICATION": 3,
+		"FLOW_ACCESS_CODE": 4,
+	}
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+	p := new(SecurityScheme_Flow)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  schemes: HTTPS;
+//	  consumes: "application/json";
+//	  produces: "application/json";
+//	};
+type Swagger struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Specifies the OpenAPI Specification version being used. It can be
+	// used by the OpenAPI UI and other clients to interpret the API listing. The
+	// value MUST be "2.0".
+	Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+	// Provides metadata about the API. The metadata can be used by the
+	// clients if needed.
+	Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	// The host (name or IP) serving the API. This MUST be the host only and does
+	// not include the scheme nor sub-paths. It MAY include a port. If the host is
+	// not included, the host serving the documentation is to be used (including
+	// the port). The host does not support path templating.
+	Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+	// The base path on which the API is served, which is relative to the host. If
+	// it is not included, the API is served directly under the host. The value
+	// MUST start with a leading slash (/). The basePath does not support path
+	// templating.
+	// Note that using `base_path` does not change the endpoint paths that are
+	// generated in the resulting OpenAPI file. If you wish to use `base_path`
+	// with relatively generated OpenAPI paths, the `base_path` prefix must be
+	// manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+	// The transfer protocol of the API. Values MUST be from the list: "http",
+	// "https", "ws", "wss". If the schemes is not included, the default scheme to
+	// be used is the one used to access the OpenAPI definition itself.
+	Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	// A list of MIME types the APIs can consume. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	// A list of MIME types the APIs can produce. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	// An object to hold responses that can be used across operations. This
+	// property does not define global responses for all operations.
+	Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Security scheme definitions that can be used across the specification.
+	SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	// A declaration of which security schemes are applied for the API as a whole.
+	// The list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements).
+	// Individual operations can override this definition.
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+	// Additional external documentation.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Custom properties that start with "x-", such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+	*x = Swagger{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+	if x != nil {
+		return x.Swagger
+	}
+	return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+	if x != nil {
+		return x.Info
+	}
+	return nil
+}
+
+func (x *Swagger) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+	if x != nil {
+		return x.BasePath
+	}
+	return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+	if x != nil {
+		return x.Schemes
+	}
+	return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
+	}
+	return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+	if x != nil {
+		return x.Produces
+	}
+	return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.Responses
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+	if x != nil {
+		return x.SecurityDefinitions
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+	x.Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+	x.Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+	x.Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+	x.BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+	x.Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+	x.Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+	x.Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+	x.Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+	x.SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+	x.Security = v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+	x.Tags = v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+	if x == nil {
+		return false
+	}
+	return x.Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+	if x == nil {
+		return false
+	}
+	return x.SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+	x.Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+	x.SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Specifies the OpenAPI Specification version being used. It can be
+	// used by the OpenAPI UI and other clients to interpret the API listing. The
+	// value MUST be "2.0".
+	Swagger string
+	// Provides metadata about the API. The metadata can be used by the
+	// clients if needed.
+	Info *Info
+	// The host (name or IP) serving the API. This MUST be the host only and does
+	// not include the scheme nor sub-paths. It MAY include a port. If the host is
+	// not included, the host serving the documentation is to be used (including
+	// the port). The host does not support path templating.
+	Host string
+	// The base path on which the API is served, which is relative to the host. If
+	// it is not included, the API is served directly under the host. The value
+	// MUST start with a leading slash (/). The basePath does not support path
+	// templating.
+	// Note that using `base_path` does not change the endpoint paths that are
+	// generated in the resulting OpenAPI file. If you wish to use `base_path`
+	// with relatively generated OpenAPI paths, the `base_path` prefix must be
+	// manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath string
+	// The transfer protocol of the API. Values MUST be from the list: "http",
+	// "https", "ws", "wss". If the schemes is not included, the default scheme to
+	// be used is the one used to access the OpenAPI definition itself.
+	Schemes []Scheme
+	// A list of MIME types the APIs can consume. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Consumes []string
+	// A list of MIME types the APIs can produce. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Produces []string
+	// An object to hold responses that can be used across operations. This
+	// property does not define global responses for all operations.
+	Responses map[string]*Response
+	// Security scheme definitions that can be used across the specification.
+	SecurityDefinitions *SecurityDefinitions
+	// A declaration of which security schemes are applied for the API as a whole.
+	// The list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements).
+	// Individual operations can override this definition.
+	Security []*SecurityRequirement
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []*Tag
+	// Additional external documentation.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+	m0 := &Swagger{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Swagger = b.Swagger
+	x.Info = b.Info
+	x.Host = b.Host
+	x.BasePath = b.BasePath
+	x.Schemes = b.Schemes
+	x.Consumes = b.Consumes
+	x.Produces = b.Produces
+	x.Responses = b.Responses
+	x.SecurityDefinitions = b.SecurityDefinitions
+	x.Security = b.Security
+	x.Tags = b.Tags
+	x.ExternalDocs = b.ExternalDocs
+	x.Extensions = b.Extensions
+	return m0
+}
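+
+// As a usage sketch, the builder lets a caller assemble a Swagger document
+// with a single keyed literal instead of a chain of Set* calls (illustrative
+// only; assumes the Scheme enum constants generated earlier in this file,
+// e.g. Scheme_HTTPS):
+//
+//	doc := Swagger_builder{
+//		Swagger:  "2.0",
+//		Host:     "api.example.com",
+//		BasePath: "/v1",
+//		Schemes:  []Scheme{Scheme_HTTPS},
+//		Info:     Info_builder{Title: "Echo API", Version: "1.0"}.Build(),
+//	}.Build()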
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//	service EchoService {
+//	  rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//	    option (google.api.http) = {
+//	      get: "/v1/example/echo/{id}"
+//	    };
+//
+//	    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//	      summary: "Get a message.";
+//	      operation_id: "getMessage";
+//	      tags: "echo";
+//	      responses: {
+//	        key: "200"
+//	        value: {
+//	          description: "OK";
+//	        }
+//	      }
+//	    };
+//	  }
+//	}
+type Operation struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+	// A short summary of what the operation does. For maximum readability in the
+	// swagger-ui, this field SHOULD be less than 120 characters.
+	Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+	// A verbose explanation of the operation behavior. GFM syntax can be used for
+	// rich text representation.
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// Additional external documentation for this operation.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Unique string used to identify the operation. The id MUST be unique among
+	// all operations described in the API. Tools and libraries MAY use the
+	// operationId to uniquely identify an operation; therefore, it is recommended
+	// to follow common programming naming conventions.
+	OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	// A list of MIME types the operation can consume. This overrides the consumes
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	// A list of MIME types the operation can produce. This overrides the produces
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	// The list of possible responses as they are returned from executing this
+	// operation.
+	Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// The transfer protocol for the operation. Values MUST be from the list:
+	// "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+	// schemes definition.
+	Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	// Declares this operation to be deprecated. Usage of the declared operation
+	// should be avoided. Default value is false.
+	Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+	// A declaration of which security schemes are applied for this operation. The
+	// list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements). This
+	// definition overrides any declared top-level security. To remove a top-level
+	// security declaration, an empty array can be used.
+	Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Custom parameters such as HTTP request headers.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/
+	// and https://swagger.io/specification/v2/#parameter-object.
+	Parameters    *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+	*x = Operation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+	if x != nil {
+		return x.Tags
+	}
+	return nil
+}
+
+func (x *Operation) GetSummary() string {
+	if x != nil {
+		return x.Summary
+	}
+	return ""
+}
+
+func (x *Operation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Operation) GetOperationId() string {
+	if x != nil {
+		return x.OperationId
+	}
+	return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+	if x != nil {
+		return x.Consumes
+	}
+	return nil
+}
+
+func (x *Operation) GetProduces() []string {
+	if x != nil {
+		return x.Produces
+	}
+	return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.Responses
+	}
+	return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+	if x != nil {
+		return x.Schemes
+	}
+	return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+	if x != nil {
+		return x.Deprecated
+	}
+	return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+	if x != nil {
+		return x.Parameters
+	}
+	return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+	x.Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+	x.Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+	x.OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+	x.Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+	x.Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+	x.Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+	x.Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+	x.Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+	x.Security = v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+	x.Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+	if x == nil {
+		return false
+	}
+	return x.Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+	x.Parameters = nil
+}
+
+type Operation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []string
+	// A short summary of what the operation does. For maximum readability in the
+	// swagger-ui, this field SHOULD be less than 120 characters.
+	Summary string
+	// A verbose explanation of the operation behavior. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// Additional external documentation for this operation.
+	ExternalDocs *ExternalDocumentation
+	// Unique string used to identify the operation. The id MUST be unique among
+	// all operations described in the API. Tools and libraries MAY use the
+	// operationId to uniquely identify an operation; therefore, it is recommended
+	// to follow common programming naming conventions.
+	OperationId string
+	// A list of MIME types the operation can consume. This overrides the consumes
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Consumes []string
+	// A list of MIME types the operation can produce. This overrides the produces
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Produces []string
+	// The list of possible responses as they are returned from executing this
+	// operation.
+	Responses map[string]*Response
+	// The transfer protocol for the operation. Values MUST be from the list:
+	// "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+	// schemes definition.
+	Schemes []Scheme
+	// Declares this operation to be deprecated. Usage of the declared operation
+	// should be avoided. Default value is false.
+	Deprecated bool
+	// A declaration of which security schemes are applied for this operation. The
+	// list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements). This
+	// definition overrides any declared top-level security. To remove a top-level
+	// security declaration, an empty array can be used.
+	Security []*SecurityRequirement
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+	// Custom parameters such as HTTP request headers.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/
+	// and https://swagger.io/specification/v2/#parameter-object.
+	Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+	m0 := &Operation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Tags = b.Tags
+	x.Summary = b.Summary
+	x.Description = b.Description
+	x.ExternalDocs = b.ExternalDocs
+	x.OperationId = b.OperationId
+	x.Consumes = b.Consumes
+	x.Produces = b.Produces
+	x.Responses = b.Responses
+	x.Schemes = b.Schemes
+	x.Deprecated = b.Deprecated
+	x.Security = b.Security
+	x.Extensions = b.Extensions
+	x.Parameters = b.Parameters
+	return m0
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure,
+// because only header parameters may be set here; we do not want users specifying
+// custom non-header parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Headers` lists one or more HTTP header parameters.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+	Headers       []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+	*x = Parameters{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+	if x != nil {
+		return x.Headers
+	}
+	return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+	x.Headers = v
+}
+
+type Parameters_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Headers` lists one or more HTTP header parameters.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+	Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+	m0 := &Parameters{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Headers = b.Headers
+	return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Name` is the header name.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// `Description` is a short description of the header.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	// See: https://swagger.io/specification/v2/#parameterType.
+	Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+	// `Format` is the extending format for the previously mentioned type.
+	Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+	// `Required` indicates whether the header is required.
+	Required      bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+	*x = HeaderParameter{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+	if x != nil {
+		return x.Type
+	}
+	return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+	x.Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+	x.Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+	x.Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+	x.Required = v
+}
+
+type HeaderParameter_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Name` is the header name.
+	Name string
+	// `Description` is a short description of the header.
+	Description string
+	// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	// See: https://swagger.io/specification/v2/#parameterType.
+	Type HeaderParameter_Type
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Required` indicates whether the header is required.
+	Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+	m0 := &HeaderParameter{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Description = b.Description
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Required = b.Required
+	return m0
+}
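+
+// Combining the Parameters and HeaderParameter builders declares a custom
+// request header for an operation (illustrative sketch only; assumes the
+// HeaderParameter_Type constants generated elsewhere in this file, e.g.
+// HeaderParameter_STRING):
+//
+//	params := Parameters_builder{
+//		Headers: []*HeaderParameter{
+//			HeaderParameter_builder{
+//				Name:     "X-Request-Id",
+//				Type:     HeaderParameter_STRING,
+//				Required: true,
+//			}.Build(),
+//		},
+//	}.Build()
+//
+// The result can then be attached to an Operation via SetParameters.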
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Description` is a short description of the header.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	// `Format` is the extending format for the previously mentioned type.
+	Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+	// `Default` declares the value of the header that the server will use if none is provided.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+	// Unlike JSON Schema this value MUST conform to the defined type for the header.
+	Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+	// `Pattern`: see https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+	Pattern       string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+	*x = Header{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Header) GetType() string {
+	if x != nil {
+		return x.Type
+	}
+	return ""
+}
+
+func (x *Header) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *Header) GetDefault() string {
+	if x != nil {
+		return x.Default
+	}
+	return ""
+}
+
+func (x *Header) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *Header) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Header) SetType(v string) {
+	x.Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+	x.Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+	x.Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+	x.Pattern = v
+}
+
+type Header_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the header.
+	Description string
+	// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	Type string
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Default` declares the value of the header that the server will use if none is provided.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+	// Unlike JSON Schema this value MUST conform to the defined type for the header.
+	Default string
+	// `Pattern`: see https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+	Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+	m0 := &Header{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Default = b.Default
+	x.Pattern = b.Pattern
+	return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content in the response.
+	Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+	// `Headers` is a list of headers that are sent with the response.
+	// `Header` name is expected to be a string in the canonical format of the MIME header key.
+	// See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+	Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// `Examples` gives per-mimetype response examples.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+	Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+	*x = Response{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+	if x != nil {
+		return x.Schema
+	}
+	return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+	if x != nil {
+		return x.Headers
+	}
+	return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+	if x != nil {
+		return x.Examples
+	}
+	return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Response) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+	x.Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+	x.Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+	x.Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+	x.Schema = nil
+}
+
+type Response_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content in the response.
+	Schema *Schema
+	// `Headers` is a list of headers that are sent with the response.
+	// `Header` name is expected to be a string in the canonical format of the MIME header key.
+	// See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+	Headers map[string]*Header
+	// `Examples` gives per-mimetype response examples.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+	Examples map[string]string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+	m0 := &Response{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Schema = b.Schema
+	x.Headers = b.Headers
+	x.Examples = b.Examples
+	x.Extensions = b.Extensions
+	return m0
+}
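+
+// A Response is assembled the same way; its map-valued fields take ordinary
+// Go maps (illustrative sketch only):
+//
+//	resp := Response_builder{
+//		Description: "OK",
+//		Headers: map[string]*Header{
+//			"X-Request-Id": Header_builder{Type: "string"}.Build(),
+//		},
+//		Examples: map[string]string{
+//			"application/json": `{"id": "1"}`,
+//		},
+//	}.Build()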
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  ...
+//	};
+type Info struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The title of the application.
+	Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	// A short description of the application. GFM syntax can be used for rich
+	// text representation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The Terms of Service for the API.
+	TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	// The contact information for the exposed API.
+	Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+	// The license information for the exposed API.
+	License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+	// Provides the version of the application API (not to be confused
+	// with the specification version).
+	Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+	*x = Info{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *Info) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+	if x != nil {
+		return x.TermsOfService
+	}
+	return ""
+}
+
+func (x *Info) GetContact() *Contact {
+	if x != nil {
+		return x.Contact
+	}
+	return nil
+}
+
+func (x *Info) GetLicense() *License {
+	if x != nil {
+		return x.License
+	}
+	return nil
+}
+
+func (x *Info) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Info) SetTitle(v string) {
+	x.Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+	x.TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+	x.Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+	x.License = v
+}
+
+func (x *Info) SetVersion(v string) {
+	x.Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+	if x == nil {
+		return false
+	}
+	return x.Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+	if x == nil {
+		return false
+	}
+	return x.License != nil
+}
+
+func (x *Info) ClearContact() {
+	x.Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+	x.License = nil
+}
+
+type Info_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The title of the application.
+	Title string
+	// A short description of the application. GFM syntax can be used for rich
+	// text representation.
+	Description string
+	// The Terms of Service for the API.
+	TermsOfService string
+	// The contact information for the exposed API.
+	Contact *Contact
+	// The license information for the exposed API.
+	License *License
+	// Provides the version of the application API (not to be confused
+	// with the specification version).
+	Version string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+	m0 := &Info{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Title = b.Title
+	x.Description = b.Description
+	x.TermsOfService = b.TermsOfService
+	x.Contact = b.Contact
+	x.License = b.License
+	x.Version = b.Version
+	x.Extensions = b.Extensions
+	return m0
+}
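+
+// The nested Contact and License messages compose into Info through the same
+// builder pattern (illustrative sketch only):
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name: "gRPC-Gateway project",
+//			Url:  "https://github.com/grpc-ecosystem/grpc-gateway",
+//		}.Build(),
+//	}.Build()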
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type Contact struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The identifying name of the contact person/organization.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The URL pointing to the contact information. MUST be in the format of a
+	// URL.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	// The email address of the contact person/organization. MUST be in the format
+	// of an email address.
+	Email         string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+	*x = Contact{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Contact) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *Contact) GetEmail() string {
+	if x != nil {
+		return x.Email
+	}
+	return ""
+}
+
+func (x *Contact) SetName(v string) {
+	x.Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+	x.Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+	x.Email = v
+}
+
+type Contact_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The identifying name of the contact person/organization.
+	Name string
+	// The URL pointing to the contact information. MUST be in the format of a
+	// URL.
+	Url string
+	// The email address of the contact person/organization. MUST be in the format
+	// of an email address.
+	Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+	m0 := &Contact{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Url = b.Url
+	x.Email = b.Email
+	return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type License struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The license name used for the API.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A URL to the license used for the API. MUST be in the format of a URL.
+	Url           string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+	*x = License{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *License) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *License) SetName(v string) {
+	x.Name = v
+}
+
+func (x *License) SetUrl(v string) {
+	x.Url = v
+}
+
+type License_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The license name used for the API.
+	Name string
+	// A URL to the license used for the API. MUST be in the format of a URL.
+	Url string
+}
+
+func (b0 License_builder) Build() *License {
+	m0 := &License{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Url = b.Url
+	return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  ...
+//	  external_docs: {
+//	    description: "More about gRPC-Gateway";
+//	    url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	  }
+//	  ...
+//	};
+type ExternalDocumentation struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A short description of the target documentation. GFM syntax can be used for
+	// rich text representation.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The URL for the target documentation. Value MUST be in the format
+	// of a URL.
+	Url           string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+	*x = ExternalDocumentation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+	if x != nil {
+		return x.Url
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+	x.Url = v
+}
+
+type ExternalDocumentation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the target documentation. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// The URL for the target documentation. Value MUST be in the format
+	// of a URL.
+	Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+	m0 := &ExternalDocumentation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Url = b.Url
+	return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+	state      protoimpl.MessageState `protogen:"hybrid.v1"`
+	JsonSchema *JSONSchema            `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+	// Adds support for polymorphism. The discriminator is the schema property
+	// name that is used to differentiate between other schemas that inherit this
+	// schema. The property name used MUST be defined at this schema and it MUST
+	// be in the required property list. When used, the value MUST be the name of
+	// this schema or any schema that inherits it.
+	Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+	// Relevant only for Schema "properties" definitions. Declares the property as
+	// "read only". This means that it MAY be sent as part of a response but MUST
+	// NOT be sent as part of the request. Properties marked as readOnly being
+	// true SHOULD NOT be in the required list of the defined schema. Default
+	// value is false.
+	ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// A free-form property to include an example of an instance for this schema in JSON.
+	// This is copied verbatim to the output.
+	Example       string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+	*x = Schema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+	if x != nil {
+		return x.JsonSchema
+	}
+	return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+	if x != nil {
+		return x.Discriminator
+	}
+	return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Schema) GetExample() string {
+	if x != nil {
+		return x.Example
+	}
+	return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+	x.JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+	x.Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+	x.ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+	x.Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+	x.JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type Schema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	JsonSchema *JSONSchema
+	// Adds support for polymorphism. The discriminator is the schema property
+	// name that is used to differentiate between other schemas that inherit this
+	// schema. The property name used MUST be defined at this schema and it MUST
+	// be in the required property list. When used, the value MUST be the name of
+	// this schema or any schema that inherits it.
+	Discriminator string
+	// Relevant only for Schema "properties" definitions. Declares the property as
+	// "read only". This means that it MAY be sent as part of a response but MUST
+	// NOT be sent as part of the request. Properties marked as readOnly being
+	// true SHOULD NOT be in the required list of the defined schema. Default
+	// value is false.
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	// A free-form property to include an example of an instance for this schema in JSON.
+	// This is copied verbatim to the output.
+	Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+	m0 := &Schema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.JsonSchema = b.JsonSchema
+	x.Discriminator = b.Discriminator
+	x.ReadOnly = b.ReadOnly
+	x.ExternalDocs = b.ExternalDocs
+	x.Example = b.Example
+	return m0
+}
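+
+// A Schema usually wraps a JSONSchema; the two builders nest directly
+// (illustrative sketch only; assumes the JSONSchema_builder generated for
+// the JSONSchema message below):
+//
+//	schema := Schema_builder{
+//		ReadOnly: true,
+//		JsonSchema: JSONSchema_builder{
+//			Title:    "SimpleMessage",
+//			Required: []string{"id"},
+//		}.Build(),
+//	}.Build()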
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+//	  ...
+//	  title: "MyEnum";
+//	  description:"This is my nice enum";
+//	  example: "ZERO";
+//	  required: true;
+//	  ...
+//	};
+type EnumSchema struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A short description of the schema.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	Default     string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+	// The title of the schema.
+	Title    string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+	Required bool   `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+	ReadOnly bool   `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	Example      string                 `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+	*x = EnumSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+	if x != nil {
+		return x.Default
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+	if x != nil {
+		return x.Required
+	}
+	return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+	if x != nil {
+		return x.Example
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+	x.Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+	x.Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+	x.Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+	x.ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+	x.Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+	x.Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the schema.
+	Description string
+	Default     string
+	// The title of the schema.
+	Title    string
+	Required bool
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	Example      string
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+	m0 := &EnumSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Description = b.Description
+	x.Default = b.Default
+	x.Title = b.Title
+	x.Required = b.Required
+	x.ReadOnly = b.ReadOnly
+	x.ExternalDocs = b.ExternalDocs
+	x.Example = b.Example
+	x.Ref = b.Ref
+	x.Extensions = b.Extensions
+	return m0
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//	message SimpleMessage {
+//	  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//	    json_schema: {
+//	      title: "SimpleMessage"
+//	      description: "A simple message."
+//	      required: ["id"]
+//	    }
+//	  };
+//
+//	  // Id represents the message identifier.
+//	  string id = 1 [
+//	      (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//	        description: "The unique identifier of the simple message."
+//	      }];
+//	}
+type JSONSchema struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	// The title of the schema.
+	Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+	// A short description of the schema.
+	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	Default     string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+	ReadOnly    bool   `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0. See:
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject and
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+	Example    string  `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+	Maximum          float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	ExclusiveMaximum bool    `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+	Minimum          float64  `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	ExclusiveMinimum bool     `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	MaxLength        uint64   `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	MinLength        uint64   `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	Pattern          string   `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	MaxItems         uint64   `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	MinItems         uint64   `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	UniqueItems      bool     `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	MaxProperties    uint64   `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	MinProperties    uint64   `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	Required         []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	// Items in 'array' must be unique.
+	Array []string                           `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+	Type  []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	// `Format` of the schema instance, such as `int64` or `date-time`.
+	Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+	// Additional field-level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+	*x = JSONSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+	if x != nil {
+		return x.Ref
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+	if x != nil {
+		return x.Title
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+	if x != nil {
+		return x.Default
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.ReadOnly
+	}
+	return false
+}
+
+func (x *JSONSchema) GetExample() string {
+	if x != nil {
+		return x.Example
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.MultipleOf
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.Maximum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.Minimum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+	if x != nil {
+		return x.MaxLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+	if x != nil {
+		return x.MinLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+	if x != nil {
+		return x.Pattern
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+	if x != nil {
+		return x.MaxItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+	if x != nil {
+		return x.MinItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.UniqueItems
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+	if x != nil {
+		return x.MaxProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+	if x != nil {
+		return x.MinProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+	if x != nil {
+		return x.Required
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+	if x != nil {
+		return x.Array
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+	if x != nil {
+		return x.Type
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+	if x != nil {
+		return x.Format
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+	if x != nil {
+		return x.Enum
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+	if x != nil {
+		return x.FieldConfiguration
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
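+// All of the getters above guard against a nil receiver, so optional data can
+// be read without intermediate nil checks. A minimal illustrative sketch (not
+// part of the generated API surface):
+//
+//	var s *JSONSchema
+//	_ = s.GetTitle()    // "" rather than a panic
+//	_ = s.GetRequired() // nil
+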
+func (x *JSONSchema) SetRef(v string) {
+	x.Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+	x.Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+	x.Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+	x.ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+	x.Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+	x.MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+	x.Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+	x.ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+	x.Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+	x.ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+	x.MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+	x.MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+	x.Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+	x.MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+	x.MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+	x.UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+	x.MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+	x.MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+	x.Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+	x.Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+	x.Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+	x.Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+	x.Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+	x.FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+	if x == nil {
+		return false
+	}
+	return x.FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+	x.FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// The title of the schema.
+	Title string
+	// A short description of the schema.
+	Description string
+	Default     string
+	ReadOnly    bool
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0:
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+	Example    string
+	MultipleOf float64
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+	Maximum          float64
+	ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+	Minimum          float64
+	ExclusiveMinimum bool
+	MaxLength        uint64
+	MinLength        uint64
+	Pattern          string
+	MaxItems         uint64
+	MinItems         uint64
+	UniqueItems      bool
+	MaxProperties    uint64
+	MinProperties    uint64
+	Required         []string
+	// Items in 'array' must be unique.
+	Array []string
+	Type  []JSONSchema_JSONSchemaSimpleTypes
+	// `Format` of the schema instance, such as `int64` or `date-time`.
+	Format string
+	// Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string
+	// Additional field-level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration
+	// Custom properties that start with "x-" such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+	m0 := &JSONSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Ref = b.Ref
+	x.Title = b.Title
+	x.Description = b.Description
+	x.Default = b.Default
+	x.ReadOnly = b.ReadOnly
+	x.Example = b.Example
+	x.MultipleOf = b.MultipleOf
+	x.Maximum = b.Maximum
+	x.ExclusiveMaximum = b.ExclusiveMaximum
+	x.Minimum = b.Minimum
+	x.ExclusiveMinimum = b.ExclusiveMinimum
+	x.MaxLength = b.MaxLength
+	x.MinLength = b.MinLength
+	x.Pattern = b.Pattern
+	x.MaxItems = b.MaxItems
+	x.MinItems = b.MinItems
+	x.UniqueItems = b.UniqueItems
+	x.MaxProperties = b.MaxProperties
+	x.MinProperties = b.MinProperties
+	x.Required = b.Required
+	x.Array = b.Array
+	x.Type = b.Type
+	x.Format = b.Format
+	x.Enum = b.Enum
+	x.FieldConfiguration = b.FieldConfiguration
+	x.Extensions = b.Extensions
+	return m0
+}
+
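+// A minimal usage sketch for JSONSchema_builder (field values are
+// illustrative): the builder mirrors the message fields, keyed literals are
+// required, and Build produces the populated message.
+//
+//	schema := JSONSchema_builder{
+//		Title:       "SimpleMessage",
+//		Description: "A simple message.",
+//		Required:    []string{"id"},
+//	}.Build()
+//	_ = schema.GetTitle() // "SimpleMessage"
+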
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The name of the tag. Use it to override the name of a global Tag
+	// object, then use that name to reference the tag throughout the
+	// OpenAPI file.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+	*x = Tag{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *Tag) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.ExternalDocs
+	}
+	return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *Tag) SetName(v string) {
+	x.Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+	x.ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+	x.ExternalDocs = nil
+}
+
+type Tag_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The name of the tag. Use it to override the name of a global Tag
+	// object, then use that name to reference the tag throughout the
+	// OpenAPI file.
+	Name string
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+	m0 := &Tag{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Name = b.Name
+	x.Description = b.Description
+	x.ExternalDocs = b.ExternalDocs
+	x.Extensions = b.Extensions
+	return m0
+}
+
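+// A minimal sketch of assembling a Tag via the builder above (values are
+// illustrative):
+//
+//	tag := Tag_builder{
+//		Name:        "echo",
+//		Description: "Operations exposed by the Echo service.",
+//	}.Build()
+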
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// A single security scheme definition, mapping a "name" to the scheme it
+	// defines.
+	Security      map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+	*x = SecurityDefinitions{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+	if x != nil {
+		return x.Security
+	}
+	return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+	x.Security = v
+}
+
+type SecurityDefinitions_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A single security scheme definition, mapping a "name" to the scheme it
+	// defines.
+	Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+	m0 := &SecurityDefinitions{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Security = b.Security
+	return m0
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+	// A short description for the security scheme.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The name of the header or query parameter to be used.
+	// Valid for apiKey.
+	Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	// The location of the API key. Valid values are "query" or
+	// "header".
+	// Valid for apiKey.
+	In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+	// The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+	// The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	// The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	// The available scopes for the OAuth2 security scheme.
+	// Valid for oauth2.
+	Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	// Custom properties that start with "x-" such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions    map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+	*x = SecurityScheme{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+	if x != nil {
+		return x.Type
+	}
+	return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+	if x != nil {
+		return x.Description
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+	if x != nil {
+		return x.In
+	}
+	return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+	if x != nil {
+		return x.Flow
+	}
+	return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+	if x != nil {
+		return x.AuthorizationUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+	if x != nil {
+		return x.TokenUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+	if x != nil {
+		return x.Scopes
+	}
+	return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.Extensions
+	}
+	return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+	x.Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+	x.Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+	x.Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+	x.In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+	x.Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+	x.AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+	x.TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+	x.Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+	x.Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+	if x == nil {
+		return false
+	}
+	return x.Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+	x.Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type
+	// A short description for the security scheme.
+	Description string
+	// The name of the header or query parameter to be used.
+	// Valid for apiKey.
+	Name string
+	// The location of the API key. Valid values are "query" or
+	// "header".
+	// Valid for apiKey.
+	In SecurityScheme_In
+	// The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow
+	// The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string
+	// The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string
+	// The available scopes for the OAuth2 security scheme.
+	// Valid for oauth2.
+	Scopes *Scopes
+	// Custom properties that start with "x-" such as "x-foo", used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+	m0 := &SecurityScheme{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Type = b.Type
+	x.Description = b.Description
+	x.Name = b.Name
+	x.In = b.In
+	x.Flow = b.Flow
+	x.AuthorizationUrl = b.AuthorizationUrl
+	x.TokenUrl = b.TokenUrl
+	x.Scopes = b.Scopes
+	x.Extensions = b.Extensions
+	return m0
+}
+
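+// A minimal sketch wiring a SecurityScheme into SecurityDefinitions via the
+// builders above; the API-key enum constants are assumed from this file and
+// the names are illustrative:
+//
+//	scheme := SecurityScheme_builder{
+//		Type: SecurityScheme_TYPE_API_KEY,
+//		In:   SecurityScheme_IN_HEADER,
+//		Name: "X-API-Key",
+//	}.Build()
+//	defs := SecurityDefinitions_builder{
+//		Security: map[string]*SecurityScheme{"ApiKeyAuth": scheme},
+//	}.Build()
+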
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+	if x != nil {
+		return x.SecurityRequirement
+	}
+	return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+	x.SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+	m0 := &SecurityRequirement{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.SecurityRequirement = b.SecurityRequirement
+	return m0
+}
+
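+// A minimal sketch of requiring a scheme by name (the name "ApiKeyAuth" is
+// illustrative; non-OAuth2 schemes take an empty scope list):
+//
+//	req := SecurityRequirement_builder{
+//		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+//			"ApiKeyAuth": SecurityRequirement_SecurityRequirementValue_builder{}.Build(),
+//		},
+//	}.Build()
+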
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+	Scope         map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+	*x = Scopes{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+	x.Scope = v
+}
+
+type Scopes_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+	Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+	m0 := &Scopes{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Scope = b.Scope
+	return m0
+}
+
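+// A minimal sketch of declaring OAuth2 scopes via the builder above (values
+// are illustrative):
+//
+//	scopes := Scopes_builder{
+//		Scope: map[string]string{"read": "Grants read access."},
+//	}.Build()
+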
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPI v2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+	state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Alternative parameter name when used as path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto-generated path parameter names
+	// for overlapping paths.
+	PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+	*x = JSONSchema_FieldConfiguration{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+	if x != nil {
+		return x.PathParamName
+	}
+	return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+	x.PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto-generated path parameter names
+	// for overlapping paths.
+	PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+	m0 := &JSONSchema_FieldConfiguration{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.PathParamName = b.PathParamName
+	return m0
+}
+
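+// A minimal sketch of overriding a generated path parameter name via the
+// builder above (the name "messageId" is illustrative):
+//
+//	fc := JSONSchema_FieldConfiguration_builder{
+//		PathParamName: "messageId",
+//	}.Build()
+//	schema := JSONSchema_builder{FieldConfiguration: fc}.Build()
+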
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+	state         protoimpl.MessageState `protogen:"hybrid.v1"`
+	Scope         []string               `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+	*x = SecurityRequirement_SecurityRequirementValue{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+	if x != nil {
+		return x.Scope
+	}
+	return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+	x.Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+	m0 := &SecurityRequirement_SecurityRequirementValue{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.Scope = b.Scope
+	return m0
+}
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+	0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+	0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+	0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+	0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+	0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+	0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+	0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+	0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+	0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+	0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+	0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+	0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+	0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+	0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+	0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+	0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+	0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+	0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+	0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+	0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+	0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+	0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+	0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+	0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+	0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+	0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+	0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+	0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+	0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+	0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+	0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+	0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+	0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+	0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+	0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+	0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+	0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+	0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+	0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+	0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+	0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+	0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+	0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+	0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+	0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+	0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+	0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+	0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+	0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+	0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+	0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+	0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+	0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+	0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+	0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+	0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+	0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+	0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+	0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+	0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+	0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+	0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+	0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+	0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+	0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+	0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+	0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+	0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+	0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+	0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+	0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+	0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+	0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+	0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+	0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+	0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+	0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+	0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+	0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+	0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+	0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+	0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+	0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+	0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+	0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+	0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+	0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+	0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+	0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+	0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+	0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+	0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+	0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+	0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+	0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+	0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+	0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+	0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+	0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+	0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+	0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+	0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+	0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+	0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+	0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+	0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+	0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+	0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+	0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+	0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+	0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+	0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+	0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+	0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+	0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+	0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+	0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+	0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+	0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+	0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+	0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+	0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+	0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+	0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+	0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+	0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+	0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+	0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+	0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+	0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+	0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+	0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+	0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+	0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+	0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+	0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+	0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+	0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+	0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+	0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+	(Scheme)(0),                           // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	(HeaderParameter_Type)(0),             // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	(JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	(SecurityScheme_Type)(0),              // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	(SecurityScheme_In)(0),                // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	(SecurityScheme_Flow)(0),              // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	(*Swagger)(nil),                       // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                     // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Parameters)(nil),                    // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	(*HeaderParameter)(nil),               // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	(*Header)(nil),                        // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+	(*Response)(nil),                      // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+	(*Info)(nil),                          // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+	(*Contact)(nil),                       // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+	(*License)(nil),                       // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+	(*ExternalDocumentation)(nil),         // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	(*Schema)(nil),                        // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                    // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*JSONSchema)(nil),                    // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	(*Tag)(nil),                           // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*SecurityDefinitions)(nil),           // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	(*SecurityScheme)(nil),                // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	(*SecurityRequirement)(nil),           // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	(*Scopes)(nil),                        // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	nil,                                   // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	nil,                                   // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	nil,                                   // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	nil,                                   // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	nil,                                   // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	nil,                                   // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	nil,                                   // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	nil,                                   // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	nil,                                   // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	(*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	nil,                                   // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	nil,                                   // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	nil,                                   // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	nil,                                   // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	(*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	nil,                    // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	nil,                    // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	(*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+	12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+	0,  // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	0,  // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	8,  // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	9,  // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	1,  // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+	14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+	31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	2,  // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	3,  // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	4,  // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	5,  // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+	41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	53, // [53:53] is the sub-list for method output_type
+	53, // [53:53] is the sub-list for method input_type
+	53, // [53:53] is the sub-list for extension type_name
+	53, // [53:53] is the sub-list for extension extendee
+	0,  // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+	if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+			NumEnums:      6,
+			NumMessages:   35,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+		EnumInfos:         file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+		MessageInfos:      file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+	file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
new file mode 100644
index 0000000..5313f08
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
@@ -0,0 +1,759 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/struct.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+enum Scheme {
+  UNKNOWN = 0;
+  HTTP = 1;
+  HTTPS = 2;
+  WS = 3;
+  WSS = 4;
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    schemes: HTTPS;
+//    consumes: "application/json";
+//    produces: "application/json";
+//  };
+//
+message Swagger {
+  // Specifies the OpenAPI Specification version being used. It can be
+  // used by the OpenAPI UI and other clients to interpret the API listing. The
+  // value MUST be "2.0".
+  string swagger = 1;
+  // Provides metadata about the API. The metadata can be used by the
+  // clients if needed.
+  Info info = 2;
+  // The host (name or IP) serving the API. This MUST be the host only and does
+  // not include the scheme nor sub-paths. It MAY include a port. If the host is
+  // not included, the host serving the documentation is to be used (including
+  // the port). The host does not support path templating.
+  string host = 3;
+  // The base path on which the API is served, which is relative to the host. If
+  // it is not included, the API is served directly under the host. The value
+  // MUST start with a leading slash (/). The basePath does not support path
+  // templating.
+  // Note that using `base_path` does not change the endpoint paths that are
+  // generated in the resulting OpenAPI file. If you wish to use `base_path`
+  // with relatively generated OpenAPI paths, the `base_path` prefix must be
+  // manually removed from your `google.api.http` paths and your code changed to
+  // serve the API from the `base_path`.
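+  //
+  // A minimal sketch of the intended usage (the host and path values are
+  // hypothetical):
+  //
+  //  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+  //    host: "example.com";
+  //    base_path: "/api";
+  //  };
+  //
+  // This documents the API under https://example.com/api while the generated
+  // operation paths themselves remain unprefixed, per the note above.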
+  string base_path = 4;
+  // The transfer protocol of the API. Values MUST be from the list: "http",
+  // "https", "ws", "wss". If the schemes is not included, the default scheme to
+  // be used is the one used to access the OpenAPI definition itself.
+  repeated Scheme schemes = 5;
+  // A list of MIME types the APIs can consume. This is global to all APIs but
+  // can be overridden on specific API calls. Value MUST be as described under
+  // Mime Types.
+  repeated string consumes = 6;
+  // A list of MIME types the APIs can produce. This is global to all APIs but
+  // can be overridden on specific API calls. Value MUST be as described under
+  // Mime Types.
+  repeated string produces = 7;
+  // field 8 is reserved for 'paths'.
+  reserved 8;
+  // field 9 is reserved for 'definitions', which at this time are already
+  // exposed as proto messages and customizable as such.
+  reserved 9;
+  // An object to hold responses that can be used across operations. This
+  // property does not define global responses for all operations.
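+  //
+  // A minimal sketch of a reusable response entry (the status code and
+  // description are hypothetical):
+  //
+  //  responses: {
+  //    key: "404"
+  //    value: {
+  //      description: "Returned when the resource does not exist.";
+  //    }
+  //  }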
+  map<string, Response> responses = 10;
+  // Security scheme definitions that can be used across the specification.
+  SecurityDefinitions security_definitions = 11;
+  // A declaration of which security schemes are applied for the API as a whole.
+  // The list of values describes alternative security schemes that can be used
+  // (that is, there is a logical OR between the security requirements).
+  // Individual operations can override this definition.
+  repeated SecurityRequirement security = 12;
+  // A list of tags for API documentation control. Tags can be used for logical
+  // grouping of operations by resources or any other qualifier.
+  repeated Tag tags = 13;
+  // Additional external documentation.
+  ExternalDocumentation external_docs = 14;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 15;
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//  service EchoService {
+//    rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//      option (google.api.http) = {
+//        get: "/v1/example/echo/{id}"
+//      };
+//
+//      option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//        summary: "Get a message.";
+//        operation_id: "getMessage";
+//        tags: "echo";
+//        responses: {
+//          key: "200"
+//          value: {
+//            description: "OK";
+//          }
+//        }
+//      };
+//    }
+//  }
+message Operation {
+  // A list of tags for API documentation control. Tags can be used for logical
+  // grouping of operations by resources or any other qualifier.
+  repeated string tags = 1;
+  // A short summary of what the operation does. For maximum readability in the
+  // swagger-ui, this field SHOULD be less than 120 characters.
+  string summary = 2;
+  // A verbose explanation of the operation behavior. GFM syntax can be used for
+  // rich text representation.
+  string description = 3;
+  // Additional external documentation for this operation.
+  ExternalDocumentation external_docs = 4;
+  // Unique string used to identify the operation. The id MUST be unique among
+  // all operations described in the API. Tools and libraries MAY use the
+  // operationId to uniquely identify an operation, therefore, it is recommended
+  // to follow common programming naming conventions.
+  string operation_id = 5;
+  // A list of MIME types the operation can consume. This overrides the consumes
+  // definition at the OpenAPI Object. An empty value MAY be used to clear the
+  // global definition. Value MUST be as described under Mime Types.
+  repeated string consumes = 6;
+  // A list of MIME types the operation can produce. This overrides the produces
+  // definition at the OpenAPI Object. An empty value MAY be used to clear the
+  // global definition. Value MUST be as described under Mime Types.
+  repeated string produces = 7;
+  // field 8 is reserved for 'parameters'.
+  reserved 8;
+  // The list of possible responses as they are returned from executing this
+  // operation.
+  map<string, Response> responses = 9;
+  // The transfer protocol for the operation. Values MUST be from the list:
+  // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+  // schemes definition.
+  repeated Scheme schemes = 10;
+  // Declares this operation to be deprecated. Usage of the declared operation
+  // should be avoided. Default value is false.
+  bool deprecated = 11;
+  // A declaration of which security schemes are applied for this operation. The
+  // list of values describes alternative security schemes that can be used
+  // (that is, there is a logical OR between the security requirements). This
+  // definition overrides any declared top-level security. To remove a top-level
+  // security declaration, an empty array can be used.
+  repeated SecurityRequirement security = 12;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 13;
+  // Custom parameters such as HTTP request headers.
+  // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+  // and https://swagger.io/specification/v2/#parameter-object.
+  Parameters parameters = 14;
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure, as only
+// header parameters may be set here; we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
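+//
+// Example (an illustrative sketch; the header name and description below are
+// hypothetical):
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//    parameters: {
+//      headers: {
+//        name: "X-Request-Id";
+//        description: "Optional request correlation id.";
+//        type: STRING;
+//        required: false;
+//      };
+//    };
+//  };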
+message Parameters {
+  // `Headers` is one or more HTTP header parameters.
+  // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+  repeated HeaderParameter headers = 1;
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+message HeaderParameter {
+  // `Type` is a supported HTTP header type.
+  // See https://swagger.io/specification/v2/#parameterType.
+  enum Type {
+    UNKNOWN = 0;
+    STRING = 1;
+    NUMBER = 2;
+    INTEGER = 3;
+    BOOLEAN = 4;
+  }
+
+  // `Name` is the header name.
+  string name = 1;
+  // `Description` is a short description of the header.
+  string description = 2;
+  // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+  // See: https://swagger.io/specification/v2/#parameterType.
+  Type type = 3;
+  // `Format` is the extending format for the previously mentioned type.
+  string format = 4;
+  // `Required` indicates whether the header is required.
+  bool required = 5;
+  // field 6 is reserved for 'items', but in an OpenAPI-specific way.
+  reserved 6;
+  // field 7 is reserved for `collectionFormat`, which determines the format of the array if type array is used.
+  reserved 7;
+}
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+//
+message Header {
+  // `Description` is a short description of the header.
+  string description = 1;
+  // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+  string type = 2;
+  // `Format` is the extending format for the previously mentioned type.
+  string format = 3;
+  // field 4 is reserved for 'items', but in an OpenAPI-specific way.
+  reserved 4;
+  // field 5 is reserved for `collectionFormat`, which determines the format of the array if type array is used.
+  reserved 5;
+  // `Default` declares the value of the header that the server will use if none is provided.
+  // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+  // Unlike JSON Schema this value MUST conform to the defined type for the header.
+  string default = 6;
+  // field 7 is reserved for 'maximum'.
+  reserved 7;
+  // field 8 is reserved for 'exclusiveMaximum'.
+  reserved 8;
+  // field 9 is reserved for 'minimum'.
+  reserved 9;
+  // field 10 is reserved for 'exclusiveMinimum'.
+  reserved 10;
+  // field 11 is reserved for 'maxLength'.
+  reserved 11;
+  // field 12 is reserved for 'minLength'.
+  reserved 12;
+  // `Pattern`: see https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+  string pattern = 13;
+  // field 14 is reserved for 'maxItems'.
+  reserved 14;
+  // field 15 is reserved for 'minItems'.
+  reserved 15;
+  // field 16 is reserved for 'uniqueItems'.
+  reserved 16;
+  // field 17 is reserved for 'enum'.
+  reserved 17;
+  // field 18 is reserved for 'multipleOf'.
+  reserved 18;
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
+message Response {
+  // `Description` is a short description of the response.
+  // GFM syntax can be used for rich text representation.
+  string description = 1;
+  // `Schema` optionally defines the structure of the response.
+  // If `Schema` is not provided, it means there is no content to the response.
+  Schema schema = 2;
+  // `Headers` is a list of headers that are sent with the response.
+  // `Header` name is expected to be a string in the canonical format of the MIME header key.
+  // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+  map<string, Header> headers = 3;
+  // `Examples` gives per-mimetype response examples.
+  // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
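+  //
+  // A minimal sketch (the JSON payload is hypothetical):
+  //
+  //  examples: {
+  //    key: "application/json"
+  //    value: '{"id": "1", "value": "echo"}'
+  //  }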
+  map<string, string> examples = 4;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 5;
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    ...
+//  };
+//
+message Info {
+  // The title of the application.
+  string title = 1;
+  // A short description of the application. GFM syntax can be used for rich
+  // text representation.
+  string description = 2;
+  // The Terms of Service for the API.
+  string terms_of_service = 3;
+  // The contact information for the exposed API.
+  Contact contact = 4;
+  // The license information for the exposed API.
+  License license = 5;
+  // Provides the version of the application API (not to be confused
+  // with the specification version).
+  string version = 6;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      ...
+//    };
+//    ...
+//  };
+//
+message Contact {
+  // The identifying name of the contact person/organization.
+  string name = 1;
+  // The URL pointing to the contact information. MUST be in the format of a
+  // URL.
+  string url = 2;
+  // The email address of the contact person/organization. MUST be in the format
+  // of an email address.
+  string email = 3;
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//      ...
+//    };
+//    ...
+//  };
+//
+message License {
+  // The license name used for the API.
+  string name = 1;
+  // A URL to the license used for the API. MUST be in the format of a URL.
+  string url = 2;
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    ...
+//    external_docs: {
+//      description: "More about gRPC-Gateway";
+//      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//    }
+//    ...
+//  };
+//
+message ExternalDocumentation {
+  // A short description of the target documentation. GFM syntax can be used for
+  // rich text representation.
+  string description = 1;
+  // The URL for the target documentation. Value MUST be in the format
+  // of a URL.
+  string url = 2;
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+message Schema {
+  JSONSchema json_schema = 1;
+  // Adds support for polymorphism. The discriminator is the schema property
+  // name that is used to differentiate between other schema that inherit this
+  // schema. The property name used MUST be defined at this schema and it MUST
+  // be in the required property list. When used, the value MUST be the name of
+  // this schema or any schema that inherits it.
+  string discriminator = 2;
+  // Relevant only for Schema "properties" definitions. Declares the property as
+  // "read only". This means that it MAY be sent as part of a response but MUST
+  // NOT be sent as part of the request. Properties marked as readOnly being
+  // true SHOULD NOT be in the required list of the defined schema. Default
+  // value is false.
+  bool read_only = 3;
+  // field 4 is reserved for 'xml'.
+  reserved 4;
+  // Additional external documentation for this schema.
+  ExternalDocumentation external_docs = 5;
+  // A free-form property to include an example of an instance for this schema in JSON.
+  // This is copied verbatim to the output.
+  string example = 6;
+}
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+//    ...
+//    title: "MyEnum";
+//    description:"This is my nice enum";
+//    example: "ZERO";
+//    required: true;
+//    ...
+//  };
+//
+message EnumSchema {
+  // A short description of the schema.
+  string description = 1;
+  string default = 2;
+  // The title of the schema.
+  string title = 3;
+  bool required = 4;
+  bool read_only = 5;
+  // Additional external documentation for this schema.
+  ExternalDocumentation external_docs = 6;
+  string example = 7;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must
+  // be imported into the protofile. If no message is identified, the Ref will
+  // be used verbatim in the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 8;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in the
+// OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//  message SimpleMessage {
+//    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//      json_schema: {
+//        title: "SimpleMessage"
+//        description: "A simple message."
+//        required: ["id"]
+//      }
+//    };
+//
+//    // Id represents the message identifier.
+//    string id = 1 [
+//        (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//          description: "The unique identifier of the simple message."
+//        }];
+//  }
+//
+message JSONSchema {
+  // field 1 is reserved for '$id', omitted from OpenAPI v2.
+  reserved 1;
+  // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+  reserved 2;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must
+  // be imported into the protofile. If no message is identified, the Ref will
+  // be used verbatim in the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 3;
+  // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+  reserved 4;
+  // The title of the schema.
+  string title = 5;
+  // A short description of the schema.
+  string description = 6;
+  string default = 7;
+  bool read_only = 8;
+  // A free-form property to include a JSON example of this field. This is copied
+  // verbatim to the output swagger.json. Quotes must be escaped.
+  // This property is the same for 2.0 and 3.0.0:
+  // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+  // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+  string example = 9;
+  double multiple_of = 10;
+  // Maximum represents an inclusive upper limit for a numeric instance. The
+  // value MUST be a number.
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  // Minimum represents an inclusive lower limit for a numeric instance. The
+  // value MUST be a number.
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  uint64 max_length = 15;
+  uint64 min_length = 16;
+  string pattern = 17;
+  // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+  reserved 18;
+  // field 19 is reserved for 'items', but in an OpenAPI-specific way.
+  // TODO(ivucica): add 'items'?
+  reserved 19;
+  uint64 max_items = 20;
+  uint64 min_items = 21;
+  bool unique_items = 22;
+  // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+  reserved 23;
+  uint64 max_properties = 24;
+  uint64 min_properties = 25;
+  repeated string required = 26;
+  // field 27 is reserved for 'additionalProperties', but in an OpenAPI-specific
+  // way. TODO(ivucica): add 'additionalProperties'?
+  reserved 27;
+  // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+  reserved 28;
+  // field 29 is reserved for 'properties', but in an OpenAPI-specific way.
+  // TODO(ivucica): add 'properties'?
+  reserved 29;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // patternProperties, dependencies, propertyNames, const
+  reserved 30 to 33;
+  // Items in 'array' must be unique.
+  repeated string array = 34;
+
+  enum JSONSchemaSimpleTypes {
+    UNKNOWN = 0;
+    ARRAY = 1;
+    BOOLEAN = 2;
+    INTEGER = 3;
+    NULL = 4;
+    NUMBER = 5;
+    OBJECT = 6;
+    STRING = 7;
+  }
+
+  repeated JSONSchemaSimpleTypes type = 35;
+  // `Format` is the extending format for the previously mentioned type.
+  string format = 36;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2: contentMediaType, contentEncoding, if, then, else
+  reserved 37 to 41;
+  // field 42 is reserved for 'allOf', but in an OpenAPI-specific way.
+  // TODO(ivucica): add 'allOf'?
+  reserved 42;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // anyOf, oneOf, not
+  reserved 43 to 45;
+  // Items in `enum` must be unique.
+  // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+  repeated string enum = 46;
+
+  // Additional field level properties used when generating the OpenAPI v2 file.
+  FieldConfiguration field_configuration = 1001;
+
+  // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+  // These properties are not defined by OpenAPIv2, but they are used to control the generation.
+  message FieldConfiguration {
+    // Alternative parameter name when used as path parameter. If set, this will
+    // be used as the complete parameter name when this field is used as a path
+    // parameter. Use this to avoid having auto generated path parameter names
+    // for overlapping paths.
+    string path_param_name = 47;
+  }
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 48;
+}
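+
+// For example, `FieldConfiguration` can rename an auto-generated path
+// parameter (an editorial sketch; the field and parameter names are
+// hypothetical):
+//
+//  string book_id = 1 [(grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//    field_configuration: {path_param_name: "bookId"}
+//  }];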
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+message Tag {
+  // The name of the tag. Use it to allow override of the name of a
+  // global Tag object, then use that name to reference the tag throughout the
+  // OpenAPI file.
+  string name = 1;
+  // A short description for the tag. GFM syntax can be used for rich text
+  // representation.
+  string description = 2;
+  // Additional external documentation for this tag.
+  ExternalDocumentation external_docs = 3;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 4;
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+  // A single security scheme definition, mapping a "name" to the scheme it
+  // defines.
+  map<string, SecurityScheme> security = 1;
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+message SecurityScheme {
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  enum Type {
+    TYPE_INVALID = 0;
+    TYPE_BASIC = 1;
+    TYPE_API_KEY = 2;
+    TYPE_OAUTH2 = 3;
+  }
+
+  // The location of the API key. Valid values are "query" or "header".
+  enum In {
+    IN_INVALID = 0;
+    IN_QUERY = 1;
+    IN_HEADER = 2;
+  }
+
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  enum Flow {
+    FLOW_INVALID = 0;
+    FLOW_IMPLICIT = 1;
+    FLOW_PASSWORD = 2;
+    FLOW_APPLICATION = 3;
+    FLOW_ACCESS_CODE = 4;
+  }
+
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  Type type = 1;
+  // A short description for the security scheme.
+  string description = 2;
+  // The name of the header or query parameter to be used.
+  // Valid for apiKey.
+  string name = 3;
+  // The location of the API key. Valid values are "query" or
+  // "header".
+  // Valid for apiKey.
+  In in = 4;
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  // Valid for oauth2.
+  Flow flow = 5;
+  // The authorization URL to be used for this flow. This SHOULD be in
+  // the form of a URL.
+  // Valid for oauth2/implicit and oauth2/accessCode.
+  string authorization_url = 6;
+  // The token URL to be used for this flow. This SHOULD be in the
+  // form of a URL.
+  // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+  string token_url = 7;
+  // The available scopes for the OAuth2 security scheme.
+  // Valid for oauth2.
+  Scopes scopes = 8;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
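+
+// For example, an API-key scheme can be declared like this (an editorial
+// sketch; the scheme and header names are hypothetical):
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    security_definitions: {
+//      security: {
+//        key: "ApiKeyAuth";
+//        value: {
+//          type: TYPE_API_KEY;
+//          in: IN_HEADER;
+//          name: "X-API-Key";
+//        }
+//      }
+//    }
+//  };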
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+message SecurityRequirement {
+  // If the security scheme is of type "oauth2", then the value is a list of
+  // scope names required for the execution. For other security scheme types,
+  // the array MUST be empty.
+  message SecurityRequirementValue {
+    repeated string scope = 1;
+  }
+  // Each name must correspond to a security scheme which is declared in
+  // the Security Definitions. If the security scheme is of type "oauth2",
+  // then the value is a list of scope names required for the execution.
+  // For other security scheme types, the array MUST be empty.
+  map<string, SecurityRequirementValue> security_requirement = 1;
+}
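+
+// For example, requiring both an API key and an OAuth2 scope (logical AND) on
+// the whole API (an editorial sketch; the scheme names are hypothetical and
+// must match entries declared in `SecurityDefinitions`):
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    security: {
+//      security_requirement: {
+//        key: "ApiKeyAuth";
+//        value: {};
+//      }
+//      security_requirement: {
+//        key: "OAuth2";
+//        value: {scope: "read"; scope: "write";}
+//      }
+//    }
+//  };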
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+message Scopes {
+  // Maps between a name of a scope to a short description of it (as the value
+  // of the property).
+  map<string, string> scope = 1;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
new file mode 100644
index 0000000..1f0e0c2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
@@ -0,0 +1,4055 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.0
+// 	protoc        (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	structpb "google.golang.org/protobuf/types/known/structpb"
+	reflect "reflect"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+	Scheme_UNKNOWN Scheme = 0
+	Scheme_HTTP    Scheme = 1
+	Scheme_HTTPS   Scheme = 2
+	Scheme_WS      Scheme = 3
+	Scheme_WSS     Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+	Scheme_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "HTTP",
+		2: "HTTPS",
+		3: "WS",
+		4: "WSS",
+	}
+	Scheme_value = map[string]int32{
+		"UNKNOWN": 0,
+		"HTTP":    1,
+		"HTTPS":   2,
+		"WS":      3,
+		"WSS":     4,
+	}
+)
+
+func (x Scheme) Enum() *Scheme {
+	p := new(Scheme)
+	*p = x
+	return p
+}
+
+func (x Scheme) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+	HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+	HeaderParameter_STRING  HeaderParameter_Type = 1
+	HeaderParameter_NUMBER  HeaderParameter_Type = 2
+	HeaderParameter_INTEGER HeaderParameter_Type = 3
+	HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+	HeaderParameter_Type_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "STRING",
+		2: "NUMBER",
+		3: "INTEGER",
+		4: "BOOLEAN",
+	}
+	HeaderParameter_Type_value = map[string]int32{
+		"UNKNOWN": 0,
+		"STRING":  1,
+		"NUMBER":  2,
+		"INTEGER": 3,
+		"BOOLEAN": 4,
+	}
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+	p := new(HeaderParameter_Type)
+	*p = x
+	return p
+}
+
+func (x HeaderParameter_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+	JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+	JSONSchema_ARRAY   JSONSchema_JSONSchemaSimpleTypes = 1
+	JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+	JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+	JSONSchema_NULL    JSONSchema_JSONSchemaSimpleTypes = 4
+	JSONSchema_NUMBER  JSONSchema_JSONSchemaSimpleTypes = 5
+	JSONSchema_OBJECT  JSONSchema_JSONSchemaSimpleTypes = 6
+	JSONSchema_STRING  JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+	JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+		0: "UNKNOWN",
+		1: "ARRAY",
+		2: "BOOLEAN",
+		3: "INTEGER",
+		4: "NULL",
+		5: "NUMBER",
+		6: "OBJECT",
+		7: "STRING",
+	}
+	JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+		"UNKNOWN": 0,
+		"ARRAY":   1,
+		"BOOLEAN": 2,
+		"INTEGER": 3,
+		"NULL":    4,
+		"NUMBER":  5,
+		"OBJECT":  6,
+		"STRING":  7,
+	}
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+	p := new(JSONSchema_JSONSchemaSimpleTypes)
+	*p = x
+	return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+	SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+	SecurityScheme_TYPE_BASIC   SecurityScheme_Type = 1
+	SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+	SecurityScheme_TYPE_OAUTH2  SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+	SecurityScheme_Type_name = map[int32]string{
+		0: "TYPE_INVALID",
+		1: "TYPE_BASIC",
+		2: "TYPE_API_KEY",
+		3: "TYPE_OAUTH2",
+	}
+	SecurityScheme_Type_value = map[string]int32{
+		"TYPE_INVALID": 0,
+		"TYPE_BASIC":   1,
+		"TYPE_API_KEY": 2,
+		"TYPE_OAUTH2":  3,
+	}
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+	p := new(SecurityScheme_Type)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Type) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+	SecurityScheme_IN_INVALID SecurityScheme_In = 0
+	SecurityScheme_IN_QUERY   SecurityScheme_In = 1
+	SecurityScheme_IN_HEADER  SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+	SecurityScheme_In_name = map[int32]string{
+		0: "IN_INVALID",
+		1: "IN_QUERY",
+		2: "IN_HEADER",
+	}
+	SecurityScheme_In_value = map[string]int32{
+		"IN_INVALID": 0,
+		"IN_QUERY":   1,
+		"IN_HEADER":  2,
+	}
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+	p := new(SecurityScheme_In)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_In) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+	SecurityScheme_FLOW_INVALID     SecurityScheme_Flow = 0
+	SecurityScheme_FLOW_IMPLICIT    SecurityScheme_Flow = 1
+	SecurityScheme_FLOW_PASSWORD    SecurityScheme_Flow = 2
+	SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+	SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+	SecurityScheme_Flow_name = map[int32]string{
+		0: "FLOW_INVALID",
+		1: "FLOW_IMPLICIT",
+		2: "FLOW_PASSWORD",
+		3: "FLOW_APPLICATION",
+		4: "FLOW_ACCESS_CODE",
+	}
+	SecurityScheme_Flow_value = map[string]int32{
+		"FLOW_INVALID":     0,
+		"FLOW_IMPLICIT":    1,
+		"FLOW_PASSWORD":    2,
+		"FLOW_APPLICATION": 3,
+		"FLOW_ACCESS_CODE": 4,
+	}
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+	p := new(SecurityScheme_Flow)
+	*p = x
+	return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+	return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+	return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  schemes: HTTPS;
+//	  consumes: "application/json";
+//	  produces: "application/json";
+//	};
+type Swagger struct {
+	state                          protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Swagger             string                     `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+	xxx_hidden_Info                *Info                      `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+	xxx_hidden_Host                string                     `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+	xxx_hidden_BasePath            string                     `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+	xxx_hidden_Schemes             []Scheme                   `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	xxx_hidden_Consumes            []string                   `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	xxx_hidden_Produces            []string                   `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	xxx_hidden_Responses           map[string]*Response       `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_SecurityDefinitions *SecurityDefinitions       `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+	xxx_hidden_Security            *[]*SecurityRequirement    `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	xxx_hidden_Tags                *[]*Tag                    `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+	xxx_hidden_ExternalDocs        *ExternalDocumentation     `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Extensions          map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields                  protoimpl.UnknownFields
+	sizeCache                      protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+	*x = Swagger{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+	if x != nil {
+		return x.xxx_hidden_Swagger
+	}
+	return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+	if x != nil {
+		return x.xxx_hidden_Info
+	}
+	return nil
+}
+
+func (x *Swagger) GetHost() string {
+	if x != nil {
+		return x.xxx_hidden_Host
+	}
+	return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+	if x != nil {
+		return x.xxx_hidden_BasePath
+	}
+	return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+	if x != nil {
+		return x.xxx_hidden_Schemes
+	}
+	return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+	if x != nil {
+		return x.xxx_hidden_Consumes
+	}
+	return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+	if x != nil {
+		return x.xxx_hidden_Produces
+	}
+	return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.xxx_hidden_Responses
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+	if x != nil {
+		return x.xxx_hidden_SecurityDefinitions
+	}
+	return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		if x.xxx_hidden_Security != nil {
+			return *x.xxx_hidden_Security
+		}
+	}
+	return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+	if x != nil {
+		if x.xxx_hidden_Tags != nil {
+			return *x.xxx_hidden_Tags
+		}
+	}
+	return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+	x.xxx_hidden_Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+	x.xxx_hidden_Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+	x.xxx_hidden_Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+	x.xxx_hidden_BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+	x.xxx_hidden_Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+	x.xxx_hidden_Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+	x.xxx_hidden_Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+	x.xxx_hidden_Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+	x.xxx_hidden_SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+	x.xxx_hidden_Security = &v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+	x.xxx_hidden_Tags = &v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+	x.xxx_hidden_Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+	x.xxx_hidden_SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Specifies the OpenAPI Specification version being used. It can be
+	// used by the OpenAPI UI and other clients to interpret the API listing. The
+	// value MUST be "2.0".
+	Swagger string
+	// Provides metadata about the API. The metadata can be used by the
+	// clients if needed.
+	Info *Info
+	// The host (name or IP) serving the API. This MUST be the host only and does
+	// not include the scheme nor sub-paths. It MAY include a port. If the host is
+	// not included, the host serving the documentation is to be used (including
+	// the port). The host does not support path templating.
+	Host string
+	// The base path on which the API is served, which is relative to the host. If
+	// it is not included, the API is served directly under the host. The value
+	// MUST start with a leading slash (/). The basePath does not support path
+	// templating.
+	// Note that using `base_path` does not change the endpoint paths that are
+	// generated in the resulting OpenAPI file. If you wish to use `base_path`
+	// with relatively generated OpenAPI paths, the `base_path` prefix must be
+	// manually removed from your `google.api.http` paths and your code changed to
+	// serve the API from the `base_path`.
+	BasePath string
+	// The transfer protocol of the API. Values MUST be from the list: "http",
+	// "https", "ws", "wss". If the schemes is not included, the default scheme to
+	// be used is the one used to access the OpenAPI definition itself.
+	Schemes []Scheme
+	// A list of MIME types the APIs can consume. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Consumes []string
+	// A list of MIME types the APIs can produce. This is global to all APIs but
+	// can be overridden on specific API calls. Value MUST be as described under
+	// Mime Types.
+	Produces []string
+	// An object to hold responses that can be used across operations. This
+	// property does not define global responses for all operations.
+	Responses map[string]*Response
+	// Security scheme definitions that can be used across the specification.
+	SecurityDefinitions *SecurityDefinitions
+	// A declaration of which security schemes are applied for the API as a whole.
+	// The list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements).
+	// Individual operations can override this definition.
+	Security []*SecurityRequirement
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []*Tag
+	// Additional external documentation.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+	m0 := &Swagger{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Swagger = b.Swagger
+	x.xxx_hidden_Info = b.Info
+	x.xxx_hidden_Host = b.Host
+	x.xxx_hidden_BasePath = b.BasePath
+	x.xxx_hidden_Schemes = b.Schemes
+	x.xxx_hidden_Consumes = b.Consumes
+	x.xxx_hidden_Produces = b.Produces
+	x.xxx_hidden_Responses = b.Responses
+	x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions
+	x.xxx_hidden_Security = &b.Security
+	x.xxx_hidden_Tags = &b.Tags
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
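+
+// Example (an editorial sketch, not part of the generated API): constructing
+// a Swagger document through the opaque-API builder; the values shown are
+// hypothetical.
+//
+//	doc := Swagger_builder{
+//		Swagger:  "2.0",
+//		Host:     "example.com",
+//		BasePath: "/api/v1",
+//		Schemes:  []Scheme{Scheme_HTTPS},
+//		Consumes: []string{"application/json"},
+//		Produces: []string{"application/json"},
+//	}.Build()
+//	_ = doc.GetHost() // "example.com"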
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//	service EchoService {
+//	  rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//	    option (google.api.http) = {
+//	      get: "/v1/example/echo/{id}"
+//	    };
+//
+//	    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//	      summary: "Get a message.";
+//	      operation_id: "getMessage";
+//	      tags: "echo";
+//	      responses: {
+//	        key: "200"
+//	        value: {
+//	          description: "OK";
+//	        }
+//	      }
+//	    };
+//	  }
+//	}
+type Operation struct {
+	state                   protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Tags         []string                   `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+	xxx_hidden_Summary      string                     `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+	xxx_hidden_Description  string                     `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_ExternalDocs *ExternalDocumentation     `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_OperationId  string                     `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	xxx_hidden_Consumes     []string                   `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+	xxx_hidden_Produces     []string                   `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+	xxx_hidden_Responses    map[string]*Response       `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Schemes      []Scheme                   `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+	xxx_hidden_Deprecated   bool                       `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+	xxx_hidden_Security     *[]*SecurityRequirement    `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+	xxx_hidden_Extensions   map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Parameters   *Parameters                `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+	*x = Operation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+	if x != nil {
+		return x.xxx_hidden_Tags
+	}
+	return nil
+}
+
+func (x *Operation) GetSummary() string {
+	if x != nil {
+		return x.xxx_hidden_Summary
+	}
+	return ""
+}
+
+func (x *Operation) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Operation) GetOperationId() string {
+	if x != nil {
+		return x.xxx_hidden_OperationId
+	}
+	return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+	if x != nil {
+		return x.xxx_hidden_Consumes
+	}
+	return nil
+}
+
+func (x *Operation) GetProduces() []string {
+	if x != nil {
+		return x.xxx_hidden_Produces
+	}
+	return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+	if x != nil {
+		return x.xxx_hidden_Responses
+	}
+	return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+	if x != nil {
+		return x.xxx_hidden_Schemes
+	}
+	return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+	if x != nil {
+		return x.xxx_hidden_Deprecated
+	}
+	return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+	if x != nil {
+		if x.xxx_hidden_Security != nil {
+			return *x.xxx_hidden_Security
+		}
+	}
+	return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+	if x != nil {
+		return x.xxx_hidden_Parameters
+	}
+	return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+	x.xxx_hidden_Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+	x.xxx_hidden_Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+	x.xxx_hidden_OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+	x.xxx_hidden_Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+	x.xxx_hidden_Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+	x.xxx_hidden_Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+	x.xxx_hidden_Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+	x.xxx_hidden_Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+	x.xxx_hidden_Security = &v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+	x.xxx_hidden_Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+	x.xxx_hidden_Parameters = nil
+}
+
+type Operation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A list of tags for API documentation control. Tags can be used for logical
+	// grouping of operations by resources or any other qualifier.
+	Tags []string
+	// A short summary of what the operation does. For maximum readability in the
+	// swagger-ui, this field SHOULD be less than 120 characters.
+	Summary string
+	// A verbose explanation of the operation behavior. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// Additional external documentation for this operation.
+	ExternalDocs *ExternalDocumentation
+	// Unique string used to identify the operation. The id MUST be unique among
+	// all operations described in the API. Tools and libraries MAY use the
+	// operationId to uniquely identify an operation, therefore, it is recommended
+	// to follow common programming naming conventions.
+	OperationId string
+	// A list of MIME types the operation can consume. This overrides the consumes
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Consumes []string
+	// A list of MIME types the operation can produce. This overrides the produces
+	// definition at the OpenAPI Object. An empty value MAY be used to clear the
+	// global definition. Value MUST be as described under Mime Types.
+	Produces []string
+	// The list of possible responses as they are returned from executing this
+	// operation.
+	Responses map[string]*Response
+	// The transfer protocol for the operation. Values MUST be from the list:
+	// "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+	// schemes definition.
+	Schemes []Scheme
+	// Declares this operation to be deprecated. Consumers should refrain from
+	// using the declared operation. Default value is false.
+	Deprecated bool
+	// A declaration of which security schemes are applied for this operation. The
+	// list of values describes alternative security schemes that can be used
+	// (that is, there is a logical OR between the security requirements). This
+	// definition overrides any declared top-level security. To remove a top-level
+	// security declaration, an empty array can be used.
+	Security []*SecurityRequirement
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+	// Custom parameters such as HTTP request headers.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/
+	// and https://swagger.io/specification/v2/#parameter-object.
+	Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+	m0 := &Operation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Tags = b.Tags
+	x.xxx_hidden_Summary = b.Summary
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_OperationId = b.OperationId
+	x.xxx_hidden_Consumes = b.Consumes
+	x.xxx_hidden_Produces = b.Produces
+	x.xxx_hidden_Responses = b.Responses
+	x.xxx_hidden_Schemes = b.Schemes
+	x.xxx_hidden_Deprecated = b.Deprecated
+	x.xxx_hidden_Security = &b.Security
+	x.xxx_hidden_Extensions = b.Extensions
+	x.xxx_hidden_Parameters = b.Parameters
+	return m0
+}
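+
+// Example (an editorial sketch): marking an operation deprecated via the
+// builder; the values are hypothetical.
+//
+//	op := Operation_builder{
+//		Summary:     "Get a message.",
+//		OperationId: "getMessage",
+//		Tags:        []string{"echo"},
+//		Deprecated:  true,
+//	}.Build()
+//	_ = op.GetDeprecated() // true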
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure:
+// only header parameters may be set here, since we do not want users specifying custom
+// non-header parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+	state              protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Headers *[]*HeaderParameter    `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+	unknownFields      protoimpl.UnknownFields
+	sizeCache          protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+	*x = Parameters{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+	if x != nil {
+		if x.xxx_hidden_Headers != nil {
+			return *x.xxx_hidden_Headers
+		}
+	}
+	return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+	x.xxx_hidden_Headers = &v
+}
+
+type Parameters_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Headers` is one or more HTTP header parameters.
+	// See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+	Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+	m0 := &Parameters{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Headers = &b.Headers
+	return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+	state                  protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Name        string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Description string                 `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Type        HeaderParameter_Type   `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+	xxx_hidden_Format      string                 `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+	xxx_hidden_Required    bool                   `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+	*x = HeaderParameter{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+	if x != nil {
+		return x.xxx_hidden_Format
+	}
+	return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+	if x != nil {
+		return x.xxx_hidden_Required
+	}
+	return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+	x.xxx_hidden_Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+	x.xxx_hidden_Required = v
+}
+
+type HeaderParameter_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Name` is the header name.
+	Name string
+	// `Description` is a short description of the header.
+	Description string
+	// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	// See: https://swagger.io/specification/v2/#parameterType.
+	Type HeaderParameter_Type
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Required` indicates whether the header is required.
+	Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+	m0 := &HeaderParameter{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Format = b.Format
+	x.xxx_hidden_Required = b.Required
+	return m0
+}
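+
+// Example (an editorial sketch): attaching a required header parameter to an
+// operation's `Parameters`; the header name is hypothetical.
+//
+//	params := Parameters_builder{
+//		Headers: []*HeaderParameter{
+//			HeaderParameter_builder{
+//				Name:     "X-Request-Id",
+//				Type:     HeaderParameter_STRING,
+//				Required: true,
+//			}.Build(),
+//		},
+//	}.Build()
+//	_ = len(params.GetHeaders()) // 1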
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+	state                  protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Description string                 `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Type        string                 `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	xxx_hidden_Format      string                 `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+	xxx_hidden_Default     string                 `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+	xxx_hidden_Pattern     string                 `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+	*x = Header{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Header) GetType() string {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return ""
+}
+
+func (x *Header) GetFormat() string {
+	if x != nil {
+		return x.xxx_hidden_Format
+	}
+	return ""
+}
+
+func (x *Header) GetDefault() string {
+	if x != nil {
+		return x.xxx_hidden_Default
+	}
+	return ""
+}
+
+func (x *Header) GetPattern() string {
+	if x != nil {
+		return x.xxx_hidden_Pattern
+	}
+	return ""
+}
+
+func (x *Header) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Header) SetType(v string) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+	x.xxx_hidden_Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+	x.xxx_hidden_Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+	x.xxx_hidden_Pattern = v
+}
+
+type Header_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the header.
+	Description string
+	// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+	Type string
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// `Default` declares the value of the header that the server will use if none is provided.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+	// Unlike JSON Schema, this value MUST conform to the defined type for the header.
+	Default string
+	// `Pattern` is a regular expression the header value must match.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+	Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+	m0 := &Header{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Format = b.Format
+	x.xxx_hidden_Default = b.Default
+	x.xxx_hidden_Pattern = b.Pattern
+	return m0
+}
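+
+// Example (an editorial sketch): describing a response header with a default
+// value; the values are hypothetical.
+//
+//	h := Header_builder{
+//		Description: "Requests allowed per window.",
+//		Type:        "integer",
+//		Format:      "int32",
+//		Default:     "100",
+//	}.Build()
+//	_ = h.GetDefault() // "100"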
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+	state                  protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Description string                     `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Schema      *Schema                    `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+	xxx_hidden_Headers     map[string]*Header         `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Examples    map[string]string          `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	xxx_hidden_Extensions  map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+	*x = Response{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+	if x != nil {
+		return x.xxx_hidden_Schema
+	}
+	return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+	if x != nil {
+		return x.xxx_hidden_Headers
+	}
+	return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+	if x != nil {
+		return x.xxx_hidden_Examples
+	}
+	return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Response) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+	x.xxx_hidden_Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+	x.xxx_hidden_Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+	x.xxx_hidden_Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+	x.xxx_hidden_Schema = nil
+}
+
+type Response_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// `Description` is a short description of the response.
+	// GFM syntax can be used for rich text representation.
+	Description string
+	// `Schema` optionally defines the structure of the response.
+	// If `Schema` is not provided, it means there is no content to the response.
+	Schema *Schema
+	// `Headers` A list of headers that are sent with the response.
+	// `Header` name is expected to be a string in the canonical format of the MIME header key.
+	// See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+	Headers map[string]*Header
+	// `Examples` gives per-mimetype response examples.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+	Examples map[string]string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+	m0 := &Response{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Schema = b.Schema
+	x.xxx_hidden_Headers = b.Headers
+	x.xxx_hidden_Examples = b.Examples
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
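+
+// Illustrative sketch only (assumption: this example is not part of the
+// upstream generated file). `Response` takes its map-valued fields directly;
+// `Extensions` values are well-known `structpb.Value` wrappers:
+//
+//	resp := Response_builder{
+//		Description: "A successful echo response.",
+//		Schema:      Schema_builder{Discriminator: "kind"}.Build(),
+//		Headers: map[string]*Header{
+//			"X-Rate-Limit": Header_builder{Type: "integer"}.Build(),
+//		},
+//		Extensions: map[string]*structpb.Value{
+//			"x-internal": structpb.NewBoolValue(true),
+//		},
+//	}.Build()
+//	_ = resp.GetHeaders()["X-Rate-Limit"]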
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    title: "Echo API";
+//	    version: "1.0";
+//	    description: "";
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	  };
+//	  ...
+//	};
+type Info struct {
+	state                     protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Title          string                     `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	xxx_hidden_Description    string                     `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_TermsOfService string                     `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+	xxx_hidden_Contact        *Contact                   `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+	xxx_hidden_License        *License                   `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+	xxx_hidden_Version        string                     `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+	xxx_hidden_Extensions     map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields             protoimpl.UnknownFields
+	sizeCache                 protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+	*x = Info{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+	if x != nil {
+		return x.xxx_hidden_Title
+	}
+	return ""
+}
+
+func (x *Info) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+	if x != nil {
+		return x.xxx_hidden_TermsOfService
+	}
+	return ""
+}
+
+func (x *Info) GetContact() *Contact {
+	if x != nil {
+		return x.xxx_hidden_Contact
+	}
+	return nil
+}
+
+func (x *Info) GetLicense() *License {
+	if x != nil {
+		return x.xxx_hidden_License
+	}
+	return nil
+}
+
+func (x *Info) GetVersion() string {
+	if x != nil {
+		return x.xxx_hidden_Version
+	}
+	return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Info) SetTitle(v string) {
+	x.xxx_hidden_Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+	x.xxx_hidden_TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+	x.xxx_hidden_Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+	x.xxx_hidden_License = v
+}
+
+func (x *Info) SetVersion(v string) {
+	x.xxx_hidden_Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_License != nil
+}
+
+func (x *Info) ClearContact() {
+	x.xxx_hidden_Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+	x.xxx_hidden_License = nil
+}
+
+type Info_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The title of the application.
+	Title string
+	// A short description of the application. GFM syntax can be used for rich
+	// text representation.
+	Description string
+	// The Terms of Service for the API.
+	TermsOfService string
+	// The contact information for the exposed API.
+	Contact *Contact
+	// The license information for the exposed API.
+	License *License
+	// Provides the version of the application API (not to be confused
+	// with the specification version).
+	Version string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+	m0 := &Info{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Title = b.Title
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_TermsOfService = b.TermsOfService
+	x.xxx_hidden_Contact = b.Contact
+	x.xxx_hidden_License = b.License
+	x.xxx_hidden_Version = b.Version
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
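+
+// Illustrative sketch only (assumption: this example is not part of the
+// upstream generated file). Nested messages are built with their own builders
+// and plugged in, mirroring the proto option shown in the doc comment above:
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name: "gRPC-Gateway project",
+//			Url:  "https://github.com/grpc-ecosystem/grpc-gateway",
+//		}.Build(),
+//		License: License_builder{Name: "BSD 3-Clause License"}.Build(),
+//	}.Build()
+//	if info.HasLicense() { // presence helpers report non-nil message fields
+//		_ = info.GetLicense().GetName()
+//	}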
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    contact: {
+//	      name: "gRPC-Gateway project";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	      email: "none@example.com";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type Contact struct {
+	state            protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Name  string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Url   string                 `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	xxx_hidden_Email string                 `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+	*x = Contact{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *Contact) GetUrl() string {
+	if x != nil {
+		return x.xxx_hidden_Url
+	}
+	return ""
+}
+
+func (x *Contact) GetEmail() string {
+	if x != nil {
+		return x.xxx_hidden_Email
+	}
+	return ""
+}
+
+func (x *Contact) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+	x.xxx_hidden_Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+	x.xxx_hidden_Email = v
+}
+
+type Contact_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The identifying name of the contact person/organization.
+	Name string
+	// The URL pointing to the contact information. MUST be in the format of a
+	// URL.
+	Url string
+	// The email address of the contact person/organization. MUST be in the format
+	// of an email address.
+	Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+	m0 := &Contact{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Url = b.Url
+	x.xxx_hidden_Email = b.Email
+	return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  info: {
+//	    ...
+//	    license: {
+//	      name: "BSD 3-Clause License";
+//	      url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//	    };
+//	    ...
+//	  };
+//	  ...
+//	};
+type License struct {
+	state           protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Name string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Url  string                 `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+	*x = License{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *License) GetUrl() string {
+	if x != nil {
+		return x.xxx_hidden_Url
+	}
+	return ""
+}
+
+func (x *License) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *License) SetUrl(v string) {
+	x.xxx_hidden_Url = v
+}
+
+type License_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The license name used for the API.
+	Name string
+	// A URL to the license used for the API. MUST be in the format of a URL.
+	Url string
+}
+
+func (b0 License_builder) Build() *License {
+	m0 := &License{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Url = b.Url
+	return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//	  ...
+//	  external_docs: {
+//	    description: "More about gRPC-Gateway";
+//	    url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//	  }
+//	  ...
+//	};
+type ExternalDocumentation struct {
+	state                  protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Description string                 `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Url         string                 `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+	unknownFields          protoimpl.UnknownFields
+	sizeCache              protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+	*x = ExternalDocumentation{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+	if x != nil {
+		return x.xxx_hidden_Url
+	}
+	return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+	x.xxx_hidden_Url = v
+}
+
+type ExternalDocumentation_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the target documentation. GFM syntax can be used for
+	// rich text representation.
+	Description string
+	// The URL for the target documentation. Value MUST be in the format
+	// of a URL.
+	Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+	m0 := &ExternalDocumentation{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Url = b.Url
+	return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+	state                    protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_JsonSchema    *JSONSchema            `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+	xxx_hidden_Discriminator string                 `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+	xxx_hidden_ReadOnly      bool                   `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	xxx_hidden_ExternalDocs  *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Example       string                 `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+	unknownFields            protoimpl.UnknownFields
+	sizeCache                protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+	*x = Schema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+	if x != nil {
+		return x.xxx_hidden_JsonSchema
+	}
+	return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+	if x != nil {
+		return x.xxx_hidden_Discriminator
+	}
+	return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+	if x != nil {
+		return x.xxx_hidden_ReadOnly
+	}
+	return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Schema) GetExample() string {
+	if x != nil {
+		return x.xxx_hidden_Example
+	}
+	return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+	x.xxx_hidden_JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+	x.xxx_hidden_Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+	x.xxx_hidden_ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+	x.xxx_hidden_Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+	x.xxx_hidden_JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type Schema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	JsonSchema *JSONSchema
+	// Adds support for polymorphism. The discriminator is the schema property
+	// name that is used to differentiate between other schemas that inherit this
+	// schema. The property name used MUST be defined at this schema and it MUST
+	// be in the required property list. When used, the value MUST be the name of
+	// this schema or any schema that inherits it.
+	Discriminator string
+	// Relevant only for Schema "properties" definitions. Declares the property as
+	// "read only". This means that it MAY be sent as part of a response but MUST
+	// NOT be sent as part of the request. Properties marked as readOnly being
+	// true SHOULD NOT be in the required list of the defined schema. Default
+	// value is false.
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	// A free-form property to include an example of an instance for this schema in JSON.
+	// This is copied verbatim to the output.
+	Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+	m0 := &Schema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_JsonSchema = b.JsonSchema
+	x.xxx_hidden_Discriminator = b.Discriminator
+	x.xxx_hidden_ReadOnly = b.ReadOnly
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Example = b.Example
+	return m0
+}
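+
+// Illustrative sketch only (assumption: this example is not part of the
+// upstream generated file). With the opaque API, message-typed fields carry
+// explicit presence: `Has*` reports whether the pointer is set and `Clear*`
+// resets it, while scalar fields simply use their zero values:
+//
+//	s := Schema_builder{
+//		JsonSchema: JSONSchema_builder{Title: "SimpleMessage"}.Build(),
+//		ReadOnly:   true,
+//	}.Build()
+//	_ = s.HasJsonSchema() // true
+//	s.ClearJsonSchema()
+//	_ = s.GetJsonSchema() // nil after clearing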
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+//	option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+//	  ...
+//	  title: "MyEnum";
+//	  description:"This is my nice enum";
+//	  example: "ZERO";
+//	  required: true;
+//	  ...
+//	};
+type EnumSchema struct {
+	state                   protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Description  string                     `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Default      string                     `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+	xxx_hidden_Title        string                     `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+	xxx_hidden_Required     bool                       `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+	xxx_hidden_ReadOnly     bool                       `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	xxx_hidden_ExternalDocs *ExternalDocumentation     `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Example      string                     `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+	xxx_hidden_Ref          string                     `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+	xxx_hidden_Extensions   map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+	*x = EnumSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+	if x != nil {
+		return x.xxx_hidden_Default
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+	if x != nil {
+		return x.xxx_hidden_Title
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+	if x != nil {
+		return x.xxx_hidden_Required
+	}
+	return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.xxx_hidden_ReadOnly
+	}
+	return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+	if x != nil {
+		return x.xxx_hidden_Example
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+	if x != nil {
+		return x.xxx_hidden_Ref
+	}
+	return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+	x.xxx_hidden_Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+	x.xxx_hidden_Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+	x.xxx_hidden_Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+	x.xxx_hidden_ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+	x.xxx_hidden_Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+	x.xxx_hidden_Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A short description of the schema.
+	Description string
+	Default     string
+	// The title of the schema.
+	Title    string
+	Required bool
+	ReadOnly bool
+	// Additional external documentation for this schema.
+	ExternalDocs *ExternalDocumentation
+	Example      string
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+	m0 := &EnumSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Default = b.Default
+	x.xxx_hidden_Title = b.Title
+	x.xxx_hidden_Required = b.Required
+	x.xxx_hidden_ReadOnly = b.ReadOnly
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Example = b.Example
+	x.xxx_hidden_Ref = b.Ref
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
+// `JSONSchema` represents the properties taken from JSON Schema as they are
+// used in the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//	message SimpleMessage {
+//	  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//	    json_schema: {
+//	      title: "SimpleMessage"
+//	      description: "A simple message."
+//	      required: ["id"]
+//	    }
+//	  };
+//
+//	  // Id represents the message identifier.
+//	  string id = 1 [
+//	      (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//	        description: "The unique identifier of the simple message."
+//	      }];
+//	}
+type JSONSchema struct {
+	state                         protoimpl.MessageState             `protogen:"opaque.v1"`
+	xxx_hidden_Ref                string                             `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	xxx_hidden_Title              string                             `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+	xxx_hidden_Description        string                             `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Default            string                             `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+	xxx_hidden_ReadOnly           bool                               `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	xxx_hidden_Example            string                             `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+	xxx_hidden_MultipleOf         float64                            `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	xxx_hidden_Maximum            float64                            `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+	xxx_hidden_ExclusiveMaximum   bool                               `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	xxx_hidden_Minimum            float64                            `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+	xxx_hidden_ExclusiveMinimum   bool                               `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+	xxx_hidden_MaxLength          uint64                             `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+	xxx_hidden_MinLength          uint64                             `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+	xxx_hidden_Pattern            string                             `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+	xxx_hidden_MaxItems           uint64                             `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+	xxx_hidden_MinItems           uint64                             `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+	xxx_hidden_UniqueItems        bool                               `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+	xxx_hidden_MaxProperties      uint64                             `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+	xxx_hidden_MinProperties      uint64                             `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+	xxx_hidden_Required           []string                           `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	xxx_hidden_Array              []string                           `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+	xxx_hidden_Type               []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	xxx_hidden_Format             string                             `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	xxx_hidden_Enum               []string                           `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+	xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration     `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+	xxx_hidden_Extensions         map[string]*structpb.Value         `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields                 protoimpl.UnknownFields
+	sizeCache                     protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+	*x = JSONSchema{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+	if x != nil {
+		return x.xxx_hidden_Ref
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+	if x != nil {
+		return x.xxx_hidden_Title
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+	if x != nil {
+		return x.xxx_hidden_Default
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+	if x != nil {
+		return x.xxx_hidden_ReadOnly
+	}
+	return false
+}
+
+func (x *JSONSchema) GetExample() string {
+	if x != nil {
+		return x.xxx_hidden_Example
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+	if x != nil {
+		return x.xxx_hidden_MultipleOf
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+	if x != nil {
+		return x.xxx_hidden_Maximum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+	if x != nil {
+		return x.xxx_hidden_ExclusiveMaximum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+	if x != nil {
+		return x.xxx_hidden_Minimum
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+	if x != nil {
+		return x.xxx_hidden_ExclusiveMinimum
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MaxLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MinLength
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+	if x != nil {
+		return x.xxx_hidden_Pattern
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MaxItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MinItems
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+	if x != nil {
+		return x.xxx_hidden_UniqueItems
+	}
+	return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MaxProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+	if x != nil {
+		return x.xxx_hidden_MinProperties
+	}
+	return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+	if x != nil {
+		return x.xxx_hidden_Required
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+	if x != nil {
+		return x.xxx_hidden_Array
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+	if x != nil {
+		return x.xxx_hidden_Format
+	}
+	return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+	if x != nil {
+		return x.xxx_hidden_Enum
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+	if x != nil {
+		return x.xxx_hidden_FieldConfiguration
+	}
+	return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+	x.xxx_hidden_Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+	x.xxx_hidden_Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+	x.xxx_hidden_Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+	x.xxx_hidden_ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+	x.xxx_hidden_Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+	x.xxx_hidden_MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+	x.xxx_hidden_Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+	x.xxx_hidden_ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+	x.xxx_hidden_Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+	x.xxx_hidden_ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+	x.xxx_hidden_MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+	x.xxx_hidden_MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+	x.xxx_hidden_Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+	x.xxx_hidden_MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+	x.xxx_hidden_MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+	x.xxx_hidden_UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+	x.xxx_hidden_MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+	x.xxx_hidden_MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+	x.xxx_hidden_Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+	x.xxx_hidden_Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+	x.xxx_hidden_Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+	x.xxx_hidden_Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+	x.xxx_hidden_FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+	x.xxx_hidden_FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Ref is used to define an external reference to include in the message.
+	// This could be a fully qualified proto message reference, and that type must
+	// be imported into the protofile. If no message is identified, the Ref will
+	// be used verbatim in the output.
+	// For example:
+	//
+	//	`ref: ".google.protobuf.Timestamp"`.
+	Ref string
+	// The title of the schema.
+	Title string
+	// A short description of the schema.
+	Description string
+	Default     string
+	ReadOnly    bool
+	// A free-form property to include a JSON example of this field. This is copied
+	// verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0.
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+	// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+	Example    string
+	MultipleOf float64
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+	Maximum          float64
+	ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+	Minimum          float64
+	ExclusiveMinimum bool
+	MaxLength        uint64
+	MinLength        uint64
+	Pattern          string
+	MaxItems         uint64
+	MinItems         uint64
+	UniqueItems      bool
+	MaxProperties    uint64
+	MinProperties    uint64
+	Required         []string
+	// Items in `array` must be unique.
+	Array []string
+	Type  []JSONSchema_JSONSchemaSimpleTypes
+	// `Format` is the extending format for the previously mentioned type.
+	Format string
+	// Items in `enum` must be unique.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+	Enum []string
+	// Additional field level properties used when generating the OpenAPI v2 file.
+	FieldConfiguration *JSONSchema_FieldConfiguration
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+	m0 := &JSONSchema{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Ref = b.Ref
+	x.xxx_hidden_Title = b.Title
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Default = b.Default
+	x.xxx_hidden_ReadOnly = b.ReadOnly
+	x.xxx_hidden_Example = b.Example
+	x.xxx_hidden_MultipleOf = b.MultipleOf
+	x.xxx_hidden_Maximum = b.Maximum
+	x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum
+	x.xxx_hidden_Minimum = b.Minimum
+	x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum
+	x.xxx_hidden_MaxLength = b.MaxLength
+	x.xxx_hidden_MinLength = b.MinLength
+	x.xxx_hidden_Pattern = b.Pattern
+	x.xxx_hidden_MaxItems = b.MaxItems
+	x.xxx_hidden_MinItems = b.MinItems
+	x.xxx_hidden_UniqueItems = b.UniqueItems
+	x.xxx_hidden_MaxProperties = b.MaxProperties
+	x.xxx_hidden_MinProperties = b.MinProperties
+	x.xxx_hidden_Required = b.Required
+	x.xxx_hidden_Array = b.Array
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Format = b.Format
+	x.xxx_hidden_Enum = b.Enum
+	x.xxx_hidden_FieldConfiguration = b.FieldConfiguration
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
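+
+// Illustrative sketch only (assumption: this example is not part of the
+// upstream generated file, and the enum constant names follow the usual
+// Nested_NAME convention of protoc-gen-go). A schema combining JSON Schema
+// validation keywords:
+//
+//	js := JSONSchema_builder{
+//		Title:     "User",
+//		Type:      []JSONSchema_JSONSchemaSimpleTypes{JSONSchema_OBJECT},
+//		Required:  []string{"id"},
+//		MinLength: 1,
+//		MaxLength: 64,
+//		Pattern:   "^[a-z0-9-]+$",
+//	}.Build()
+//	_ = js.GetRequired() // []string{"id"}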
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+	state                   protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Name         string                     `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_Description  string                     `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_ExternalDocs *ExternalDocumentation     `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+	xxx_hidden_Extensions   map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+	*x = Tag{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *Tag) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+	if x != nil {
+		return x.xxx_hidden_ExternalDocs
+	}
+	return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *Tag) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+	x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+	x.xxx_hidden_ExternalDocs = nil
+}
+
+type Tag_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The name of the tag. Use it to allow override of the name of a
+	// global Tag object, then use that name to reference the tag throughout the
+	// OpenAPI file.
+	Name string
+	// A short description for the tag. GFM syntax can be used for rich text
+	// representation.
+	Description string
+	// Additional external documentation for this tag.
+	ExternalDocs *ExternalDocumentation
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+	m0 := &Tag{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_ExternalDocs = b.ExternalDocs
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+	state               protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+	*x = SecurityDefinitions{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+	if x != nil {
+		return x.xxx_hidden_Security
+	}
+	return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+	x.xxx_hidden_Security = v
+}
+
+type SecurityDefinitions_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// A single security scheme definition, mapping a "name" to the scheme it
+	// defines.
+	Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+	m0 := &SecurityDefinitions{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Security = b.Security
+	return m0
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+	state                       protoimpl.MessageState     `protogen:"opaque.v1"`
+	xxx_hidden_Type             SecurityScheme_Type        `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+	xxx_hidden_Description      string                     `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	xxx_hidden_Name             string                     `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	xxx_hidden_In               SecurityScheme_In          `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+	xxx_hidden_Flow             SecurityScheme_Flow        `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+	xxx_hidden_AuthorizationUrl string                     `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+	xxx_hidden_TokenUrl         string                     `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+	xxx_hidden_Scopes           *Scopes                    `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+	xxx_hidden_Extensions       map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields               protoimpl.UnknownFields
+	sizeCache                   protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+	*x = SecurityScheme{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+	if x != nil {
+		return x.xxx_hidden_Type
+	}
+	return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+	if x != nil {
+		return x.xxx_hidden_Description
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+	if x != nil {
+		return x.xxx_hidden_Name
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+	if x != nil {
+		return x.xxx_hidden_In
+	}
+	return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+	if x != nil {
+		return x.xxx_hidden_Flow
+	}
+	return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+	if x != nil {
+		return x.xxx_hidden_AuthorizationUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+	if x != nil {
+		return x.xxx_hidden_TokenUrl
+	}
+	return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+	if x != nil {
+		return x.xxx_hidden_Scopes
+	}
+	return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+	if x != nil {
+		return x.xxx_hidden_Extensions
+	}
+	return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+	x.xxx_hidden_Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+	x.xxx_hidden_Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+	x.xxx_hidden_Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+	x.xxx_hidden_In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+	x.xxx_hidden_Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+	x.xxx_hidden_AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+	x.xxx_hidden_TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+	x.xxx_hidden_Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+	x.xxx_hidden_Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+	if x == nil {
+		return false
+	}
+	return x.xxx_hidden_Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+	x.xxx_hidden_Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The type of the security scheme. Valid values are "basic",
+	// "apiKey" or "oauth2".
+	Type SecurityScheme_Type
+	// A short description for the security scheme.
+	Description string
+	// The name of the header or query parameter to be used.
+	// Valid for apiKey.
+	Name string
+	// The location of the API key. Valid values are "query" or
+	// "header".
+	// Valid for apiKey.
+	In SecurityScheme_In
+	// The flow used by the OAuth2 security scheme. Valid values are
+	// "implicit", "password", "application" or "accessCode".
+	// Valid for oauth2.
+	Flow SecurityScheme_Flow
+	// The authorization URL to be used for this flow. This SHOULD be in
+	// the form of a URL.
+	// Valid for oauth2/implicit and oauth2/accessCode.
+	AuthorizationUrl string
+	// The token URL to be used for this flow. This SHOULD be in the
+	// form of a URL.
+	// Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+	TokenUrl string
+	// The available scopes for the OAuth2 security scheme.
+	// Valid for oauth2.
+	Scopes *Scopes
+	// Custom properties that start with "x-" such as "x-foo" used to describe
+	// extra functionality that is not covered by the standard OpenAPI Specification.
+	// See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+	Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+	m0 := &SecurityScheme{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Type = b.Type
+	x.xxx_hidden_Description = b.Description
+	x.xxx_hidden_Name = b.Name
+	x.xxx_hidden_In = b.In
+	x.xxx_hidden_Flow = b.Flow
+	x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl
+	x.xxx_hidden_TokenUrl = b.TokenUrl
+	x.xxx_hidden_Scopes = b.Scopes
+	x.xxx_hidden_Extensions = b.Extensions
+	return m0
+}
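+
+// Illustrative sketch only (assumption: this example is not part of the
+// upstream generated file, and the enum constant names follow the usual
+// Nested_NAME convention of protoc-gen-go). Wiring an OAuth2 scheme into
+// `SecurityDefinitions` under a name that operations can then reference:
+//
+//	scheme := SecurityScheme_builder{
+//		Type:             SecurityScheme_TYPE_OAUTH2,
+//		Flow:             SecurityScheme_FLOW_IMPLICIT,
+//		AuthorizationUrl: "https://example.com/oauth/authorize",
+//		Scopes: Scopes_builder{
+//			Scope: map[string]string{"read": "Grants read access"},
+//		}.Build(),
+//	}.Build()
+//	defs := SecurityDefinitions_builder{
+//		Security: map[string]*SecurityScheme{"OAuth2": scheme},
+//	}.Build()
+//	_ = defs.GetSecurity()["OAuth2"].GetScopes()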
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+	state                          protoimpl.MessageState                                   `protogen:"opaque.v1"`
+	xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields                  protoimpl.UnknownFields
+	sizeCache                      protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+	*x = SecurityRequirement{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+	if x != nil {
+		return x.xxx_hidden_SecurityRequirement
+	}
+	return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+	x.xxx_hidden_SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Each name must correspond to a security scheme which is declared in
+	// the Security Definitions. If the security scheme is of type "oauth2",
+	// then the value is a list of scope names required for the execution.
+	// For other security scheme types, the array MUST be empty.
+	SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+	m0 := &SecurityRequirement{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_SecurityRequirement = b.SecurityRequirement
+	return m0
+}
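+
+// Usage sketch (illustrative scheme name and scope): requiring a single
+// declared security scheme via the builder above, e.g.
+//
+//	req := SecurityRequirement_builder{
+//		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+//			"OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
+//				Scope: []string{"read"},
+//			}.Build(),
+//		},
+//	}.Build()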
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+	state            protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Scope map[string]string      `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+	*x = Scopes{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+	if x != nil {
+		return x.xxx_hidden_Scope
+	}
+	return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+	x.xxx_hidden_Scope = v
+}
+
+type Scopes_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps the name of a scope to a short description of it (as the value
+	// of the property).
+	Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+	m0 := &Scopes{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Scope = b.Scope
+	return m0
+}
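+
+// Usage sketch (illustrative scope entry): mapping a scope name to its
+// description via the builder above, e.g.
+//
+//	s := Scopes_builder{
+//		Scope: map[string]string{"read:pets": "read access to pets"},
+//	}.Build()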
+
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPIv2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+	state                    protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_PathParamName string                 `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+	unknownFields            protoimpl.UnknownFields
+	sizeCache                protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+	*x = JSONSchema_FieldConfiguration{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+	if x != nil {
+		return x.xxx_hidden_PathParamName
+	}
+	return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+	x.xxx_hidden_PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as a path parameter. If set, this
+	// will be used as the complete parameter name when this field is used as a
+	// path parameter. Use this to avoid having auto-generated path parameter
+	// names for overlapping paths.
+	PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+	m0 := &JSONSchema_FieldConfiguration{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_PathParamName = b.PathParamName
+	return m0
+}
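+
+// Usage sketch (illustrative parameter name): overriding the auto-generated
+// path parameter name for a field, e.g.
+//
+//	fc := JSONSchema_FieldConfiguration_builder{PathParamName: "petId"}.Build()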
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+	state            protoimpl.MessageState `protogen:"opaque.v1"`
+	xxx_hidden_Scope []string               `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+	unknownFields    protoimpl.UnknownFields
+	sizeCache        protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+	*x = SecurityRequirement_SecurityRequirementValue{}
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+	mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+	if x != nil {
+		return x.xxx_hidden_Scope
+	}
+	return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+	x.xxx_hidden_Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+	_ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+	m0 := &SecurityRequirement_SecurityRequirementValue{}
+	b, x := &b0, m0
+	_, _ = b, x
+	x.xxx_hidden_Scope = b.Scope
+	return m0
+}
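+
+// Usage sketch (illustrative scopes): a bare scope list for an oauth2 scheme,
+// e.g.
+//
+//	v := SecurityRequirement_SecurityRequirementValue_builder{
+//		Scope: []string{"read", "write"},
+//	}.Build()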
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+	0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+	0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+	0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+	0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+	0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+	0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+	0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+	0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+	0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+	0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+	0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+	0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+	0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+	0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+	0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+	0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+	0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+	0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+	0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+	0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+	0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+	0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+	0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+	0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+	0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+	0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+	0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+	0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+	0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+	0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+	0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+	0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+	0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+	0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+	0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+	0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+	0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+	0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+	0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+	0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+	0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+	0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+	0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+	0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+	0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+	0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+	0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+	0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+	0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+	0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+	0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+	0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+	0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+	0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+	0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+	0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+	0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+	0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+	0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+	0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+	0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+	0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+	0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+	0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+	0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+	0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+	0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+	0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+	0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+	0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+	0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+	0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+	0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+	0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+	0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+	0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+	0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+	0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+	0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+	0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+	0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+	0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+	0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+	0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+	0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+	0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+	0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+	0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+	0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+	0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+	0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+	0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+	0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+	0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+	0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+	0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+	0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+	0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+	0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+	0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+	0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+	0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+	0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+	0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+	0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+	0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+	0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+	0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+	0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+	0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+	0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+	0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+	0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+	0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+	0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+	0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+	0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+	0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+	0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+	0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+	0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+	0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+	0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+	0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+	0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+	0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+	0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+	0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+	0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+	0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+	0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+	0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+	0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+	0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+	0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+	0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+	0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+	0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+	0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+	0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+	0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+	0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+	0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+	0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+	0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+	0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+	0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+	0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+	0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+	0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+	0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+	0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+	0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+	0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+	0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+	0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+	0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+	0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+	0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+	0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+	0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+	0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+	0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+	0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+	0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+	0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+	0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+	0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+	0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+	0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+	0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+	0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+	0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+	0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+	0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+	0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+	0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+	0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+	0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+	0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+	0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+	0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+	0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+	0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+	0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+	0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+	0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+	0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+	0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+	0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+	0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+	0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+	0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+	0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+	0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+	0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+	0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+	0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+	0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+	0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+	0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+	0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+	0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+	0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+	0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+	0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+	0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+	0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+	0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+	0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+	0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+	0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+	0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+	0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+	0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+	0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+	0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+	0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+	0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+	0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+	0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+	0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+	0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+	0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+	0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+	0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+	0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+	0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+	0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+	0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+	0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+	0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+	0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+	0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+	0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+	0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+	0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+	0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+	0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+	0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+	(Scheme)(0),                           // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	(HeaderParameter_Type)(0),             // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	(JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	(SecurityScheme_Type)(0),              // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	(SecurityScheme_In)(0),                // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	(SecurityScheme_Flow)(0),              // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	(*Swagger)(nil),                       // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+	(*Operation)(nil),                     // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+	(*Parameters)(nil),                    // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	(*HeaderParameter)(nil),               // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	(*Header)(nil),                        // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+	(*Response)(nil),                      // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+	(*Info)(nil),                          // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+	(*Contact)(nil),                       // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+	(*License)(nil),                       // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+	(*ExternalDocumentation)(nil),         // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	(*Schema)(nil),                        // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+	(*EnumSchema)(nil),                    // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+	(*JSONSchema)(nil),                    // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	(*Tag)(nil),                           // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+	(*SecurityDefinitions)(nil),           // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	(*SecurityScheme)(nil),                // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	(*SecurityRequirement)(nil),           // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	(*Scopes)(nil),                        // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	nil,                                   // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	nil,                                   // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	nil,                                   // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	nil,                                   // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	nil,                                   // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	nil,                                   // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	nil,                                   // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	nil,                                   // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	nil,                                   // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	(*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	nil,                                   // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	nil,                                   // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	nil,                                   // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	nil,                                   // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	(*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	nil,                    // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	nil,                    // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	(*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+	12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+	0,  // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+	20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+	22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+	15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+	15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+	0,  // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+	22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+	27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+	8,  // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+	9,  // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+	1,  // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+	16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+	28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+	29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+	30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+	13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+	14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+	31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+	18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+	15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+	2,  // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+	33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+	34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+	15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+	35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+	36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+	3,  // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+	4,  // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+	5,  // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+	23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+	37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+	39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+	40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+	11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+	41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+	41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+	41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+	38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+	53, // [53:53] is the sub-list for method output_type
+	53, // [53:53] is the sub-list for method input_type
+	53, // [53:53] is the sub-list for extension type_name
+	53, // [53:53] is the sub-list for extension extendee
+	0,  // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+	if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+			NumEnums:      6,
+			NumMessages:   35,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+		DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+		EnumInfos:         file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+		MessageInfos:      file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+	}.Build()
+	File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+	file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+	file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
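
The generated init above hands the raw descriptor bytes, the goTypes slice, and the depIdxs table to protoimpl.TypeBuilder, which links them and registers the resulting file in the global registry. A minimal consumer-side sketch, assuming the grpc-gateway module is on the module path and using the proto path implied by the generated symbol names (an assumption, not shown in this hunk):

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/reflect/protoregistry"

        // Blank import runs the generated init(), which registers the file.
        _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
    )

    func main() {
        // Path inferred from the generated variable names above (assumption).
        fd, err := protoregistry.GlobalFiles.FindFileByPath(
            "protoc-gen-openapiv2/options/openapiv2.proto")
        if err != nil {
            panic(err)
        }
        fmt.Println(fd.Package()) // grpc.gateway.protoc_gen_openapiv2.options
    }
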
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
index e87a115..a320b30 100644
--- a/vendor/github.com/hashicorp/go-uuid/LICENSE
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -1,3 +1,5 @@
+Copyright © 2015-2022 HashiCorp, Inc.
+
 Mozilla Public License, version 2.0
 
 1. Definitions
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
index e4571ce..fd01342 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
@@ -89,7 +89,12 @@
 		return tkt, skey, nil
 	}
 	princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
-	realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1])
+	realm := cl.spnRealm(princ)
+
+	// if we don't know the SPN's realm, ask the client realm's KDC
+	if realm == "" {
+		realm = cl.Credentials.Realm()
+	}
 
 	tgt, skey, err := cl.sessionTGT(realm)
 	if err != nil {
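
This pairs with the ResolveRealm change further down (it now returns an empty string for unmapped hosts instead of the default realm): when the SPN's realm is unknown, the request goes to the client realm's KDC, which can answer with a referral. A standalone sketch of the fallback, with illustrative names rather than the real gokrb5 API:

    package main

    import (
        "fmt"
        "strings"
    )

    // resolveSPNRealm mirrors the fallback above: prefer a realm mapped for the
    // SPN's host; otherwise fall back to the client's own realm so its KDC can
    // issue a referral. domainRealm stands in for the [domain_realm] mapping.
    func resolveSPNRealm(domainRealm map[string]string, spn, clientRealm string) string {
        parts := strings.Split(spn, "/")
        host := parts[len(parts)-1] // same element the old code passed to ResolveRealm
        if r, ok := domainRealm[host]; ok {
            return r
        }
        return clientRealm // realm unknown: ask the client realm's KDC
    }

    func main() {
        dr := map[string]string{"kdc.example.com": "EXAMPLE.COM"}
        fmt.Println(resolveSPNRealm(dr, "HTTP/kdc.example.com", "CLIENT.REALM")) // EXAMPLE.COM
        fmt.Println(resolveSPNRealm(dr, "HTTP/other.host", "CLIENT.REALM"))      // CLIENT.REALM
    }
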
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
index 634f015..688ad3a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
@@ -2,7 +2,6 @@
 
 import (
 	"encoding/binary"
-	"errors"
 	"fmt"
 	"io"
 	"net"
@@ -173,7 +172,7 @@
 		}
 		return rb, nil
 	}
-	return nil, errors.New("error in getting a TCP connection to any of the KDCs")
+	return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; "))
 }
 
 // sendTCP sends bytes to connection over TCP.
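
The new message surfaces every per-KDC failure instead of a single generic error; errs is a []string accumulated in the retry loop above. A self-contained sketch of the same pattern:

    package main

    import (
        "fmt"
        "strings"
    )

    // sendToAny tries each KDC in turn, collecting one error string per failed
    // attempt and joining them all into the final error.
    func sendToAny(kdcs []string, send func(string) ([]byte, error)) ([]byte, error) {
        var errs []string
        for _, kdc := range kdcs {
            rb, err := send(kdc)
            if err != nil {
                errs = append(errs, err.Error())
                continue
            }
            return rb, nil
        }
        return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; "))
    }

    func main() {
        _, err := sendToAny([]string{"kdc1:88", "kdc2:88"}, func(k string) ([]byte, error) {
            return nil, fmt.Errorf("%s: connection refused", k)
        })
        fmt.Println(err) // lists both failures, not a generic message
    }
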
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
index f7654d0..7e1e65c 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
@@ -41,7 +41,9 @@
 			// Cancel the one in the cache and add this one.
 			i.mux.Lock()
 			defer i.mux.Unlock()
-			i.cancel <- true
+			if i.cancel != nil {
+				i.cancel <- true
+			}
 			s.Entries[sess.realm] = sess
 			return
 		}
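
In Go, a send on a nil channel blocks forever, and this send happens while the session store's mutex is held, so an uninitialized cancel channel previously meant a deadlock rather than a panic. A minimal sketch of the guard:

    package main

    import "fmt"

    func signalCancel(cancel chan bool) {
        // A send on a nil channel blocks forever; with the enclosing mutex
        // held, that would deadlock. The nil guard makes it a safe no-op.
        if cancel != nil {
            cancel <- true
        }
    }

    func main() {
        signalCancel(nil) // safe no-op
        c := make(chan bool, 1)
        signalCancel(c)
        fmt.Println(<-c) // true
    }
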
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
index a763843..f448c8a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
@@ -95,7 +95,7 @@
 	opts := asn1.BitString{}
 	opts.Bytes, _ = hex.DecodeString("00000010")
 	opts.BitLength = len(opts.Bytes) * 8
-	return LibDefaults{
+	l := LibDefaults{
 		CCacheType:              4,
 		Clockskew:               time.Duration(300) * time.Second,
 		DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid),
@@ -115,6 +115,10 @@
 		UDPPreferenceLimit:      1465,
 		PreferredPreauthTypes:   []int{17, 16, 15, 14},
 	}
+	l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto)
+	l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto)
+	l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto)
+	return l
 }
 
 // Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct.
@@ -507,7 +511,7 @@
 			return r
 		}
 	}
-	return c.LibDefaults.DefaultRealm
+	return ""
 }
 
 // Load the KRB5 configuration from the specified file path.
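
Two things change in this file: the defaults constructor (only its body is shown in the hunk) now derives the enctype ID lists from the default enctype strings instead of leaving them empty until a config file is parsed, and ResolveRealm returns an empty string for unmapped hosts so callers can apply their own fallback. A sketch of the constructor fix, with parseETypes stubbed for illustration:

    package main

    import (
        "fmt"
        "strings"
    )

    type libDefaults struct {
        DefaultTGSEnctypes   []string
        DefaultTGSEnctypeIDs []int32
    }

    // parseETypes is a stand-in mapping enctype names to their IANA IDs.
    func parseETypes(names []string, allowWeak bool) []int32 {
        ids := map[string]int32{"aes256-cts-hmac-sha1-96": 18, "aes128-cts-hmac-sha1-96": 17}
        var out []int32
        for _, n := range names {
            if id, ok := ids[strings.TrimSpace(n)]; ok {
                out = append(out, id)
            }
        }
        return out
    }

    // newLibDefaults populates the derived ID fields at construction time,
    // as the hunk above now does, rather than only during config parsing.
    func newLibDefaults() libDefaults {
        l := libDefaults{DefaultTGSEnctypes: []string{"aes256-cts-hmac-sha1-96", "aes128-cts-hmac-sha1-96"}}
        l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, false)
        return l
    }

    func main() {
        fmt.Println(newLibDefaults().DefaultTGSEnctypeIDs) // [18 17]
    }
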
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
index c3b35c7..a021e41 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
@@ -4,7 +4,7 @@
 	"bytes"
 	"encoding/binary"
 	"errors"
-	"io/ioutil"
+	"os"
 	"strings"
 	"time"
 	"unsafe"
@@ -63,7 +63,7 @@
 // LoadCCache loads a credential cache file into a CCache type.
 func LoadCCache(cpath string) (*CCache, error) {
 	c := new(CCache)
-	b, err := ioutil.ReadFile(cpath)
+	b, err := os.ReadFile(cpath)
 	if err != nil {
 		return c, err
 	}
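
io/ioutil has been deprecated since Go 1.16; its helpers now live in "os" and "io" with identical signatures, so ioutil.ReadFile -> os.ReadFile is a drop-in swap. A small runnable sketch of the replacement helpers:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        tmp, err := os.CreateTemp("", "ccache-*")
        if err != nil {
            panic(err)
        }
        tmp.Close()
        defer os.Remove(tmp.Name())
        if err := os.WriteFile(tmp.Name(), []byte("demo"), 0o600); err != nil {
            panic(err)
        }
        b, err := os.ReadFile(tmp.Name()) // drop-in for ioutil.ReadFile
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // demo
    }
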
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
index 5c2e9d7..cd1b8b1 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
@@ -8,7 +8,7 @@
 	"errors"
 	"fmt"
 	"io"
-	"io/ioutil"
+	"os"
 	"strings"
 	"time"
 	"unsafe"
@@ -93,7 +93,7 @@
 		}
 	}
 	if len(key.KeyValue) < 1 {
-		return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype)
+		return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %q realm: %v kvno: %v etype: %v", princName.PrincipalNameString(), realm, kvno, etype)
 	}
 	return key, kv, nil
 }
@@ -170,7 +170,7 @@
 // Load a Keytab file into a Keytab type.
 func Load(ktPath string) (*Keytab, error) {
 	kt := new(Keytab)
-	b, err := ioutil.ReadFile(ktPath)
+	b, err := os.ReadFile(ktPath)
 	if err != nil {
 		return kt, err
 	}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
index 1fdba78..115a02a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
@@ -43,7 +43,7 @@
 		Cksum:     Checksum{},
 		Cusec:     int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
 		CTime:     t,
-		SeqNumber: seq.Int64(),
+		SeqNumber: seq.Int64() & 0x3fffffff,
 	}, nil
 }
 
@@ -53,7 +53,7 @@
 	if err != nil {
 		return err
 	}
-	a.SeqNumber = seq.Int64()
+	a.SeqNumber = seq.Int64() & 0x3fffffff
 	//Generate subkey value
 	sk := make([]byte, keySize, keySize)
 	rand.Read(sk)
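
Both sequence-number sites now mask the random value with 0x3fffffff, keeping it within 30 bits, presumably so it stays well inside a signed 32-bit range for less tolerant peer implementations. A sketch of the masking:

    package main

    import (
        "crypto/rand"
        "fmt"
        "math"
        "math/big"
    )

    // newSeqNumber draws a random sequence number and keeps only the low
    // 30 bits, as the change above does.
    func newSeqNumber() (int64, error) {
        seq, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt32))
        if err != nil {
            return 0, err
        }
        return seq.Int64() & 0x3fffffff, nil
    }

    func main() {
        n, err := newSeqNumber()
        if err != nil {
            panic(err)
        }
        fmt.Printf("seq=%d (always < 2^30 = %d)\n", n, 1<<30)
    }
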
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
index d645695..b53b91d 100644
--- a/vendor/github.com/jhump/protoreflect/LICENSE
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -187,7 +187,7 @@
       same "printed page" as the copyright notice for easier
       identification within third-party archives.
 
-   Copyright [yyyy] [name of copyright owner]
+   Copyright 2017 Joshua Humphries
 
    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go
index b6f4ed0..7e5c568 100644
--- a/vendor/github.com/jhump/protoreflect/codec/codec.go
+++ b/vendor/github.com/jhump/protoreflect/codec/codec.go
@@ -4,6 +4,7 @@
 	"io"
 
 	"github.com/golang/protobuf/proto"
+
 	"github.com/jhump/protoreflect/internal/codec"
 )
 
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
index 02f8a32..0edb817 100644
--- a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -7,32 +7,32 @@
 	"math"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc"
 )
 
-var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
-var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
-var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var varintTypes = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
 
 func init() {
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
-	varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_BOOL] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT32] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT64] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT32] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT64] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT32] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT64] = true
+	varintTypes[descriptorpb.FieldDescriptorProto_TYPE_ENUM] = true
 
-	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
-	fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
-	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+	fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED32] = true
+	fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED32] = true
+	fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FLOAT] = true
 
-	fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
-	fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
-	fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+	fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED64] = true
+	fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED64] = true
+	fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_DOUBLE] = true
 }
 
 // ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
@@ -117,53 +117,53 @@
 // messages, bytes, and strings), an error is returned.
 func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		return v != 0, nil
-	case descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return uint32(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		s := int64(v)
 		if s > math.MaxInt32 || s < math.MinInt32 {
 			return nil, ErrOverflow
 		}
 		return int32(s), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return int32(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return DecodeZigZag32(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		return v, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		return int64(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		return DecodeZigZag64(v), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		if v > math.MaxUint32 {
 			return nil, ErrOverflow
 		}
 		return math.Float32frombits(uint32(v)), nil
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return math.Float64frombits(v), nil
 
 	default:
@@ -184,14 +184,14 @@
 // contains multiple values, only the last value is returned.
 func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
 	switch {
-	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return bytes, nil
 
-	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return string(bytes), nil
 
-	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
-		fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE ||
+		fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		msg := mf.NewMessage(fd.GetMessageType())
 		err := proto.Unmarshal(bytes, msg)
 		if err != nil {
@@ -263,7 +263,7 @@
 		}
 
 	case proto.WireBytes:
-		alloc := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES
+		alloc := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES
 		var raw []byte
 		raw, err = b.DecodeRawBytes(alloc)
 		if err == nil {
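
The rename from github.com/golang/protobuf/protoc-gen-go/descriptor to google.golang.org/protobuf/types/descriptorpb is mechanical: the deprecated package is now just an alias layer over descriptorpb, so every enum keeps its name and numeric value. A one-line check:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/types/descriptorpb"
    )

    func main() {
        t := descriptorpb.FieldDescriptorProto_TYPE_SINT64
        fmt.Println(t, int32(t)) // TYPE_SINT64 18, same as under the old import
    }
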
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
index 499aa95..280f730 100644
--- a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -7,7 +7,7 @@
 	"sort"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc"
 )
@@ -175,74 +175,74 @@
 
 func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		v := val.(bool)
 		if v {
 			return b.EncodeVarint(1)
 		}
 		return b.EncodeVarint(0)
 
-	case descriptor.FieldDescriptorProto_TYPE_ENUM,
-		descriptor.FieldDescriptorProto_TYPE_INT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32:
 		v := val.(int32)
 		return b.EncodeVarint(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		v := val.(int32)
 		return b.EncodeFixed32(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
 		v := val.(int32)
 		return b.EncodeVarint(EncodeZigZag32(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32:
 		v := val.(uint32)
 		return b.EncodeVarint(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		v := val.(uint32)
 		return b.EncodeFixed32(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_INT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64:
 		v := val.(int64)
 		return b.EncodeVarint(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		v := val.(int64)
 		return b.EncodeFixed64(uint64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		v := val.(int64)
 		return b.EncodeVarint(EncodeZigZag64(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		v := val.(uint64)
 		return b.EncodeVarint(v)
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		v := val.(uint64)
 		return b.EncodeFixed64(v)
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		v := val.(float64)
 		return b.EncodeFixed64(math.Float64bits(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		v := val.(float32)
 		return b.EncodeFixed32(uint64(math.Float32bits(v)))
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		v := val.([]byte)
 		return b.EncodeRawBytes(v)
 
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		v := val.(string)
 		return b.EncodeRawBytes(([]byte)(v))
 
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
 		return b.EncodeDelimitedMessage(val.(proto.Message))
 
-	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		// just append the nested message to this buffer
 		return b.EncodeMessage(val.(proto.Message))
 		// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag
@@ -252,34 +252,34 @@
 	}
 }
 
-func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+func getWireType(t descriptorpb.FieldDescriptorProto_Type) (int8, error) {
 	switch t {
-	case descriptor.FieldDescriptorProto_TYPE_ENUM,
-		descriptor.FieldDescriptorProto_TYPE_BOOL,
-		descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64,
-		descriptor.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+		descriptorpb.FieldDescriptorProto_TYPE_BOOL,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		return proto.WireVarint, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
-		descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		return proto.WireFixed32, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
-		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return proto.WireFixed64, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES,
-		descriptor.FieldDescriptorProto_TYPE_STRING,
-		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES,
+		descriptorpb.FieldDescriptorProto_TYPE_STRING,
+		descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
 		return proto.WireBytes, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		return proto.WireStartGroup, nil
 
 	default:
diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go
new file mode 100644
index 0000000..418632b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/cache.go
@@ -0,0 +1,48 @@
+package desc
+
+import (
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type descriptorCache interface {
+	get(protoreflect.Descriptor) Descriptor
+	put(protoreflect.Descriptor, Descriptor)
+}
+
+type lockingCache struct {
+	cacheMu sync.RWMutex
+	cache   mapCache
+}
+
+func (c *lockingCache) get(d protoreflect.Descriptor) Descriptor {
+	c.cacheMu.RLock()
+	defer c.cacheMu.RUnlock()
+	return c.cache.get(d)
+}
+
+func (c *lockingCache) put(key protoreflect.Descriptor, val Descriptor) {
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+	c.cache.put(key, val)
+}
+
+func (c *lockingCache) withLock(fn func(descriptorCache)) {
+	c.cacheMu.Lock()
+	defer c.cacheMu.Unlock()
+	// Pass the underlying mapCache. We don't want fn to use
+	// c.get or c.put since we already have the lock. So those
+	// methods would try to re-acquire and then deadlock!
+	fn(c.cache)
+}
+
+type mapCache map[protoreflect.Descriptor]Descriptor
+
+func (c mapCache) get(d protoreflect.Descriptor) Descriptor {
+	return c[d]
+}
+
+func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) {
+	c[key] = val
+}
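
withLock exists because sync.RWMutex is not reentrant: a callback that called back into get or put would try to re-acquire the lock it already holds. A generic sketch of the same pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    type cache struct {
        mu sync.RWMutex
        m  map[string]int
    }

    func (c *cache) get(k string) int {
        c.mu.RLock()
        defer c.mu.RUnlock()
        return c.m[k]
    }

    // withLock runs fn under the mutex against the raw map, so callers can do
    // an atomic read-then-write. fn must use the raw map, never c.get, which
    // would try to re-acquire the held lock and deadlock.
    func (c *cache) withLock(fn func(m map[string]int)) {
        c.mu.Lock()
        defer c.mu.Unlock()
        fn(c.m)
    }

    func main() {
        c := &cache{m: map[string]int{}}
        c.withLock(func(m map[string]int) {
            if _, ok := m["a"]; !ok { // atomic check-then-set
                m["a"] = 1
            }
        })
        fmt.Println(c.get("a")) // 1
    }
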
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
index 538820c..01a6e9e 100644
--- a/vendor/github.com/jhump/protoreflect/desc/convert.go
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -5,7 +5,10 @@
 	"fmt"
 	"strings"
 
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc/internal"
 	intn "github.com/jhump/protoreflect/internal"
@@ -15,34 +18,87 @@
 // The file's direct dependencies must be provided. If the given dependencies do not include
 // all of the file's dependencies or if the contents of the descriptors are internally
 // inconsistent (e.g. contain unresolvable symbols) then an error is returned.
-func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+func CreateFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
 	return createFileDescriptor(fd, deps, nil)
 }
 
-func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+type descResolver struct {
+	files          []*FileDescriptor
+	importResolver *ImportResolver
+	fromPath       string
+}
+
+func (r *descResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	resolvedPath := r.importResolver.ResolveImport(r.fromPath, path)
+	d := r.findFileByPath(resolvedPath)
+	if d != nil {
+		return d, nil
+	}
+	if resolvedPath != path {
+		d := r.findFileByPath(path)
+		if d != nil {
+			return d, nil
+		}
+	}
+	return nil, protoregistry.NotFound
+}
+
+func (r *descResolver) findFileByPath(path string) protoreflect.FileDescriptor {
+	for _, fd := range r.files {
+		if fd.GetName() == path {
+			return fd.UnwrapFile()
+		}
+	}
+	return nil
+}
+
+func (r *descResolver) FindDescriptorByName(n protoreflect.FullName) (protoreflect.Descriptor, error) {
+	for _, fd := range r.files {
+		d := fd.FindSymbol(string(n))
+		if d != nil {
+			return d.(DescriptorWrapper).Unwrap(), nil
+		}
+	}
+	return nil, protoregistry.NotFound
+}
+
+func createFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+	dr := &descResolver{files: deps, importResolver: r, fromPath: fd.GetName()}
+	d, err := protodesc.NewFile(fd, dr)
+	if err != nil {
+		return nil, err
+	}
+
+	// make sure cache has dependencies populated
+	cache := mapCache{}
+	for _, dep := range deps {
+		fd, err := dr.FindFileByPath(dep.GetName())
+		if err != nil {
+			return nil, err
+		}
+		cache.put(fd, dep)
+	}
+
+	return convertFile(d, fd, cache)
+}
+
+func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorProto, cache descriptorCache) (*FileDescriptor, error) {
 	ret := &FileDescriptor{
+		wrapped:    d,
 		proto:      fd,
 		symbols:    map[string]Descriptor{},
 		fieldIndex: map[string]map[int32]*FieldDescriptor{},
 	}
-	pkg := fd.GetPackage()
+	cache.put(d, ret)
 
 	// populate references to file descriptor dependencies
-	files := map[string]*FileDescriptor{}
-	for _, f := range deps {
-		files[f.proto.GetName()] = f
-	}
 	ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
-	for i, d := range fd.GetDependency() {
-		resolved := r.ResolveImport(fd.GetName(), d)
-		ret.deps[i] = files[resolved]
-		if ret.deps[i] == nil {
-			if resolved != d {
-				ret.deps[i] = files[d]
-			}
-			if ret.deps[i] == nil {
-				return nil, intn.ErrNoSuchFile(d)
-			}
+	for i := 0; i < d.Imports().Len(); i++ {
+		f := d.Imports().Get(i).FileDescriptor
+		if c, err := wrapFile(f, cache); err != nil {
+			return nil, err
+		} else {
+			ret.deps[i] = c
 		}
 	}
 	ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
@@ -53,27 +109,39 @@
 	for i, wd := range fd.GetWeakDependency() {
 		ret.weakDeps[i] = ret.deps[wd]
 	}
-	ret.isProto3 = fd.GetSyntax() == "proto3"
 
 	// populate all tables of child descriptors
-	for _, m := range fd.GetMessageType() {
-		md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
-		ret.symbols[n] = md
+	path := make([]int32, 1, 8)
+	path[0] = internal.File_messagesTag
+	for i := 0; i < d.Messages().Len(); i++ {
+		src := d.Messages().Get(i)
+		srcProto := fd.GetMessageType()[src.Index()]
+		md := createMessageDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = md
 		ret.messages = append(ret.messages, md)
 	}
-	for _, e := range fd.GetEnumType() {
-		ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
-		ret.symbols[n] = ed
+	path[0] = internal.File_enumsTag
+	for i := 0; i < d.Enums().Len(); i++ {
+		src := d.Enums().Get(i)
+		srcProto := fd.GetEnumType()[src.Index()]
+		ed := createEnumDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = ed
 		ret.enums = append(ret.enums, ed)
 	}
-	for _, ex := range fd.GetExtension() {
-		exd, n := createFieldDescriptor(ret, ret, pkg, ex)
-		ret.symbols[n] = exd
+	path[0] = internal.File_extensionsTag
+	for i := 0; i < d.Extensions().Len(); i++ {
+		src := d.Extensions().Get(i)
+		srcProto := fd.GetExtension()[src.Index()]
+		exd := createFieldDescriptor(ret, ret, src, srcProto, cache, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = exd
 		ret.extensions = append(ret.extensions, exd)
 	}
-	for _, s := range fd.GetService() {
-		sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
-		ret.symbols[n] = sd
+	path[0] = internal.File_servicesTag
+	for i := 0; i < d.Services().Len(); i++ {
+		src := d.Services().Get(i)
+		srcProto := fd.GetService()[src.Index()]
+		sd := createServiceDescriptor(ret, src, srcProto, ret.symbols, append(path, int32(i)))
+		ret.symbols[string(src.FullName())] = sd
 		ret.services = append(ret.services, sd)
 	}
 
@@ -81,27 +149,20 @@
 	ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
 
 	// now we can resolve all type references and source code info
-	scopes := []scope{fileScope(ret)}
-	path := make([]int32, 1, 8)
-	path[0] = internal.File_messagesTag
-	for i, md := range ret.messages {
-		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, md := range ret.messages {
+		if err := md.resolve(cache); err != nil {
 			return nil, err
 		}
 	}
-	path[0] = internal.File_enumsTag
-	for i, ed := range ret.enums {
-		ed.resolve(append(path, int32(i)))
-	}
 	path[0] = internal.File_extensionsTag
-	for i, exd := range ret.extensions {
-		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, exd := range ret.extensions {
+		if err := exd.resolve(cache); err != nil {
 			return nil, err
 		}
 	}
 	path[0] = internal.File_servicesTag
-	for i, sd := range ret.services {
-		if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, sd := range ret.services {
+		if err := sd.resolve(cache); err != nil {
 			return nil, err
 		}
 	}
@@ -112,15 +173,15 @@
 // CreateFileDescriptors constructs a set of descriptors, one for each of the
 // given descriptor protos. The given set of descriptor protos must include all
 // transitive dependencies for every file.
-func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+func CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
 	return createFileDescriptors(fds, nil)
 }
 
-func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+func createFileDescriptors(fds []*descriptorpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
 	if len(fds) == 0 {
 		return nil, nil
 	}
-	files := map[string]*dpb.FileDescriptorProto{}
+	files := map[string]*descriptorpb.FileDescriptorProto{}
 	resolved := map[string]*FileDescriptor{}
 	var name string
 	for _, fd := range fds {
@@ -139,13 +200,13 @@
 // ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
 // file descriptors and their transitive dependencies. The files are topologically sorted
 // so that a file will always appear after its dependencies.
-func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
-	var fdps []*dpb.FileDescriptorProto
+func ToFileDescriptorSet(fds ...*FileDescriptor) *descriptorpb.FileDescriptorSet {
+	var fdps []*descriptorpb.FileDescriptorProto
 	addAllFiles(fds, &fdps, map[string]struct{}{})
-	return &dpb.FileDescriptorSet{File: fdps}
+	return &descriptorpb.FileDescriptorSet{File: fdps}
 }
 
-func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+func addAllFiles(src []*FileDescriptor, results *[]*descriptorpb.FileDescriptorProto, seen map[string]struct{}) {
 	for _, fd := range src {
 		if _, ok := seen[fd.GetName()]; ok {
 			continue
@@ -160,12 +221,13 @@
 // set's *last* file will be the returned descriptor. The set's remaining files must comprise
 // the full set of transitive dependencies of that last file. This is the same format and
 // order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
-//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
-func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+//
+//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
 	return createFileDescriptorFromSet(fds, nil)
 }
 
-func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+func createFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
 	result, err := createFileDescriptorsFromSet(fds, r)
 	if err != nil {
 		return nil, err
@@ -180,12 +242,13 @@
 // full set of transitive dependencies for all files therein or else a link error will occur
 // and be returned instead of the slice of descriptors. This is the same format used by
 // protoc when emitting a FileDescriptorSet file with an invocation like so:
-//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
-func CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+//
+//	protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
 	return createFileDescriptorsFromSet(fds, nil)
 }
 
-func createFileDescriptorsFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+func createFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
 	files := fds.GetFile()
 	if len(files) == 0 {
 		return nil, errors.New("file descriptor set is empty")
@@ -195,7 +258,7 @@
 
 // createFromSet creates a descriptor for the given filename. It recursively
 // creates descriptors for the given file's dependencies.
-func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
 	for _, s := range seen {
 		if filename == s {
 			return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
index 42f0f8e..68eb252 100644
--- a/vendor/github.com/jhump/protoreflect/desc/descriptor.go
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -5,12 +5,12 @@
 	"fmt"
 	"sort"
 	"strconv"
-	"strings"
 	"unicode"
 	"unicode/utf8"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc/internal"
 )
@@ -40,7 +40,7 @@
 	// descriptor. Source code info is optional. If no source code info is available for
 	// the element (including if there is none at all in the file descriptor) then this
 	// returns nil
-	GetSourceInfo() *dpb.SourceCodeInfo_Location
+	GetSourceInfo() *descriptorpb.SourceCodeInfo_Location
 	// AsProto returns the underlying descriptor proto for this descriptor.
 	AsProto() proto.Message
 }
@@ -49,7 +49,8 @@
 
 // FileDescriptor describes a proto source file.
 type FileDescriptor struct {
-	proto      *dpb.FileDescriptorProto
+	wrapped    protoreflect.FileDescriptor
+	proto      *descriptorpb.FileDescriptorProto
 	symbols    map[string]Descriptor
 	deps       []*FileDescriptor
 	publicDeps []*FileDescriptor
@@ -59,11 +60,22 @@
 	extensions []*FieldDescriptor
 	services   []*ServiceDescriptor
 	fieldIndex map[string]map[int32]*FieldDescriptor
-	isProto3   bool
 	sourceInfo internal.SourceInfoMap
 	sourceInfoRecomputeFunc
 }
 
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapFile, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FileDescriptor) Unwrap() protoreflect.Descriptor {
+	return fd.wrapped
+}
+
+// UnwrapFile returns the underlying protoreflect.FileDescriptor.
+func (fd *FileDescriptor) UnwrapFile() protoreflect.FileDescriptor {
+	return fd.wrapped
+}
+
 func (fd *FileDescriptor) recomputeSourceInfo() {
 	internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
 }
@@ -81,18 +93,18 @@
 // to compile it, possibly including path (relative to a directory in the proto
 // import path).
 func (fd *FileDescriptor) GetName() string {
-	return fd.proto.GetName()
+	return fd.wrapped.Path()
 }
 
 // GetFullyQualifiedName returns the name of the file, same as GetName. It is
 // present to satisfy the Descriptor interface.
 func (fd *FileDescriptor) GetFullyQualifiedName() string {
-	return fd.proto.GetName()
+	return fd.wrapped.Path()
 }
 
 // GetPackage returns the name of the package declared in the file.
 func (fd *FileDescriptor) GetPackage() string {
-	return fd.proto.GetPackage()
+	return string(fd.wrapped.Package())
 }
 
 // GetParent always returns nil: files are the root of descriptor hierarchies.
@@ -115,13 +127,13 @@
 }
 
 // GetFileOptions returns the file's options.
-func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+func (fd *FileDescriptor) GetFileOptions() *descriptorpb.FileOptions {
 	return fd.proto.GetOptions()
 }
 
 // GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
 // interface.
-func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (fd *FileDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return nil
 }
 
@@ -133,7 +145,7 @@
 }
 
 // AsFileDescriptorProto returns the underlying descriptor proto.
-func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+func (fd *FileDescriptor) AsFileDescriptorProto() *descriptorpb.FileDescriptorProto {
 	return fd.proto
 }
 
@@ -143,8 +155,20 @@
 }
 
 // IsProto3 returns true if the file declares a syntax of "proto3".
+//
+// When this returns false, the file either uses syntax "proto2" (if
+// Edition() returns zero) or uses editions.
 func (fd *FileDescriptor) IsProto3() bool {
-	return fd.isProto3
+	return fd.wrapped.Syntax() == protoreflect.Proto3
+}
+
+// Edition returns the edition of the file. If the file does not
+// use editions syntax, zero is returned.
+func (fd *FileDescriptor) Edition() descriptorpb.Edition {
+	if fd.wrapped.Syntax() == protoreflect.Editions {
+		return fd.proto.GetEdition()
+	}
+	return 0
 }
 
 // GetDependencies returns all of this file's dependencies. These correspond to
@@ -189,6 +213,9 @@
 // element with the given fully-qualified symbol name. If no such element
 // exists then this method returns nil.
 func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+	if len(symbol) == 0 {
+		return nil
+	}
 	if symbol[0] == '.' {
 		symbol = symbol[1:]
 	}
@@ -259,7 +286,8 @@
 
 // MessageDescriptor describes a protocol buffer message.
 type MessageDescriptor struct {
-	proto          *dpb.DescriptorProto
+	wrapped        protoreflect.MessageDescriptor
+	proto          *descriptorpb.DescriptorProto
 	parent         Descriptor
 	file           *FileDescriptor
 	fields         []*FieldDescriptor
@@ -268,42 +296,72 @@
 	extensions     []*FieldDescriptor
 	oneOfs         []*OneOfDescriptor
 	extRanges      extRanges
-	fqn            string
 	sourceInfoPath []int32
 	jsonNames      jsonNameMap
-	isProto3       bool
-	isMapEntry     bool
 }
 
-func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
-	msgName := merge(enclosing, md.GetName())
-	ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
-	for _, f := range md.GetField() {
-		fld, n := createFieldDescriptor(fd, ret, msgName, f)
-		symbols[n] = fld
-		ret.fields = append(ret.fields, fld)
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMessage, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MessageDescriptor) Unwrap() protoreflect.Descriptor {
+	return md.wrapped
+}
+
+// UnwrapMessage returns the underlying protoreflect.MessageDescriptor.
+func (md *MessageDescriptor) UnwrapMessage() protoreflect.MessageDescriptor {
+	return md.wrapped
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, md protoreflect.MessageDescriptor, mdp *descriptorpb.DescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *MessageDescriptor {
+	ret := &MessageDescriptor{
+		wrapped:        md,
+		proto:          mdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
 	}
-	for _, nm := range md.NestedType {
-		nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
-		symbols[n] = nmd
+	cache.put(md, ret)
+	path = append(path, internal.Message_nestedMessagesTag)
+	for i := 0; i < md.Messages().Len(); i++ {
+		src := md.Messages().Get(i)
+		srcProto := mdp.GetNestedType()[src.Index()]
+		nmd := createMessageDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = nmd
 		ret.nested = append(ret.nested, nmd)
 	}
-	for _, e := range md.EnumType {
-		ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
-		symbols[n] = ed
+	path[len(path)-1] = internal.Message_enumsTag
+	for i := 0; i < md.Enums().Len(); i++ {
+		src := md.Enums().Get(i)
+		srcProto := mdp.GetEnumType()[src.Index()]
+		ed := createEnumDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = ed
 		ret.enums = append(ret.enums, ed)
 	}
-	for _, ex := range md.GetExtension() {
-		exd, n := createFieldDescriptor(fd, ret, msgName, ex)
-		symbols[n] = exd
+	path[len(path)-1] = internal.Message_fieldsTag
+	for i := 0; i < md.Fields().Len(); i++ {
+		src := md.Fields().Get(i)
+		srcProto := mdp.GetField()[src.Index()]
+		fld := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = fld
+		ret.fields = append(ret.fields, fld)
+	}
+	path[len(path)-1] = internal.Message_extensionsTag
+	for i := 0; i < md.Extensions().Len(); i++ {
+		src := md.Extensions().Get(i)
+		srcProto := mdp.GetExtension()[src.Index()]
+		exd := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+		symbols[string(src.FullName())] = exd
 		ret.extensions = append(ret.extensions, exd)
 	}
-	for i, o := range md.GetOneofDecl() {
-		od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
-		symbols[n] = od
+	path[len(path)-1] = internal.Message_oneOfsTag
+	for i := 0; i < md.Oneofs().Len(); i++ {
+		src := md.Oneofs().Get(i)
+		srcProto := mdp.GetOneofDecl()[src.Index()]
+		od := createOneOfDescriptor(fd, ret, i, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = od
 		ret.oneOfs = append(ret.oneOfs, od)
 	}
-	for _, r := range md.GetExtensionRange() {
+	for _, r := range mdp.GetExtensionRange() {
 		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code).
 	// But protoc converts the range to an exclusive end in the descriptor, so we must convert it back
 		end := r.GetEnd() - 1
@@ -312,57 +370,39 @@
 			End:   end})
 	}
 	sort.Sort(ret.extRanges)
-	ret.isProto3 = fd.isProto3
-	ret.isMapEntry = md.GetOptions().GetMapEntry() &&
-		len(ret.fields) == 2 &&
-		ret.fields[0].GetNumber() == 1 &&
-		ret.fields[1].GetNumber() == 2
 
-	return ret, msgName
+	return ret
 }
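The path bookkeeping above follows the SourceCodeInfo convention from descriptor.proto: a location path alternates field tag numbers with indexes. As an illustration (tag values taken from descriptor.proto; the variable is hypothetical):

	// FileDescriptorProto.message_type is tag 4; DescriptorProto.field is tag 2.
	// So the second field (index 1) of the first top-level message (index 0)
	// in a file has this source-info path:
	var fieldPath = []int32{4, 0, 2, 1}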
 
-func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
-	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	path = append(path, internal.Message_nestedMessagesTag)
-	scopes = append(scopes, messageScope(md))
-	for i, nmd := range md.nested {
-		if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+func (md *MessageDescriptor) resolve(cache descriptorCache) error {
+	for _, nmd := range md.nested {
+		if err := nmd.resolve(cache); err != nil {
 			return err
 		}
 	}
-	path[len(path)-1] = internal.Message_enumsTag
-	for i, ed := range md.enums {
-		ed.resolve(append(path, int32(i)))
-	}
-	path[len(path)-1] = internal.Message_fieldsTag
-	for i, fld := range md.fields {
-		if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, fld := range md.fields {
+		if err := fld.resolve(cache); err != nil {
 			return err
 		}
 	}
-	path[len(path)-1] = internal.Message_extensionsTag
-	for i, exd := range md.extensions {
-		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+	for _, exd := range md.extensions {
+		if err := exd.resolve(cache); err != nil {
 			return err
 		}
 	}
-	path[len(path)-1] = internal.Message_oneOfsTag
-	for i, od := range md.oneOfs {
-		od.resolve(append(path, int32(i)))
-	}
 	return nil
 }
 
 // GetName returns the simple (unqualified) name of the message.
 func (md *MessageDescriptor) GetName() string {
-	return md.proto.GetName()
+	return string(md.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the message. This
 // includes the package name (if there is one) as well as the names of any
 // enclosing messages.
 func (md *MessageDescriptor) GetFullyQualifiedName() string {
-	return md.fqn
+	return string(md.wrapped.FullName())
 }
 
 // GetParent returns the message's enclosing descriptor. For top-level messages,
@@ -385,7 +425,7 @@
 }
 
 // GetMessageOptions returns the message's options.
-func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+func (md *MessageDescriptor) GetMessageOptions() *descriptorpb.MessageOptions {
 	return md.proto.GetOptions()
 }
 
@@ -394,7 +434,7 @@
 // returned info contains information about the location in the file where the
 // message was defined and also contains comments associated with the message
 // definition.
-func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (md *MessageDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return md.file.sourceInfo.Get(md.sourceInfoPath)
 }
 
@@ -406,7 +446,7 @@
 }
 
 // AsDescriptorProto returns the underlying descriptor proto.
-func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+func (md *MessageDescriptor) AsDescriptorProto() *descriptorpb.DescriptorProto {
 	return md.proto
 }
 
@@ -418,7 +458,7 @@
 // IsMapEntry returns true if this is a synthetic message type that represents an entry
 // in a map field.
 func (md *MessageDescriptor) IsMapEntry() bool {
-	return md.isMapEntry
+	return md.wrapped.IsMapEntry()
 }
 
 // GetFields returns all of the fields for this message.
@@ -448,7 +488,7 @@
 
 // IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
 func (md *MessageDescriptor) IsProto3() bool {
-	return md.isProto3
+	return md.file.IsProto3()
 }
 
 // GetExtensionRanges returns the ranges of extension field numbers for this message.
@@ -503,7 +543,7 @@
 // FindFieldByName finds the field with the given name. If no such field exists
 // then nil is returned. Only regular fields are returned, not extensions.
 func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
-	fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+	fqn := md.GetFullyQualifiedName() + "." + fieldName
 	if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
 		return fd
 	} else {
@@ -514,7 +554,7 @@
 // FindFieldByNumber finds the field with the given tag number. If no such field
 // exists then nil is returned. Only regular fields are returned, not extensions.
 func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
-	if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+	if fd, ok := md.file.fieldIndex[md.GetFullyQualifiedName()][tagNumber]; ok && !fd.IsExtension() {
 		return fd
 	} else {
 		return nil
@@ -523,59 +563,110 @@
 
 // FieldDescriptor describes a field of a protocol buffer message.
 type FieldDescriptor struct {
-	proto          *dpb.FieldDescriptorProto
+	wrapped        protoreflect.FieldDescriptor
+	proto          *descriptorpb.FieldDescriptorProto
 	parent         Descriptor
 	owner          *MessageDescriptor
 	file           *FileDescriptor
 	oneOf          *OneOfDescriptor
 	msgType        *MessageDescriptor
 	enumType       *EnumDescriptor
-	fqn            string
 	sourceInfoPath []int32
 	def            memoizedDefault
-	isMap          bool
 }
 
-func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
-	fldName := merge(enclosing, fld.GetName())
-	ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
-	if fld.GetExtendee() == "" {
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapField, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FieldDescriptor) Unwrap() protoreflect.Descriptor {
+	return fd.wrapped
+}
+
+// UnwrapField returns the underlying protoreflect.FieldDescriptor.
+func (fd *FieldDescriptor) UnwrapField() protoreflect.FieldDescriptor {
+	return fd.wrapped
+}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, fld protoreflect.FieldDescriptor, fldp *descriptorpb.FieldDescriptorProto, cache descriptorCache, path []int32) *FieldDescriptor {
+	ret := &FieldDescriptor{
+		wrapped:        fld,
+		proto:          fldp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	cache.put(fld, ret)
+	if !fld.IsExtension() {
 		ret.owner = parent.(*MessageDescriptor)
 	}
 	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
-	return ret, fldName
+	return ret
 }
 
-func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+func descriptorType(d Descriptor) string {
+	switch d := d.(type) {
+	case *FileDescriptor:
+		return "a file"
+	case *MessageDescriptor:
+		return "a message"
+	case *FieldDescriptor:
+		if d.IsExtension() {
+			return "an extension"
+		}
+		return "a field"
+	case *OneOfDescriptor:
+		return "a oneof"
+	case *EnumDescriptor:
+		return "an enum"
+	case *EnumValueDescriptor:
+		return "an enum value"
+	case *ServiceDescriptor:
+		return "a service"
+	case *MethodDescriptor:
+		return "a method"
+	default:
+		return fmt.Sprintf("a %T", d)
+	}
+}
+
+func (fd *FieldDescriptor) resolve(cache descriptorCache) error {
 	if fd.proto.OneofIndex != nil && fd.oneOf == nil {
-		return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+		return fmt.Errorf("could not link field %s to one-of index %d", fd.GetFullyQualifiedName(), *fd.proto.OneofIndex)
 	}
-	fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
-		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+	if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM {
+		desc, err := resolve(fd.file, fd.wrapped.Enum(), cache)
+		if err != nil {
 			return err
-		} else {
-			fd.enumType = desc.(*EnumDescriptor)
 		}
+		enumType, ok := desc.(*EnumDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v indicates a type of enum, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+		}
+		fd.enumType = enumType
 	}
-	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
-		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+	if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+		desc, err := resolve(fd.file, fd.wrapped.Message(), cache)
+		if err != nil {
 			return err
-		} else {
-			fd.msgType = desc.(*MessageDescriptor)
 		}
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v indicates a type of message, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+		}
+		fd.msgType = msgType
 	}
-	if fd.proto.GetExtendee() != "" {
-		if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+	if fd.IsExtension() {
+		desc, err := resolve(fd.file, fd.wrapped.ContainingMessage(), cache)
+		if err != nil {
 			return err
-		} else {
-			fd.owner = desc.(*MessageDescriptor)
 		}
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("field %v extends %q which should be a message but is %s", fd.GetFullyQualifiedName(), fd.proto.GetExtendee(), descriptorType(desc))
+		}
+		fd.owner = msgType
 	}
 	fd.file.registerField(fd)
-	fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
-		fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
-		fd.GetMessageType().IsMapEntry()
 	return nil
 }
 
@@ -588,7 +679,7 @@
 		return nil
 	}
 
-	proto3 := fd.file.isProto3
+	proto3 := fd.file.IsProto3()
 	if !proto3 {
 		def := fd.AsFieldDescriptorProto().GetDefaultValue()
 		if def != "" {
@@ -601,31 +692,31 @@
 	}
 
 	switch fd.GetType() {
-	case dpb.FieldDescriptorProto_TYPE_FIXED32,
-		dpb.FieldDescriptorProto_TYPE_UINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32:
 		return uint32(0)
-	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
-		dpb.FieldDescriptorProto_TYPE_INT32,
-		dpb.FieldDescriptorProto_TYPE_SINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32:
 		return int32(0)
-	case dpb.FieldDescriptorProto_TYPE_FIXED64,
-		dpb.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		return uint64(0)
-	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
-		dpb.FieldDescriptorProto_TYPE_INT64,
-		dpb.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		return int64(0)
-	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		return float32(0.0)
-	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return float64(0.0)
-	case dpb.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		return false
-	case dpb.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return []byte(nil)
-	case dpb.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return ""
-	case dpb.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		if proto3 {
 			return int32(0)
 		}
@@ -642,60 +733,60 @@
 
 func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
 	switch fd.GetType() {
-	case dpb.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		vd := fd.GetEnumType().FindValueByName(val)
 		if vd != nil {
 			return vd.GetNumber()
 		}
 		return nil
-	case dpb.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		if val == "true" {
 			return true
 		} else if val == "false" {
 			return false
 		}
 		return nil
-	case dpb.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return []byte(unescape(val))
-	case dpb.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return val
-	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		if f, err := strconv.ParseFloat(val, 32); err == nil {
 			return float32(f)
 		} else {
 			return float32(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		if f, err := strconv.ParseFloat(val, 64); err == nil {
 			return f
 		} else {
 			return float64(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_INT32,
-		dpb.FieldDescriptorProto_TYPE_SINT32,
-		dpb.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
 			return int32(i)
 		} else {
 			return int32(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_UINT32,
-		dpb.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
 			return uint32(i)
 		} else {
 			return uint32(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_INT64,
-		dpb.FieldDescriptorProto_TYPE_SINT64,
-		dpb.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
 			return i
 		} else {
 			return int64(0)
 		}
-	case dpb.FieldDescriptorProto_TYPE_UINT64,
-		dpb.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
 			return i
 		} else {
@@ -827,12 +918,12 @@
 
 // GetName returns the name of the field.
 func (fd *FieldDescriptor) GetName() string {
-	return fd.proto.GetName()
+	return string(fd.wrapped.Name())
 }
 
 // GetNumber returns the tag number of this field.
 func (fd *FieldDescriptor) GetNumber() int32 {
-	return fd.proto.GetNumber()
+	return int32(fd.wrapped.Number())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the field. Unlike
@@ -846,7 +937,7 @@
 // If this field is part of a one-of, the fully qualified name does *not*
 // include the name of the one-of, only of the enclosing message.
 func (fd *FieldDescriptor) GetFullyQualifiedName() string {
-	return fd.fqn
+	return string(fd.wrapped.FullName())
 }
 
 // GetParent returns the field's enclosing descriptor. For normal
@@ -871,7 +962,7 @@
 }
 
 // GetFieldOptions returns the field's options.
-func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+func (fd *FieldDescriptor) GetFieldOptions() *descriptorpb.FieldOptions {
 	return fd.proto.GetOptions()
 }
 
@@ -880,7 +971,7 @@
 // returned info contains information about the location in the file where the
 // field was defined and also contains comments associated with the field
 // definition.
-func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (fd *FieldDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
 }
 
@@ -892,7 +983,7 @@
 }
 
 // AsFieldDescriptorProto returns the underlying descriptor proto.
-func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
 	return fd.proto
 }
 
@@ -962,7 +1053,7 @@
 
 // IsExtension returns true if this is an extension field.
 func (fd *FieldDescriptor) IsExtension() bool {
-	return fd.proto.GetExtendee() != ""
+	return fd.wrapped.IsExtension()
 }
 
 // GetOneOf returns the one-of field set to which this field belongs. If this field
@@ -974,24 +1065,24 @@
 // GetType returns the type of this field. If the type indicates an enum, the
 // enum type can be queried via GetEnumType. If the type indicates a message, the
 // message type can be queried via GetMessageType.
-func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
+func (fd *FieldDescriptor) GetType() descriptorpb.FieldDescriptorProto_Type {
 	return fd.proto.GetType()
 }
 
 // GetLabel returns the label for this field. The label can be required (proto2-only),
 // optional (default for proto3), or repeated.
-func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
+func (fd *FieldDescriptor) GetLabel() descriptorpb.FieldDescriptorProto_Label {
 	return fd.proto.GetLabel()
 }
 
 // IsRequired returns true if this field has the "required" label.
 func (fd *FieldDescriptor) IsRequired() bool {
-	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
+	return fd.wrapped.Cardinality() == protoreflect.Required
 }
 
 // IsRepeated returns true if this field has the "repeated" label.
 func (fd *FieldDescriptor) IsRepeated() bool {
-	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
+	return fd.wrapped.Cardinality() == protoreflect.Repeated
 }
 
 // IsProto3Optional returns true if this field has an explicit "optional" label
@@ -999,30 +1090,27 @@
 // extensions), will be nested in synthetic oneofs that contain only the single
 // field.
 func (fd *FieldDescriptor) IsProto3Optional() bool {
-	return internal.GetProto3Optional(fd.proto)
+	return fd.proto.GetProto3Optional()
 }
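The synthetic-oneof arrangement described above can be checked through the public API. A sketch, with the helper being ours rather than part of the package:

	import "github.com/jhump/protoreflect/desc"

	// Given proto3 source like: message M { optional string s = 1; }
	// the compiler nests "s" in a synthetic oneof that holds only that field.
	func isExplicitProto3Optional(fd *desc.FieldDescriptor) bool {
		return fd.IsProto3Optional() && fd.GetOneOf() != nil && fd.GetOneOf().IsSynthetic()
	}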
 
 // HasPresence returns true if this field can distinguish when a value is
 // present or not. Scalar fields in "proto3" syntax files, for example, return
 // false since absent values are indistinguishable from zero values.
 func (fd *FieldDescriptor) HasPresence() bool {
-	if !fd.file.isProto3 {
-		return true
-	}
-	return fd.msgType != nil || fd.oneOf != nil
+	return fd.wrapped.HasPresence()
 }
 
 // IsMap returns true if this is a map field. If so, it will have the "repeated"
 // label and its type will be a message that represents a map entry. The map entry
 // message will have exactly two fields: tag #1 is the key and tag #2 is the value.
 func (fd *FieldDescriptor) IsMap() bool {
-	return fd.isMap
+	return fd.wrapped.IsMap()
 }
 
 // GetMapKeyType returns the type of the key field if this is a map field. If it is
 // not a map field, nil is returned.
 func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
-	if fd.isMap {
+	if fd.IsMap() {
 		return fd.msgType.FindFieldByNumber(int32(1))
 	}
 	return nil
@@ -1031,7 +1119,7 @@
 // GetMapValueType returns the type of the value field if this is a map field. If it
 // is not a map field, nil is returned.
 func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
-	if fd.isMap {
+	if fd.IsMap() {
 		return fd.msgType.FindFieldByNumber(int32(2))
 	}
 	return nil
@@ -1060,40 +1148,66 @@
 // Otherwise, it returns the declared default value for the field or a zero value, if no
 // default is declared or if the file is proto3. The type of said return value corresponds
 // to the type of the field:
-//  +-------------------------+-----------+
-//  |       Declared Type     |  Go Type  |
-//  +-------------------------+-----------+
-//  | int32, sint32, sfixed32 | int32     |
-//  | int64, sint64, sfixed64 | int64     |
-//  | uint32, fixed32         | uint32    |
-//  | uint64, fixed64         | uint64    |
-//  | float                   | float32   |
-//  | double                  | double32  |
-//  | bool                    | bool      |
-//  | string                  | string    |
-//  | bytes                   | []byte    |
-//  +-------------------------+-----------+
+//
+//	+-------------------------+-----------+
+//	|       Declared Type     |  Go Type  |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | float64   |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
 func (fd *FieldDescriptor) GetDefaultValue() interface{} {
 	return fd.getDefaultValue()
 }
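Because GetDefaultValue returns interface{}, callers type-switch using the table above. A usage sketch (the helper is hypothetical):

	import (
		"fmt"

		"github.com/jhump/protoreflect/desc"
	)

	// describeDefault prints a field's default value using the Go types
	// listed in the table above.
	func describeDefault(fd *desc.FieldDescriptor) {
		switch v := fd.GetDefaultValue().(type) {
		case []byte:
			fmt.Printf("bytes default: %q\n", v)
		case int32, int64, uint32, uint64, float32, float64, bool, string:
			fmt.Printf("scalar default: %v\n", v)
		default:
			fmt.Printf("non-scalar default of type %T\n", v)
		}
	}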
 
 // EnumDescriptor describes an enum declared in a proto file.
 type EnumDescriptor struct {
-	proto          *dpb.EnumDescriptorProto
+	wrapped        protoreflect.EnumDescriptor
+	proto          *descriptorpb.EnumDescriptorProto
 	parent         Descriptor
 	file           *FileDescriptor
 	values         []*EnumValueDescriptor
 	valuesByNum    sortedValues
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
-	enumName := merge(enclosing, ed.GetName())
-	ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
-	for _, ev := range ed.GetValue() {
-		evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
-		symbols[n] = evd
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnum, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (ed *EnumDescriptor) Unwrap() protoreflect.Descriptor {
+	return ed.wrapped
+}
+
+// UnwrapEnum returns the underlying protoreflect.EnumDescriptor.
+func (ed *EnumDescriptor) UnwrapEnum() protoreflect.EnumDescriptor {
+	return ed.wrapped
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, ed protoreflect.EnumDescriptor, edp *descriptorpb.EnumDescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *EnumDescriptor {
+	ret := &EnumDescriptor{
+		wrapped:        ed,
+		proto:          edp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
+	path = append(path, internal.Enum_valuesTag)
+	for i := 0; i < ed.Values().Len(); i++ {
+		src := ed.Values().Get(i)
+		srcProto := edp.GetValue()[src.Index()]
+		evd := createEnumValueDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = evd
+		// NB: for backwards compatibility, also register the enum value as if
+		// scoped within the enum (counter-intuitively, enum value full names are
+		// scoped in the enum's parent element). EnumValueDescriptor.GetFullyQualifiedName
+		// returns that alternate full name.
+		symbols[evd.GetFullyQualifiedName()] = evd
 		ret.values = append(ret.values, evd)
 	}
 	if len(ret.values) > 0 {
@@ -1101,7 +1215,7 @@
 		copy(ret.valuesByNum, ret.values)
 		sort.Stable(ret.valuesByNum)
 	}
-	return ret, enumName
+	return ret
 }
 
 type sortedValues []*EnumValueDescriptor
@@ -1116,26 +1230,19 @@
 
 func (sv sortedValues) Swap(i, j int) {
 	sv[i], sv[j] = sv[j], sv[i]
-}
 
-func (ed *EnumDescriptor) resolve(path []int32) {
-	ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	path = append(path, internal.Enum_valuesTag)
-	for i, evd := range ed.values {
-		evd.resolve(append(path, int32(i)))
-	}
 }
 
 // GetName returns the simple (unqualified) name of the enum type.
 func (ed *EnumDescriptor) GetName() string {
-	return ed.proto.GetName()
+	return string(ed.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the enum type.
 // This includes the package name (if there is one) as well as the names of any
 // enclosing messages.
 func (ed *EnumDescriptor) GetFullyQualifiedName() string {
-	return ed.fqn
+	return string(ed.wrapped.FullName())
 }
 
 // GetParent returns the enum type's enclosing descriptor. For top-level enums,
@@ -1158,7 +1265,7 @@
 }
 
 // GetEnumOptions returns the enum type's options.
-func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+func (ed *EnumDescriptor) GetEnumOptions() *descriptorpb.EnumOptions {
 	return ed.proto.GetOptions()
 }
 
@@ -1167,7 +1274,7 @@
 // returned info contains information about the location in the file where the
 // enum type was defined and also contains comments associated with the enum
 // definition.
-func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (ed *EnumDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
 }
 
@@ -1179,7 +1286,7 @@
 }
 
 // AsEnumDescriptorProto returns the underlying descriptor proto.
-func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *descriptorpb.EnumDescriptorProto {
 	return ed.proto
 }
 
@@ -1196,7 +1303,7 @@
 // FindValueByName finds the enum value with the given name. If no such value exists
 // then nil is returned.
 func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
-	fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+	fqn := fmt.Sprintf("%s.%s", ed.GetFullyQualifiedName(), name)
 	if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
 		return vd
 	} else {
@@ -1220,16 +1327,33 @@
 
 // EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
 type EnumValueDescriptor struct {
-	proto          *dpb.EnumValueDescriptorProto
+	wrapped        protoreflect.EnumValueDescriptor
+	proto          *descriptorpb.EnumValueDescriptorProto
 	parent         *EnumDescriptor
 	file           *FileDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
-	valName := merge(enclosing, evd.GetName())
-	return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnumValue, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (vd *EnumValueDescriptor) Unwrap() protoreflect.Descriptor {
+	return vd.wrapped
+}
+
+// UnwrapEnumValue returns the underlying protoreflect.EnumValueDescriptor.
+func (vd *EnumValueDescriptor) UnwrapEnumValue() protoreflect.EnumValueDescriptor {
+	return vd.wrapped
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, evd protoreflect.EnumValueDescriptor, evdp *descriptorpb.EnumValueDescriptorProto, path []int32) *EnumValueDescriptor {
+	return &EnumValueDescriptor{
+		wrapped:        evd,
+		proto:          evdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
 }
 
 func (vd *EnumValueDescriptor) resolve(path []int32) {
@@ -1238,18 +1362,24 @@
 
 // GetName returns the name of the enum value.
 func (vd *EnumValueDescriptor) GetName() string {
-	return vd.proto.GetName()
+	return string(vd.wrapped.Name())
 }
 
 // GetNumber returns the numeric value associated with this enum value.
 func (vd *EnumValueDescriptor) GetNumber() int32 {
-	return vd.proto.GetNumber()
+	return int32(vd.wrapped.Number())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the enum value.
 // Unlike GetName, this includes the fully qualified name of the enclosing enum.
 func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
-	return vd.fqn
+	// NB: Technically, we do not return the correct value. Enum values are
+	// scoped within the enclosing element, not within the enum itself (which
+	// is very non-intuitive, but it follows C++ scoping rules). The value
+	// returned from vd.wrapped.FullName() is correct. However, we return
+	// something different, just for backwards compatibility, as this package
+	// has always instead returned the name scoped inside the enum.
+	return fmt.Sprintf("%s.%s", vd.parent.GetFullyQualifiedName(), vd.wrapped.Name())
 }
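The divergence noted in that comment is easiest to see side by side. A sketch with illustrative names (an enum "pkg.Foo" with value "BAR"):

	import (
		"fmt"

		"github.com/jhump/protoreflect/desc"
	)

	func showEnumValueNames(vd *desc.EnumValueDescriptor) {
		fmt.Println(vd.UnwrapEnumValue().FullName()) // "pkg.BAR": C++-style scoping
		fmt.Println(vd.GetFullyQualifiedName())      // "pkg.Foo.BAR": this package's legacy form
	}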
 
 // GetParent returns the descriptor for the enum in which this enum value is
@@ -1278,7 +1408,7 @@
 }
 
 // GetEnumValueOptions returns the enum value's options.
-func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *descriptorpb.EnumValueOptions {
 	return vd.proto.GetOptions()
 }
 
@@ -1287,7 +1417,7 @@
 // returned info contains information about the location in the file where the
 // enum value was defined and also contains comments associated with the enum
 // value definition.
-func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (vd *EnumValueDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return vd.file.sourceInfo.Get(vd.sourceInfoPath)
 }
 
@@ -1299,7 +1429,7 @@
 }
 
 // AsEnumValueDescriptorProto returns the underlying descriptor proto.
-func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto {
 	return vd.proto
 }
 
@@ -1310,29 +1440,46 @@
 
 // ServiceDescriptor describes an RPC service declared in a proto file.
 type ServiceDescriptor struct {
-	proto          *dpb.ServiceDescriptorProto
+	wrapped        protoreflect.ServiceDescriptor
+	proto          *descriptorpb.ServiceDescriptorProto
 	file           *FileDescriptor
 	methods        []*MethodDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
-	serviceName := merge(enclosing, sd.GetName())
-	ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
-	for _, m := range sd.GetMethod() {
-		md, n := createMethodDescriptor(fd, ret, serviceName, m)
-		symbols[n] = md
-		ret.methods = append(ret.methods, md)
-	}
-	return ret, serviceName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapService, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (sd *ServiceDescriptor) Unwrap() protoreflect.Descriptor {
+	return sd.wrapped
 }
 
-func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
-	sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+// UnwrapService returns the underlying protoreflect.ServiceDescriptor.
+func (sd *ServiceDescriptor) UnwrapService() protoreflect.ServiceDescriptor {
+	return sd.wrapped
+}
+
+func createServiceDescriptor(fd *FileDescriptor, sd protoreflect.ServiceDescriptor, sdp *descriptorpb.ServiceDescriptorProto, symbols map[string]Descriptor, path []int32) *ServiceDescriptor {
+	ret := &ServiceDescriptor{
+		wrapped:        sd,
+		proto:          sdp,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
 	path = append(path, internal.Service_methodsTag)
-	for i, md := range sd.methods {
-		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+	for i := 0; i < sd.Methods().Len(); i++ {
+		src := sd.Methods().Get(i)
+		srcProto := sdp.GetMethod()[src.Index()]
+		md := createMethodDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+		symbols[string(src.FullName())] = md
+		ret.methods = append(ret.methods, md)
+	}
+	return ret
+}
+
+func (sd *ServiceDescriptor) resolve(cache descriptorCache) error {
+	for _, md := range sd.methods {
+		if err := md.resolve(cache); err != nil {
 			return err
 		}
 	}
@@ -1341,13 +1488,13 @@
 
 // GetName returns the simple (unqualified) name of the service.
 func (sd *ServiceDescriptor) GetName() string {
-	return sd.proto.GetName()
+	return string(sd.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the service. This
 // includes the package name (if there is one).
 func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
-	return sd.fqn
+	return string(sd.wrapped.FullName())
 }
 
 // GetParent returns the descriptor for the file in which this service is
@@ -1370,7 +1517,7 @@
 }
 
 // GetServiceOptions returns the service's options.
-func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+func (sd *ServiceDescriptor) GetServiceOptions() *descriptorpb.ServiceOptions {
 	return sd.proto.GetOptions()
 }
 
@@ -1379,7 +1526,7 @@
 // returned info contains information about the location in the file where the
 // service was defined and also contains comments associated with the service
 // definition.
-func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (sd *ServiceDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return sd.file.sourceInfo.Get(sd.sourceInfoPath)
 }
 
@@ -1391,7 +1538,7 @@
 }
 
 // AsServiceDescriptorProto returns the underlying descriptor proto.
-func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto {
 	return sd.proto
 }
 
@@ -1408,7 +1555,7 @@
 // FindMethodByName finds the method with the given name. If no such method exists
 // then nil is returned.
 func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
-	fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+	fqn := fmt.Sprintf("%s.%s", sd.GetFullyQualifiedName(), name)
 	if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
 		return md
 	} else {
@@ -1418,45 +1565,69 @@
 
 // MethodDescriptor describes an RPC method declared in a proto file.
 type MethodDescriptor struct {
-	proto          *dpb.MethodDescriptorProto
+	wrapped        protoreflect.MethodDescriptor
+	proto          *descriptorpb.MethodDescriptorProto
 	parent         *ServiceDescriptor
 	file           *FileDescriptor
 	inType         *MessageDescriptor
 	outType        *MessageDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
-	// request and response types get resolved later
-	methodName := merge(enclosing, md.GetName())
-	return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMethod, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MethodDescriptor) Unwrap() protoreflect.Descriptor {
+	return md.wrapped
 }
 
-func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
-	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
-	if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
-		return err
-	} else {
-		md.inType = desc.(*MessageDescriptor)
+// UnwrapMethod returns the underlying protoreflect.MethodDescriptor.
+func (md *MethodDescriptor) UnwrapMethod() protoreflect.MethodDescriptor {
+	return md.wrapped
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, md protoreflect.MethodDescriptor, mdp *descriptorpb.MethodDescriptorProto, path []int32) *MethodDescriptor {
+	// request and response types get resolved later
+	return &MethodDescriptor{
+		wrapped:        md,
+		proto:          mdp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
 	}
-	if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+}
+
+func (md *MethodDescriptor) resolve(cache descriptorCache) error {
+	if desc, err := resolve(md.file, md.wrapped.Input(), cache); err != nil {
 		return err
 	} else {
-		md.outType = desc.(*MessageDescriptor)
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("method %v has request type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetInputType(), descriptorType(desc))
+		}
+		md.inType = msgType
+	}
+	if desc, err := resolve(md.file, md.wrapped.Output(), cache); err != nil {
+		return err
+	} else {
+		msgType, ok := desc.(*MessageDescriptor)
+		if !ok {
+			return fmt.Errorf("method %v has response type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetOutputType(), descriptorType(desc))
+		}
+		md.outType = msgType
 	}
 	return nil
 }
 
 // GetName returns the name of the method.
 func (md *MethodDescriptor) GetName() string {
-	return md.proto.GetName()
+	return string(md.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the method. Unlike
 // GetName, this includes the fully qualified name of the enclosing service.
 func (md *MethodDescriptor) GetFullyQualifiedName() string {
-	return md.fqn
+	return string(md.wrapped.FullName())
 }
 
 // GetParent returns the descriptor for the service in which this method is
@@ -1485,7 +1656,7 @@
 }
 
 // GetMethodOptions returns the method's options.
-func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+func (md *MethodDescriptor) GetMethodOptions() *descriptorpb.MethodOptions {
 	return md.proto.GetOptions()
 }
 
@@ -1494,7 +1665,7 @@
 // returned info contains information about the location in the file where the
 // method was defined and also contains comments associated with the method
 // definition.
-func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (md *MethodDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return md.file.sourceInfo.Get(md.sourceInfoPath)
 }
 
@@ -1506,7 +1677,7 @@
 }
 
 // AsMethodDescriptorProto returns the underlying descriptor proto.
-func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+func (md *MethodDescriptor) AsMethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
 	return md.proto
 }
 
@@ -1517,12 +1688,12 @@
 
 // IsServerStreaming returns true if this is a server-streaming method.
 func (md *MethodDescriptor) IsServerStreaming() bool {
-	return md.proto.GetServerStreaming()
+	return md.wrapped.IsStreamingServer()
 }
 
 // IsClientStreaming returns true if this is a client-streaming method.
 func (md *MethodDescriptor) IsClientStreaming() bool {
-	return md.proto.GetClientStreaming()
+	return md.wrapped.IsStreamingClient()
 }
 
 // GetInputType returns the input type, or request type, of the RPC method.
@@ -1537,17 +1708,34 @@
 
 // OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
 type OneOfDescriptor struct {
-	proto          *dpb.OneofDescriptorProto
+	wrapped        protoreflect.OneofDescriptor
+	proto          *descriptorpb.OneofDescriptorProto
 	parent         *MessageDescriptor
 	file           *FileDescriptor
 	choices        []*FieldDescriptor
-	fqn            string
 	sourceInfoPath []int32
 }
 
-func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
-	oneOfName := merge(enclosing, od.GetName())
-	ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapOneOf, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (od *OneOfDescriptor) Unwrap() protoreflect.Descriptor {
+	return od.wrapped
+}
+
+// UnwrapOneOf returns the underlying protoreflect.OneofDescriptor.
+func (od *OneOfDescriptor) UnwrapOneOf() protoreflect.OneofDescriptor {
+	return od.wrapped
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, od protoreflect.OneofDescriptor, odp *descriptorpb.OneofDescriptorProto, path []int32) *OneOfDescriptor {
+	ret := &OneOfDescriptor{
+		wrapped:        od,
+		proto:          odp,
+		parent:         parent,
+		file:           fd,
+		sourceInfoPath: append([]int32(nil), path...), // defensive copy
+	}
 	for _, f := range parent.fields {
 		oi := f.proto.OneofIndex
 		if oi != nil && *oi == int32(index) {
@@ -1555,22 +1743,18 @@
 			ret.choices = append(ret.choices, f)
 		}
 	}
-	return ret, oneOfName
-}
-
-func (od *OneOfDescriptor) resolve(path []int32) {
-	od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	return ret
 }
 
 // GetName returns the name of the one-of.
 func (od *OneOfDescriptor) GetName() string {
-	return od.proto.GetName()
+	return string(od.wrapped.Name())
 }
 
 // GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
 // GetName, this includes the fully qualified name of the enclosing message.
 func (od *OneOfDescriptor) GetFullyQualifiedName() string {
-	return od.fqn
+	return string(od.wrapped.FullName())
 }
 
 // GetParent returns the descriptor for the message in which this one-of is
@@ -1599,7 +1783,7 @@
 }
 
 // GetOneOfOptions returns the one-of's options.
-func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+func (od *OneOfDescriptor) GetOneOfOptions() *descriptorpb.OneofOptions {
 	return od.proto.GetOptions()
 }
 
@@ -1608,7 +1792,7 @@
 // returned info contains information about the location in the file where the
 // one-of was defined and also contains comments associated with the one-of
 // definition.
-func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (od *OneOfDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
 	return od.file.sourceInfo.Get(od.sourceInfoPath)
 }
 
@@ -1620,7 +1804,7 @@
 }
 
 // AsOneofDescriptorProto returns the underlying descriptor proto.
-func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *descriptorpb.OneofDescriptorProto {
 	return od.proto
 }
 
@@ -1636,88 +1820,28 @@
 }
 
 func (od *OneOfDescriptor) IsSynthetic() bool {
-	return len(od.choices) == 1 && od.choices[0].IsProto3Optional()
+	return od.wrapped.IsSynthetic()
 }
 
-// scope represents a lexical scope in a proto file in which messages and enums
-// can be declared.
-type scope func(string) Descriptor
-
-func fileScope(fd *FileDescriptor) scope {
-	// we search symbols in this file, but also symbols in other files that have
-	// the same package as this file or a "parent" package (in protobuf,
-	// packages are a hierarchy like C++ namespaces)
-	prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
-	return func(name string) Descriptor {
-		for _, prefix := range prefixes {
-			n := merge(prefix, name)
-			d := findSymbol(fd, n, false)
-			if d != nil {
-				return d
-			}
-		}
-		return nil
+func resolve(fd *FileDescriptor, src protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+	d := cache.get(src)
+	if d != nil {
+		return d, nil
 	}
-}
 
-func messageScope(md *MessageDescriptor) scope {
-	return func(name string) Descriptor {
-		n := merge(md.fqn, name)
-		if d, ok := md.file.symbols[n]; ok {
-			return d
-		}
-		return nil
+	fqn := string(src.FullName())
+
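+	// Not in the cache: fall back to a symbol lookup, first in this file,
+	// then in its direct imports.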
+	d = fd.FindSymbol(fqn)
+	if d != nil {
+		return d, nil
 	}
-}
 
-func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
-	if strings.HasPrefix(name, ".") {
-		// already fully-qualified
-		d := findSymbol(fd, name[1:], false)
+	for _, dep := range fd.deps {
+		d := dep.FindSymbol(fqn)
 		if d != nil {
 			return d, nil
 		}
-	} else {
-		// unqualified, so we look in the enclosing (last) scope first and move
-		// towards outermost (first) scope, trying to resolve the symbol
-		for i := len(scopes) - 1; i >= 0; i-- {
-			d := scopes[i](name)
-			if d != nil {
-				return d, nil
-			}
-		}
-	}
-	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
-}
-
-func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
-	d := fd.symbols[name]
-	if d != nil {
-		return d
 	}
 
-	// When public = false, we are searching only directly imported symbols. But we
-	// also need to search transitive public imports due to semantics of public imports.
-	var deps []*FileDescriptor
-	if public {
-		deps = fd.publicDeps
-	} else {
-		deps = fd.deps
-	}
-	for _, dep := range deps {
-		d = findSymbol(dep, name, true)
-		if d != nil {
-			return d
-		}
-	}
-
-	return nil
-}
-
-func merge(a, b string) string {
-	if a == "" {
-		return b
-	} else {
-		return a + "." + b
-	}
+	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), fqn)
 }
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
index 1740dce..07bcbf3 100644
--- a/vendor/github.com/jhump/protoreflect/desc/doc.go
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -24,18 +24,47 @@
 // properties that are not immediately accessible through rich descriptor's
 // methods.
 //
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+//
+// # Loading Descriptors
+//
 // Rich descriptors can be accessed in similar ways as their "poor" cousins
 // (descriptor protos). Instead of using proto.FileDescriptor, use
 // desc.LoadFileDescriptor. Message descriptors and extension field descriptors
 // can also be easily accessed using desc.LoadMessageDescriptor and
 // desc.LoadFieldDescriptorForExtension, respectively.
 //
+// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then
+// the descriptors returned from these Load* functions will include source code
+// information, and thus include comments for elements.
+//
+// # Creating Descriptors
+//
 // It is also possible to create rich descriptors for proto messages that a given
 // Go program doesn't even know about. For example, they could be loaded from a
 // FileDescriptorSet file (which can be generated by protoc) or loaded from a
 // server. This enables interesting things like dynamic clients: where a Go
 // program can be an RPC client of a service it wasn't compiled to know about.
 //
-// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
-// repo to see just how useful rich descriptors really are.
+// You cannot create a message descriptor without also creating its enclosing
+// file, because the enclosing file is what contains other relevant information
+// like other symbols and dependencies/imports, which is how type references
+// are resolved (such as when a field in a message has a type that is another
+// message or enum).
+//
+// So the functions in this package for creating descriptors are all for
+// creating *file* descriptors. See the various Create* functions for more
+// information.
+//
+// Also see the desc/builder sub-package, for another API that makes it easier
+// to synthesize descriptors programmatically.
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/desc package,
+// see [google.golang.org/protobuf/reflect/protoreflect].
+//
+// [google.golang.org/protobuf/reflect/protoreflect]: https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect
 package desc
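The Load* entry points described above are one-liners in practice. A hedged sketch (it looks up a well-known type that is registered whenever descriptorpb is linked in, which importing this package already ensures):

	package main

	import (
		"fmt"

		"github.com/jhump/protoreflect/desc"
	)

	func main() {
		md, err := desc.LoadMessageDescriptor("google.protobuf.FileDescriptorProto")
		if err != nil {
			panic(err)
		}
		fmt.Println(md.GetFullyQualifiedName(), "from", md.GetFile().GetName())
	}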
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
index ab93032..dc6b735 100644
--- a/vendor/github.com/jhump/protoreflect/desc/imports.go
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -8,7 +8,8 @@
 	"sync"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
 var (
@@ -37,8 +38,8 @@
 	if len(importPath) == 0 {
 		panic("import path cannot be empty")
 	}
-	desc := proto.FileDescriptor(registerPath)
-	if len(desc) == 0 {
+	_, err := protoregistry.GlobalFiles.FindFileByPath(registerPath)
+	if err != nil {
 		panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
 	}
 	globalImportPathMu.Lock()
@@ -76,30 +77,40 @@
 //
 // For example, let's say we have two proto source files: "foo/bar.proto" and
 // "fubar/baz.proto". The latter imports the former using a line like so:
-//    import "foo/bar.proto";
+//
+//	import "foo/bar.proto";
+//
 // However, when protoc is invoked, the command-line args looks like so:
-//    protoc -Ifoo/ --go_out=foo/ bar.proto
-//    protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
+//	protoc -Ifoo/ --go_out=foo/ bar.proto
+//	protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
 // Because the path given to protoc is just "bar.proto" and "baz.proto", this is
 // how they are registered in the Go protobuf runtime. So, when loading the
 // descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
 // but will find no file registered with that path:
-//    fd, err := desc.LoadFileDescriptor("baz.proto")
-//    // err will be non-nil, complaining that there is no such file
-//    // found named "foo/bar.proto"
+//
+//	fd, err := desc.LoadFileDescriptor("baz.proto")
+//	// err will be non-nil, complaining that there is no such file
+//	// found named "foo/bar.proto"
 //
 // This can be remedied by registering alternate import paths using an
 // ImportResolver. Continuing with the example above, the code below would fix
 // any link issue:
-//    var r desc.ImportResolver
-//    r.RegisterImportPath("bar.proto", "foo/bar.proto")
-//    fd, err := r.LoadFileDescriptor("baz.proto")
-//    // err will be nil; descriptor successfully loaded!
+//
+//	var r desc.ImportResolver
+//	r.RegisterImportPath("bar.proto", "foo/bar.proto")
+//	fd, err := r.LoadFileDescriptor("baz.proto")
+//	// err will be nil; descriptor successfully loaded!
 //
 // If there are files that are *always* imported using a different relative
 // path than how they are registered, consider using the global
 // RegisterImportPath function, so you don't have to use an ImportResolver for
 // every file that imports it.
+//
+// Note that the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// customizing import paths for descriptor resolution is no longer necessary.
 type ImportResolver struct {
 	children    map[string]*ImportResolver
 	importPaths map[string]string
@@ -134,7 +145,7 @@
 		return r.importPaths[importPath]
 	}
 	var car, cdr string
-	idx := strings.IndexRune(source, filepath.Separator)
+	idx := strings.IndexRune(source, '/')
 	if idx < 0 {
 		car, cdr = source, ""
 	} else {
@@ -205,7 +216,7 @@
 		return
 	}
 	var car, cdr string
-	idx := strings.IndexRune(source, filepath.Separator)
+	idx := strings.IndexRune(source, '/')
 	if idx < 0 {
 		car, cdr = source, ""
 	} else {
@@ -226,86 +237,86 @@
 // any alternate paths configured in this resolver are used when linking the
 // given descriptor proto.
 func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
-	return loadFileDescriptor(filePath, r)
+	return LoadFileDescriptor(filePath)
 }
 
 // LoadMessageDescriptor is the same as the package function of the same name,
 // but any alternate paths configured in this resolver are used when linking
 // files for the returned descriptor.
 func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
-	return loadMessageDescriptor(msgName, r)
+	return LoadMessageDescriptor(msgName)
 }
 
 // LoadMessageDescriptorForMessage is the same as the package function of the
 // same name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForMessage(msg, r)
+	return LoadMessageDescriptorForMessage(msg)
 }
 
 // LoadMessageDescriptorForType is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForType(msgType, r)
+	return LoadMessageDescriptorForType(msgType)
 }
 
 // LoadEnumDescriptorForEnum is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForEnum(enum, r)
+	return LoadEnumDescriptorForEnum(enum)
 }
 
 // LoadEnumDescriptorForType is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForType(enumType, r)
+	return LoadEnumDescriptorForType(enumType)
 }
 
 // LoadFieldDescriptorForExtension is the same as the package function of the
 // same name, but any alternate paths configured in this resolver are used when
 // linking files for the returned descriptor.
 func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
-	return loadFieldDescriptorForExtension(ext, r)
+	return LoadFieldDescriptorForExtension(ext)
 }
 
 // CreateFileDescriptor is the same as the package function of the same name,
 // but any alternate paths configured in this resolver are used when linking the
 // given descriptor proto.
-func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptor(fdp *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
 	return createFileDescriptor(fdp, deps, r)
 }
 
 // CreateFileDescriptors is the same as the package function of the same name,
 // but any alternate paths configured in this resolver are used when linking the
 // given descriptor protos.
-func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
 	return createFileDescriptors(fds, r)
 }
 
 // CreateFileDescriptorFromSet is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking the descriptor protos in the given set.
-func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
 	return createFileDescriptorFromSet(fds, r)
 }
 
 // CreateFileDescriptorsFromSet is the same as the package function of the same
 // name, but any alternate paths configured in this resolver are used when
 // linking the descriptor protos in the given set.
-func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
 	return createFileDescriptorsFromSet(fds, r)
 }
 
-const dotPrefix = "." + string(filepath.Separator)
+const dotPrefix = "./"
 
 func clean(path string) string {
 	if path == "" {
 		return ""
 	}
-	path = filepath.Clean(path)
+	path = filepath.ToSlash(filepath.Clean(path))
 	if path == "." {
 		return ""
 	}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
index 9aa4a3e..aa8c3e9 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
@@ -1,86 +1,37 @@
 package internal
 
 import (
-	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
-	"github.com/jhump/protoreflect/internal/codec"
-	"reflect"
 	"strings"
 
-	"github.com/jhump/protoreflect/internal"
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
-// NB: We use reflection or unknown fields in case we are linked against an older
-// version of the proto runtime which does not know about the proto3_optional field.
-// We don't require linking with newer version (which would greatly simplify this)
-// because that means pulling in v1.4+ of the protobuf runtime, which has some
-// compatibility issues. (We'll be nice to users and not require they upgrade to
-// that latest runtime to upgrade to newer protoreflect.)
-
-func GetProto3Optional(fd *dpb.FieldDescriptorProto) bool {
-	type newerFieldDesc interface {
-		GetProto3Optional() bool
-	}
-	var pm proto.Message = fd
-	if fd, ok := pm.(newerFieldDesc); ok {
-		return fd.GetProto3Optional()
-	}
-
-	// Field does not exist, so we have to examine unknown fields
-	// (we just silently bail if we have problems parsing them)
-	unk := internal.GetUnrecognized(pm)
-	buf := codec.NewBuffer(unk)
-	for {
-		tag, wt, err := buf.DecodeTagAndWireType()
-		if err != nil {
-			return false
-		}
-		if tag == Field_proto3OptionalTag && wt == proto.WireVarint {
-			v, _ := buf.DecodeVarint()
-			return v != 0
-		}
-		if err := buf.SkipField(wt); err != nil {
-			return false
-		}
-	}
-}
-
-func SetProto3Optional(fd *dpb.FieldDescriptorProto) {
-	rv := reflect.ValueOf(fd).Elem()
-	fld := rv.FieldByName("Proto3Optional")
-	if fld.IsValid() {
-		fld.Set(reflect.ValueOf(proto.Bool(true)))
-		return
-	}
-
-	// Field does not exist, so we have to store as unknown field.
-	var buf codec.Buffer
-	if err := buf.EncodeTagAndWireType(Field_proto3OptionalTag, proto.WireVarint); err != nil {
-		// TODO: panic? log?
-		return
-	}
-	if err := buf.EncodeVarint(1); err != nil {
-		// TODO: panic? log?
-		return
-	}
-	internal.SetUnrecognized(fd, buf.Bytes())
-}
-
 // ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor
 // for each proto3 optional field. It also updates the fields to have the correct
-// oneof index reference.
-func ProcessProto3OptionalFields(msgd *dpb.DescriptorProto) {
+// oneof index reference. The given callback, if not nil, is called for each synthetic
+// oneof created.
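+//
+// A minimal usage sketch (editor's illustration; the descriptor literal is
+// hypothetical):
+//
+//	md := &descriptorpb.DescriptorProto{ /* message with proto3 optional fields */ }
+//	ProcessProto3OptionalFields(md, func(fld *descriptorpb.FieldDescriptorProto, oo *descriptorpb.OneofDescriptorProto) {
+//		// fld.OneofIndex now refers to oo, the synthetic oneof just added.
+//	})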
+func ProcessProto3OptionalFields(msgd *descriptorpb.DescriptorProto, callback func(*descriptorpb.FieldDescriptorProto, *descriptorpb.OneofDescriptorProto)) {
 	var allNames map[string]struct{}
 	for _, fd := range msgd.Field {
-		if GetProto3Optional(fd) {
+		if fd.GetProto3Optional() {
 			// lazy init the set of all names
 			if allNames == nil {
 				allNames = map[string]struct{}{}
 				for _, fd := range msgd.Field {
 					allNames[fd.GetName()] = struct{}{}
 				}
-				for _, fd := range msgd.Extension {
-					allNames[fd.GetName()] = struct{}{}
+				for _, od := range msgd.OneofDecl {
+					allNames[od.GetName()] = struct{}{}
+				}
+				// NB: protoc only considers names of other fields and oneofs
+				// when computing the synthetic oneof name. But that feels like
+				// a bug, since it means it could generate a name that conflicts
+				// with some other symbol defined in the message. If it's decided
+				// that's NOT a bug and is desirable, then we should remove the
+				// following four loops to mimic protoc's behavior.
+				for _, xd := range msgd.Extension {
+					allNames[xd.GetName()] = struct{}{}
 				}
 				for _, ed := range msgd.EnumType {
 					allNames[ed.GetName()] = struct{}{}
@@ -114,7 +65,11 @@
 			}
 
 			fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl)))
-			msgd.OneofDecl = append(msgd.OneofDecl, &dpb.OneofDescriptorProto{Name: proto.String(ooName)})
+			ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)}
+			msgd.OneofDecl = append(msgd.OneofDecl, ood)
+			if callback != nil {
+				callback(fd, ood)
+			}
 		}
 	}
 }
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
new file mode 100644
index 0000000..d7259e4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
@@ -0,0 +1,67 @@
+package internal
+
+import (
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/dynamicpb"
+)
+
+// RegisterExtensionsFromImportedFile registers extensions in the given file as well
+// as those in its public imports. So if another file imports the given fd, this adds
+// all extensions made visible to that importing file.
+//
+// All extensions in the given file are made visible to the importing file, and so are
+// extensions in any public imports in the given file.
+func RegisterExtensionsFromImportedFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+	registerTypesForFile(reg, fd, true, true)
+}
+
+// RegisterExtensionsVisibleToFile registers all extensions visible to the given file.
+// This includes all extensions defined in fd, as well as extensions defined in the
+// files that it imports (and any public imports thereof, etc).
+//
+// This is effectively the same as registering the extensions in fd and then calling
+// RegisterExtensionsFromImportedFile for each file imported by fd.
+func RegisterExtensionsVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+	registerTypesForFile(reg, fd, true, false)
+}
+
+// RegisterTypesVisibleToFile registers all types visible to the given file.
+// This is the same as RegisterExtensionsVisibleToFile but it also registers
+// message and enum types, not just extensions.
+func RegisterTypesVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+	registerTypesForFile(reg, fd, false, false)
+}
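+
+// For example (editor's sketch), a caller might build a resolver that knows
+// every extension visible to fd before unmarshaling messages defined there:
+//
+//	var types protoregistry.Types
+//	RegisterExtensionsVisibleToFile(&types, fd)
+//	opts := proto.UnmarshalOptions{Resolver: &types}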
+
+func registerTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor, extensionsOnly, publicImportsOnly bool) {
+	registerTypes(reg, fd, extensionsOnly)
+	for i := 0; i < fd.Imports().Len(); i++ {
+		imp := fd.Imports().Get(i)
+		if imp.IsPublic || !publicImportsOnly {
+			registerTypesForFile(reg, imp, extensionsOnly, true)
+		}
+	}
+}
+
+func registerTypes(reg *protoregistry.Types, elem fileOrMessage, extensionsOnly bool) {
+	for i := 0; i < elem.Extensions().Len(); i++ {
+		_ = reg.RegisterExtension(dynamicpb.NewExtensionType(elem.Extensions().Get(i)))
+	}
+	if !extensionsOnly {
+		for i := 0; i < elem.Messages().Len(); i++ {
+			_ = reg.RegisterMessage(dynamicpb.NewMessageType(elem.Messages().Get(i)))
+		}
+		for i := 0; i < elem.Enums().Len(); i++ {
+			_ = reg.RegisterEnum(dynamicpb.NewEnumType(elem.Enums().Get(i)))
+		}
+	}
+	for i := 0; i < elem.Messages().Len(); i++ {
+		registerTypes(reg, elem.Messages().Get(i), extensionsOnly)
+	}
+}
+
+type fileOrMessage interface {
+	Extensions() protoreflect.ExtensionDescriptors
+	Messages() protoreflect.MessageDescriptors
+	Enums() protoreflect.EnumDescriptors
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
index b4150b8..6037128 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -1,16 +1,16 @@
 package internal
 
 import (
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
 // SourceInfoMap is a map of paths in a descriptor to the corresponding source
 // code info.
-type SourceInfoMap map[string][]*dpb.SourceCodeInfo_Location
+type SourceInfoMap map[string][]*descriptorpb.SourceCodeInfo_Location
 
 // Get returns the source code info for the given path. If there are
 // multiple locations for the same path, the first one is returned.
-func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+func (m SourceInfoMap) Get(path []int32) *descriptorpb.SourceCodeInfo_Location {
 	v := m[asMapKey(path)]
 	if len(v) > 0 {
 		return v[0]
@@ -19,24 +19,24 @@
 }
 
 // GetAll returns all source code info for the given path.
-func (m SourceInfoMap) GetAll(path []int32) []*dpb.SourceCodeInfo_Location {
+func (m SourceInfoMap) GetAll(path []int32) []*descriptorpb.SourceCodeInfo_Location {
 	return m[asMapKey(path)]
 }
 
 // Add stores the given source code info for the given path.
-func (m SourceInfoMap) Add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+func (m SourceInfoMap) Add(path []int32, loc *descriptorpb.SourceCodeInfo_Location) {
 	m[asMapKey(path)] = append(m[asMapKey(path)], loc)
 }
 
 // PutIfAbsent stores the given source code info for the given path only if the
 // given path does not exist in the map. This method returns true when the value
 // is stored, false if the path already exists.
-func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *descriptorpb.SourceCodeInfo_Location) bool {
 	k := asMapKey(path)
 	if _, ok := m[k]; ok {
 		return false
 	}
-	m[k] = []*dpb.SourceCodeInfo_Location{loc}
+	m[k] = []*descriptorpb.SourceCodeInfo_Location{loc}
 	return true
 }
 
@@ -63,7 +63,7 @@
 
 // CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
 // source code info in the given file descriptor proto.
-func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+func CreateSourceInfoMap(fd *descriptorpb.FileDescriptorProto) SourceInfoMap {
 	res := SourceInfoMap{}
 	PopulateSourceInfoMap(fd, res)
 	return res
@@ -71,7 +71,7 @@
 
 // PopulateSourceInfoMap populates the given SourceInfoMap with information from
 // the given file descriptor.
-func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+func PopulateSourceInfoMap(fd *descriptorpb.FileDescriptorProto, m SourceInfoMap) {
 	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
 		m.Add(l.Path, l)
 	}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
index 71855bf..595c872 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/util.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -54,6 +54,9 @@
 	// File_syntaxTag is the tag number of the syntax element in a file
 	// descriptor proto.
 	File_syntaxTag = 12
+	// File_editionTag is the tag number of the edition element in a file
+	// descriptor proto.
+	File_editionTag = 14
 	// Message_nameTag is the tag number of the name element in a message
 	// descriptor proto.
 	Message_nameTag = 1
@@ -219,17 +222,18 @@
 )
 
 // JsonName returns the default JSON name for a field with the given name.
+// This mirrors the algorithm in protoc:
+//
+//	https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95
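+//
+// For example, JsonName("foo_bar_baz") returns "fooBarBaz" and
+// JsonName("_foo") returns "Foo".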
 func JsonName(name string) string {
 	var js []rune
 	nextUpper := false
-	for i, r := range name {
+	for _, r := range name {
 		if r == '_' {
 			nextUpper = true
 			continue
 		}
-		if i == 0 {
-			js = append(js, r)
-		} else if nextUpper {
+		if nextUpper {
 			nextUpper = false
 			js = append(js, unicode.ToUpper(r))
 		} else {
@@ -251,7 +255,8 @@
 // that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
 // successively shorter prefixes of the package and then the empty string. For
 // example, for a package named "foo.bar.baz" it will return the following list:
-//   ["foo.bar.baz", "foo.bar", "foo", ""]
+//
+//	["foo.bar.baz", "foo.bar", "foo", ""]
 func CreatePrefixList(pkg string) []string {
 	if pkg == "" {
 		return []string{""}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
index 4a05830..8fd09ac 100644
--- a/vendor/github.com/jhump/protoreflect/desc/load.go
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -1,150 +1,109 @@
 package desc
 
 import (
+	"errors"
 	"fmt"
 	"reflect"
 	"sync"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 
+	"github.com/jhump/protoreflect/desc/sourceinfo"
 	"github.com/jhump/protoreflect/internal"
 )
 
+// The global cache is used to store descriptors that wrap items in
+// protoregistry.GlobalTypes and protoregistry.GlobalFiles. This prevents
+// repeating work to re-wrap underlying global descriptors.
 var (
-	cacheMu       sync.RWMutex
-	filesCache    = map[string]*FileDescriptor{}
-	messagesCache = map[string]*MessageDescriptor{}
-	enumCache     = map[reflect.Type]*EnumDescriptor{}
+	// We put all wrapped file and message descriptors in this cache.
+	loadedDescriptors = lockingCache{cache: mapCache{}}
+
+	// Unfortunately, we need a different mechanism for enums for
+	// compatibility with old APIs, which required that they were
+	// registered in a different way :(
+	loadedEnumsMu sync.RWMutex
+	loadedEnums   = map[reflect.Type]*EnumDescriptor{}
 )
 
 // LoadFileDescriptor creates a file descriptor using the bytes returned by
 // proto.FileDescriptor. Descriptors are cached so that they do not need to be
 // re-processed if the same file is fetched again later.
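+//
+// For example (editor's sketch; the path must name a file linked into the
+// binary):
+//
+//	fd, err := desc.LoadFileDescriptor("google/protobuf/descriptor.proto")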
 func LoadFileDescriptor(file string) (*FileDescriptor, error) {
-	return loadFileDescriptor(file, nil)
-}
-
-func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
-	f := getFileFromCache(file)
-	if f != nil {
-		return f, nil
-	}
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadFileDescriptorLocked(file, r)
-}
-
-func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
-	f := filesCache[file]
-	if f != nil {
-		return f, nil
-	}
-	fd, err := internal.LoadFileDescriptor(file)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err = toFileDescriptorLocked(fd, r)
-	if err != nil {
-		return nil, err
-	}
-	putCacheLocked(file, f)
-	return f, nil
-}
-
-func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
-	deps := make([]*FileDescriptor, len(fd.GetDependency()))
-	for i, dep := range fd.GetDependency() {
-		resolvedDep := r.ResolveImport(fd.GetName(), dep)
-		var err error
-		deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
-		if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
-			// try original path
-			deps[i], err = loadFileDescriptorLocked(dep, r)
-		}
-		if err != nil {
-			return nil, err
+	d, err := sourceinfo.GlobalFiles.FindFileByPath(file)
+	if errors.Is(err, protoregistry.NotFound) {
+		// for backwards compatibility, see if this matches a known old
+		// alias for the file (older versions of libraries that registered
+		// the files using incorrect/non-canonical paths)
+		if alt := internal.StdFileAliases[file]; alt != "" {
+			d, err = sourceinfo.GlobalFiles.FindFileByPath(alt)
 		}
 	}
-	return CreateFileDescriptor(fd, deps...)
-}
-
-func getFileFromCache(file string) *FileDescriptor {
-	cacheMu.RLock()
-	defer cacheMu.RUnlock()
-	return filesCache[file]
-}
-
-func putCacheLocked(filename string, fd *FileDescriptor) {
-	filesCache[filename] = fd
-	putMessageCacheLocked(fd.messages)
-}
-
-func putMessageCacheLocked(mds []*MessageDescriptor) {
-	for _, md := range mds {
-		messagesCache[md.fqn] = md
-		putMessageCacheLocked(md.nested)
+	if err != nil {
+		if errors.Is(err, protoregistry.NotFound) {
+			return nil, internal.ErrNoSuchFile(file)
+		}
+		return nil, err
 	}
-}
+	if fd := loadedDescriptors.get(d); fd != nil {
+		return fd.(*FileDescriptor), nil
+	}
 
-// interface implemented by generated messages, which all have a Descriptor() method in
-// addition to the methods of proto.Message
-type protoMessage interface {
-	proto.Message
-	Descriptor() ([]byte, []int)
+	var fd *FileDescriptor
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		fd, err = wrapFile(d, cache)
+	})
+	return fd, err
 }
 
 // LoadMessageDescriptor loads a descriptor using the encoded descriptor proto returned by
 // Message.Descriptor() for the given message type. If the given type is not recognized,
 // then a nil descriptor is returned.
 func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
-	return loadMessageDescriptor(message, nil)
+	mt, err := sourceinfo.GlobalTypes.FindMessageByName(protoreflect.FullName(message))
+	if err != nil {
+		if errors.Is(err, protoregistry.NotFound) {
+			return nil, nil
+		}
+		return nil, err
+	}
+	return loadMessageDescriptor(mt.Descriptor())
 }
 
-func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
-	m := getMessageFromCache(message)
-	if m != nil {
-		return m, nil
+func loadMessageDescriptor(md protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+	d := loadedDescriptors.get(md)
+	if d != nil {
+		return d.(*MessageDescriptor), nil
 	}
 
-	pt := proto.MessageType(message)
-	if pt == nil {
-		return nil, nil
-	}
-	msg, err := messageFromType(pt)
+	var err error
+	loadedDescriptors.withLock(func(cache descriptorCache) {
+		d, err = wrapMessage(md, cache)
+	})
 	if err != nil {
 		return nil, err
 	}
-
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadMessageDescriptorForTypeLocked(message, msg, r)
+	return d.(*MessageDescriptor), err
 }
 
 // LoadMessageDescriptorForType loads a descriptor using the encoded descriptor proto returned
 // by message.Descriptor() for the given message type. If the given type is not recognized,
 // then a nil descriptor is returned.
 func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForType(messageType, nil)
-}
-
-func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
 	m, err := messageFromType(messageType)
 	if err != nil {
 		return nil, err
 	}
-	return loadMessageDescriptorForMessage(m, r)
+	return LoadMessageDescriptorForMessage(m)
 }
 
 // LoadMessageDescriptorForMessage loads a descriptor using the encoded descriptor proto
 // returned by message.Descriptor(). If the given type is not recognized, then a nil
 // descriptor is returned.
 func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
-	return loadMessageDescriptorForMessage(message, nil)
-}
-
-func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
 	// efficiently handle dynamic messages
 	type descriptorable interface {
 		GetMessageDescriptor() *MessageDescriptor
@@ -153,57 +112,26 @@
 		return d.GetMessageDescriptor(), nil
 	}
 
-	name := proto.MessageName(message)
-	if name == "" {
-		return nil, nil
+	var md protoreflect.MessageDescriptor
+	if m, ok := message.(protoreflect.ProtoMessage); ok {
+		md = m.ProtoReflect().Descriptor()
+	} else {
+		md = proto.MessageReflect(message).Descriptor()
 	}
-	m := getMessageFromCache(name)
-	if m != nil {
-		return m, nil
-	}
-
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+	return loadMessageDescriptor(sourceinfo.WrapMessage(md))
 }
 
-func messageFromType(mt reflect.Type) (protoMessage, error) {
+func messageFromType(mt reflect.Type) (proto.Message, error) {
 	if mt.Kind() != reflect.Ptr {
 		mt = reflect.PtrTo(mt)
 	}
-	m, ok := reflect.Zero(mt).Interface().(protoMessage)
+	m, ok := reflect.Zero(mt).Interface().(proto.Message)
 	if !ok {
 		return nil, fmt.Errorf("failed to create message from type: %v", mt)
 	}
 	return m, nil
 }
 
-func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
-	m := messagesCache[name]
-	if m != nil {
-		return m, nil
-	}
-
-	fdb, _ := message.Descriptor()
-	fd, err := internal.DecodeFileDescriptor(name, fdb)
-	if err != nil {
-		return nil, err
-	}
-
-	f, err := toFileDescriptorLocked(fd, r)
-	if err != nil {
-		return nil, err
-	}
-	putCacheLocked(fd.GetName(), f)
-	return f.FindSymbol(name).(*MessageDescriptor), nil
-}
-
-func getMessageFromCache(message string) *MessageDescriptor {
-	cacheMu.RLock()
-	defer cacheMu.RUnlock()
-	return messagesCache[message]
-}
-
 // interface implemented by all generated enums
 type protoEnum interface {
 	EnumDescriptor() ([]byte, []int)
@@ -220,10 +148,6 @@
 // LoadEnumDescriptorForType loads a descriptor using the encoded descriptor proto returned
 // by enum.EnumDescriptor() for the given enum type.
 func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForType(enumType, nil)
-}
-
-func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
 	// we cache descriptors using non-pointer type
 	if enumType.Kind() == reflect.Ptr {
 		enumType = enumType.Elem()
@@ -237,18 +161,24 @@
 		return nil, err
 	}
 
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+	return loadEnumDescriptor(enumType, enum)
+}
+
+func getEnumFromCache(t reflect.Type) *EnumDescriptor {
+	loadedEnumsMu.RLock()
+	defer loadedEnumsMu.RUnlock()
+	return loadedEnums[t]
+}
+
+func putEnumInCache(t reflect.Type, d *EnumDescriptor) {
+	loadedEnumsMu.Lock()
+	defer loadedEnumsMu.Unlock()
+	loadedEnums[t] = d
 }
 
 // LoadEnumDescriptorForEnum loads a descriptor using the encoded descriptor proto
 // returned by enum.EnumDescriptor().
 func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
-	return loadEnumDescriptorForEnum(enum, nil)
-}
-
-func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
 	et := reflect.TypeOf(enum)
 	// we cache descriptors using non-pointer type
 	if et.Kind() == reflect.Ptr {
@@ -260,55 +190,46 @@
 		return e, nil
 	}
 
-	cacheMu.Lock()
-	defer cacheMu.Unlock()
-	return loadEnumDescriptorForTypeLocked(et, enum, r)
+	return loadEnumDescriptor(et, enum)
 }
 
 func enumFromType(et reflect.Type) (protoEnum, error) {
-	if et.Kind() != reflect.Int32 {
-		et = reflect.PtrTo(et)
-	}
 	e, ok := reflect.Zero(et).Interface().(protoEnum)
 	if !ok {
+		if et.Kind() != reflect.Ptr {
+			et = reflect.PtrTo(et)
+		}
+		e, ok = reflect.Zero(et).Interface().(protoEnum)
+	}
+	if !ok {
 		return nil, fmt.Errorf("failed to create enum from type: %v", et)
 	}
 	return e, nil
 }
 
-func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
-	e := enumCache[et]
-	if e != nil {
-		return e, nil
-	}
-
+func getDescriptorForEnum(enum protoEnum) (*descriptorpb.FileDescriptorProto, []int, error) {
 	fdb, path := enum.EnumDescriptor()
-	name := fmt.Sprintf("%v", et)
+	name := fmt.Sprintf("%T", enum)
 	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	return fd, path, err
+}
+
+func loadEnumDescriptor(et reflect.Type, enum protoEnum) (*EnumDescriptor, error) {
+	fdp, path, err := getDescriptorForEnum(enum)
 	if err != nil {
 		return nil, err
 	}
-	// see if we already have cached "rich" descriptor
-	f, ok := filesCache[fd.GetName()]
-	if !ok {
-		f, err = toFileDescriptorLocked(fd, r)
-		if err != nil {
-			return nil, err
-		}
-		putCacheLocked(fd.GetName(), f)
+
+	fd, err := LoadFileDescriptor(fdp.GetName())
+	if err != nil {
+		return nil, err
 	}
 
-	ed := findEnum(f, path)
-	enumCache[et] = ed
+	ed := findEnum(fd, path)
+	putEnumInCache(et, ed)
 	return ed, nil
 }
 
-func getEnumFromCache(et reflect.Type) *EnumDescriptor {
-	cacheMu.RLock()
-	defer cacheMu.RUnlock()
-	return enumCache[et]
-}
-
 func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
 	if len(path) == 1 {
 		return fd.GetEnumTypes()[path[0]]
@@ -323,11 +244,7 @@
 // LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
 // extension description.
 func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
-	return loadFieldDescriptorForExtension(ext, nil)
-}
-
-func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
-	file, err := loadFileDescriptor(ext.Filename, r)
+	file, err := LoadFileDescriptor(ext.Filename)
 	if err != nil {
 		return nil, err
 	}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
new file mode 100644
index 0000000..20d2d7a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
@@ -0,0 +1,207 @@
+package sourceinfo
+
+import (
+	"math"
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+// NB: forked from google.golang.org/protobuf/internal/filedesc
+type sourceLocations struct {
+	protoreflect.SourceLocations
+
+	orig []*descriptorpb.SourceCodeInfo_Location
+	// locs is a list of sourceLocations.
+	// The SourceLocation.Next field does not need to be populated
+	// as it will be lazily populated upon first need.
+	locs []protoreflect.SourceLocation
+
+	// fd is the parent file descriptor that these locations are relative to.
+	// If non-nil, ByDescriptor verifies that the provided descriptor
+	// is a child of this file descriptor.
+	fd protoreflect.FileDescriptor
+
+	once   sync.Once
+	byPath map[pathKey]int
+}
+
+func (p *sourceLocations) Len() int { return len(p.orig) }
+func (p *sourceLocations) Get(i int) protoreflect.SourceLocation {
+	return p.lazyInit().locs[i]
+}
+func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation {
+	if i, ok := p.lazyInit().byPath[k]; ok {
+		return p.locs[i]
+	}
+	return protoreflect.SourceLocation{}
+}
+func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation {
+	return p.byKey(newPathKey(path))
+}
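+// ByDescriptor derives the source path for desc by walking up its parent
+// chain, appending (index, tag) pairs, and then reversing the result. For a
+// field of a top-level message, the final path is (editor's note):
+//
+//	[File_messagesTag, messageIndex, Message_fieldsTag, fieldIndex]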
+func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation {
+	if p.fd != nil && desc != nil && p.fd != desc.ParentFile() {
+		return protoreflect.SourceLocation{} // mismatching parent files
+	}
+	var pathArr [16]int32
+	path := pathArr[:0]
+	for {
+		switch desc.(type) {
+		case protoreflect.FileDescriptor:
+			// Reverse the path since it was constructed in reverse.
+			for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+				path[i], path[j] = path[j], path[i]
+			}
+			return p.byKey(newPathKey(path))
+		case protoreflect.MessageDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_messagesTag))
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_nestedMessagesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.FieldDescriptor:
+			isExtension := desc.(protoreflect.FieldDescriptor).IsExtension()
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			if isExtension {
+				switch desc.(type) {
+				case protoreflect.FileDescriptor:
+					path = append(path, int32(internal.File_extensionsTag))
+				case protoreflect.MessageDescriptor:
+					path = append(path, int32(internal.Message_extensionsTag))
+				default:
+					return protoreflect.SourceLocation{}
+				}
+			} else {
+				switch desc.(type) {
+				case protoreflect.MessageDescriptor:
+					path = append(path, int32(internal.Message_fieldsTag))
+				default:
+					return protoreflect.SourceLocation{}
+				}
+			}
+		case protoreflect.OneofDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_oneOfsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.EnumDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_enumsTag))
+			case protoreflect.MessageDescriptor:
+				path = append(path, int32(internal.Message_enumsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.EnumValueDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.EnumDescriptor:
+				path = append(path, int32(internal.Enum_valuesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.ServiceDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.FileDescriptor:
+				path = append(path, int32(internal.File_servicesTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		case protoreflect.MethodDescriptor:
+			path = append(path, int32(desc.Index()))
+			desc = desc.Parent()
+			switch desc.(type) {
+			case protoreflect.ServiceDescriptor:
+				path = append(path, int32(internal.Service_methodsTag))
+			default:
+				return protoreflect.SourceLocation{}
+			}
+		default:
+			return protoreflect.SourceLocation{}
+		}
+	}
+}
+func (p *sourceLocations) lazyInit() *sourceLocations {
+	p.once.Do(func() {
+		if len(p.orig) > 0 {
+			p.locs = make([]protoreflect.SourceLocation, len(p.orig))
+			// Collect all the indexes for a given path.
+			pathIdxs := make(map[pathKey][]int, len(p.locs))
+			for i := range p.orig {
+				l := asSourceLocation(p.orig[i])
+				p.locs[i] = l
+				k := newPathKey(l.Path)
+				pathIdxs[k] = append(pathIdxs[k], i)
+			}
+
+			// Update the next index for all locations.
+			p.byPath = make(map[pathKey]int, len(p.locs))
+			for k, idxs := range pathIdxs {
+				for i := 0; i < len(idxs)-1; i++ {
+					p.locs[idxs[i]].Next = idxs[i+1]
+				}
+				p.locs[idxs[len(idxs)-1]].Next = 0
+				p.byPath[k] = idxs[0] // record the first location for this path
+			}
+		}
+	})
+	return p
+}
+
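+// asSourceLocation converts a descriptor.proto location to the protoreflect
+// form. Per the SourceCodeInfo spec, a span is either
+// [startLine, startCol, endCol] (single-line element) or
+// [startLine, startCol, endLine, endCol].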
+func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation {
+	endLine := l.Span[0]
+	endCol := l.Span[2]
+	if len(l.Span) > 3 {
+		endLine = l.Span[2]
+		endCol = l.Span[3]
+	}
+	return protoreflect.SourceLocation{
+		Path:                    l.Path,
+		StartLine:               int(l.Span[0]),
+		StartColumn:             int(l.Span[1]),
+		EndLine:                 int(endLine),
+		EndColumn:               int(endCol),
+		LeadingDetachedComments: l.LeadingDetachedComments,
+		LeadingComments:         l.GetLeadingComments(),
+		TrailingComments:        l.GetTrailingComments(),
+	}
+}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+	arr [16]uint8 // first n-1 path segments; last element is the length
+	str string    // used if the path does not fit in arr
+}
+
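+// newPathKey encodes p into a comparable key. For example (editor's note),
+// the path [4, 0, 2, 1] is stored in arr with its first four elements set to
+// 4, 0, 2, 1 and the final element set to the length 4; paths that are too
+// long or have out-of-range segments fall back to the string form.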
+func newPathKey(p protoreflect.SourcePath) (k pathKey) {
+	if len(p) < len(k.arr) {
+		for i, ps := range p {
+			if ps < 0 || math.MaxUint8 <= ps {
+				return pathKey{str: p.String()}
+			}
+			k.arr[i] = uint8(ps)
+		}
+		k.arr[len(k.arr)-1] = uint8(len(p))
+		return k
+	}
+	return pathKey{str: p.String()}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
new file mode 100644
index 0000000..8301c40
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
@@ -0,0 +1,340 @@
+// Package sourceinfo provides the ability to register and query source code info
+// for file descriptors that are compiled into the binary. This data is registered
+// by code generated from the protoc-gen-gosrcinfo plugin.
+//
+// The standard descriptors bundled into the compiled binary are stripped of source
+// code info, to reduce binary size and reduce runtime memory footprint. However,
+// the source code info can be very handy and worth the size cost when used with
+// gRPC services and the server reflection service. Without source code info, the
+// descriptors that a client downloads from the reflection service have no comments.
+// But the presence of comments, and the ability to show them to humans, can greatly
+// improve the utility of user agents that use the reflection service.
+//
+// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load
+// descriptors for compiled-in elements, will automatically include source code
+// info, using the data registered with this package.
+//
+// In order to make the reflection service use this functionality, you will need to
+// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The
+// following snippet demonstrates how to do this in your server. Do this instead of
+// using the reflection.Register function:
+//
+//	refSvr := reflection.NewServer(reflection.ServerOptions{
+//	    Services:           grpcServer,
+//	    DescriptorResolver: sourceinfo.GlobalFiles,
+//	    ExtensionResolver:  sourceinfo.GlobalFiles,
+//	})
+//	grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr)
+package sourceinfo
+
+import (
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+	// GlobalFiles is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalFiles that
+	// can include source code info in the returned descriptors.
+	GlobalFiles Resolver = registry{}
+
+	// GlobalTypes is a registry of descriptors that include source code info, if the
+	// files they belong to were processed with protoc-gen-gosrcinfo.
+	//
+	// It is meant to serve as a drop-in alternative to protoregistry.GlobalTypes that
+	// can include source code info in the returned descriptors.
+	GlobalTypes TypeResolver = registry{}
+
+	mu                 sync.RWMutex
+	sourceInfoByFile   = map[string]*descriptorpb.SourceCodeInfo{}
+	fileDescriptors    = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{}
+	updatedDescriptors filesWithFallback
+)
+
+// Resolver can resolve file names into file descriptors and also provides methods for
+// resolving extensions.
+type Resolver interface {
+	protodesc.Resolver
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// NB: These interfaces are far from ideal. Ideally, Resolver would have
+//    * EITHER been named FileResolver and not included the extension methods.
+//    * OR also included message methods (i.e. embed protoregistry.MessageTypeResolver).
+//   Now (since it's been released) we can't add the message methods to the interface as
+//   that's not a backwards-compatible change. So we have to introduce the new interface
+//   below, which is now a little confusing since it has some overlap with Resolver.
+
+// TypeResolver can resolve message names and URLs into message descriptors and also
+// provides methods for resolving extensions.
+type TypeResolver interface {
+	protoregistry.MessageTypeResolver
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// RegisterSourceInfo registers the given source code info for the file descriptor
+// with the given path/name.
+//
+// This is automatically used from older generated code if using a previous release of
+// the protoc-gen-gosrcinfo plugin.
+func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) {
+	mu.Lock()
+	defer mu.Unlock()
+	sourceInfoByFile[file] = srcInfo
+}
+
+// RegisterEncodedSourceInfo registers the given source code info, which is a serialized
+// and gzipped form of a google.protobuf.SourceCodeInfo message.
+//
+// This is automatically used from generated code if using the protoc-gen-gosrcinfo
+// plugin.
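+//
+// Generated code typically invokes this from an init function, along these
+// lines (editor's sketch; the identifiers are hypothetical):
+//
+//	func init() {
+//		_ = sourceinfo.RegisterEncodedSourceInfo("foo/bar.proto", gzippedSrcInfo)
+//	}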
+func RegisterEncodedSourceInfo(file string, data []byte) error {
+	zipReader, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return err
+	}
+	defer func() {
+		_ = zipReader.Close()
+	}()
+	unzipped, err := io.ReadAll(zipReader)
+	if err != nil {
+		return err
+	}
+	var srcInfo descriptorpb.SourceCodeInfo
+	if err := proto.Unmarshal(unzipped, &srcInfo); err != nil {
+		return err
+	}
+	RegisterSourceInfo(file, &srcInfo)
+	return nil
+}
+
+// SourceInfoForFile queries for any registered source code info for the file
+// descriptor with the given path/name. It returns nil if no source code info
+// was registered.
+func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo {
+	mu.RLock()
+	defer mu.RUnlock()
+	return sourceInfoByFile[file]
+}
+
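+// canUpgrade reports whether d comes from generated code and still lacks
+// source code info, i.e. whether attaching registered source info could
+// improve it.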
+func canUpgrade(d protoreflect.Descriptor) bool {
+	if d == nil {
+		return false
+	}
+	fd := d.ParentFile()
+	if fd.SourceLocations().Len() > 0 {
+		// already has source info
+		return false
+	}
+	if genFile, err := protoregistry.GlobalFiles.FindFileByPath(fd.Path()); err != nil || genFile != fd {
+		// given descriptor is not from generated code
+		return false
+	}
+	return true
+}
+
+func getFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	if !canUpgrade(fd) {
+		return fd, nil
+	}
+
+	mu.RLock()
+	result := fileDescriptors[fd]
+	mu.RUnlock()
+
+	if result != nil {
+		return result, nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	result, err := getFileLocked(fd)
+	if err != nil {
+		return nil, fmt.Errorf("updating file %q: %w", fd.Path(), err)
+	}
+	return result, nil
+}
+
+func getFileLocked(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	result := fileDescriptors[fd]
+	if result != nil {
+		return result, nil
+	}
+
+	// We have to build its dependencies, too, so that the descriptor's
+	// references *all* have source code info.
+	var deps []protoreflect.FileDescriptor
+	imps := fd.Imports()
+	for i, length := 0, imps.Len(); i < length; i++ {
+		origDep := imps.Get(i).FileDescriptor
+		updatedDep, err := getFileLocked(origDep)
+		if err != nil {
+			return nil, fmt.Errorf("updating import %q: %w", origDep.Path(), err)
+		}
+		if updatedDep != origDep && deps == nil {
+			// lazily init slice of deps and copy over deps before this one
+			deps = make([]protoreflect.FileDescriptor, i, length)
+			for j := 0; j < i; j++ {
+				deps[j] = imps.Get(j).FileDescriptor
+			}
+		}
+		if deps != nil {
+			deps = append(deps, updatedDep)
+		}
+	}
+
+	srcInfo := sourceInfoByFile[fd.Path()]
+	if len(srcInfo.GetLocation()) == 0 && len(deps) == 0 {
+		// nothing to do; don't bother changing
+		return fd, nil
+	}
+
+	// Add source code info and rebuild.
+	fdProto := protodesc.ToFileDescriptorProto(fd)
+	fdProto.SourceCodeInfo = srcInfo
+
+	result, err := protodesc.NewFile(fdProto, &updatedDescriptors)
+	if err != nil {
+		return nil, err
+	}
+	if err := updatedDescriptors.RegisterFile(result); err != nil {
+		return nil, fmt.Errorf("registering import %q: %w", result.Path(), err)
+	}
+
+	fileDescriptors[fd] = result
+	return result, nil
+}
+
+type registry struct{}
+
+var _ protodesc.Resolver = &registry{}
+
+func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	fd, err := protoregistry.GlobalFiles.FindFileByPath(path)
+	if err != nil {
+		return nil, err
+	}
+	return getFile(fd)
+}
+
+func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+	d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	if !canUpgrade(d) {
+		return d, nil
+	}
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return getFile(d)
+	case protoreflect.MessageDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.FieldDescriptor:
+		return updateField(d)
+	case protoreflect.OneofDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.EnumDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.EnumValueDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.ServiceDescriptor:
+		return updateDescriptor(d)
+	case protoreflect.MethodDescriptor:
+		return updateDescriptor(d)
+	default:
+		return nil, fmt.Errorf("unrecognized descriptor type: %T", d)
+	}
+}
+
+func (r registry) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+	mt, err := protoregistry.GlobalTypes.FindMessageByName(message)
+	if err != nil {
+		return nil, err
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return mt, nil
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+func (r registry) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+	if err != nil {
+		return nil, err
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return mt, nil
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+func (r registry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByName(field)
+	if err != nil {
+		return nil, err
+	}
+	ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return xt, nil
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+	if err != nil {
+		return nil, err
+	}
+	ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return xt, nil
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) {
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool {
+		ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+		if err != nil {
+			return fn(xt)
+		}
+		return fn(extensionType{ExtensionType: xt, extDesc: ext})
+	})
+}
+
+type filesWithFallback struct {
+	protoregistry.Files
+}
+
+func (f *filesWithFallback) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+	fd, err := f.Files.FindFileByPath(path)
+	if errors.Is(err, protoregistry.NotFound) {
+		fd, err = protoregistry.GlobalFiles.FindFileByPath(path)
+	}
+	return fd, err
+}
+
+func (f *filesWithFallback) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+	fd, err := f.Files.FindDescriptorByName(name)
+	if errors.Is(err, protoregistry.NotFound) {
+		fd, err = protoregistry.GlobalFiles.FindDescriptorByName(name)
+	}
+	return fd, err
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
new file mode 100644
index 0000000..53bc457
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
@@ -0,0 +1,314 @@
+package sourceinfo
+
+import (
+	"fmt"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// AddSourceInfoToFile will return a new file descriptor that is a copy
+// of fd except that it includes source code info. If the given file
+// already contains source info, was not registered from generated code,
+// or was not processed with the protoc-gen-gosrcinfo plugin, then fd
+// is returned as is, unchanged.
+func AddSourceInfoToFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+	return getFile(fd)
+}
+
+// AddSourceInfoToMessage will return a new message descriptor that is a
+// copy of md except that it includes source code info. If the file that
+// contains the given message descriptor already contains source info,
+// was not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then md is returned as is, unchanged.
+func AddSourceInfoToMessage(md protoreflect.MessageDescriptor) (protoreflect.MessageDescriptor, error) {
+	return updateDescriptor(md)
+}
+
+// AddSourceInfoToEnum will return a new enum descriptor that is a copy
+// of ed except that it includes source code info. If the file that
+// contains the given enum descriptor already contains source info, was
+// not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then ed is returned as is, unchanged.
+func AddSourceInfoToEnum(ed protoreflect.EnumDescriptor) (protoreflect.EnumDescriptor, error) {
+	return updateDescriptor(ed)
+}
+
+// AddSourceInfoToService will return a new service descriptor that is
+// a copy of sd except that it includes source code info. If the file
+// that contains the given service descriptor already contains source
+// info, was not registered from generated code, or was not processed
+// with the protoc-gen-gosrcinfo plugin, then sd is returned as is,
+// unchanged.
+func AddSourceInfoToService(sd protoreflect.ServiceDescriptor) (protoreflect.ServiceDescriptor, error) {
+	return updateDescriptor(sd)
+}
+
+// AddSourceInfoToExtensionType will return a new extension type that
+// is a copy of xt except that its associated descriptor includes
+// source code info. If the file that contains the given extension
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then xt is returned as is, unchanged.
+func AddSourceInfoToExtensionType(xt protoreflect.ExtensionType) (protoreflect.ExtensionType, error) {
+	if genType, err := protoregistry.GlobalTypes.FindExtensionByName(xt.TypeDescriptor().FullName()); err != nil || genType != xt {
+		return xt, nil // not from generated code
+	}
+	ext, err := updateField(xt.TypeDescriptor().Descriptor())
+	if err != nil {
+		return nil, err
+	}
+	return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+// AddSourceInfoToMessageType will return a new message type that
+// is a copy of mt except that its associated descriptor includes
+// source code info. If the file that contains the given message
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then mt is returned as is, unchanged.
+func AddSourceInfoToMessageType(mt protoreflect.MessageType) (protoreflect.MessageType, error) {
+	if genType, err := protoregistry.GlobalTypes.FindMessageByName(mt.Descriptor().FullName()); err != nil || genType != mt {
+		return mt, nil // not from generated code
+	}
+	msg, err := updateDescriptor(mt.Descriptor())
+	if err != nil {
+		return nil, err
+	}
+	return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+// WrapFile is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToFile and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToFile directly instead. The word "wrap" is
+// a misnomer since this method does not actually wrap the given value.
+// Though unlikely, the operation can technically fail, so the recommended
+// function allows the return of an error instead of panicking.
+func WrapFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor {
+	result, err := AddSourceInfoToFile(fd)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapMessage is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToMessage and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToMessage directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panicking.
+func WrapMessage(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor {
+	result, err := AddSourceInfoToMessage(md)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapEnum is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToEnum and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToEnum directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panicking.
+func WrapEnum(ed protoreflect.EnumDescriptor) protoreflect.EnumDescriptor {
+	result, err := AddSourceInfoToEnum(ed)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapService is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToService and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToService directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panicking.
+func WrapService(sd protoreflect.ServiceDescriptor) protoreflect.ServiceDescriptor {
+	result, err := AddSourceInfoToService(sd)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapExtensionType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToExtensionType and panics if that function
+// returns an error.
+//
+// Deprecated: Use AddSourceInfoToExtensionType directly instead. The
+// word "wrap" is a misnomer since this method does not actually wrap
+// the given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panicking.
+func WrapExtensionType(xt protoreflect.ExtensionType) protoreflect.ExtensionType {
+	result, err := AddSourceInfoToExtensionType(xt)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+// WrapMessageType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToMessageType and panics if that function returns
+// an error.
+//
+// Deprecated: Use AddSourceInfoToMessageType directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail, so
+// the recommended function allows the return of an error instead of
+// panicking.
+func WrapMessageType(mt protoreflect.MessageType) protoreflect.MessageType {
+	result, err := AddSourceInfoToMessageType(mt)
+	if err != nil {
+		panic(err)
+	}
+	return result
+}
+
+type extensionType struct {
+	protoreflect.ExtensionType
+	extDesc protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+	return extensionTypeDescriptor{ExtensionDescriptor: xt.extDesc, extType: xt.ExtensionType}
+}
+
+type extensionTypeDescriptor struct {
+	protoreflect.ExtensionDescriptor
+	extType protoreflect.ExtensionType
+}
+
+func (xtd extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+	return extensionType{ExtensionType: xtd.extType, extDesc: xtd.ExtensionDescriptor}
+}
+
+func (xtd extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+	return xtd.ExtensionDescriptor
+}
+
+type messageType struct {
+	protoreflect.MessageType
+	msgDesc protoreflect.MessageDescriptor
+}
+
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor {
+	return mt.msgDesc
+}
+
+func updateField(fd protoreflect.FieldDescriptor) (protoreflect.FieldDescriptor, error) {
+	if xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor); ok {
+		ext, err := updateField(xtd.Descriptor())
+		if err != nil {
+			return nil, err
+		}
+		return extensionTypeDescriptor{ExtensionDescriptor: ext, extType: xtd.Type()}, nil
+	}
+	d, err := updateDescriptor(fd)
+	if err != nil {
+		return nil, err
+	}
+	return d.(protoreflect.FieldDescriptor), nil
+}
+
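+// updateDescriptor re-resolves d against a copy of its parent file that has
+// source code info attached, returning the corresponding descriptor from the
+// updated file (or d unchanged if the file needed no update).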
+func updateDescriptor[D protoreflect.Descriptor](d D) (D, error) {
+	updatedFile, err := getFile(d.ParentFile())
+	if err != nil {
+		var zero D
+		return zero, err
+	}
+	if updatedFile == d.ParentFile() {
+		// no change
+		return d, nil
+	}
+	updated := findDescriptor(updatedFile, d)
+	result, ok := updated.(D)
+	if !ok {
+		var zero D
+		return zero, fmt.Errorf("updated result is type %T which could not be converted to %T", updated, result)
+	}
+	return result, nil
+}
+
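+// findDescriptor locates, within fd, the descriptor occupying the same
+// position as d by recursively resolving d's parent and then indexing into
+// the matching container (messages, fields, enums, services, etc.).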
+func findDescriptor(fd protoreflect.FileDescriptor, d protoreflect.Descriptor) protoreflect.Descriptor {
+	if d == nil {
+		return nil
+	}
+	if _, isFile := d.(protoreflect.FileDescriptor); isFile {
+		return fd
+	}
+	if d.Parent() == nil {
+		return d
+	}
+	switch d := d.(type) {
+	case protoreflect.MessageDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(messageContainer)
+		return parent.Messages().Get(d.Index())
+	case protoreflect.FieldDescriptor:
+		if d.IsExtension() {
+			parent := findDescriptor(fd, d.Parent()).(extensionContainer)
+			return parent.Extensions().Get(d.Index())
+		} else {
+			parent := findDescriptor(fd, d.Parent()).(fieldContainer)
+			return parent.Fields().Get(d.Index())
+		}
+	case protoreflect.OneofDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(oneofContainer)
+		return parent.Oneofs().Get(d.Index())
+	case protoreflect.EnumDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(enumContainer)
+		return parent.Enums().Get(d.Index())
+	case protoreflect.EnumValueDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(enumValueContainer)
+		return parent.Values().Get(d.Index())
+	case protoreflect.ServiceDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(serviceContainer)
+		return parent.Services().Get(d.Index())
+	case protoreflect.MethodDescriptor:
+		parent := findDescriptor(fd, d.Parent()).(methodContainer)
+		return parent.Methods().Get(d.Index())
+	}
+	return d
+}
+
+type messageContainer interface {
+	Messages() protoreflect.MessageDescriptors
+}
+
+type extensionContainer interface {
+	Extensions() protoreflect.ExtensionDescriptors
+}
+
+type fieldContainer interface {
+	Fields() protoreflect.FieldDescriptors
+}
+
+type oneofContainer interface {
+	Oneofs() protoreflect.OneofDescriptors
+}
+
+type enumContainer interface {
+	Enums() protoreflect.EnumDescriptors
+}
+
+type enumValueContainer interface {
+	Values() protoreflect.EnumValueDescriptors
+}
+
+type serviceContainer interface {
+	Services() protoreflect.ServiceDescriptors
+}
+
+type methodContainer interface {
+	Methods() protoreflect.MethodDescriptors
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go
new file mode 100644
index 0000000..5491afd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go
@@ -0,0 +1,211 @@
+package desc
+
+import (
+	"fmt"
+
+	"github.com/bufbuild/protocompile/protoutil"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DescriptorWrapper wraps a protoreflect.Descriptor. All of the Descriptor
+// implementations in this package implement this interface. This can be
+// used to recover the underlying descriptor. Each descriptor type in this
+// package also provides a strongly-typed form of this method, such as the
+// following method for *FileDescriptor:
+//
+//	UnwrapFile() protoreflect.FileDescriptor
+type DescriptorWrapper interface {
+	Unwrap() protoreflect.Descriptor
+}
+
+// WrapDescriptor wraps the given descriptor, returning a desc.Descriptor
+// value that represents the same element.
+func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) {
+	return wrapDescriptor(d, mapCache{})
+}
+
+func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+	switch d := d.(type) {
+	case protoreflect.FileDescriptor:
+		return wrapFile(d, cache)
+	case protoreflect.MessageDescriptor:
+		return wrapMessage(d, cache)
+	case protoreflect.FieldDescriptor:
+		return wrapField(d, cache)
+	case protoreflect.OneofDescriptor:
+		return wrapOneOf(d, cache)
+	case protoreflect.EnumDescriptor:
+		return wrapEnum(d, cache)
+	case protoreflect.EnumValueDescriptor:
+		return wrapEnumValue(d, cache)
+	case protoreflect.ServiceDescriptor:
+		return wrapService(d, cache)
+	case protoreflect.MethodDescriptor:
+		return wrapMethod(d, cache)
+	default:
+		return nil, fmt.Errorf("unknown descriptor type: %T", d)
+	}
+}
+
+// WrapFiles wraps the given file descriptors, returning a slice of *desc.FileDescriptor
+// values that represent the same files.
+func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) {
+	cache := mapCache{}
+	results := make([]*FileDescriptor, len(d))
+	for i := range d {
+		var err error
+		results[i], err = wrapFile(d[i], cache)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return results, nil
+}
+
+// WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor
+// value that represents the same file.
+func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) {
+	return wrapFile(d, mapCache{})
+}
+
+func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) {
+	if res := cache.get(d); res != nil {
+		return res.(*FileDescriptor), nil
+	}
+	fdp := protoutil.ProtoFromFileDescriptor(d)
+	return convertFile(d, fdp, cache)
+}
+
+// WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor
+// value that represents the same message.
+func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+	return wrapMessage(d, mapCache{})
+}
+
+func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	switch p := parent.(type) {
+	case *FileDescriptor:
+		return p.messages[d.Index()], nil
+	case *MessageDescriptor:
+		return p.nested[d.Index()], nil
+	default:
+		return nil, fmt.Errorf("message has unexpected parent type: %T", parent)
+	}
+}
+
+// WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor
+// value that represents the same field.
+func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) {
+	return wrapField(d, mapCache{})
+}
+
+func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	switch p := parent.(type) {
+	case *FileDescriptor:
+		return p.extensions[d.Index()], nil
+	case *MessageDescriptor:
+		if d.IsExtension() {
+			return p.extensions[d.Index()], nil
+		}
+		return p.fields[d.Index()], nil
+	default:
+		return nil, fmt.Errorf("field has unexpected parent type: %T", parent)
+	}
+}
+
+// WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor
+// value that represents the same oneof.
+func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) {
+	return wrapOneOf(d, mapCache{})
+}
+
+func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*MessageDescriptor); ok {
+		return p.oneOfs[d.Index()], nil
+	}
+	return nil, fmt.Errorf("oneof has unexpected parent type: %T", parent)
+}
+
+// WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor
+// value that represents the same enum.
+func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) {
+	return wrapEnum(d, mapCache{})
+}
+
+func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	switch p := parent.(type) {
+	case *FileDescriptor:
+		return p.enums[d.Index()], nil
+	case *MessageDescriptor:
+		return p.enums[d.Index()], nil
+	default:
+		return nil, fmt.Errorf("enum has unexpected parent type: %T", parent)
+	}
+}
+
+// WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor
+// value that represents the same enum value.
+func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) {
+	return wrapEnumValue(d, mapCache{})
+}
+
+func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*EnumDescriptor); ok {
+		return p.values[d.Index()], nil
+	}
+	return nil, fmt.Errorf("enum value has unexpected parent type: %T", parent)
+}
+
+// WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor
+// value that represents the same service.
+func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) {
+	return wrapService(d, mapCache{})
+}
+
+func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*FileDescriptor); ok {
+		return p.services[d.Index()], nil
+	}
+	return nil, fmt.Errorf("service has unexpected parent type: %T", parent)
+}
+
+// WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor
+// value that represents the same method.
+func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) {
+	return wrapMethod(d, mapCache{})
+}
+
+func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) {
+	parent, err := wrapDescriptor(d.Parent(), cache)
+	if err != nil {
+		return nil, err
+	}
+	if p, ok := parent.(*ServiceDescriptor); ok {
+		return p.methods[d.Index()], nil
+	}
+	return nil, fmt.Errorf("method has unexpected parent type: %T", parent)
+}
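
As a usage sketch (not part of the patch): any protoreflect descriptor from the newer runtime can be wrapped into this package's types and unwrapped again via the DescriptorWrapper interface documented above. This assumes the vendored version exposes WrapMessage and Unwrap as declared in this file:

```go
package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// grab a protoreflect descriptor from any generated message
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()

	// wrap it into this package's representation
	wrapped, err := desc.WrapMessage(md)
	if err != nil {
		panic(err)
	}
	fmt.Println(wrapped.GetFullyQualifiedName()) // google.protobuf.Duration

	// and recover the underlying descriptor via DescriptorWrapper
	fmt.Println(wrapped.Unwrap().FullName())
}
```
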
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
index 91fd672..39e077a 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/binary.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -4,9 +4,11 @@
 
 import (
 	"fmt"
-	"github.com/golang/protobuf/proto"
-	"github.com/jhump/protoreflect/codec"
 	"io"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/codec"
 )
 
 // defaultDeterminism, if true, will mean that calls to Marshal will produce
@@ -71,6 +73,9 @@
 }
 
 func (m *Message) marshal(b *codec.Buffer) error {
+	if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+		return fmt.Errorf("%s is a message set; marshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+	}
 	if err := m.marshalKnownFields(b); err != nil {
 		return err
 	}
@@ -150,6 +155,9 @@
 }
 
 func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+	if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+		return fmt.Errorf("%s is a message set; unmarshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+	}
 	for !buf.EOF() {
 		fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
 		if err != nil {
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
index c329fcd..59b77eb 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/doc.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -15,8 +15,7 @@
 // will be used to create other messages or parse extension fields during
 // de-serialization.
 //
-//
-// Field Types
+// # Field Types
 //
 // The types of values expected by setters and returned by getters are the
 // same as protoc generates for scalar fields. For repeated fields, there are
@@ -72,8 +71,7 @@
 // but if the factory is configured with a KnownTypeRegistry, or if the field's
 // type is a well-known type, it will return a zero value generated message.
 //
-//
-// Unrecognized Fields
+// # Unrecognized Fields
 //
 // Unrecognized fields are preserved by the dynamic message when unmarshaling
 // from the standard binary format. If the message's MessageFactory was
@@ -89,21 +87,20 @@
 // it can even happen for non-extension fields! Here's an example scenario where
 // a non-extension field can initially be unknown and become known:
 //
-//   1. A dynamic message is created with a descriptor, A, and then
-//      de-serialized from a stream of bytes. The stream includes an
-//      unrecognized tag T. The message will include tag T in its unrecognized
-//      field set.
-//   2. Another call site retrieves a newer descriptor, A', which includes a
-//      newly added field with tag T.
-//   3. That other call site then uses a FieldDescriptor to access the value of
-//      the new field. This will cause the dynamic message to parse the bytes
-//      for the unknown tag T and store them as a known field.
-//   4. Subsequent operations for tag T, including setting the field using only
-//      tag number or de-serializing a stream that includes tag T, will operate
-//      as if that tag were part of the original descriptor, A.
+//  1. A dynamic message is created with a descriptor, A, and then
+//     de-serialized from a stream of bytes. The stream includes an
+//     unrecognized tag T. The message will include tag T in its unrecognized
+//     field set.
+//  2. Another call site retrieves a newer descriptor, A', which includes a
+//     newly added field with tag T.
+//  3. That other call site then uses a FieldDescriptor to access the value of
+//     the new field. This will cause the dynamic message to parse the bytes
+//     for the unknown tag T and store them as a known field.
+//  4. Subsequent operations for tag T, including setting the field using only
+//     tag number or de-serializing a stream that includes tag T, will operate
+//     as if that tag were part of the original descriptor, A.
 //
-//
-// Compatibility
+// # Compatibility
 //
 // In addition to implementing the proto.Message interface, the included
 // Message type also provides an XXX_MessageName() method, so it can work with
@@ -145,8 +142,7 @@
 // about fields that the dynamic message does not, these unrecognized fields may
 // become known fields in the generated message.
 //
-//
-// Registries
+// # Registries
 //
 // This package also contains a couple of registries, for managing known types
 // and descriptors.
@@ -160,4 +156,12 @@
 //
 // The ExtensionRegistry allows for recognizing and parsing extensions fields
 // (for proto2 messages).
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/dynamic package,
+// see [google.golang.org/protobuf/types/dynamicpb].
+//
+// [google.golang.org/protobuf/types/dynamicpb]: https://pkg.go.dev/google.golang.org/protobuf/types/dynamicpb
 package dynamic
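
Since the new deprecation note steers users toward dynamicpb, here is a minimal sketch of the equivalent pattern with the v2 API (a well-known type is used purely for brevity):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/dynamicpb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// build a dynamic message from any message descriptor
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	msg := dynamicpb.NewMessage(md)

	// set a field reflectively, then round-trip through the wire format
	msg.Set(md.Fields().ByName("seconds"), protoreflect.ValueOfInt64(90))
	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b), "bytes")
}
```
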
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
index de13b92..ff136b0 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -10,7 +10,9 @@
 	"strings"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	protov2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/codec"
 	"github.com/jhump/protoreflect/desc"
@@ -411,19 +413,20 @@
 // The Go type of the returned value, for scalar fields, is the same as protoc
 // would generate for the field (in a non-dynamic message). The table below
 // lists the scalar types and the corresponding Go types.
-//  +-------------------------+-----------+
-//  |       Declared Type     |  Go Type  |
-//  +-------------------------+-----------+
-//  | int32, sint32, sfixed32 | int32     |
-//  | int64, sint64, sfixed64 | int64     |
-//  | uint32, fixed32         | uint32    |
-//  | uint64, fixed64         | uint64    |
-//  | float                   | float32   |
-//  | double                  | double32  |
-//  | bool                    | bool      |
-//  | string                  | string    |
-//  | bytes                   | []byte    |
-//  +-------------------------+-----------+
+//
+//	+-------------------------+-----------+
+//	|       Declared Type     |  Go Type  |
+//	+-------------------------+-----------+
+//	| int32, sint32, sfixed32 | int32     |
+//	| int64, sint64, sfixed64 | int64     |
+//	| uint32, fixed32         | uint32    |
+//	| uint64, fixed64         | uint64    |
+//	| float                   | float32   |
+//	| double                  | double32  |
+//	| bool                    | bool      |
+//	| string                  | string    |
+//	| bytes                   | []byte    |
+//	+-------------------------+-----------+
 //
 // Values for enum fields will always be int32 values. You can use the enum
 // descriptor associated with the field to lookup value names with those values.
@@ -1883,42 +1886,42 @@
 	}
 
 	switch t {
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
-		descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		return toInt32(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
-		descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64:
 		return toInt64(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
-		descriptor.FieldDescriptorProto_TYPE_UINT32:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT32:
 		return toUint32(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
-		descriptor.FieldDescriptorProto_TYPE_UINT64:
+	case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+		descriptorpb.FieldDescriptorProto_TYPE_UINT64:
 		return toUint64(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		return toFloat32(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return toFloat64(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		return toBool(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return toBytes(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return toString(reflect.Indirect(val), fd)
 
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
-		descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 		m, err := asMessage(val, fd.GetFullyQualifiedName())
 		// check that message is correct type
 		if err != nil {
@@ -2053,8 +2056,9 @@
 
 // ConvertTo converts this dynamic message into the given message. This is
 // shorthand for resetting then merging:
-//   target.Reset()
-//   m.MergeInto(target)
+//
+//	target.Reset()
+//	m.MergeInto(target)
 func (m *Message) ConvertTo(target proto.Message) error {
 	if err := m.checkType(target); err != nil {
 		return err
@@ -2081,8 +2085,9 @@
 
 // ConvertFrom converts the given message into this dynamic message. This is
 // shorthand for resetting then merging:
-//   m.Reset()
-//   m.MergeFrom(target)
+//
+//	m.Reset()
+//	m.MergeFrom(target)
 func (m *Message) ConvertFrom(target proto.Message) error {
 	if err := m.checkType(target); err != nil {
 		return err
@@ -2553,6 +2558,66 @@
 		}
 	}
 
+	// With API v2, it is possible that the new protoreflect interfaces
+	// were used to store an extension, which means it can't be returned
+	// by proto.ExtensionDescs and it's also not in the unrecognized data.
+	// So we have a separate loop to trawl through it...
+	var err error
+	proto.MessageReflect(pm).Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field... we already got it above
+			return true
+		}
+		xt := fld.(protoreflect.ExtensionTypeDescriptor)
+		if _, ok := xt.Type().(*proto.ExtensionDesc); ok {
+			// known extension... we already got it above
+			return true
+		}
+		var fd *desc.FieldDescriptor
+		fd, err = desc.WrapField(fld)
+		if err != nil {
+			return false
+		}
+		v := convertProtoReflectValue(val)
+		if v, err = validFieldValue(fd, v); err != nil {
+			return false
+		}
+		values[fd] = v
+		return true
+	})
+	if err != nil {
+		return err
+	}
+
+	// unrecognized extension fields:
+	//   In API v2 of proto, some extensions may NEITHER be included in ExtensionDescs
+	//   above NOR included in unrecognized fields below. These are extensions that use
+	//   a custom extension type (not a generated one -- i.e. not a linked-in extension).
+	mr := proto.MessageReflect(pm)
+	var extBytes []byte
+	var retErr error
+	mr.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		if !fld.IsExtension() {
+			// normal field, already processed above
+			return true
+		}
+		if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
+			if _, ok := extd.Type().(*proto.ExtensionDesc); ok {
+				// normal known extension, already processed above
+				return true
+			}
+		}
+
+		// marshal the extension to bytes and then handle it as an unknown field
+		// below; use a fresh message so that only this one field is serialized
+		newMsg := mr.New()
+		newMsg.Set(fld, val)
+		extBytes, retErr = protov2.MarshalOptions{}.MarshalAppend(extBytes, newMsg.Interface())
+		return retErr == nil
+	})
+	if retErr != nil {
+		return retErr
+	}
+
 	// now actually perform the merge
 	for fd, v := range values {
 		if err := mergeField(m, fd, v); err != nil {
@@ -2560,6 +2625,12 @@
 		}
 	}
 
+	if len(extBytes) > 0 {
+		// treat unrecognized extensions like unknown fields and ignore any
+		// error returned: pulling in unknown fields is best-effort
+		_ = m.UnmarshalMerge(extBytes)
+	}
+
 	data := internal.GetUnrecognized(pm)
 	if len(data) > 0 {
 		// ignore any error returned: pulling in unknown fields is best-effort
@@ -2569,6 +2640,31 @@
 	return nil
 }
 
+func convertProtoReflectValue(v protoreflect.Value) interface{} {
+	val := v.Interface()
+	switch val := val.(type) {
+	case protoreflect.Message:
+		return val.Interface()
+	case protoreflect.Map:
+		mp := make(map[interface{}]interface{}, val.Len())
+		val.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+			mp[convertProtoReflectValue(k.Value())] = convertProtoReflectValue(v)
+			return true
+		})
+		return mp
+	case protoreflect.List:
+		sl := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			sl[i] = convertProtoReflectValue(val.Get(i))
+		}
+		return sl
+	case protoreflect.EnumNumber:
+		return int32(val)
+	default:
+		return val
+	}
+}
+
 // Validate checks that all required fields are present. It returns an error if any are absent.
 func (m *Message) Validate() error {
 	missingFields := m.findMissingFields()
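
To make the scalar-type table above concrete, a small sketch of setting and getting fields on a dynamic message; Duration is used only as a conveniently available descriptor:

```go
package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md, err := desc.LoadMessageDescriptorForMessage(&durationpb.Duration{})
	if err != nil {
		panic(err)
	}
	m := dynamic.NewMessage(md)

	// setters expect the Go types from the table: int64 for an int64
	// field, int32 for int32/enum fields, and so on
	if err := m.TrySetFieldByName("seconds", int64(90)); err != nil {
		panic(err)
	}
	fmt.Println(m.GetFieldByName("seconds").(int64)) // 90
}
```
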
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
index 1eaedfa..6fca393 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -4,11 +4,11 @@
 package grpcdynamic
 
 import (
+	"context"
 	"fmt"
 	"io"
 
 	"github.com/golang/protobuf/proto"
-	"golang.org/x/net/context"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/metadata"
 
@@ -27,12 +27,7 @@
 // type used to construct Stubs. But the use of this interface allows
 // construction of stubs that use alternate concrete types as the transport for
 // RPC operations.
-type Channel interface {
-	Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
-	NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
-}
-
-var _ Channel = (*grpc.ClientConn)(nil)
+type Channel = grpc.ClientConnInterface
 
 // NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
 func NewStub(channel Channel) Stub {
@@ -79,6 +74,7 @@
 		ClientStreams: method.IsClientStreaming(),
 	}
 	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		cancel()
 		return nil, err
 	} else {
 		err = cs.SendMsg(request)
@@ -91,6 +87,11 @@
 			cancel()
 			return nil, err
 		}
+		go func() {
+			// when the new stream is finished, also clean up the parent context
+			<-cs.Context().Done()
+			cancel()
+		}()
 		return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
 	}
 }
@@ -108,8 +109,14 @@
 		ClientStreams: method.IsClientStreaming(),
 	}
 	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		cancel()
 		return nil, err
 	} else {
+		go func() {
+			// when the new stream is finished, also clean up the parent context
+			<-cs.Context().Done()
+			cancel()
+		}()
 		return &ClientStream{cs, method, s.mf, cancel}, nil
 	}
 }
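
With Channel now an alias for grpc.ClientConnInterface, any *grpc.ClientConn (or compatible transport) can back a stub directly. A hedged sketch of a unary invocation; the helper name and call pattern are illustrative, not part of the library:

```go
package grpcexample

import (
	"context"

	"github.com/golang/protobuf/proto"
	"google.golang.org/grpc"

	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/dynamic"
	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
)

// invokeUnary calls the unary method described by md over cc, sending an
// empty dynamic message as the request.
func invokeUnary(ctx context.Context, cc *grpc.ClientConn, md *desc.MethodDescriptor) (proto.Message, error) {
	stub := grpcdynamic.NewStub(cc) // *grpc.ClientConn satisfies the Channel alias
	req := dynamic.NewMessage(md.GetInputType())
	return stub.InvokeRpc(ctx, md, req)
}
```
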
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
index 02c8298..9081965 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/json.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -17,14 +17,14 @@
 
 	"github.com/golang/protobuf/jsonpb"
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 	// link in the well-known-types that have a special JSON format
-	_ "github.com/golang/protobuf/ptypes/any"
-	_ "github.com/golang/protobuf/ptypes/duration"
-	_ "github.com/golang/protobuf/ptypes/empty"
-	_ "github.com/golang/protobuf/ptypes/struct"
-	_ "github.com/golang/protobuf/ptypes/timestamp"
-	_ "github.com/golang/protobuf/ptypes/wrappers"
+	_ "google.golang.org/protobuf/types/known/anypb"
+	_ "google.golang.org/protobuf/types/known/durationpb"
+	_ "google.golang.org/protobuf/types/known/emptypb"
+	_ "google.golang.org/protobuf/types/known/structpb"
+	_ "google.golang.org/protobuf/types/known/timestamppb"
+	_ "google.golang.org/protobuf/types/known/wrapperspb"
 
 	"github.com/jhump/protoreflect/desc"
 )
@@ -60,7 +60,7 @@
 // This method is convenient shorthand for invoking MarshalJSONPB with a default
 // (zero value) marshaler:
 //
-//    m.MarshalJSONPB(&jsonpb.Marshaler{})
+//	m.MarshalJSONPB(&jsonpb.Marshaler{})
 //
 // So enums are serialized using enum value name strings, and values that are
 // not present (including those with default/zero value for messages defined in
@@ -80,7 +80,7 @@
 // This method is convenient shorthand for invoking MarshalJSONPB with a default
 // (zero value) marshaler:
 //
-//    m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+//	m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
 //
 // So enums are serialized using enum value name strings, and values that are
 // not present (including those with default/zero value for messages defined in
@@ -497,7 +497,7 @@
 // This method is shorthand for invoking UnmarshalJSONPB with a default (zero
 // value) unmarshaler:
 //
-//    m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+//	m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
 //
 // So unknown fields will result in an error, and no provided jsonpb.AnyResolver
 // will be used when parsing google.protobuf.Any messages.
@@ -669,13 +669,13 @@
 }
 
 func isWellKnownValue(fd *desc.FieldDescriptor) bool {
-	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+	return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
 		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
 }
 
 func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
 	// we look for ListValue; but we also look for Value, which can be assigned a ListValue
-	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+	return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
 		(fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" ||
 			fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value")
 }
@@ -794,8 +794,8 @@
 	}
 
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
-		descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 
 		if t == nil && allowNilMessage {
 			// if json is simply "null" return a nil pointer
@@ -822,7 +822,7 @@
 		}
 		return m, nil
 
-	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		if e, err := r.nextNumber(); err != nil {
 			return nil, err
 		} else {
@@ -842,9 +842,9 @@
 			}
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if i, err := r.nextInt(); err != nil {
 			return nil, err
 		} else if i > math.MaxInt32 || i < math.MinInt32 {
@@ -853,13 +853,13 @@
 			return int32(i), err
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		return r.nextInt()
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if i, err := r.nextUint(); err != nil {
 			return nil, err
 		} else if i > math.MaxUint32 {
@@ -868,11 +868,11 @@
 			return uint32(i), err
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		return r.nextUint()
 
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		if str, ok := t.(string); ok {
 			if str == "true" {
 				r.poll() // consume token
@@ -884,20 +884,20 @@
 		}
 		return r.nextBool()
 
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		if f, err := r.nextFloat(); err != nil {
 			return nil, err
 		} else {
 			return float32(f), nil
 		}
 
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		return r.nextFloat()
 
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		return r.nextBytes()
 
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		return r.nextString()
 
 	default:
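
A small usage sketch tying the shorthand methods above together; the jsonpb options shown mirror what the MarshalJSONIndent doc comment describes as its defaults:

```go
package jsonexample

import (
	"github.com/golang/protobuf/jsonpb"
	"github.com/jhump/protoreflect/dynamic"
)

// toPrettyJSON renders a dynamic message the way the MarshalJSONIndent
// shorthand describes: enum value names as strings, two-space indentation.
func toPrettyJSON(m *dynamic.Message) ([]byte, error) {
	return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
}
```
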
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
index 03162a4..69969fc 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -4,8 +4,9 @@
 package dynamic
 
 import (
-	"github.com/jhump/protoreflect/desc"
 	"reflect"
+
+	"github.com/jhump/protoreflect/desc"
 )
 
 // Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
index ef1b370..fb353cf 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -4,8 +4,9 @@
 package dynamic
 
 import (
-	"github.com/jhump/protoreflect/desc"
 	"reflect"
+
+	"github.com/jhump/protoreflect/desc"
 )
 
 // With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
index 9ab8e61..683e7b3 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -37,9 +37,10 @@
 // (those for which protoc-generated code is statically linked into the Go program) are
 // known types. If any dynamic messages are produced, they will recognize and parse all
 // "default" extension fields. This is the equivalent of:
-//   NewMessageFactoryWithRegistries(
-//       NewExtensionRegistryWithDefaults(),
-//       NewKnownTypeRegistryWithDefaults())
+//
+//	NewMessageFactoryWithRegistries(
+//	    NewExtensionRegistryWithDefaults(),
+//	    NewKnownTypeRegistryWithDefaults())
 func NewMessageFactoryWithDefaults() *MessageFactory {
 	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
 }
@@ -174,28 +175,33 @@
 // GetKnownType will return the reflect.Type for the given message name if it is
 // known. If it is not known, nil is returned.
 func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
-	var msgType reflect.Type
 	if r == nil {
 		// a nil registry behaves the same as zero value instance: only know of well-known types
 		t := proto.MessageType(messageName)
 		if t != nil && isWellKnownType(t) {
-			msgType = t
+			return t
 		}
-	} else {
-		if r.includeDefault {
-			msgType = proto.MessageType(messageName)
-		} else if !r.excludeWkt {
-			t := proto.MessageType(messageName)
-			if t != nil && isWellKnownType(t) {
-				msgType = t
-			}
+		return nil
+	}
+
+	if r.includeDefault {
+		t := proto.MessageType(messageName)
+		if t != nil && isMessage(t) {
+			return t
 		}
-		if msgType == nil {
-			r.mu.RLock()
-			msgType = r.types[messageName]
-			r.mu.RUnlock()
+	} else if !r.excludeWkt {
+		t := proto.MessageType(messageName)
+		if t != nil && isWellKnownType(t) {
+			return t
 		}
 	}
 
-	return msgType
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	return r.types[messageName]
+}
+
+func isMessage(t reflect.Type) bool {
+	_, ok := reflect.Zero(t).Interface().(proto.Message)
+	return ok
 }
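
The refactored GetKnownType now returns early for generated types and only consults the registry's own map last. A short sketch of the lookup from a caller's perspective (the result assumes the well-known types are linked in, which this package arranges):

```go
package factoryexample

import (
	"fmt"

	"github.com/jhump/protoreflect/dynamic"
)

func showKnownType() {
	ktr := dynamic.NewKnownTypeRegistryWithDefaults()
	// defaults include all statically linked generated types,
	// so this resolves to the generated Duration type
	t := ktr.GetKnownType("google.protobuf.Duration")
	fmt.Println(t)
}
```
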
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
index 5784d3e..5680dc2 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/text.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -15,7 +15,7 @@
 	"unicode"
 
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/codec"
 	"github.com/jhump/protoreflect/desc"
@@ -208,7 +208,7 @@
 }
 
 func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
-	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+	group := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP
 	if group {
 		var name string
 		if fd.IsExtension() {
@@ -518,7 +518,7 @@
 			if fd == nil {
 				// See if it's a group name
 				for _, field := range m.md.GetFields() {
-					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+					if field.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
 						fd = field
 						break
 					}
@@ -581,8 +581,8 @@
 				return err
 			}
 
-		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
-			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+		} else if (fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP ||
+			fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) &&
 			tok.tokTyp.EndToken() != tokenError {
 
 			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
@@ -689,7 +689,7 @@
 
 	var expected string
 	switch fd.GetType() {
-	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
 		if tok.tokTyp == tokenIdent {
 			if tok.val.(string) == "true" {
 				return set(m, fd, true)
@@ -698,17 +698,17 @@
 			}
 		}
 		expected = "boolean value"
-	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
 		if tok.tokTyp == tokenString {
 			return set(m, fd, []byte(tok.val.(string)))
 		}
 		expected = "bytes string value"
-	case descriptor.FieldDescriptorProto_TYPE_STRING:
+	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
 		if tok.tokTyp == tokenString {
 			return set(m, fd, tok.val)
 		}
 		expected = "string value"
-	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
 		switch tok.tokTyp {
 		case tokenFloat:
 			return set(m, fd, float32(tok.val.(float64)))
@@ -736,7 +736,7 @@
 			}
 		}
 		expected = "float value"
-	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
 		switch tok.tokTyp {
 		case tokenFloat:
 			return set(m, fd, tok.val)
@@ -764,9 +764,9 @@
 			}
 		}
 		expected = "float value"
-	case descriptor.FieldDescriptorProto_TYPE_INT32,
-		descriptor.FieldDescriptorProto_TYPE_SINT32,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
 				return err
@@ -775,9 +775,9 @@
 			}
 		}
 		expected = "int value"
-	case descriptor.FieldDescriptorProto_TYPE_INT64,
-		descriptor.FieldDescriptorProto_TYPE_SINT64,
-		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
 				return err
@@ -786,8 +786,8 @@
 			}
 		}
 		expected = "int value"
-	case descriptor.FieldDescriptorProto_TYPE_UINT32,
-		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
 				return err
@@ -796,8 +796,8 @@
 			}
 		}
 		expected = "unsigned int value"
-	case descriptor.FieldDescriptorProto_TYPE_UINT64,
-		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+	case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+		descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
 		if tok.tokTyp == tokenInt {
 			if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
 				return err
@@ -806,7 +806,7 @@
 			}
 		}
 		expected = "unsigned int value"
-	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+	case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
 		if tok.tokTyp == tokenIdent {
 			// TODO: add a flag to just ignore unrecognized enum value names?
 			vd := fd.GetEnumType().FindValueByName(tok.val.(string))
@@ -821,8 +821,8 @@
 			}
 		}
 		expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
-	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
-		descriptor.FieldDescriptorProto_TYPE_GROUP:
+	case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptorpb.FieldDescriptorProto_TYPE_GROUP:
 
 		endTok := tok.tokTyp.EndToken()
 		if endTok != tokenError {
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
new file mode 100644
index 0000000..661b925
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
@@ -0,0 +1,137 @@
+package grpcreflect
+
+import (
+	refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+func toV1Request(v1alpha *refv1alpha.ServerReflectionRequest) *refv1.ServerReflectionRequest {
+	var v1 refv1.ServerReflectionRequest
+	v1.Host = v1alpha.Host
+	switch mr := v1alpha.MessageRequest.(type) {
+	case *refv1alpha.ServerReflectionRequest_FileByFilename:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_FileByFilename{
+			FileByFilename: mr.FileByFilename,
+		}
+	case *refv1alpha.ServerReflectionRequest_FileContainingSymbol:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingSymbol{
+			FileContainingSymbol: mr.FileContainingSymbol,
+		}
+	case *refv1alpha.ServerReflectionRequest_FileContainingExtension:
+		if mr.FileContainingExtension != nil {
+			v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingExtension{
+				FileContainingExtension: &refv1.ExtensionRequest{
+					ContainingType:  mr.FileContainingExtension.GetContainingType(),
+					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
+			AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+		}
+	case *refv1alpha.ServerReflectionRequest_ListServices:
+		v1.MessageRequest = &refv1.ServerReflectionRequest_ListServices{
+			ListServices: mr.ListServices,
+		}
+	default:
+		// no value set
+	}
+	return &v1
+}
+
+func toV1AlphaRequest(v1 *refv1.ServerReflectionRequest) *refv1alpha.ServerReflectionRequest {
+	var v1alpha refv1alpha.ServerReflectionRequest
+	v1alpha.Host = v1.Host
+	switch mr := v1.MessageRequest.(type) {
+	case *refv1.ServerReflectionRequest_FileByFilename:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileByFilename{
+				FileByFilename: mr.FileByFilename,
+			}
+		}
+	case *refv1.ServerReflectionRequest_FileContainingSymbol:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingSymbol{
+				FileContainingSymbol: mr.FileContainingSymbol,
+			}
+		}
+	case *refv1.ServerReflectionRequest_FileContainingExtension:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingExtension{
+				FileContainingExtension: &refv1alpha.ExtensionRequest{
+					ContainingType:  mr.FileContainingExtension.GetContainingType(),
+					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+				},
+			}
+		}
+	case *refv1.ServerReflectionRequest_AllExtensionNumbersOfType:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{
+				AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+			}
+		}
+	case *refv1.ServerReflectionRequest_ListServices:
+		if mr != nil {
+			v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_ListServices{
+				ListServices: mr.ListServices,
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1alpha
+}
+
+func toV1Response(v1alpha *refv1alpha.ServerReflectionResponse) *refv1.ServerReflectionResponse {
+	var v1 refv1.ServerReflectionResponse
+	v1.ValidHost = v1alpha.ValidHost
+	if v1alpha.OriginalRequest != nil {
+		v1.OriginalRequest = toV1Request(v1alpha.OriginalRequest)
+	}
+	switch mr := v1alpha.MessageResponse.(type) {
+	case *refv1alpha.ServerReflectionResponse_FileDescriptorResponse:
+		if mr != nil {
+			v1.MessageResponse = &refv1.ServerReflectionResponse_FileDescriptorResponse{
+				FileDescriptorResponse: &refv1.FileDescriptorResponse{
+					FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionResponse_AllExtensionNumbersResponse:
+		if mr != nil {
+			v1.MessageResponse = &refv1.ServerReflectionResponse_AllExtensionNumbersResponse{
+				AllExtensionNumbersResponse: &refv1.ExtensionNumberResponse{
+					BaseTypeName:    mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+					ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionResponse_ListServicesResponse:
+		if mr != nil {
+			svcs := make([]*refv1.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+			for i, svc := range mr.ListServicesResponse.GetService() {
+				svcs[i] = &refv1.ServiceResponse{
+					Name: svc.GetName(),
+				}
+			}
+			v1.MessageResponse = &refv1.ServerReflectionResponse_ListServicesResponse{
+				ListServicesResponse: &refv1.ListServiceResponse{
+					Service: svcs,
+				},
+			}
+		}
+	case *refv1alpha.ServerReflectionResponse_ErrorResponse:
+		if mr != nil {
+			v1.MessageResponse = &refv1.ServerReflectionResponse_ErrorResponse{
+				ErrorResponse: &refv1.ErrorResponse{
+					ErrorCode:    mr.ErrorResponse.GetErrorCode(),
+					ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+				},
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
index 3fca3eb..1a35540 100644
--- a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -2,23 +2,39 @@
 
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"reflect"
 	"runtime"
+	"sort"
 	"sync"
+	"sync/atomic"
+	"time"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
-	"golang.org/x/net/context"
+	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
-	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
 	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/types/descriptorpb"
 
 	"github.com/jhump/protoreflect/desc"
 	"github.com/jhump/protoreflect/internal"
 )
 
+// If we try the v1 reflection API and get back "not implemented", we'll wait
+// this long before trying v1 again. This allows a long-lived client to
+// dynamically switch from v1alpha to v1 if the underlying server is updated
+// to support it. But it also prevents every stream request from always trying
+// v1 first: if we try it and see it fail, we shouldn't continually retry it
+// if we expect it will fail again.
+const durationBetweenV1Attempts = time.Hour
+
 // elementNotFoundError is the error returned by reflective operations where the
 // server does not recognize a given file name, symbol name, or extension.
 type elementNotFoundError struct {
@@ -51,14 +67,31 @@
 )
 
 func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindSymbol && cause.name == symbol {
+		// no need to wrap
+		if symType != symbolTypeUnknown && cause.symType == symbolTypeUnknown {
+			// We previously didn't know symbol type but now do?
+			// Create a new error that has the right symbol type.
+			return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol}
+		}
+		return cause
+	}
 	return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
 }
 
 func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindExtension && cause.name == extendee && cause.tag == tag {
+		// no need to wrap
+		return cause
+	}
 	return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
 }
 
 func fileNotFound(file string, cause *elementNotFoundError) error {
+	if cause != nil && cause.kind == elementKindFile && cause.name == file {
+		// no need to wrap
+		return cause
+	}
 	return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
 }
 
@@ -69,15 +102,15 @@
 		if first {
 			first = false
 		} else {
-			fmt.Fprint(&b, "\ncaused by: ")
+			_, _ = fmt.Fprint(&b, "\ncaused by: ")
 		}
 		switch e.kind {
 		case elementKindSymbol:
-			fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+			_, _ = fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
 		case elementKindExtension:
-			fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+			_, _ = fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
 		default:
-			fmt.Fprintf(&b, "File not found: %s", e.name)
+			_, _ = fmt.Fprintf(&b, "File not found: %s", e.name)
 		}
 	}
 	return b.String()
@@ -105,79 +138,186 @@
 	extensionNumber     int32
 }
 
+type resolvers struct {
+	descriptorResolver protodesc.Resolver
+	extensionResolver  protoregistry.ExtensionTypeResolver
+}
+
+type fileEntry struct {
+	fd       *desc.FileDescriptor
+	fallback bool
+}
+
 // Client is a client connection to a server for performing reflection calls
 // and resolving remote symbols.
 type Client struct {
-	ctx  context.Context
-	stub rpb.ServerReflectionClient
+	ctx              context.Context
+	now              func() time.Time
+	stubV1           refv1.ServerReflectionClient
+	stubV1Alpha      refv1alpha.ServerReflectionClient
+	allowMissing     atomic.Bool
+	fallbackResolver atomic.Pointer[resolvers]
 
-	connMu sync.Mutex
-	cancel context.CancelFunc
-	stream rpb.ServerReflection_ServerReflectionInfoClient
+	connMu      sync.Mutex
+	cancel      context.CancelFunc
+	stream      refv1.ServerReflection_ServerReflectionInfoClient
+	useV1Alpha  bool
+	lastTriedV1 time.Time
 
 	cacheMu          sync.RWMutex
-	protosByName     map[string]*dpb.FileDescriptorProto
-	filesByName      map[string]*desc.FileDescriptor
-	filesBySymbol    map[string]*desc.FileDescriptor
-	filesByExtension map[extDesc]*desc.FileDescriptor
+	protosByName     map[string]*descriptorpb.FileDescriptorProto
+	filesByName      map[string]fileEntry
+	filesBySymbol    map[string]fileEntry
+	filesByExtension map[extDesc]fileEntry
 }
 
 // NewClient creates a new Client with the given root context and using the
 // given RPC stub for talking to the server.
-func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+//
+// Deprecated: Use NewClientV1Alpha if you are intentionally pinning the
+// v1alpha version of the reflection service. Otherwise, use NewClientAuto
+// instead.
+func NewClient(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+	return NewClientV1Alpha(ctx, stub)
+}
+
+// NewClientV1Alpha creates a new Client using the v1alpha version of reflection
+// with the given root context and using the given RPC stub for talking to the
+// server.
+func NewClientV1Alpha(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+	return newClient(ctx, nil, stub)
+}
+
+// NewClientV1 creates a new Client using the v1 version of reflection with the
+// given root context and using the given RPC stub for talking to the server.
+func NewClientV1(ctx context.Context, stub refv1.ServerReflectionClient) *Client {
+	return newClient(ctx, stub, nil)
+}
+
+func newClient(ctx context.Context, stubv1 refv1.ServerReflectionClient, stubv1alpha refv1alpha.ServerReflectionClient) *Client {
 	cr := &Client{
 		ctx:              ctx,
-		stub:             stub,
-		protosByName:     map[string]*dpb.FileDescriptorProto{},
-		filesByName:      map[string]*desc.FileDescriptor{},
-		filesBySymbol:    map[string]*desc.FileDescriptor{},
-		filesByExtension: map[extDesc]*desc.FileDescriptor{},
+		now:              time.Now,
+		stubV1:           stubv1,
+		stubV1Alpha:      stubv1alpha,
+		protosByName:     map[string]*descriptorpb.FileDescriptorProto{},
+		filesByName:      map[string]fileEntry{},
+		filesBySymbol:    map[string]fileEntry{},
+		filesByExtension: map[extDesc]fileEntry{},
 	}
 	// don't leak a grpc stream
 	runtime.SetFinalizer(cr, (*Client).Reset)
 	return cr
 }
 
+// NewClientAuto creates a new Client that will use either the v1 or v1alpha
+// version of reflection (based on what the server supports) with the given
+// root context and using the given client connection.
+//
+// It will first try the v1 version of the reflection service. If it gets back
+// an "Unimplemented" error, it will fall back to using the v1alpha version. It
+// will remember which version the server supports for any subsequent operations
+// that need to re-invoke the streaming RPC. But if it's a very long-lived
+// client, it will periodically retry the v1 version (in case the server is
+// later updated to support it). These retries happen at most once per hour.
+func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client {
+	stubv1 := refv1.NewServerReflectionClient(cc)
+	stubv1alpha := refv1alpha.NewServerReflectionClient(cc)
+	return newClient(ctx, stubv1, stubv1alpha)
+}
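
A usage sketch for the new constructor (the helper name is illustrative); the client negotiates v1 vs. v1alpha transparently, so callers simply resolve descriptors:

```go
package reflectexample

import (
	"context"

	"google.golang.org/grpc"

	"github.com/jhump/protoreflect/desc"
	"github.com/jhump/protoreflect/grpcreflect"
)

// resolveService asks the server's reflection service (v1, falling back to
// v1alpha) for the descriptor of the named service.
func resolveService(ctx context.Context, cc grpc.ClientConnInterface, svc string) (*desc.ServiceDescriptor, error) {
	client := grpcreflect.NewClientAuto(ctx, cc)
	defer client.Reset()
	return client.ResolveService(svc)
}
```
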
+
+// AllowMissingFileDescriptors configures the client to allow missing files
+// when building descriptors when possible. Missing files are often fatal
+// errors, but with this option they can sometimes be worked around. Building
+// a schema can only succeed with some files missing if the files in question
+// only provide custom options and/or other unused types.
+func (cr *Client) AllowMissingFileDescriptors() {
+	cr.allowMissing.Store(true)
+}
+
+// AllowFallbackResolver configures the client to allow falling back to the
+// given resolvers if the server is unable to supply descriptors for a particular
+// query. This allows working around issues where servers' reflection service
+// provides an incomplete set of descriptors, but the client has knowledge of
+// the missing descriptors from another source. It is usually most appropriate
+// to pass [protoregistry.GlobalFiles] and [protoregistry.GlobalTypes] as the
+// resolver values.
+//
+// The first value is used as a fallback for FileByFilename and FileContainingSymbol
+// queries. The second value is used as a fallback for FileContainingExtension. It
+// can also be used as a fallback for AllExtensionNumbersForType if it provides
+// a method with the following signature (which *[protoregistry.Types] provides):
+//
+//	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+func (cr *Client) AllowFallbackResolver(descriptors protodesc.Resolver, exts protoregistry.ExtensionTypeResolver) {
+	if descriptors == nil && exts == nil {
+		cr.fallbackResolver.Store(nil)
+	} else {
+		cr.fallbackResolver.Store(&resolvers{
+			descriptorResolver: descriptors,
+			extensionResolver:  exts,
+		})
+	}
+}
+
 // FileByFilename asks the server for a file descriptor for the proto file with
 // the given name.
 func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
 	// hit the cache first
 	cr.cacheMu.RLock()
-	if fd, ok := cr.filesByName[filename]; ok {
+	if entry, ok := cr.filesByName[filename]; ok {
 		cr.cacheMu.RUnlock()
-		return fd, nil
+		return entry.fd, nil
 	}
+	// not there? see if we've downloaded the proto
 	fdp, ok := cr.protosByName[filename]
 	cr.cacheMu.RUnlock()
-	// not there? see if we've downloaded the proto
 	if ok {
 		return cr.descriptorFromProto(fdp)
 	}
 
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
 			FileByFilename: filename,
 		},
 	}
-	fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.GetName() == filename
+	}
+
+	fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept)
 	if isNotFound(err) {
-		// file not found? see if we can look up via alternate name
+		// File not found? See if we can look up via an alternate name
 		if alternate, ok := internal.StdFileAliases[filename]; ok {
-			req := &rpb.ServerReflectionRequest{
-				MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+			req := &refv1.ServerReflectionRequest{
+				MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
 					FileByFilename: alternate,
 				},
 			}
-			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
-			if isNotFound(err) {
-				err = fileNotFound(filename, nil)
-			}
-		} else {
-			err = fileNotFound(filename, nil)
+			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept)
 		}
+	}
+	if isNotFound(err) {
+		// Still no? See if we can use a fallback resolver
+		resolver := cr.fallbackResolver.Load()
+		if resolver != nil && resolver.descriptorResolver != nil {
+			fileDesc, fallbackErr := resolver.descriptorResolver.FindFileByPath(filename)
+			if fallbackErr == nil {
+				var wrapErr error
+				fd, wrapErr = desc.WrapFile(fileDesc)
+				if wrapErr == nil {
+					fd = cr.cacheFile(fd, true)
+					err = nil // clear error since we've succeeded via the fallback
+				}
+			}
+		}
+	}
+	if isNotFound(err) {
+		err = fileNotFound(filename, nil)
 	} else if e, ok := err.(*elementNotFoundError); ok {
 		err = fileNotFound(filename, e)
 	}
+
 	return fd, err
 }
 
@@ -186,18 +326,36 @@
 func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
 	// hit the cache first
 	cr.cacheMu.RLock()
-	fd, ok := cr.filesBySymbol[symbol]
+	entry, ok := cr.filesBySymbol[symbol]
 	cr.cacheMu.RUnlock()
 	if ok {
-		return fd, nil
+		return entry.fd, nil
 	}
 
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_FileContainingSymbol{
 			FileContainingSymbol: symbol,
 		},
 	}
-	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.FindSymbol(symbol) != nil
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
+	if isNotFound(err) {
+		// Symbol not found? See if we can use a fallback resolver
+		resolver := cr.fallbackResolver.Load()
+		if resolver != nil && resolver.descriptorResolver != nil {
+			d, fallbackErr := resolver.descriptorResolver.FindDescriptorByName(protoreflect.FullName(symbol))
+			if fallbackErr == nil {
+				var wrapErr error
+				fd, wrapErr = desc.WrapFile(d.ParentFile())
+				if wrapErr == nil {
+					fd = cr.cacheFile(fd, true)
+					err = nil // clear error since we've succeeded via the fallback
+				}
+			}
+		}
+	}
 	if isNotFound(err) {
 		err = symbolNotFound(symbol, symbolTypeUnknown, nil)
 	} else if e, ok := err.(*elementNotFoundError); ok {
@@ -212,21 +370,39 @@
 func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
 	// hit the cache first
 	cr.cacheMu.RLock()
-	fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+	entry, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
 	cr.cacheMu.RUnlock()
 	if ok {
-		return fd, nil
+		return entry.fd, nil
 	}
 
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
-			FileContainingExtension: &rpb.ExtensionRequest{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_FileContainingExtension{
+			FileContainingExtension: &refv1.ExtensionRequest{
 				ContainingType:  extendedMessageName,
 				ExtensionNumber: extensionNumber,
 			},
 		},
 	}
-	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	accept := func(fd *desc.FileDescriptor) bool {
+		return fd.FindExtension(extendedMessageName, extensionNumber) != nil
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
+	if isNotFound(err) {
+		// Extension not found? See if we can use a fallback resolver
+		resolver := cr.fallbackResolver.Load()
+		if resolver != nil && resolver.extensionResolver != nil {
+			extType, fallbackErr := resolver.extensionResolver.FindExtensionByNumber(protoreflect.FullName(extendedMessageName), protoreflect.FieldNumber(extensionNumber))
+			if fallbackErr == nil {
+				var wrapErr error
+				fd, wrapErr = desc.WrapFile(extType.TypeDescriptor().ParentFile())
+				if wrapErr == nil {
+					fd = cr.cacheFile(fd, true)
+					err = nil // clear error since we've succeeded via the fallback
+				}
+			}
+		}
+	}
 	if isNotFound(err) {
 		err = extensionNotFound(extendedMessageName, extensionNumber, nil)
 	} else if e, ok := err.(*elementNotFoundError); ok {
@@ -235,7 +411,7 @@
 	return fd, err
 }
 
-func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
+func (cr *Client) getAndCacheFileDescriptors(req *refv1.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) {
 	resp, err := cr.send(req)
 	if err != nil {
 		return nil, err
@@ -253,9 +429,9 @@
 	// should be the answer). If we're looking for a file by name, we can be
 	// smarter and make sure to grab one by name instead of just grabbing the
 	// first one.
-	var firstFd *dpb.FileDescriptorProto
+	var fds []*descriptorpb.FileDescriptorProto
 	for _, fdBytes := range fdResp.FileDescriptorProto {
-		fd := &dpb.FileDescriptorProto{}
+		fd := &descriptorpb.FileDescriptorProto{}
 		if err = proto.Unmarshal(fdBytes, fd); err != nil {
 			return nil, err
 		}
@@ -266,13 +442,6 @@
 		}
 
 		cr.cacheMu.Lock()
-		// see if this file was created and cached concurrently
-		if firstFd == nil {
-			if d, ok := cr.filesByName[fd.GetName()]; ok {
-				cr.cacheMu.Unlock()
-				return d, nil
-			}
-		}
 		// store in cache of raw descriptor protos, but don't overwrite existing protos
 		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
 			fd = existingFd
@@ -280,120 +449,208 @@
 			cr.protosByName[fd.GetName()] = fd
 		}
 		cr.cacheMu.Unlock()
-		if firstFd == nil {
-			firstFd = fd
-		}
-	}
-	if firstFd == nil {
-		return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
+
+		fds = append(fds, fd)
 	}
 
-	return cr.descriptorFromProto(firstFd)
+	// find the right result from the files returned
+	for _, fd := range fds {
+		result, err := cr.descriptorFromProto(fd)
+		if err != nil {
+			return nil, err
+		}
+		if accept(result) {
+			return result, nil
+		}
+	}
+
+	return nil, status.Errorf(codes.NotFound, "response does not include expected file")
 }
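
The new accept callback changes the selection rule from "first descriptor returned" to "first descriptor that actually contains what was requested". The scan in isolation, as a hypothetical generic helper (illustrative only):

```go
// pickFirst returns the first item satisfying accept, mirroring the loop
// above over the unmarshaled FileDescriptorProtos.
func pickFirst[T any](items []T, accept func(T) bool) (T, bool) {
	for _, item := range items {
		if accept(item) {
			return item, true
		}
	}
	var zero T
	return zero, false
}
```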
 
-func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
-	deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+	allowMissing := cr.allowMissing.Load()
+	deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
+	var deferredErr error
+	var missingDeps []int
 	for i, depName := range fd.GetDependency() {
 		if dep, err := cr.FileByFilename(depName); err != nil {
-			return nil, err
+			if _, ok := err.(*elementNotFoundError); !ok || !allowMissing {
+				return nil, err
+			}
+			// We'll ignore it for now to see if the file is really necessary.
+			// (If it only supplies custom options, we can get by without it.)
+			if deferredErr == nil {
+				deferredErr = err
+			}
+			missingDeps = append(missingDeps, i)
 		} else {
-			deps[i] = dep
+			deps = append(deps, dep)
 		}
 	}
+	if len(missingDeps) > 0 {
+		fd = fileWithoutDeps(fd, missingDeps)
+	}
 	d, err := desc.CreateFileDescriptor(fd, deps...)
 	if err != nil {
+		if deferredErr != nil {
+			// assume the issue is the missing dep
+			return nil, deferredErr
+		}
 		return nil, err
 	}
-	d = cr.cacheFile(d)
+	d = cr.cacheFile(d, false)
 	return d, nil
 }
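
The missing-dependency handling defers rather than fails: build without the dep, and only if the build still breaks, surface the original "not found" as the likelier root cause. The same pattern reduced to a hypothetical helper:

```go
// buildWithDeferred tries a build that may succeed without an optional
// input; if it still fails, the earlier, more informative error wins.
func buildWithDeferred(build func() error, deferred error) error {
	if err := build(); err != nil {
		if deferred != nil {
			return deferred // assume the missing dependency caused the failure
		}
		return err
	}
	return nil
}
```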
 
-func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
+func (cr *Client) cacheFile(fd *desc.FileDescriptor, fallback bool) *desc.FileDescriptor {
 	cr.cacheMu.Lock()
 	defer cr.cacheMu.Unlock()
 
-	// cache file descriptor by name, but don't overwrite existing entry
-	// (existing entry could come from concurrent caller)
-	if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
-		return existingFd
+	// Cache file descriptor by name. If we can't overwrite an existing
+	// entry, return it. (Existing entry could come from concurrent caller.)
+	if existing, ok := cr.filesByName[fd.GetName()]; ok && !canOverwrite(existing, fallback) {
+		return existing.fd
 	}
-	cr.filesByName[fd.GetName()] = fd
+	entry := fileEntry{fd: fd, fallback: fallback}
+	cr.filesByName[fd.GetName()] = entry
 
 	// also cache by symbols and extensions
 	for _, m := range fd.GetMessageTypes() {
-		cr.cacheMessageLocked(fd, m)
+		cr.cacheMessageLocked(m, entry)
 	}
 	for _, e := range fd.GetEnumTypes() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
 		for _, v := range e.GetValues() {
-			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+			cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
 		}
 	}
 	for _, e := range fd.GetExtensions() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
-		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
+		cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
 	}
 	for _, s := range fd.GetServices() {
-		cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
+		if !cr.maybeCacheFileBySymbol(s.GetFullyQualifiedName(), entry) {
+			continue
+		}
 		for _, m := range s.GetMethods() {
-			cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
+			cr.maybeCacheFileBySymbol(m.GetFullyQualifiedName(), entry)
 		}
 	}
 
 	return fd
 }
 
-func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
-	cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
+func (cr *Client) cacheMessageLocked(md *desc.MessageDescriptor, entry fileEntry) {
+	if !cr.maybeCacheFileBySymbol(md.GetFullyQualifiedName(), entry) {
+		return
+	}
 	for _, f := range md.GetFields() {
-		cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
+		cr.maybeCacheFileBySymbol(f.GetFullyQualifiedName(), entry)
 	}
 	for _, o := range md.GetOneOfs() {
-		cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
+		cr.maybeCacheFileBySymbol(o.GetFullyQualifiedName(), entry)
 	}
 	for _, e := range md.GetNestedEnumTypes() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
 		for _, v := range e.GetValues() {
-			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+			cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
 		}
 	}
 	for _, e := range md.GetNestedExtensions() {
-		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
-		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+		if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+			continue
+		}
+		cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
 	}
 	for _, m := range md.GetNestedMessageTypes() {
-		cr.cacheMessageLocked(fd, m) // recurse
+		cr.cacheMessageLocked(m, entry) // recurse
 	}
 }
 
+func canOverwrite(existing fileEntry, fallback bool) bool {
+	return !fallback && existing.fallback
+}
+
+func (cr *Client) maybeCacheFileBySymbol(symbol string, entry fileEntry) bool {
+	existing, ok := cr.filesBySymbol[symbol]
+	if ok && !canOverwrite(existing, entry.fallback) {
+		return false
+	}
+	cr.filesBySymbol[symbol] = entry
+	return true
+}
+
+func (cr *Client) maybeCacheFileByExtension(ext extDesc, entry fileEntry) {
+	existing, ok := cr.filesByExtension[ext]
+	if ok && !canOverwrite(existing, entry.fallback) {
+		return
+	}
+	cr.filesByExtension[ext] = entry
+}
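
These helpers encode one precedence rule: a server-sourced entry may replace a fallback-sourced one, never the reverse. A self-contained illustration (all names hypothetical):

```go
package main

import "fmt"

type entry struct{ fallback bool }

// canOverwrite mirrors the rule above: only a server-sourced entry
// (fallback == false) may displace an existing fallback-sourced one.
func canOverwrite(existing entry, fallback bool) bool {
	return !fallback && existing.fallback
}

func main() {
	cache := map[string]entry{}
	put := func(key string, fallback bool) {
		if old, ok := cache[key]; ok && !canOverwrite(old, fallback) {
			return // keep the higher-precedence entry
		}
		cache[key] = entry{fallback: fallback}
	}
	put("foo.proto", true)  // cached via the fallback resolver
	put("foo.proto", false) // a server answer takes over
	put("foo.proto", true)  // the fallback can no longer displace it
	fmt.Println(cache["foo.proto"].fallback) // false
}
```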
+
 // AllExtensionNumbersForType asks the server for all known extension numbers
 // for the given fully-qualified message name.
 func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
 			AllExtensionNumbersOfType: extendedMessageName,
 		},
 	}
 	resp, err := cr.send(req)
-	if err != nil {
-		if isNotFound(err) {
-			return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
-		}
+	var exts []int32
+	// If the server doesn't know about the message type and returns "not found",
+	// we'll treat that as "no known extensions" rather than an error; any other
+	// error is returned to the caller.
+	if err != nil && !isNotFound(err) {
 		return nil, err
 	}
-
-	extResp := resp.GetAllExtensionNumbersResponse()
-	if extResp == nil {
-		return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+	if err == nil {
+		extResp := resp.GetAllExtensionNumbersResponse()
+		if extResp == nil {
+			return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+		}
+		exts = extResp.ExtensionNumber
 	}
-	return extResp.ExtensionNumber, nil
+
+	resolver := cr.fallbackResolver.Load()
+	if resolver != nil && resolver.extensionResolver != nil {
+		type extRanger interface {
+			RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+		}
+		if ranger, ok := resolver.extensionResolver.(extRanger); ok {
+			// Merge results with fallback resolver
+			extSet := map[int32]struct{}{}
+			ranger.RangeExtensionsByMessage(protoreflect.FullName(extendedMessageName), func(extType protoreflect.ExtensionType) bool {
+				extSet[int32(extType.TypeDescriptor().Number())] = struct{}{}
+				return true
+			})
+			if len(extSet) > 0 {
+				// De-dupe with the set of extension numbers we got
+				// from the server and merge the results back into exts.
+				for _, ext := range exts {
+					extSet[ext] = struct{}{}
+				}
+				exts = make([]int32, 0, len(extSet))
+				for ext := range extSet {
+					exts = append(exts, ext)
+				}
+				sort.Slice(exts, func(i, j int) bool { return exts[i] < exts[j] })
+			}
+		}
+	}
+	return exts, nil
 }
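
The merge above is a sorted union of server-reported and locally registered extension numbers. The same logic as a small helper (assumes `sort` is imported; illustrative):

```go
// mergeExts returns the ascending union of two extension-number lists,
// matching the server + fallback merge performed above.
func mergeExts(fromServer, fromFallback []int32) []int32 {
	set := make(map[int32]struct{}, len(fromServer)+len(fromFallback))
	for _, e := range fromServer {
		set[e] = struct{}{}
	}
	for _, e := range fromFallback {
		set[e] = struct{}{}
	}
	out := make([]int32, 0, len(set))
	for e := range set {
		out = append(out, e)
	}
	sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
	return out
}
```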
 
 // ListServices asks the server for the fully-qualified names of all exposed
 // services.
 func (cr *Client) ListServices() ([]string, error) {
-	req := &rpb.ServerReflectionRequest{
-		MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+	req := &refv1.ServerReflectionRequest{
+		MessageRequest: &refv1.ServerReflectionRequest_ListServices{
 			// proto doesn't indicate any purpose for this value and server impl
 			// doesn't actually use it...
 			ListServices: "*",
@@ -415,10 +672,10 @@
 	return serviceNames, nil
 }
 
-func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) send(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
 	// we allow a few immediate retries, in case we have a stale stream
 	// (e.g. closed by server)
-	resp, err := cr.doSend(true, req)
+	resp, err := cr.doSend(req)
 	if err != nil {
 		return nil, err
 	}
@@ -440,16 +697,33 @@
 	return ok && s.Code() == codes.NotFound
 }
 
-func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) doSend(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
 	// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
 	// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
 	// delivered in the correct order.
 	cr.connMu.Lock()
 	defer cr.connMu.Unlock()
-	return cr.doSendLocked(retry, req)
+	return cr.doSendLocked(0, nil, req)
 }
 
-func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
+	if attemptCount >= 3 && prevErr != nil {
+		return nil, prevErr
+	}
+	if (status.Code(prevErr) == codes.Unimplemented ||
+		status.Code(prevErr) == codes.Unavailable) &&
+		cr.useV1() {
+		// If v1 is unimplemented, fallback to v1alpha.
+		// We also fallback on unavailable because some servers have been
+		// observed to close the connection/cancel the stream, w/out sending
+		// back status or headers, when the service name is not known. When
+		// this happens, the RPC status code is unavailable.
+		// See https://github.com/fullstorydev/grpcurl/issues/434
+		cr.useV1Alpha = true
+		cr.lastTriedV1 = cr.now()
+	}
+	attemptCount++
+
 	if err := cr.initStreamLocked(); err != nil {
 		return nil, err
 	}
@@ -460,21 +734,15 @@
 			_, err = cr.stream.Recv()
 		}
 		cr.resetLocked()
-		if retry {
-			return cr.doSendLocked(false, req)
-		}
-		return nil, err
+		return cr.doSendLocked(attemptCount, err, req)
 	}
 
-	if resp, err := cr.stream.Recv(); err != nil {
+	resp, err := cr.stream.Recv()
+	if err != nil {
 		cr.resetLocked()
-		if retry {
-			return cr.doSendLocked(false, req)
-		}
-		return nil, err
-	} else {
-		return resp, nil
+		return cr.doSendLocked(attemptCount, err, req)
 	}
+	return resp, nil
 }
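
doSendLocked now threads an attempt count and the previous error through its recursive calls, capping retries at three and returning the last real error instead of retrying forever. The same shape, iteratively (hypothetical helper):

```go
// sendWithRetries runs op up to three times, surfacing the last observed
// error, mirroring doSendLocked's attempt counting.
func sendWithRetries(op func() error) error {
	var prevErr error
	for attempt := 0; attempt < 3; attempt++ {
		if err := op(); err != nil {
			prevErr = err
			continue // the caller resets the stream before each retry
		}
		return nil
	}
	return prevErr
}
```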
 
 func (cr *Client) initStreamLocked() error {
@@ -483,11 +751,38 @@
 	}
 	var newCtx context.Context
 	newCtx, cr.cancel = context.WithCancel(cr.ctx)
+	if cr.useV1Alpha && cr.now().Sub(cr.lastTriedV1) > durationBetweenV1Attempts {
+		// we're due for periodic retry of v1
+		cr.useV1Alpha = false
+	}
+	if cr.useV1() {
+		// try the v1 API
+		streamv1, err := cr.stubV1.ServerReflectionInfo(newCtx)
+		if err == nil {
+			cr.stream = streamv1
+			return nil
+		}
+		if status.Code(err) != codes.Unimplemented {
+			return err
+		}
+		// oh well, fall through below to try v1alpha and update state
+		// so we skip straight to v1alpha next time
+		cr.useV1Alpha = true
+		cr.lastTriedV1 = cr.now()
+	}
 	var err error
-	cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+	streamv1alpha, err := cr.stubV1Alpha.ServerReflectionInfo(newCtx)
+	if err == nil {
+		cr.stream = adaptStreamFromV1Alpha{streamv1alpha}
+		return nil
+	}
 	return err
 }
 
+func (cr *Client) useV1() bool {
+	return !cr.useV1Alpha && cr.stubV1 != nil
+}
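
initStreamLocked and useV1 together implement a downgrade with a periodic re-probe: once v1 fails, v1alpha is used, but v1 is retried after durationBetweenV1Attempts elapses. A sketch of that state machine (hypothetical type; assumes the `time` package):

```go
// apiPicker captures the downgrade-with-cooldown behavior above.
type apiPicker struct {
	useV1Alpha  bool
	lastTriedV1 time.Time
}

func (p *apiPicker) pick(now time.Time, coolDown time.Duration) string {
	if p.useV1Alpha && now.Sub(p.lastTriedV1) > coolDown {
		p.useV1Alpha = false // due for a periodic retry of v1
	}
	if p.useV1Alpha {
		return "v1alpha"
	}
	return "v1"
}
```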
+
 // Reset ensures that any active stream with the server is closed, releasing any
 // resources.
 func (cr *Client) Reset() {
@@ -498,7 +793,7 @@
 
 func (cr *Client) resetLocked() {
 	if cr.stream != nil {
-		cr.stream.CloseSend()
+		_ = cr.stream.CloseSend()
 		for {
 			// drain the stream, this covers io.EOF too
 			if _, err := cr.stream.Recv(); err != nil {
@@ -605,6 +900,46 @@
 	}
 }
 
+func fileWithoutDeps(fd *descriptorpb.FileDescriptorProto, missingDeps []int) *descriptorpb.FileDescriptorProto {
+	// We need to rebuild the file without the missing deps.
+	fd = proto.Clone(fd).(*descriptorpb.FileDescriptorProto)
+	newNumDeps := len(fd.GetDependency()) - len(missingDeps)
+	newDeps := make([]string, 0, newNumDeps)
+	remapped := make(map[int]int, newNumDeps)
+	missingIdx := 0
+	for i, dep := range fd.GetDependency() {
+		if missingIdx < len(missingDeps) {
+			if i == missingDeps[missingIdx] {
+				// This dep was missing. Skip it.
+				missingIdx++
+				continue
+			}
+		}
+		remapped[i] = len(newDeps)
+		newDeps = append(newDeps, dep)
+	}
+	// Also rebuild public and weak import slices.
+	newPublic := make([]int32, 0, len(fd.GetPublicDependency()))
+	for _, idx := range fd.GetPublicDependency() {
+		newIdx, ok := remapped[int(idx)]
+		if ok {
+			newPublic = append(newPublic, int32(newIdx))
+		}
+	}
+	newWeak := make([]int32, 0, len(fd.GetWeakDependency()))
+	for _, idx := range fd.GetWeakDependency() {
+		newIdx, ok := remapped[int(idx)]
+		if ok {
+			newWeak = append(newWeak, int32(newIdx))
+		}
+	}
+
+	fd.Dependency = newDeps
+	fd.PublicDependency = newPublic
+	fd.WeakDependency = newWeak
+	return fd
+}
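
A worked example of the remapping fileWithoutDeps performs, using the descriptorpb and proto imports already present in this file (fmt used purely for illustration): dropping imports 1 and 3 from four dependencies shifts the surviving public/weak indexes accordingly.

```go
fd := &descriptorpb.FileDescriptorProto{
	Name:             proto.String("example.proto"),
	Dependency:       []string{"a.proto", "b.proto", "c.proto", "d.proto"},
	PublicDependency: []int32{0, 2},
	WeakDependency:   []int32{1, 3},
}
out := fileWithoutDeps(fd, []int{1, 3}) // b.proto and d.proto are missing
fmt.Println(out.Dependency)       // [a.proto c.proto]
fmt.Println(out.PublicDependency) // [0 1]  (old index 2 remapped to 1)
fmt.Println(out.WeakDependency)   // []     (both weak imports were dropped)
```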
+
 func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
 	// search extensions in this scope
 	for _, ext := range scope.extensions() {
@@ -664,3 +999,20 @@
 	}
 	return scopes
 }
+
+type adaptStreamFromV1Alpha struct {
+	refv1alpha.ServerReflection_ServerReflectionInfoClient
+}
+
+func (a adaptStreamFromV1Alpha) Send(request *refv1.ServerReflectionRequest) error {
+	v1req := toV1AlphaRequest(request)
+	return a.ServerReflection_ServerReflectionInfoClient.Send(v1req)
+}
+
+func (a adaptStreamFromV1Alpha) Recv() (*refv1.ServerReflectionResponse, error) {
+	v1resp, err := a.ServerReflection_ServerReflectionInfoClient.Recv()
+	if err != nil {
+		return nil, err
+	}
+	return toV1Response(v1resp), nil
+}
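
For context, a hedged usage sketch of the negotiating client this patch vendors; `grpcreflect.NewClientAuto` is the constructor recent protoreflect releases provide for automatic v1/v1alpha selection, and the address here is a placeholder:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/jhump/protoreflect/grpcreflect"
)

func main() {
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Tries the v1 reflection service first and falls back to v1alpha,
	// using the retry/downgrade logic shown above.
	client := grpcreflect.NewClientAuto(context.Background(), conn)
	defer client.Reset()

	svcs, err := client.ListServices()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range svcs {
		fmt.Println(s)
	}
}
```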
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
index c9ef619..7ff1912 100644
--- a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -4,13 +4,19 @@
 	"fmt"
 
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/reflection"
 
 	"github.com/jhump/protoreflect/desc"
 )
 
+// GRPCServer is the interface provided by a gRPC server. In addition to being a
+// service registrar (for registering services and handlers), it also has an
+// accessor for retrieving metadata about all registered services.
+type GRPCServer = reflection.GRPCServer
+
 // LoadServiceDescriptors loads the service descriptors for all services exposed by the
 // given GRPC server.
-func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) {
 	descs := map[string]*desc.ServiceDescriptor{}
 	for name, info := range s.GetServiceInfo() {
 		file, ok := info.Metadata.(string)
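
Since the parameter is now the reflection.GRPCServer interface rather than *grpc.Server, any registrar exposing GetServiceInfo qualifies, and *grpc.Server still satisfies it unchanged. A brief usage fragment (assumes grpc, grpcreflect, fmt, and log are imported):

```go
srv := grpc.NewServer()
// ...register generated services on srv here...
descs, err := grpcreflect.LoadServiceDescriptors(srv)
if err != nil {
	log.Fatal(err)
}
for name := range descs {
	fmt.Println("service:", name)
}
```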
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
index 4a8b47a..777c3a4 100644
--- a/vendor/github.com/jhump/protoreflect/internal/standard_files.go
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -9,7 +9,7 @@
 	"io/ioutil"
 
 	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/protobuf/types/descriptorpb"
 )
 
 // TODO: replace this alias configuration with desc.RegisterImportPath?
@@ -68,7 +68,7 @@
 // name cannot be loaded but is a known standard name, an alias will be tried,
 // so the standard files can be loaded even if linked against older "known bad"
 // versions of packages.
-func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+func LoadFileDescriptor(file string) (*descriptorpb.FileDescriptorProto, error) {
 	fdb := proto.FileDescriptor(file)
 	aliased := false
 	if fdb == nil {
@@ -102,12 +102,12 @@
 // Registered file descriptors are first "proto encoded" (e.g. binary format
 // for the descriptor protos) and then gzipped. So this function gunzips and
 // then unmarshals into a descriptor proto.
-func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+func DecodeFileDescriptor(element string, fdb []byte) (*descriptorpb.FileDescriptorProto, error) {
 	raw, err := decompress(fdb)
 	if err != nil {
 		return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
 	}
-	fd := dpb.FileDescriptorProto{}
+	fd := descriptorpb.FileDescriptorProto{}
 	if err := proto.Unmarshal(raw, &fd); err != nil {
 		return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
 	}
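
For orientation, DecodeFileDescriptor's two steps in a standalone equivalent, built only on standard packages plus descriptorpb (assumes bytes, compress/gzip, fmt, and io are imported; an illustration, not the vendored code):

```go
func decodeRegistered(element string, gz []byte) (*descriptorpb.FileDescriptorProto, error) {
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
	}
	defer zr.Close()
	raw, err := io.ReadAll(zr)
	if err != nil {
		return nil, err
	}
	fd := &descriptorpb.FileDescriptorProto{}
	if err := proto.Unmarshal(raw, fd); err != nil {
		return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
	}
	return fd, nil
}
```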
diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
index c903d4b..25376c7 100644
--- a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
+++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
@@ -1,86 +1,20 @@
 package internal
 
 import (
-	"reflect"
-
 	"github.com/golang/protobuf/proto"
 )
 
-var typeOfBytes = reflect.TypeOf([]byte(nil))
-
 // GetUnrecognized fetches the bytes of unrecognized fields for the given message.
 func GetUnrecognized(msg proto.Message) []byte {
-	val := reflect.Indirect(reflect.ValueOf(msg))
-	u := val.FieldByName("XXX_unrecognized")
-	if u.IsValid() && u.Type() == typeOfBytes {
-		return u.Interface().([]byte)
-	}
-
-	// Fallback to reflection for API v2 messages
-	get, _, _, ok := unrecognizedGetSetMethods(val)
-	if !ok {
-		return nil
-	}
-
-	return get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+	return proto.MessageReflect(msg).GetUnknown()
 }
 
 // SetUnrecognized adds the given bytes to the unrecognized fields for the given message.
 func SetUnrecognized(msg proto.Message, data []byte) {
-	val := reflect.Indirect(reflect.ValueOf(msg))
-	u := val.FieldByName("XXX_unrecognized")
-	if u.IsValid() && u.Type() == typeOfBytes {
-		// Just store the bytes in the unrecognized field
-		ub := u.Interface().([]byte)
-		ub = append(ub, data...)
-		u.Set(reflect.ValueOf(ub))
-		return
-	}
-
-	// Fallback to reflection for API v2 messages
-	get, set, argType, ok := unrecognizedGetSetMethods(val)
-	if !ok {
-		return
-	}
-
-	existing := get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+	refl := proto.MessageReflect(msg)
+	existing := refl.GetUnknown()
 	if len(existing) > 0 {
 		data = append(existing, data...)
 	}
-	set.Call([]reflect.Value{reflect.ValueOf(data).Convert(argType)})
-}
-
-func unrecognizedGetSetMethods(val reflect.Value) (get reflect.Value, set reflect.Value, argType reflect.Type, ok bool) {
-	// val could be an APIv2 message. We use reflection to interact with
-	// this message so that we don't have a hard dependency on the new
-	// version of the protobuf package.
-	refMethod := val.MethodByName("ProtoReflect")
-	if !refMethod.IsValid() {
-		if val.CanAddr() {
-			refMethod = val.Addr().MethodByName("ProtoReflect")
-		}
-		if !refMethod.IsValid() {
-			return
-		}
-	}
-	refType := refMethod.Type()
-	if refType.NumIn() != 0 || refType.NumOut() != 1 {
-		return
-	}
-	ref := refMethod.Call([]reflect.Value(nil))
-	getMethod, setMethod := ref[0].MethodByName("GetUnknown"), ref[0].MethodByName("SetUnknown")
-	if !getMethod.IsValid() || !setMethod.IsValid() {
-		return
-	}
-	getType := getMethod.Type()
-	setType := setMethod.Type()
-	if getType.NumIn() != 0 || getType.NumOut() != 1 || setType.NumIn() != 1 || setType.NumOut() != 0 {
-		return
-	}
-	arg := setType.In(0)
-	if !arg.ConvertibleTo(typeOfBytes) || getType.Out(0) != arg {
-		return
-	}
-
-	return getMethod, setMethod, arg, true
+	refl.SetUnknown(data)
 }
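
The rewritten helpers lean entirely on the protobuf-go reflection API. The same GetUnknown/SetUnknown surface used directly on a v2 message, with protowire hand-encoding one unknown field (a standalone illustration):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	m := &emptypb.Empty{}
	// Encode field #1 as varint 42 and attach it as an unknown field.
	unk := protowire.AppendTag(nil, 1, protowire.VarintType)
	unk = protowire.AppendVarint(unk, 42)
	m.ProtoReflect().SetUnknown(unk)
	fmt.Printf("unknown bytes: %x\n", m.ProtoReflect().GetUnknown())
}
```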
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 0af08e6..4528059 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,9 +1,8 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@latest
 
 builds:
   -
@@ -32,7 +31,6 @@
       - mips64le
     goarm:
       - 7
-    gobinary: garble
   -
     id: "s2d"
     binary: s2d
@@ -59,7 +57,6 @@
       - mips64le
     goarm:
       - 7
-    gobinary: garble
   -
     id: "s2sx"
     binary: s2sx
@@ -87,21 +84,11 @@
       - mips64le
     goarm:
       - 7
-    gobinary: garble
 
 archives:
   -
     id: s2-binaries
-    name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
-    replacements:
-      aix: AIX
-      darwin: OSX
-      linux: Linux
-      windows: Windows
-      386: i386
-      amd64: x86_64
-      freebsd: FreeBSD
-      netbsd: NetBSD
+    name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     format_overrides:
       - goos: windows
         format: zip
@@ -112,7 +99,7 @@
 checksum:
   name_template: 'checksums.txt'
 snapshot:
-  name_template: "{{ .Tag }}-next"
+  version_template: "{{ .Tag }}-next"
 changelog:
   sort: asc
   filters:
@@ -125,7 +112,7 @@
 
 nfpms:
   -
-    file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+    file_name_template: "s2_package_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
     vendor: Klaus Post
     homepage: https://github.com/klauspost/compress
     maintainer: Klaus Post <klauspost@gmail.com>
@@ -134,8 +121,3 @@
     formats:
       - deb
       - rpm
-    replacements:
-      darwin: Darwin
-      linux: Linux
-      freebsd: FreeBSD
-      amd64: x86_64
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index ad5c63a..244ee19 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -9,14 +9,192 @@
 * [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.

 * [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.

 * [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.

-* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.

 

 [![Go Reference](https://pkg.go.dev/badge/klauspost/compress.svg)](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)

 [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)

 [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)

 

+# package usage

+

+Use `go get github.com/klauspost/compress@latest` to add it to your project.

+

+This package will support the current Go version and 2 versions back.

+

+* Use the `nounsafe` tag to disable all use of the "unsafe" package.

+* Use the `noasm` tag to disable all assembly across packages.

+

+Use the links above for more information on each.

+

 # changelog

 

+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)

+  * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036

+  * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028

+  * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043

+  * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045

+  * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048

+  * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049

+  * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050

+

+* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)

+  * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017

+  * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014

+  * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011

+  * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013

+

+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)

+	* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978

+	* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002

+	* s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982

+	* zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007

+	* flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996

+

+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)

+	* s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949

+	* flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963

+	* Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971

+	* zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951

+

+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)

+	* zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885

+	* zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938

+

+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)

+	* s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927

+	* s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930

+  

+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)

+	* zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923

+	* s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925

+  

+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)

+	* flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912

+	* zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908

+	* zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913

+	* zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910

+	* s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917

+https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918

+

+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)

+	* huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887

+	* huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886

+	* gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892

+	* gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890

+	* gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891

+

+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)

+	* fse: Fix max header size https://github.com/klauspost/compress/pull/881

+	* zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877

+	* gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883

+

+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)

+	* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876

+

+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)

+	* s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871

+	* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869

+	* s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867

+

+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)

+	* Add experimental dictionary builder  https://github.com/klauspost/compress/pull/853

+	* Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838

+	* flate: Add limited window compression https://github.com/klauspost/compress/pull/843

+	* s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839

+	* flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837

+	* gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860

+

+<details>

+	<summary>See changes to v1.16.x</summary>

+

+   

+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)

+	* zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829

+	* s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832

+

+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)

+	* zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806

+	* zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824

+	* gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815

+	* s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663

+

+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)

+	* zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802

+	* gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804

+

+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)

+	* zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784

+	* zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792

+	* zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785

+	* zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795

+	* s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779

+	* s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780

+	* gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799

+

+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)

+	* zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776

+	* gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767

+	* s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766

+	* zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773

+	* huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774

+

+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)

+	* s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support.  https://github.com/klauspost/compress/pull/685

+	* s2: Add Compression Size Estimate.  https://github.com/klauspost/compress/pull/752

+	* s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755

+	* s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748

+	* s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747

+	* s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746

+</details>

+

+<details>

+	<summary>See changes to v1.15.x</summary>

+	

+* Jan 21st, 2023 (v1.15.15)

+	* deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739

+	* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728

+	* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745

+	* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740

+

+* Jan 3rd, 2023 (v1.15.14)

+

+	* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718

+	* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720

+	* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722

+	* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723

+

+* Dec 11, 2022 (v1.15.13)

+	* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder  https://github.com/klauspost/compress/pull/691

+	* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708

+

+* Oct 26, 2022 (v1.15.12)

+

+	* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680

+	* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683

+

+* Sept 26, 2022 (v1.15.11)

+

+	* flate: Improve level 1-3 compression  https://github.com/klauspost/compress/pull/678

+	* zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677

+	* zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668

+	* zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667

+

+* Sept 16, 2022 (v1.15.10)

+

+	* zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649

+	* Add Go 1.19 - deprecate Go 1.16  https://github.com/klauspost/compress/pull/651

+	* flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656

+	* zstd: Improve "better" compression  https://github.com/klauspost/compress/pull/657

+	* s2: Improve "best" compression https://github.com/klauspost/compress/pull/658

+	* s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635

+	* s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646

+	* Use arrays for constant size copies https://github.com/klauspost/compress/pull/659

+

+* July 21, 2022 (v1.15.9)

+

+	* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645

+	* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644

+	* zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643

+

 * July 13, 2022 (v1.15.8)

 

 	* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641

@@ -57,7 +235,7 @@
 	* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599

 	* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593

 	* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586

-	* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590

+	* flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590

 

 

 * May 11, 2022 (v1.15.4)

@@ -84,27 +262,29 @@
 	* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)

 

 * Mar 3, 2022 (v1.15.0)

-	* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)

-	* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)

+	* zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)

+	* zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)

 	* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)

-	* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)

-	* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)

-	* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)

+	* flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)

+	* gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)

+	* gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)

 

-<details>

-	<summary>See  Details</summary>

 Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.

 

 Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. A typical stream will fully use 2 cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can safely be pooled and still be garbage collected.

 

 While the release has been extensively tested, testing is recommended when upgrading.

+

 </details>

 

+<details>

+	<summary>See changes to v1.14.x</summary>

+	

 * Feb 22, 2022 (v1.14.4)

 	* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)

 	* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)

 	* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501)  #501

-	* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)

+	* huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)

 

 * Feb 17, 2022 (v1.14.3)

 	* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494)  [#478](https://github.com/klauspost/compress/pull/478)

@@ -125,6 +305,7 @@
 	* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)

 	* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)

 	* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)

+</details>

 

 <details>

 	<summary>See changes to v1.13.x</summary>

@@ -205,7 +386,7 @@
 	* s2: Fix binaries.

 

 * Feb 25, 2021 (v1.11.8)

-	* s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.

+	* s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.

 	* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)

 	* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)

 	* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)

@@ -384,7 +565,7 @@
 * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.

 * Feb 19, 2016: Handle small payloads faster in level 1-3.

 * Feb 19, 2016: Added faster level 2 + 3 compression modes.

-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.

+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.

 * Feb 14, 2016: Snappy: Merge upstream changes. 

 * Feb 14, 2016: Snappy: Fix aggressive skipping.

 * Feb 14, 2016: Snappy: Update benchmark.

@@ -410,12 +591,14 @@
 

 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:

 

-| old import         | new import                              | Documentation

-|--------------------|-----------------------------------------|--------------------|

-| `compress/gzip`    | `github.com/klauspost/compress/gzip`    | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)

-| `compress/zlib`    | `github.com/klauspost/compress/zlib`    | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)

-| `archive/zip`      | `github.com/klauspost/compress/zip`     | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)

-| `compress/flate`   | `github.com/klauspost/compress/flate`   | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)

+Typical speed is about 2x that of the standard library packages.

+

+| old import       | new import                            | Documentation                                                           |

+|------------------|---------------------------------------|-------------------------------------------------------------------------|

+| `compress/gzip`  | `github.com/klauspost/compress/gzip`  | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)   |

+| `compress/zlib`  | `github.com/klauspost/compress/zlib`  | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)   |

+| `archive/zip`    | `github.com/klauspost/compress/zip`   | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)     |

+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |

 

 * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).

 

@@ -431,6 +614,8 @@
 

 For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).

 

+To disable all assembly add `-tags=noasm`. This works across all packages.

+

 # Stateless compression

 

 This package offers stateless compression as a special option for gzip/deflate. 

@@ -449,7 +634,7 @@
 

 A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:

 

-```

+```go

 	// replace 'ioutil.Discard' with your output.

 	gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)

 	if err != nil {

@@ -468,84 +653,6 @@
 Compression is almost always worse than the fastest compression level 

 and each write will allocate (a little) memory. 

 

-# Performance Update 2018

-

-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.

-

-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.

-

-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.

-

-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).

-

-

-## Overall differences.

-

-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.

-

-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.

-

-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.

-

-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.

-

-## Web Content

-

-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.

-

-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.

-

-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.

-

-## Object files

-

-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.

-

-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. 

-

-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.

-

-## Highly Compressible File

-

-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.

-

-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.

-

-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".

-

-## Medium-High Compressible

-

-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.

-

-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.

-

-## Medium Compressible

-

-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.

-

-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.

-

-

-## Un-compressible Content

-

-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed.  The only downside is that it might skip some compressible data on false detections.

-

-

-## Huffman only compression

-

-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. 

-

-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).

-

-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.

-

-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). 

-

-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.

-

-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).

-

-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
 
 # Other packages
 
@@ -554,6 +661,10 @@
 * [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
 * [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
 * [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
 
 # license
 
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 0000000..ca6685e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will a slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 0000000..af53fb8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	NoCompression      = 0
+	BestSpeed          = 1
+	BestCompression    = 9
+	DefaultCompression = -1
+
+	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+	// entropy encoding. This mode is useful in compressing data that has
+	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+	// that lacks an entropy encoder. Compression gains are achieved when
+	// certain bytes in the input stream occur more frequently than others.
+	//
+	// Note that HuffmanOnly produces a compressed output that is
+	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+	// continue to be able to decompress this output.
+	HuffmanOnly         = -2
+	ConstantCompression = HuffmanOnly // compatibility alias.
+
+	logWindowSize    = 15
+	windowSize       = 1 << logWindowSize
+	windowMask       = windowSize - 1
+	logMaxOffsetSize = 15  // Standard DEFLATE
+	minMatchLength   = 4   // The smallest match that the compressor looks for
+	maxMatchLength   = 258 // The longest match for the compressor
+	minOffsetSize    = 1   // The shortest offset that makes any sense
+
+	// The maximum number of tokens we will encode at a time.
+	// Smaller sizes usually create less optimal blocks.
+	// Bigger can make context switching slow.
+	// We use this for levels 7-9, so we make it big.
+	maxFlateBlockTokens = 1 << 15
+	maxStoreBlockSize   = 65535
+	hashBits            = 17 // After 17 performance degrades
+	hashSize            = 1 << hashBits
+	hashMask            = (1 << hashBits) - 1
+	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
+	maxHashOffset       = 1 << 28
+
+	skipNever = math.MaxInt32
+
+	debugDeflate = false
+)
+
+type compressionLevel struct {
+	good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+	{}, // 0
+	// Level 1-6 uses specialized algorithm - values not used
+	{0, 0, 0, 0, 0, 1},
+	{0, 0, 0, 0, 0, 2},
+	{0, 0, 0, 0, 0, 3},
+	{0, 0, 0, 0, 0, 4},
+	{0, 0, 0, 0, 0, 5},
+	{0, 0, 0, 0, 0, 6},
+	// Levels 7-9 use increasingly more lazy matching
+	// and increasingly stringent conditions for "good enough".
+	{8, 12, 16, 24, skipNever, 7},
+	{16, 30, 40, 64, skipNever, 8},
+	{32, 258, 258, 1024, skipNever, 9},
+}
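+
+// For example, at level 8 the compressor follows at most 64 hash-chain
+// candidates (chain), stops searching once a match of 40 bytes is found
+// (nice), and only keeps looking lazily while the previous match is
+// shorter than 30 bytes (lazy); see findMatch and deflateLazy below.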
+
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+	// deflate state
+	length         int
+	offset         int
+	maxInsertIndex int
+	chainHead      int
+	hashOffset     int
+
+	ii uint16 // position of last match, intended to overflow to reset.
+
+	// input window: unprocessed data is window[index:windowEnd]
+	index     int
+	hashMatch [maxMatchLength + minMatchLength]uint32
+
+	// Input hash chains
+	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
+	// If hashHead[hashValue] is within the current window, then
+	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+	// with the same hash value.
+	hashHead [hashSize]uint32
+	hashPrev [windowSize]uint32
+}
+
+type compressor struct {
+	compressionLevel
+
+	h *huffmanEncoder
+	w *huffmanBitWriter
+
+	// compression algorithm
+	fill func(*compressor, []byte) int // copy data to window
+	step func(*compressor)             // process window
+
+	window     []byte
+	windowEnd  int
+	blockStart int // window index where current tokens start
+	err        error
+
+	// queued output tokens
+	tokens tokens
+	fast   fastEnc
+	state  *advancedState
+
+	sync          bool // requesting flush
+	byteAvailable bool // if true, still need to process window[index-1].
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+	s := d.state
+	if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+		// shift the window by windowSize
+		//copy(d.window[:], d.window[windowSize:2*windowSize])
+		*(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
+		s.index -= windowSize
+		d.windowEnd -= windowSize
+		if d.blockStart >= windowSize {
+			d.blockStart -= windowSize
+		} else {
+			d.blockStart = math.MaxInt32
+		}
+		s.hashOffset += windowSize
+		if s.hashOffset > maxHashOffset {
+			delta := s.hashOffset - 1
+			s.hashOffset -= delta
+			s.chainHead -= delta
+			// Iterate over slices instead of arrays to avoid copying
+			// the entire table onto the stack (Issue #18625).
+			for i, v := range s.hashPrev[:] {
+				if int(v) > delta {
+					s.hashPrev[i] = uint32(int(v) - delta)
+				} else {
+					s.hashPrev[i] = 0
+				}
+			}
+			for i, v := range s.hashHead[:] {
+				if int(v) > delta {
+					s.hashHead[i] = uint32(int(v) - delta)
+				} else {
+					s.hashHead[i] = 0
+				}
+			}
+		}
+	}
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		var window []byte
+		if d.blockStart <= index {
+			window = d.window[d.blockStart:index]
+		}
+		d.blockStart = index
+		//d.w.writeBlock(tok, eof, window)
+		d.w.writeBlockDynamic(tok, eof, window, d.sync)
+		return d.w.err
+	}
+	return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored when there are no matches,
+// or only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		if d.blockStart <= index {
+			window := d.window[d.blockStart:index]
+			// If we removed less than a 64th of all literals
+			// we huffman compress the block.
+			if int(tok.n) > len(window)-int(tok.n>>6) {
+				d.w.writeBlockHuff(eof, window, d.sync)
+			} else {
+				// Write a dynamic huffman block.
+				d.w.writeBlockDynamic(tok, eof, window, d.sync)
+			}
+		} else {
+			d.w.writeBlock(tok, eof, nil)
+		}
+		d.blockStart = index
+		return d.w.err
+	}
+	return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+	// Do not fill window if we are in store-only or huffman mode.
+	if d.level <= 0 && d.level > -MinCustomWindowSize {
+		return
+	}
+	if d.fast != nil {
+		// encode the last data, but discard the result
+		if len(b) > maxMatchOffset {
+			b = b[len(b)-maxMatchOffset:]
+		}
+		d.fast.Encode(&d.tokens, b)
+		d.tokens.Reset()
+		return
+	}
+	s := d.state
+	// If we are given too much, cut it.
+	if len(b) > windowSize {
+		b = b[len(b)-windowSize:]
+	}
+	// Add all to window.
+	n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at a time (more L1 cache hits)
+	loops := (n + 256 - minMatchLength) / 256
+	for j := 0; j < loops; j++ {
+		startindex := j * 256
+		end := startindex + 256 + minMatchLength - 1
+		if end > n {
+			end = n
+		}
+		tocheck := d.window[startindex:end]
+		dstSize := len(tocheck) - minMatchLength + 1
+
+		if dstSize <= 0 {
+			continue
+		}
+
+		dst := s.hashMatch[:dstSize]
+		bulkHash4(tocheck, dst)
+		var newH uint32
+		for i, val := range dst {
+			di := i + startindex
+			newH = val & hashMask
+			// Get previous value with the same hash.
+			// Our chain should point to the previous value.
+			s.hashPrev[di&windowMask] = s.hashHead[newH]
+			// Set the head of the hash chain to us.
+			s.hashHead[newH] = uint32(di + s.hashOffset)
+		}
+	}
+	// Update window information.
+	d.windowEnd += n
+	s.index = n
+}
+
+// Try to find a match starting at pos whose length is greater than the
+// current one. We only look at d.chain possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
+	minMatchLook := maxMatchLength
+	if lookahead < minMatchLook {
+		minMatchLook = lookahead
+	}
+
+	win := d.window[0 : pos+minMatchLook]
+
+	// We quit when we get a match that's at least nice long
+	nice := len(win) - pos
+	if d.nice < nice {
+		nice = d.nice
+	}
+
+	// If we've got a match that's good enough, only look in 1/4 the chain.
+	tries := d.chain
+	length = minMatchLength - 1
+
+	wEnd := win[pos+length]
+	wPos := win[pos:]
+	minIndex := pos - windowSize
+	if minIndex < 0 {
+		minIndex = 0
+	}
+	offset = 0
+
+	if d.chain < 100 {
+		for i := prevHead; tries > 0; tries-- {
+			if wEnd == win[i+length] {
+				n := matchLen(win[i:i+minMatchLook], wPos)
+				if n > length {
+					length = n
+					offset = pos - i
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
+			}
+			if i <= minIndex {
+				// hashPrev[i & windowMask] has already been overwritten, so stop now.
+				break
+			}
+			i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+			if i < minIndex {
+				break
+			}
+		}
+		return
+	}
+
+	// Minimum gain to accept a match.
+	cGain := 4
+
+	// Some like it higher (CSV), some like it lower (JSON)
+	const baseCost = 3
+	// Base is 4 bytes with an additional cost.
+	// Matches must be better than this.
+
+	for i := prevHead; tries > 0; tries-- {
+		if wEnd == win[i+length] {
+			n := matchLen(win[i:i+minMatchLook], wPos)
+			if n > length {
+				// Calculate gain. Estimate
+				newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+				//fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
+				if newGain > cGain {
+					length = n
+					offset = pos - i
+					cGain = newGain
+					ok = true
+					if n >= nice {
+						// The match is good enough that we don't try to find a better one.
+						break
+					}
+					wEnd = win[pos+n]
+				}
+			}
+		}
+		if i <= minIndex {
+			// hashPrev[i & windowMask] has already been overwritten, so stop now.
+			break
+		}
+		i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+		if i < minIndex {
+			break
+		}
+	}
+	return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+	if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.writeBytes(buf)
+	return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+	return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+	return (u * prime4bytes) >> (32 - h)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+	if len(b) < 4 {
+		return
+	}
+	hb := binary.LittleEndian.Uint32(b)
+
+	dst[0] = hash4u(hb, hashBits)
+	end := len(b) - 4 + 1
+	for i := 1; i < end; i++ {
+		hb = (hb >> 8) | uint32(b[i+3])<<24
+		dst[i] = hash4u(hb, hashBits)
+	}
+}
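+
+// Note that bulkHash4 is equivalent to calling hash4 at each position, as
+// sketched below; the rolling update merely avoids re-reading the window:
+//
+//	dst := make([]uint32, len(b)-3)
+//	bulkHash4(b, dst)
+//	for i := range dst {
+//		_ = dst[i] == hash4(b[i:]) // always true
+//	}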
+
+func (d *compressor) initDeflate() {
+	d.window = make([]byte, 2*windowSize)
+	d.byteAvailable = false
+	d.err = nil
+	if d.state == nil {
+		return
+	}
+	s := d.state
+	s.index = 0
+	s.hashOffset = 1
+	s.length = minMatchLength - 1
+	s.offset = 0
+	s.chainHead = -1
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+	s := d.state
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = debugDeflate
+
+	if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+	if d.windowEnd != s.index && d.chain > 100 {
+		// Get literal huffman coder.
+		if d.h == nil {
+			d.h = newHuffmanEncoder(maxFlateBlockTokens)
+		}
+		var tmp [256]uint16
+		for _, v := range d.window[s.index:d.windowEnd] {
+			tmp[v]++
+		}
+		d.h.generate(tmp[:], 15)
+	}
+
+	s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+
+	for {
+		if sanity && s.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - s.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && s.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				// Flush current output block if any.
+				if d.byteAvailable {
+					// There is still one pending token that needs to be flushed
+					d.tokens.AddLiteral(d.window[s.index-1])
+					d.byteAvailable = false
+				}
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+						return
+					}
+					d.tokens.Reset()
+				}
+				return
+			}
+		}
+		if s.index < s.maxInsertIndex {
+			// Update the hash
+			hash := hash4(d.window[s.index:])
+			ch := s.hashHead[hash]
+			s.chainHead = int(ch)
+			s.hashPrev[s.index&windowMask] = ch
+			s.hashHead[hash] = uint32(s.index + s.hashOffset)
+		}
+		prevLength := s.length
+		prevOffset := s.offset
+		s.length = minMatchLength - 1
+		s.offset = 0
+		minIndex := s.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+			if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
+				s.length = newLength
+				s.offset = newOffset
+			}
+		}
+
+		if prevLength >= minMatchLength && s.length <= prevLength {
+			// No better match, but check for better match at end...
+			//
+			// Skip forward a number of bytes.
+			// Offset of 2 seems to yield best results. 3 is sometimes better.
+			const checkOff = 2
+
+			// Check all, except full length
+			if prevLength < maxMatchLength-checkOff {
+				prevIndex := s.index - 1
+				if prevIndex+prevLength < s.maxInsertIndex {
+					end := lookahead
+					if lookahead > maxMatchLength+checkOff {
+						end = maxMatchLength + checkOff
+					}
+					end += prevIndex
+
+					// Hash at match end.
+					h := hash4(d.window[prevIndex+prevLength:])
+					ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+					if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+						length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+						// It seems like a pure length metric is best.
+						if length > prevLength {
+							prevLength = length
+							prevOffset = prevIndex - ch2
+
+							// Extend back...
+							for i := checkOff - 1; i >= 0; i-- {
+								if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
+									// Emit tokens we "owe"
+									for j := 0; j <= i; j++ {
+										d.tokens.AddLiteral(d.window[prevIndex+j])
+										if d.tokens.n == maxFlateBlockTokens {
+											// The block includes the current character
+											if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+												return
+											}
+											d.tokens.Reset()
+										}
+										s.index++
+										if s.index < s.maxInsertIndex {
+											h := hash4(d.window[s.index:])
+											ch := s.hashHead[h]
+											s.chainHead = int(ch)
+											s.hashPrev[s.index&windowMask] = ch
+											s.hashHead[h] = uint32(s.index + s.hashOffset)
+										}
+									}
+									break
+								} else {
+									prevLength++
+								}
+							}
+						} else if false {
+							// Check one further ahead.
+							// Only rarely better, disabled for now.
+							prevIndex++
+							h := hash4(d.window[prevIndex+prevLength:])
+							ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+							if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+								length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+								// It seems like a pure length metric is best.
+								if length > prevLength+checkOff {
+									prevLength = length
+									prevOffset = prevIndex - ch2
+									prevIndex--
+
+									// Extend back...
+									for i := checkOff; i >= 0; i-- {
+										if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
+											// Emit tokens we "owe"
+											for j := 0; j <= i; j++ {
+												d.tokens.AddLiteral(d.window[prevIndex+j])
+												if d.tokens.n == maxFlateBlockTokens {
+													// The block includes the current character
+													if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+														return
+													}
+													d.tokens.Reset()
+												}
+												s.index++
+												if s.index < s.maxInsertIndex {
+													h := hash4(d.window[s.index:])
+													ch := s.hashHead[h]
+													s.chainHead = int(ch)
+													s.hashPrev[s.index&windowMask] = ch
+													s.hashHead[h] = uint32(s.index + s.hashOffset)
+												}
+											}
+											break
+										} else {
+											prevLength++
+										}
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			newIndex := s.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > s.maxInsertIndex {
+				end = s.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := s.index + 1
+			if startindex > s.maxInsertIndex {
+				startindex = s.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				dst := s.hashMatch[:dstSize]
+				bulkHash4(tocheck, dst)
+				var newH uint32
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					s.hashPrev[di&windowMask] = s.hashHead[newH]
+					// Set the head of the hash chain to us.
+					s.hashHead[newH] = uint32(di + s.hashOffset)
+				}
+			}
+
+			s.index = newIndex
+			d.byteAvailable = false
+			s.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+					return
+				}
+				d.tokens.Reset()
+			}
+			s.ii = 0
+		} else {
+			// Reset, if we got a match this run.
+			if s.length >= minMatchLength {
+				s.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				s.ii++
+				d.tokens.AddLiteral(d.window[s.index-1])
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+						return
+					}
+					d.tokens.Reset()
+				}
+				s.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when s.ii overflows after 64KB.
+				if n := int(s.ii) - d.chain; n > 0 {
+					n = 1 + int(n>>6)
+					for j := 0; j < n; j++ {
+						if s.index >= d.windowEnd-1 {
+							break
+						}
+						d.tokens.AddLiteral(d.window[s.index-1])
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+								return
+							}
+							d.tokens.Reset()
+						}
+						// Index...
+						if s.index < s.maxInsertIndex {
+							h := hash4(d.window[s.index:])
+							ch := s.hashHead[h]
+							s.chainHead = int(ch)
+							s.hashPrev[s.index&windowMask] = ch
+							s.hashHead[h] = uint32(s.index + s.hashOffset)
+						}
+						s.index++
+					}
+					// Flush last byte
+					d.tokens.AddLiteral(d.window[s.index-1])
+					d.byteAvailable = false
+					// s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+							return
+						}
+						d.tokens.Reset()
+					}
+				}
+			} else {
+				s.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+func (d *compressor) store() {
+	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		d.windowEnd = 0
+	}
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+	// We only compress if we have maxStoreBlockSize.
+	if d.windowEnd < len(d.window) {
+		if !d.sync {
+			return
+		}
+		// Handle extremely small sizes.
+		if d.windowEnd < 128 {
+			if d.windowEnd == 0 {
+				return
+			}
+			if d.windowEnd <= 32 {
+				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+			} else {
+				d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+				d.err = d.w.err
+			}
+			d.tokens.Reset()
+			d.windowEnd = 0
+			d.fast.Reset()
+			return
+		}
+	}
+
+	d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+	// If we made zero matches, store the block as is.
+	if d.tokens.n == 0 {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		// If we removed less than 1/16th, huffman compress the block.
+	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+		d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+		d.err = d.w.err
+	} else {
+		d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+		d.err = d.w.err
+	}
+	d.tokens.Reset()
+	d.windowEnd = 0
+}
+
+// write will add input byte to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+	if d.err != nil {
+		return 0, d.err
+	}
+	n = len(b)
+	for len(b) > 0 {
+		if d.windowEnd == len(d.window) || d.sync {
+			d.step(d)
+		}
+		b = b[d.fill(d, b):]
+		if d.err != nil {
+			return 0, d.err
+		}
+	}
+	return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+	d.sync = true
+	if d.err != nil {
+		return d.err
+	}
+	d.step(d)
+	if d.err == nil {
+		d.w.writeStoredHeader(0, false)
+		d.w.flush()
+		d.err = d.w.err
+	}
+	d.sync = false
+	return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+	d.w = newHuffmanBitWriter(w)
+
+	switch {
+	case level == NoCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).store
+	case level == ConstantCompression:
+		d.w.logNewTablePenalty = 10
+		d.window = make([]byte, 32<<10)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeHuff
+	case level == DefaultCompression:
+		level = 5
+		fallthrough
+	case level >= 1 && level <= 6:
+		d.w.logNewTablePenalty = 7
+		d.fast = newFastEnc(level)
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeFast
+	case 7 <= level && level <= 9:
+		d.w.logNewTablePenalty = 8
+		d.state = &advancedState{}
+		d.compressionLevel = levels[level]
+		d.initDeflate()
+		d.fill = (*compressor).fillDeflate
+		d.step = (*compressor).deflateLazy
+	case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
+		d.w.logNewTablePenalty = 7
+		d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeFast
+	default:
+		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+	}
+	d.level = level
+	return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+	d.w.reset(w)
+	d.sync = false
+	d.err = nil
+	// We only need to reset a few things for Snappy.
+	if d.fast != nil {
+		d.fast.Reset()
+		d.windowEnd = 0
+		d.tokens.Reset()
+		return
+	}
+	switch d.compressionLevel.chain {
+	case 0:
+		// level was NoCompression or ConstantCompression.
+		d.windowEnd = 0
+	default:
+		s := d.state
+		s.chainHead = -1
+		for i := range s.hashHead {
+			s.hashHead[i] = 0
+		}
+		for i := range s.hashPrev {
+			s.hashPrev[i] = 0
+		}
+		s.hashOffset = 1
+		s.index, d.windowEnd = 0, 0
+		d.blockStart, d.byteAvailable = 0, false
+		d.tokens.Reset()
+		s.length = minMatchLength - 1
+		s.offset = 0
+		s.ii = 0
+		s.maxInsertIndex = 0
+	}
+}
+
+func (d *compressor) close() error {
+	if d.err != nil {
+		return d.err
+	}
+	d.sync = true
+	d.step(d)
+	if d.err != nil {
+		return d.err
+	}
+	if d.w.writeStoredHeader(0, true); d.w.err != nil {
+		return d.w.err
+	}
+	d.w.flush()
+	d.w.reset(nil)
+	return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+	var dw Writer
+	if err := dw.d.init(w, level); err != nil {
+		return nil, err
+	}
+	return &dw, nil
+}
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary.  The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output.  The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+	zw, err := NewWriter(w, level)
+	if err != nil {
+		return nil, err
+	}
+	zw.d.fillWindow(dict)
+	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+	return zw, err
+}
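+
+// A minimal round-trip sketch (assuming the package mirrors the standard
+// library's NewReaderDict on the decompression side):
+//
+//	dict := []byte("hello world")
+//	var buf bytes.Buffer
+//	zw, _ := NewWriterDict(&buf, BestSpeed, dict)
+//	zw.Write([]byte("hello world, hello world"))
+//	zw.Close()
+//	zr := NewReaderDict(&buf, dict)
+//	out, _ := io.ReadAll(zr) // original text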
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = 32
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = windowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+	if windowSize < MinCustomWindowSize {
+		return nil, errors.New("flate: requested window size less than MinWindowSize")
+	}
+	if windowSize > MaxCustomWindowSize {
+		return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
+	}
+	var dw Writer
+	if err := dw.d.init(w, -windowSize); err != nil {
+		return nil, err
+	}
+	return &dw, nil
+}
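+
+// A short usage sketch: restrict matches to an 8KiB window, so emitted
+// offsets never exceed that distance, trading some ratio for locality:
+//
+//	zw, err := NewWriterWindow(dst, 8<<10)
+//	if err != nil {
+//		return err
+//	}
+//	defer zw.Close()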
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+	d    compressor
+	dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+	// For more about flushing:
+	// http://www.bolet.org/~pornin/deflate-flush.html
+	return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+	return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+	if len(w.dict) > 0 {
+		// w was created with NewWriterDict
+		w.d.reset(dst)
+		if dst != nil {
+			w.d.fillWindow(w.dict)
+		}
+	} else {
+		// w was created with NewWriter
+		w.d.reset(dst)
+	}
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+	w.dict = dict
+	w.d.reset(dst)
+	w.d.fillWindow(w.dict)
+}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 0000000..bb36351
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+//   - Literal insertions: Runs of one or more symbols are inserted into the data
+//     stream as is. This is accomplished through the writeByte method for a
+//     single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+//     Any valid stream must start with a literal insertion if no preset dictionary
+//     is used.
+//
+//   - Backward copies: Runs of one or more symbols are copied from previously
+//     emitted data. Backward copies come as the tuple (dist, length) where dist
+//     determines how far back in the stream to copy from and length determines how
+//     many bytes to copy. Note that it is valid for the length to be greater than
+//     the distance. Since LZ77 uses forward copies, that situation is used to
+//     perform a form of run-length encoding on repeated runs of symbols.
+//     The writeCopy and tryWriteCopy methods are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checking of its arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+	hist []byte // Sliding window history
+
+	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
+	wrPos int  // Current output position in buffer
+	rdPos int  // Have emitted hist[:rdPos] already
+	full  bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+	*dd = dictDecoder{hist: dd.hist}
+
+	if cap(dd.hist) < size {
+		dd.hist = make([]byte, size)
+	}
+	dd.hist = dd.hist[:size]
+
+	if len(dict) > len(dd.hist) {
+		dict = dict[len(dict)-len(dd.hist):]
+	}
+	dd.wrPos = copy(dd.hist, dict)
+	if dd.wrPos == len(dd.hist) {
+		dd.wrPos = 0
+		dd.full = true
+	}
+	dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+	if dd.full {
+		return len(dd.hist)
+	}
+	return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+	return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+	return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+	return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+	dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+	dd.hist[dd.wrPos] = c
+	dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+	dstBase := dd.wrPos
+	dstPos := dstBase
+	srcPos := dstPos - dist
+	endPos := dstPos + length
+	if endPos > len(dd.hist) {
+		endPos = len(dd.hist)
+	}
+
+	// Copy non-overlapping section after destination position.
+	//
+	// This section is non-overlapping in that the copy length for this section
+	// is always less than or equal to the backwards distance. This can occur
+	// if a distance refers to data that wraps-around in the buffer.
+	// Thus, a backwards copy is performed here; that is, the exact bytes in
+	// the source prior to the copy are placed in the destination.
+	if srcPos < 0 {
+		srcPos += len(dd.hist)
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+		srcPos = 0
+	}
+
+	// Copy possibly overlapping section before destination position.
+	//
+	// This section can overlap if the copy length for this section is larger
+	// than the backwards distance. This is allowed by LZ77 so that repeated
+	// strings can be succinctly represented using (dist, length) pairs.
+	// Thus, a forwards copy is performed here; that is, the bytes copied are
+	// possibly dependent on the resulting bytes in the destination as the copy
+	// progresses along. This is functionally equivalent to the following:
+	//
+	//	for i := 0; i < endPos-dstPos; i++ {
+	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
+	//	}
+	//	dstPos = endPos
+	//
+	for dstPos < endPos {
+		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+	}
+
+	dd.wrPos = dstPos
+	return dstPos - dstBase
+}
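+
+// For example, with history "ab" (wrPos == 2), writeCopy(1, 3) reads the
+// single byte 'b' and, because length > dist, replays it as it is written:
+// the history becomes "abbbb" and 3 is returned. This is the LZ77
+// run-length case described in the dictDecoder documentation above.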
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+	dstPos := dd.wrPos
+	endPos := dstPos + length
+	if dstPos < dist || endPos > len(dd.hist) {
+		return 0
+	}
+	dstBase := dstPos
+	srcPos := dstPos - dist
+
+	// Copy possibly overlapping section before destination position.
+loop:
+	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+	if dstPos < endPos {
+		goto loop // Avoid for-loop so that this function can be inlined
+	}
+
+	dd.wrPos = dstPos
+	return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+	toRead := dd.hist[dd.rdPos:dd.wrPos]
+	dd.rdPos = dd.wrPos
+	if dd.wrPos == len(dd.hist) {
+		dd.wrPos, dd.rdPos = 0, 0
+		dd.full = true
+	}
+	return toRead
+}
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 0000000..0e8b163
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,232 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"fmt"
+	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+type fastEnc interface {
+	Encode(dst *tokens, src []byte)
+	Reset()
+}
+
+func newFastEnc(level int) fastEnc {
+	switch level {
+	case 1:
+		return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 2:
+		return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 3:
+		return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 4:
+		return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 5:
+		return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+	case 6:
+		return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+	default:
+		panic("invalid level specified")
+	}
+}
+
+const (
+	tableBits       = 15             // Bits used in the table
+	tableSize       = 1 << tableBits // Size of the table
+	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+	baseMatchOffset = 1              // The smallest match offset
+	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
+	maxMatchOffset  = 1 << 15        // The largest match offset
+
+	bTableBits   = 17                                               // Bits used in the big tables
+	bTableSize   = 1 << bTableBits                                  // Size of the table
+	allocHistory = maxStoreBlockSize * 5                            // Size to preallocate for history.
+	bufferReset  = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
+)
+
+const (
+	prime3bytes = 506832829
+	prime4bytes = 2654435761
+	prime5bytes = 889523592379
+	prime6bytes = 227718039650203
+	prime7bytes = 58295818150454627
+	prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+func load3232(b []byte, i int32) uint32 {
+	return le.Load32(b, i)
+}
+
+func load6432(b []byte, i int32) uint64 {
+	return le.Load64(b, i)
+}
+
+type tableEntry struct {
+	offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+	hist []byte
+	cur  int32
+}
+
+func (e *fastGen) addBlock(src []byte) int32 {
+	// check if we have space already
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.hist = make([]byte, 0, allocHistory)
+		} else {
+			if cap(e.hist) < maxMatchOffset*2 {
+				panic("unexpected buffer size")
+			}
+			// Move down
+			offset := int32(len(e.hist)) - maxMatchOffset
+			// copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+			*(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:maxMatchOffset]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+type tableEntryPrev struct {
+	Cur  tableEntry
+	Prev tableEntry
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+	return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
+}
+
+// hashLen returns a hash of the lowest mls bytes of u with length output bits.
+// mls must be >=3 and <=8. Any other value will return hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be a constant for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+	switch mls {
+	case 3:
+		return (uint32(u<<8) * prime3bytes) >> (32 - length)
+	case 5:
+		return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+	case 6:
+		return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+	case 7:
+		return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+	case 8:
+		return uint32((u * prime8bytes) >> (64 - length))
+	default:
+		return (uint32(u) * prime4bytes) >> (32 - length)
+	}
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and s < len(src).
+func (e *fastGen) matchlen(s, t int, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := min(s+maxMatchLength-4, len(src))
+	left := s1 - s
+	n := int32(0)
+	for left >= 8 {
+		diff := le.Load64(src, s) ^ le.Load64(src, t)
+		if diff != 0 {
+			return n + int32(bits.TrailingZeros64(diff)>>3)
+		}
+		s += 8
+		t += 8
+		n += 8
+		left -= 8
+	}
+
+	a := src[s:s1]
+	b := src[t:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
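+
+// The XOR above compares 8 bytes at a time: the lowest non-zero byte of
+// le.Load64(src, s) ^ le.Load64(src, t) marks the first mismatch, and
+// bits.TrailingZeros64(diff)>>3 converts its bit position into a byte
+// count. E.g. a first mismatch at byte 2 sets a bit in bits 16-23, so the
+// match length grows by exactly 2.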
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and s < len(src).
+func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > maxMatchOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	left := len(src) - s
+	n := int32(0)
+	for left >= 8 {
+		diff := le.Load64(src, s) ^ le.Load64(src, t)
+		if diff != 0 {
+			return n + int32(bits.TrailingZeros64(diff)>>3)
+		}
+		s += 8
+		t += 8
+		n += 8
+		left -= 8
+	}
+
+	a := src[s:]
+	b := src[t:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+	if cap(e.hist) < allocHistory {
+		e.hist = make([]byte, 0, allocHistory)
+	}
+	// We offset current position so everything will be out of reach.
+	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+	if e.cur <= bufferReset {
+		e.cur += maxMatchOffset + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 0000000..afdc8c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1183 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+const (
+	// The largest offset code.
+	offsetCodeCount = 30
+
+	// The special code used to mark the end of a block.
+	endBlockMarker = 256
+
+	// The first length code.
+	lengthCodesStart = 257
+
+	// The number of codegen codes.
+	codegenCodeCount = 19
+	badCode          = 255
+
+	// maxPredefinedTokens is the maximum number of tokens
+	// for which we check whether the fixed encoding is smaller.
+	maxPredefinedTokens = 250
+
+	// bufferFlushSize indicates the buffer size
+	// after which bytes are flushed to the writer.
+	// Should preferably be a multiple of 6, since
+	// we accumulate 6 bytes between writes to the buffer.
+	bufferFlushSize = 246
+)
+
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
+// The number of extra bits needed by length code X - LENGTH_CODES_START.
+var lengthExtraBits = [32]uint8{
+	/* 257 */ 0, 0, 0,
+	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+	/* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - LENGTH_CODES_START.
+var lengthBase = [32]uint8{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+	64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
+// offset code word extra bits.
+var offsetExtraBits = [32]int8{
+	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+	/* extended window */
+	14, 14,
+}
+
+var offsetCombined = [32]uint32{}
+
+func init() {
+	var offsetBase = [32]uint32{
+		/* normal deflate */
+		0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+		0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+		0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+		0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+		0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+		0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+		/* extended window */
+		0x008000, 0x00c000,
+	}
+
+	for i := range offsetCombined[:] {
+		// Don't use extended window values...
+		if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
+			continue
+		}
+		offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
+	}
+}
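+
+// Each populated entry thus packs the extra-bit count into the low byte and
+// the base offset above it; e.g. offsetCombined[6] == 2 | 8<<8, i.e. base
+// offset 8 with 2 extra bits.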
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+	// writer is the underlying writer.
+	// Do not use it directly; use the write method, which ensures
+	// that Write errors are sticky.
+	writer io.Writer
+
+	// Data waiting to be written is bytes[0:nbytes]
+	// and then the low nbits of bits.
+	bits            uint64
+	nbits           uint8
+	nbytes          uint8
+	lastHuffMan     bool
+	literalEncoding *huffmanEncoder
+	tmpLitEncoding  *huffmanEncoder
+	offsetEncoding  *huffmanEncoder
+	codegenEncoding *huffmanEncoder
+	err             error
+	lastHeader      int
+	// logNewTablePenalty is the shift applied when estimating the cost of
+	// a new table; set to 0, a reused block can be up to 2x the size.
+	logNewTablePenalty uint
+	bytes              [256 + 8]byte
+	literalFreq        [lengthCodesStart + 32]uint16
+	offsetFreq         [32]uint16
+	codegenFreq        [codegenCodeCount]uint16
+
+	// codegen must have an extra space for the final symbol.
+	codegen [literalCount + offsetCodeCount + 1]uint8
+}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a new table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A Huffman table is not optimal, which is why we add a penalty, and generating a new table
+// is slower both for compression and decompression.
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+	return &huffmanBitWriter{
+		writer:          w,
+		literalEncoding: newHuffmanEncoder(literalCount),
+		tmpLitEncoding:  newHuffmanEncoder(literalCount),
+		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
+	}
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+	w.writer = writer
+	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+	w.lastHeader = 0
+	w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
+	a := t.offHist[:offsetCodeCount]
+	b := w.offsetEncoding.codes
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+
+	a = t.extraHist[:literalCount-256]
+	b = w.literalEncoding.codes[256:literalCount]
+	b = b[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+
+	a = t.litHist[:256]
+	b = w.literalEncoding.codes[:len(a)]
+	for i, v := range a {
+		if v != 0 && b[i].zero() {
+			return false
+		}
+	}
+	return true
+}
+
+func (w *huffmanBitWriter) flush() {
+	if w.err != nil {
+		w.nbits = 0
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+	n := w.nbytes
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		if w.nbits > 8 { // Avoid underflow
+			w.nbits -= 8
+		} else {
+			w.nbits = 0
+		}
+		n++
+	}
+	w.bits = 0
+	w.write(w.bytes[:n])
+	w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+	w.bits |= uint64(b) << (w.nbits & 63)
+	w.nbits += nb
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+}
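+
+// Bits accumulate LSB-first, matching DEFLATE bit order. For example,
+// writeBits(0b1, 1) followed by writeBits(0b10, 2) leaves w.bits == 0b101:
+// a reader consumes bit 0 first (1), then bits 1-2 as the value 0b10. Once
+// 48 bits are pending, writeOutBits stores six whole bytes in one go.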
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+	if w.err != nil {
+		return
+	}
+	n := w.nbytes
+	if w.nbits&7 != 0 {
+		w.err = InternalError("writeBytes with unfinished bits")
+		return
+	}
+	for w.nbits != 0 {
+		w.bytes[n] = byte(w.bits)
+		w.bits >>= 8
+		w.nbits -= 8
+		n++
+	}
+	if n != 0 {
+		w.write(w.bytes[:n])
+	}
+	w.nbytes = 0
+	w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array).  This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequencies
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker.
+//
+//	numLiterals      The number of literals in literalEncoding
+//	numOffsets       The number of offsets in offsetEncoding
+//	litenc, offenc   The literal and offset encoder to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+	for i := range w.codegenFreq {
+		w.codegenFreq[i] = 0
+	}
+	// Note that we are using codegen both as a temporary variable for holding
+	// a copy of the frequencies, and as the place where we put the result.
+	// This is fine because the output is always shorter than the input used
+	// so far.
+	codegen := w.codegen[:] // cache
+	// Copy the concatenated code sizes to codegen. Put a marker at the end.
+	cgnl := codegen[:numLiterals]
+	for i := range cgnl {
+		cgnl[i] = litEnc.codes[i].len()
+	}
+
+	cgnl = codegen[numLiterals : numLiterals+numOffsets]
+	for i := range cgnl {
+		cgnl[i] = offEnc.codes[i].len()
+	}
+	codegen[numLiterals+numOffsets] = badCode
+
+	size := codegen[0]
+	count := 1
+	outIndex := 0
+	for inIndex := 1; size != badCode; inIndex++ {
+		// INVARIANT: We have seen "count" copies of size that have not yet
+		// had output generated for them.
+		nextSize := codegen[inIndex]
+		if nextSize == size {
+			count++
+			continue
+		}
+		// We need to generate codegen indicating "count" of size.
+		if size != 0 {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+			count--
+			for count >= 3 {
+				n := 6
+				if n > count {
+					n = count
+				}
+				codegen[outIndex] = 16
+				outIndex++
+				codegen[outIndex] = uint8(n - 3)
+				outIndex++
+				w.codegenFreq[16]++
+				count -= n
+			}
+		} else {
+			for count >= 11 {
+				n := 138
+				if n > count {
+					n = count
+				}
+				codegen[outIndex] = 18
+				outIndex++
+				codegen[outIndex] = uint8(n - 11)
+				outIndex++
+				w.codegenFreq[18]++
+				count -= n
+			}
+			if count >= 3 {
+				// count >= 3 && count <= 10
+				codegen[outIndex] = 17
+				outIndex++
+				codegen[outIndex] = uint8(count - 3)
+				outIndex++
+				w.codegenFreq[17]++
+				count = 0
+			}
+		}
+		count--
+		for ; count >= 0; count-- {
+			codegen[outIndex] = size
+			outIndex++
+			w.codegenFreq[size]++
+		}
+		// Set up invariant for next time through the loop.
+		size = nextSize
+		count = 1
+	}
+	// Marker indicating the end of the codegen.
+	codegen[outIndex] = badCode
+}
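+
+// Two worked examples of the run-length encoding above: a run of 20 zero
+// lengths becomes code 18 with extra value 20-11 = 9, while a non-zero
+// length occurring 5 times becomes the length itself followed by code 16
+// with extra value 4-3 = 1 ("repeat the previous length 4 more times").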
+
+func (w *huffmanBitWriter) codegens() int {
+	numCodegens := len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+	numCodegens = len(w.codegenFreq)
+	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+		numCodegens--
+	}
+	return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+		int(w.codegenFreq[16])*2 +
+		int(w.codegenFreq[17])*3 +
+		int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicReuseSize returns the size of dynamically encoded data in bits when reusing the given encoders.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+	size = litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:])
+	return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+	header, numCodegens := w.headerSize()
+	size = header +
+		litEnc.bitLength(w.literalFreq[:]) +
+		offEnc.bitLength(w.offsetFreq[:]) +
+		extraBits
+	return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+	total := 0
+	for i, n := range w.literalFreq[257:literalCount] {
+		total += int(n) * int(lengthExtraBits[i&31])
+	}
+	for i, n := range w.offsetFreq[:offsetCodeCount] {
+		total += int(n) * int(offsetExtraBits[i&31])
+	}
+	return total
+}
+
+// fixedSize returns the size, in bits, of data encoded with the fixed
+// Huffman tables.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+	return 3 +
+		fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+		fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+		extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the input
+// fits inside a single stored block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+	if in == nil {
+		return 0, false
+	}
+	if len(in) <= maxStoreBlockSize {
+		return (len(in) + 5) * 8, true
+	}
+	return 0, false
+}
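+
+// The 5-byte overhead in storedSize is the stored-block framing: the 3
+// header bits are flushed to a byte boundary (1 byte), followed by LEN and
+// its one's complement NLEN as two 16-bit values (4 bytes); see
+// writeStoredHeader below.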
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+	// The function does not get inlined if we "& 63" the shift.
+	w.bits |= c.code64() << (w.nbits & 63)
+	w.nbits += c.len()
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+	bits := w.bits
+	w.bits >>= 48
+	w.nbits -= 48
+	n := w.nbytes
+
+	// We over-write 2 bytes past the 6 we keep, but it is faster.
+	le.Store64(w.bytes[n:], bits)
+	n += 6
+
+	if n >= bufferFlushSize {
+		if w.err != nil {
+			n = 0
+			return
+		}
+		w.write(w.bytes[:n])
+		n = 0
+	}
+
+	w.nbytes = n
+}
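+
+// A note on the technique above: codes accumulate in the 64-bit w.bits
+// register and are flushed 48 bits (6 bytes) at a time. The 64-bit store
+// writes 8 bytes but w.nbytes only advances by 6, so the 2 extra bytes are
+// scratch that the next store overwrites. Because a flush happens whenever
+// w.nbits reaches 48 or more and no single write adds more than 16 bits,
+// w.nbits stays below 64 and the "(nbits & 63)" shifts never discard bits.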
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+//	numLiterals  The number of literals specified in codegen
+//	numOffsets   The number of offsets specified in codegen
+//	numCodegens  The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	var firstBits int32 = 4
+	if isEof {
+		firstBits = 5
+	}
+	w.writeBits(firstBits, 3)
+	w.writeBits(int32(numLiterals-257), 5)
+	w.writeBits(int32(numOffsets-1), 5)
+	w.writeBits(int32(numCodegens-4), 4)
+
+	for i := 0; i < numCodegens; i++ {
+		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
+		w.writeBits(int32(value), 3)
+	}
+
+	i := 0
+	for {
+		var codeWord = uint32(w.codegen[i])
+		i++
+		if codeWord == badCode {
+			break
+		}
+		w.writeCode(w.codegenEncoding.codes[codeWord])
+
+		switch codeWord {
+		case 16:
+			w.writeBits(int32(w.codegen[i]), 2)
+			i++
+		case 17:
+			w.writeBits(int32(w.codegen[i]), 3)
+			i++
+		case 18:
+			w.writeBits(int32(w.codegen[i]), 7)
+			i++
+		}
+	}
+}
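+
+// The header written above follows the RFC 1951 3.2.7 layout, LSB first:
+//
+//	3 bits   BFINAL (isEof) and BTYPE=10 (dynamic Huffman)
+//	5 bits   HLIT  = numLiterals - 257
+//	5 bits   HDIST = numOffsets - 1
+//	4 bits   HCLEN = numCodegens - 4
+//	3 bits   per code length of the codegen alphabet, in codegenOrder,
+//	         followed by the run-length encoded codegen itself.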
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+	if w.err != nil {
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	// To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+	if length == 0 && isEof {
+		w.writeFixedHeader(isEof)
+		// EOB: 7 bits, value: 0
+		w.writeBits(0, 7)
+		w.flush()
+		return
+	}
+
+	var flag int32
+	if isEof {
+		flag = 1
+	}
+	w.writeBits(flag, 3)
+	w.flush()
+	w.writeBits(int32(length), 16)
+	w.writeBits(int32(^uint16(length)), 16)
+}
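+
+// For example, a non-final 300-byte stored block is framed as BTYPE=00
+// (3 header bits, padded to a byte boundary by the flush), then
+// LEN = 0x012C and NLEN = ^LEN = 0xFED3, each written as 16 bits.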
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+	if w.err != nil {
+		return
+	}
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	// Indicate that we are a fixed Huffman block
+	var value int32 = 2
+	if isEof {
+		value = 3
+	}
+	w.writeBits(value, 3)
+}
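+
+// Here value 2 is binary 010, read LSB first as BFINAL=0, BTYPE=01 (fixed
+// Huffman); value 3 additionally sets BFINAL=1.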
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+	if w.err != nil {
+		return
+	}
+
+	tokens.AddEOB()
+	if w.lastHeader > 0 {
+		// We owe an EOB
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+	numLiterals, numOffsets := w.indexTokens(tokens, false)
+	w.generate()
+	var extraBits int
+	storedSize, storable := w.storedSize(input)
+	if storable {
+		extraBits = w.extraBitSize()
+	}
+
+	// Figure out smallest code.
+	// Fixed Huffman baseline.
+	var literalEncoding = fixedLiteralEncoding
+	var offsetEncoding = fixedOffsetEncoding
+	var size = math.MaxInt32
+	if tokens.n < maxPredefinedTokens {
+		size = w.fixedSize(extraBits)
+	}
+
+	// Dynamic Huffman?
+	var numCodegens int
+
+	// Generate codegen and codegenFrequencies, which indicates how to encode
+	// the literalEncoding and the offsetEncoding.
+	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+	w.codegenEncoding.generate(w.codegenFreq[:], 7)
+	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+	if dynamicSize < size {
+		size = dynamicSize
+		literalEncoding = w.literalEncoding
+		offsetEncoding = w.offsetEncoding
+	}
+
+	// Stored bytes?
+	if storable && storedSize <= size {
+		w.writeStoredHeader(len(input), eof)
+		w.writeBytes(input)
+		return
+	}
+
+	// Huffman.
+	if literalEncoding == fixedLiteralEncoding {
+		w.writeFixedHeader(eof)
+	} else {
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+	}
+
+	// Write the tokens.
+	w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
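+
+// In other words, writeBlock conceptually computes
+//
+//	best = min(storedSize, fixedSize, dynamicSize)
+//
+// where the fixed tables are only considered for small blocks
+// (tokens.n < maxPredefinedTokens) and a stored block only when the raw
+// input bytes were supplied.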
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbol histogram is heavily skewed.
+// If input is supplied and the compression savings are below 1/16th of
+// the input size, the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+	if w.err != nil {
+		return
+	}
+
+	sync = sync || eof
+	if sync {
+		tokens.AddEOB()
+	}
+
+	// A pure Huffman table cannot be reused, and at EOF the pending
+	// block must be closed.
+	if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+		// We will not try to reuse.
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+
+	// fillReuse enables filling of empty values.
+	// This will make encodings always reusable without testing.
+	// However, this does not appear to be beneficial in most cases.
+	const fillReuse = false
+
+	// Check if we can reuse...
+	if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+	}
+
+	numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+	extraBits := 0
+	ssize, storable := w.storedSize(input)
+
+	const usePrefs = true
+	if storable || w.lastHeader > 0 {
+		extraBits = w.extraBitSize()
+	}
+
+	var size int
+
+	// Check if we should reuse.
+	if w.lastHeader > 0 {
+		// Estimate size for using a new table.
+		// Use the previous header size as the best estimate.
+		newSize := w.lastHeader + tokens.EstimatedBits()
+		// The estimate assumes an optimal table; add a penalty to make it
+		// more realistic, which favors reusing the current table a bit more.
+		newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
+
+		reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
+
+		// Check if a new table is better.
+		if newSize < reuseSize {
+			// Write the EOB we owe.
+			w.writeCode(w.literalEncoding.codes[endBlockMarker])
+			size = newSize
+			w.lastHeader = 0
+		} else {
+			size = reuseSize
+		}
+
+		if tokens.n < maxPredefinedTokens {
+			if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+				// Check if we get a reasonable size decrease.
+				if storable && ssize <= size {
+					w.writeStoredHeader(len(input), eof)
+					w.writeBytes(input)
+					return
+				}
+				w.writeFixedHeader(eof)
+				if !sync {
+					tokens.AddEOB()
+				}
+				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+				return
+			}
+		}
+		// Check if we get a reasonable size decrease.
+		if storable && ssize <= size {
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+	}
+
+	// We want a new block/table
+	if w.lastHeader == 0 {
+		if fillReuse && !sync {
+			w.fillTokens()
+			numLiterals, numOffsets = maxNumLit, maxNumDist
+		} else {
+			w.literalFreq[endBlockMarker] = 1
+		}
+
+		w.generate()
+		// Generate codegen and codegenFrequencies, which indicates how to encode
+		// the literalEncoding and the offsetEncoding.
+		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+		w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
+		var numCodegens int
+		if fillReuse && !sync {
+			// Reindex for accurate size...
+			w.indexTokens(tokens, true)
+		}
+		size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+		// Store predefined, if we don't get a reasonable improvement.
+		if tokens.n < maxPredefinedTokens {
+			if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+				// Store bytes, if we don't get an improvement.
+				if storable && ssize <= preSize {
+					w.writeStoredHeader(len(input), eof)
+					w.writeBytes(input)
+					return
+				}
+				w.writeFixedHeader(eof)
+				if !sync {
+					tokens.AddEOB()
+				}
+				w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+				return
+			}
+		}
+
+		if storable && ssize <= size {
+			// Store bytes, if we don't get an improvement.
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+
+		// Write Huffman table.
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+		if !sync {
+			w.lastHeader, _ = w.headerSize()
+		}
+		w.lastHuffMan = false
+	}
+
+	if sync {
+		w.lastHeader = 0
+	}
+	// Write the tokens.
+	w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+func (w *huffmanBitWriter) fillTokens() {
+	for i, v := range w.literalFreq[:literalCount] {
+		if v == 0 {
+			w.literalFreq[i] = 1
+		}
+	}
+	for i, v := range w.offsetFreq[:offsetCodeCount] {
+		if v == 0 {
+			w.offsetFreq[i] = 1
+		}
+	}
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+	//copy(w.literalFreq[:], t.litHist[:])
+	*(*[256]uint16)(w.literalFreq[:]) = t.litHist
+	//copy(w.literalFreq[256:], t.extraHist[:])
+	*(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+	w.offsetFreq = t.offHist
+
+	if t.n == 0 {
+		return
+	}
+	if filled {
+		return maxNumLit, maxNumDist
+	}
+	// get the number of literals
+	numLiterals = len(w.literalFreq)
+	for w.literalFreq[numLiterals-1] == 0 {
+		numLiterals--
+	}
+	// get the number of offsets
+	numOffsets = len(w.offsetFreq)
+	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+		numOffsets--
+	}
+	if numOffsets == 0 {
+		// We haven't found a single match. If we want to go with the dynamic encoding,
+		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
+		w.offsetFreq[0] = 1
+		numOffsets = 1
+	}
+	return
+}
+
+func (w *huffmanBitWriter) generate() {
+	w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+	w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+	if w.err != nil {
+		return
+	}
+	if len(tokens) == 0 {
+		return
+	}
+
+	// Only last token should be endBlockMarker.
+	var deferEOB bool
+	if tokens[len(tokens)-1] == endBlockMarker {
+		tokens = tokens[:len(tokens)-1]
+		deferEOB = true
+	}
+
+	// Create slices up to the next power of two to avoid bounds checks.
+	lits := leCodes[:256]
+	offs := oeCodes[:32]
+	lengths := leCodes[lengthCodesStart:]
+	lengths = lengths[:32]
+
+	// Go 1.16 LOVES having these on stack.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+	for _, t := range tokens {
+		if t < 256 {
+			//w.writeCode(lits[t.literal()])
+			c := lits[t]
+			bits |= c.code64() << (nbits & 63)
+			nbits += c.len()
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+			continue
+		}
+
+		// Write the length
+		length := t.length()
+		lengthCode := lengthCode(length) & 31
+		if false {
+			w.writeCode(lengths[lengthCode])
+		} else {
+			// inlined
+			c := lengths[lengthCode]
+			bits |= c.code64() << (nbits & 63)
+			nbits += c.len()
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+
+		if lengthCode >= lengthExtraBitsMinCode {
+			extraLengthBits := lengthExtraBits[lengthCode]
+			//w.writeBits(extraLength, extraLengthBits)
+			extraLength := int32(length - lengthBase[lengthCode])
+			bits |= uint64(extraLength) << (nbits & 63)
+			nbits += extraLengthBits
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+		// Write the offset
+		offset := t.offset()
+		offsetCode := (offset >> 16) & 31
+		if false {
+			w.writeCode(offs[offsetCode])
+		} else {
+			// inlined
+			c := offs[offsetCode]
+			bits |= c.code64() << (nbits & 63)
+			nbits += c.len()
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+
+		if offsetCode >= offsetExtraBitsMinCode {
+			offsetComb := offsetCombined[offsetCode]
+			//w.writeBits(extraOffset, extraOffsetBits)
+			bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+			nbits += uint8(offsetComb)
+			if nbits >= 48 {
+				le.Store64(w.bytes[nbytes:], bits)
+				//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+				bits >>= 48
+				nbits -= 48
+				nbytes += 6
+				if nbytes >= bufferFlushSize {
+					if w.err != nil {
+						nbytes = 0
+						return
+					}
+					_, w.err = w.writer.Write(w.bytes[:nbytes])
+					nbytes = 0
+				}
+			}
+		}
+	}
+	// Restore...
+	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+	if deferEOB {
+		w.writeCode(leCodes[endBlockMarker])
+	}
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+	w := newHuffmanBitWriter(nil)
+	w.offsetFreq[0] = 1
+	huffOffset = newHuffmanEncoder(offsetCodeCount)
+	huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+	if w.err != nil {
+		return
+	}
+
+	// Clear histogram
+	for i := range w.literalFreq[:] {
+		w.literalFreq[i] = 0
+	}
+	if !w.lastHuffMan {
+		for i := range w.offsetFreq[:] {
+			w.offsetFreq[i] = 0
+		}
+	}
+
+	const numLiterals = endBlockMarker + 1
+	const numOffsets = 1
+
+	// Add everything as literals
+	// We have to estimate the header size.
+	// Assume header is around 70 bytes:
+	// https://stackoverflow.com/a/25454430
+	const guessHeaderSizeBits = 70 * 8
+	histogram(input, w.literalFreq[:numLiterals])
+	ssize, storable := w.storedSize(input)
+	if storable && len(input) > 1024 {
+		// Quick check for incompressible content: measure how far
+		// the byte histogram deviates from uniform.
+		abs := float64(0)
+		avg := float64(len(input)) / 256
+		max := float64(len(input) * 2)
+		for _, v := range w.literalFreq[:256] {
+			diff := float64(v) - avg
+			abs += diff * diff
+			if abs > max {
+				break
+			}
+		}
+		if abs < max {
+			if debugDeflate {
+				fmt.Println("stored", abs, "<", max)
+			}
+			// No chance we can compress this...
+			w.writeStoredHeader(len(input), eof)
+			w.writeBytes(input)
+			return
+		}
+	}
+	w.literalFreq[endBlockMarker] = 1
+	w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+	estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+	if estBits < math.MaxInt32 {
+		estBits += w.lastHeader
+		if w.lastHeader == 0 {
+			estBits += guessHeaderSizeBits
+		}
+		estBits += estBits >> w.logNewTablePenalty
+	}
+
+	// Store bytes, if we don't get a reasonable improvement.
+	if storable && ssize <= estBits {
+		if debugDeflate {
+			fmt.Println("stored,", ssize, "<=", estBits)
+		}
+		w.writeStoredHeader(len(input), eof)
+		w.writeBytes(input)
+		return
+	}
+
+	if w.lastHeader > 0 {
+		reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
+
+		if estBits < reuseSize {
+			if debugDeflate {
+				fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
+			}
+			// We owe an EOB
+			w.writeCode(w.literalEncoding.codes[endBlockMarker])
+			w.lastHeader = 0
+		} else if debugDeflate {
+			fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+		}
+	}
+
+	count := 0
+	if w.lastHeader == 0 {
+		// Use the temp encoding, so swap.
+		w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
+		// Generate codegen and codegenFrequencies, which indicates how to encode
+		// the literalEncoding and the offsetEncoding.
+		w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+		w.codegenEncoding.generate(w.codegenFreq[:], 7)
+		numCodegens := w.codegens()
+
+		// Huffman.
+		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+		w.lastHuffMan = true
+		w.lastHeader, _ = w.headerSize()
+		if debugDeflate {
+			count += w.lastHeader
+			fmt.Println("header:", count/8)
+		}
+	}
+
+	encoding := w.literalEncoding.codes[:256]
+	// Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+	bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+	if debugDeflate {
+		count -= int(nbytes)*8 + int(nbits)
+	}
+	// Unroll, write 3 codes/loop.
+	// Fastest number of unrolls.
+	for len(input) > 3 {
+		// We must have at least 48 bits free.
+		if nbits >= 8 {
+			n := nbits >> 3
+			le.Store64(w.bytes[nbytes:], bits)
+			bits >>= (n * 8) & 63
+			nbits -= n * 8
+			nbytes += n
+		}
+		if nbytes >= bufferFlushSize {
+			if w.err != nil {
+				nbytes = 0
+				return
+			}
+			if debugDeflate {
+				count += int(nbytes) * 8
+			}
+			_, w.err = w.writer.Write(w.bytes[:nbytes])
+			nbytes = 0
+		}
+		a, b := encoding[input[0]], encoding[input[1]]
+		bits |= a.code64() << (nbits & 63)
+		bits |= b.code64() << ((nbits + a.len()) & 63)
+		c := encoding[input[2]]
+		nbits += b.len() + a.len()
+		bits |= c.code64() << (nbits & 63)
+		nbits += c.len()
+		input = input[3:]
+	}
+
+	// Remaining...
+	for _, t := range input {
+		if nbits >= 48 {
+			le.Store64(w.bytes[nbytes:], bits)
+			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+			bits >>= 48
+			nbits -= 48
+			nbytes += 6
+			if nbytes >= bufferFlushSize {
+				if w.err != nil {
+					nbytes = 0
+					return
+				}
+				if debugDeflate {
+					count += int(nbytes) * 8
+				}
+				_, w.err = w.writer.Write(w.bytes[:nbytes])
+				nbytes = 0
+			}
+		}
+		// Bitwriting inlined, ~30% speedup
+		c := encoding[t]
+		bits |= c.code64() << (nbits & 63)
+
+		nbits += c.len()
+		if debugDeflate {
+			count += int(c.len())
+		}
+	}
+	// Restore...
+	w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+	if debugDeflate {
+		nb := count + int(nbytes)*8 + int(nbits)
+		fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+	}
+	// Flush if needed to have space.
+	if w.nbits >= 48 {
+		w.writeOutBits()
+	}
+
+	if eof || sync {
+		w.writeCode(w.literalEncoding.codes[endBlockMarker])
+		w.lastHeader = 0
+		w.lastHuffMan = false
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 0000000..be7b58b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math"
+	"math/bits"
+)
+
+const (
+	maxBitsLimit = 16
+	// number of valid literals
+	literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+	return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+	return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+	return h == 0
+}
+
+type huffmanEncoder struct {
+	codes    []hcode
+	bitCount [17]int32
+
+	// Allocate a reusable buffer with the longest possible frequency table.
+	// Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+	// The largest of these is literalCount, so we allocate for that case.
+	freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+	literal uint16
+	freq    uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+	// Our level, for better printing.
+	level int32
+
+	// The frequency of the last node at this level
+	lastFreq int32
+
+	// The frequency of the next character to add to this level
+	nextCharFreq int32
+
+	// The frequency of the next pair (from level below) to add to this level.
+	// Only valid if the "needed" value of the next lower level is 0.
+	nextPairFreq int32
+
+	// The number of chains remaining to generate for this level before moving
+	// up to the next level
+	needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+	*h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+	return hcode(length) | (hcode(code) << 8)
+}
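+
+// An hcode packs the bit length into the low 8 bits and the code value
+// into the upper 24 bits, so for example:
+//
+//	h := newhcode(0b10110, 5)
+//	h.len()    // 5
+//	h.code64() // 0b10110 == 22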
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
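+
+// DEFLATE transmits Huffman codes LSB first, so each code is bit-reversed
+// once at table-build time, e.g. reverseBits(0b110, 3) == 0b011.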
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+	// Round capacity up to the next power of two.
+	c := uint(bits.Len32(uint32(size - 1)))
+	return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// generateFixedLiteralEncoding generates the Huffman encoding
+// corresponding to the fixed literal table.
+func generateFixedLiteralEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(literalCount)
+	codes := h.codes
+	var ch uint16
+	for ch = 0; ch < literalCount; ch++ {
+		var bits uint16
+		var size uint8
+		switch {
+		case ch < 144:
+			// size 8, 00110000 .. 10111111
+			bits = ch + 48
+			size = 8
+		case ch < 256:
+			// size 9, 110010000 .. 111111111
+			bits = ch + 400 - 144
+			size = 9
+		case ch < 280:
+			// size 7, 0000000 .. 0010111
+			bits = ch - 256
+			size = 7
+		default:
+			// size 8, 11000000 .. 11000111
+			bits = ch + 192 - 280
+			size = 8
+		}
+		codes[ch] = newhcode(reverseBits(bits, size), size)
+	}
+	return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+	h := newHuffmanEncoder(30)
+	codes := h.codes
+	for ch := range codes {
+		codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+	}
+	return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			total += int(f) * int(h.codes[i].len())
+		}
+	}
+	return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+	var total int
+	for _, f := range b {
+		total += int(h.codes[f].len())
+	}
+	return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+	var total int
+	for i, f := range freq {
+		if f != 0 {
+			code := h.codes[i]
+			if code.zero() {
+				return math.MaxInt32
+			}
+			total += int(f) * int(code.len())
+		}
+	}
+	return total
+}
+
+// bitCounts returns the number of literals assigned to each bit size in the
+// Huffman encoding.
+//
+// This method is only called when list.length >= 3.
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+//	list     An array of the literals with non-zero frequencies and their
+//	         associated frequencies. The array is in order of increasing
+//	         frequency and has as its last element a special element with
+//	         frequency MaxInt32.
+//	maxBits  The maximum number of bits that should be used to encode any
+//	         literal. Must be less than 16.
+//
+// The returned slice is indexed by bit count: slice[i] is the number of
+// literals that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+	if maxBits >= maxBitsLimit {
+		panic("flate: maxBits too large")
+	}
+	n := int32(len(list))
+	list = list[0 : n+1]
+	list[n] = maxNode()
+
+	// The tree can't have greater depth than n - 1, no matter what. This
+	// saves a little bit of work in some small cases.
+	if maxBits > n-1 {
+		maxBits = n - 1
+	}
+
+	// Create information about each of the levels.
+	// A bogus "Level 0" whose sole purpose is so that
+	// level1.prev.needed==0.  This makes level1.nextPairFreq
+	// be a legitimate value that never gets chosen.
+	var levels [maxBitsLimit]levelInfo
+	// leafCounts[i] counts the number of literals at the left
+	// of ancestors of the rightmost node at level i.
+	// leafCounts[i][j] is the number of literals at the left
+	// of the level j ancestor.
+	var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+	// Descending to only have 1 bounds check.
+	l2f := int32(list[2].freq)
+	l1f := int32(list[1].freq)
+	l0f := int32(list[0].freq) + int32(list[1].freq)
+
+	for level := int32(1); level <= maxBits; level++ {
+		// For every level, the first two items are the first two characters.
+		// We initialize the levels as if we had already figured this out.
+		levels[level] = levelInfo{
+			level:        level,
+			lastFreq:     l1f,
+			nextCharFreq: l2f,
+			nextPairFreq: l0f,
+		}
+		leafCounts[level][level] = 2
+		if level == 1 {
+			levels[level].nextPairFreq = math.MaxInt32
+		}
+	}
+
+	// We need a total of 2*n - 2 items at top level and have already generated 2.
+	levels[maxBits].needed = 2*n - 4
+
+	level := uint32(maxBits)
+	for level < 16 {
+		l := &levels[level]
+		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+			// We've run out of both leaves and pairs.
+			// End all calculations for this level.
+			// To make sure we never come back to this level or any lower level,
+			// set nextPairFreq impossibly large.
+			l.needed = 0
+			levels[level+1].nextPairFreq = math.MaxInt32
+			level++
+			continue
+		}
+
+		prevFreq := l.lastFreq
+		if l.nextCharFreq < l.nextPairFreq {
+			// The next item on this row is a leaf node.
+			n := leafCounts[level][level] + 1
+			l.lastFreq = l.nextCharFreq
+			// Lower leafCounts are the same as the previous node's.
+			leafCounts[level][level] = n
+			e := list[n]
+			if e.literal < math.MaxUint16 {
+				l.nextCharFreq = int32(e.freq)
+			} else {
+				l.nextCharFreq = math.MaxInt32
+			}
+		} else {
+			// The next item on this row is a pair from the previous row.
+			// nextPairFreq isn't valid until we generate two
+			// more values in the level below.
+			l.lastFreq = l.nextPairFreq
+			// Take leaf counts from the lower level, except counts[level] remains the same.
+			if true {
+				save := leafCounts[level][level]
+				leafCounts[level] = leafCounts[level-1]
+				leafCounts[level][level] = save
+			} else {
+				copy(leafCounts[level][:level], leafCounts[level-1][:level])
+			}
+			levels[l.level-1].needed = 2
+		}
+
+		if l.needed--; l.needed == 0 {
+			// We've done everything we need to do for this level.
+			// Continue calculating one level up. Fill in nextPairFreq
+			// of that level with the sum of the two nodes we've just calculated on
+			// this level.
+			if l.level == maxBits {
+				// All done!
+				break
+			}
+			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+			level++
+		} else {
+			// If we stole from below, move down temporarily to replenish it.
+			for levels[level-1].needed > 0 {
+				level--
+			}
+		}
+	}
+
+	// Something is wrong if, at the end, the top level is null or hasn't
+	// used all of the leaves.
+	if leafCounts[maxBits][maxBits] != n {
+		panic("leafCounts[maxBits][maxBits] != n")
+	}
+
+	bitCount := h.bitCount[:maxBits+1]
+	bits := 1
+	counts := &leafCounts[maxBits]
+	for level := maxBits; level > 0; level-- {
+		// chain.leafCount gives the number of literals requiring at least "bits"
+		// bits to encode.
+		bitCount[bits] = counts[level] - counts[level-1]
+		bits++
+	}
+	return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+	code := uint16(0)
+	for n, bits := range bitCount {
+		code <<= 1
+		if n == 0 || bits == 0 {
+			continue
+		}
+		// The literals list[len(list)-bits] .. list[len(list)-1]
+		// are encoded using "bits" bits, and get the values
+		// code, code + 1, ....  The code values are
+		// assigned in literal order (not frequency order).
+		chunk := list[len(list)-int(bits):]
+
+		sortByLiteral(chunk)
+		for _, node := range chunk {
+			h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
+			code++
+		}
+		list = list[0 : len(list)-int(bits)]
+	}
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits  The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
+	list := h.freqcache[:len(freq)+1]
+	codes := h.codes[:len(freq)]
+	// Number of non-zero literals
+	count := 0
+	// Set list to be the set of all non-zero literals and their frequencies
+	for i, f := range freq {
+		if f != 0 {
+			list[count] = literalNode{uint16(i), f}
+			count++
+		} else {
+			codes[i] = 0
+		}
+	}
+	list[count] = literalNode{}
+
+	list = list[:count]
+	if count <= 2 {
+		// Handle the small cases here, because they are awkward for the general case code. With
+		// two or fewer literals, everything has bit length 1.
+		for i, node := range list {
+			// "list" is in order of increasing literal value.
+			h.codes[node.literal].set(uint16(i), 1)
+		}
+		return
+	}
+	sortByFreq(list)
+
+	// Get the number of literals for each bit count
+	bitCount := h.bitCounts(list, maxBits)
+	// And do the assignment
+	h.assignEncodingAndSize(bitCount, list)
+}
+
+// atLeastOne clamps the result between 1 and 15.
+func atLeastOne(v float32) float32 {
+	if v < 1 {
+		return 1
+	}
+	if v > 15 {
+		return 15
+	}
+	return v
+}
+
+func histogram(b []byte, h []uint16) {
+	if true && len(b) >= 8<<10 {
+		// Split for bigger inputs
+		histogramSplit(b, h)
+	} else {
+		h = h[:256]
+		for _, t := range b {
+			h[t]++
+		}
+	}
+}
+
+func histogramSplit(b []byte, h []uint16) {
+	// Tested, and slightly faster than 2-way.
+	// Writing to separate arrays and combining is also slightly slower.
+	h = h[:256]
+	for len(b)&3 != 0 {
+		h[b[0]]++
+		b = b[1:]
+	}
+	n := len(b) / 4
+	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+	for i, t := range x {
+		v0 := &h[t]
+		v1 := &h[y[i]]
+		v3 := &h[w[i]]
+		v2 := &h[z[i]]
+		*v0++
+		*v1++
+		*v2++
+		*v3++
+	}
+}
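+
+// Processing four interleaved streams per iteration gives the CPU
+// independent loads and increments to overlap, which is presumably why the
+// 4-way split tests faster than a plain sequential count on large inputs.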
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 0000000..6c05ba8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByFreq sorts data by frequency, breaking ties by literal value.
+// It performs O(n*log(n)) comparisons and swaps. The sort is not
+// guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+	n := len(data)
+	quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
+func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
+	for b-a > 12 { // Use ShellSort for slices <= 12 elements
+		if maxDepth == 0 {
+			heapSort(data, a, b)
+			return
+		}
+		maxDepth--
+		mlo, mhi := doPivotByFreq(data, a, b)
+		// Avoiding recursion on the larger subproblem guarantees
+		// a stack depth of at most lg(b-a).
+		if mlo-a < b-mhi {
+			quickSortByFreq(data, a, mlo, maxDepth)
+			a = mhi // i.e., quickSortByFreq(data, mhi, b)
+		} else {
+			quickSortByFreq(data, mhi, b, maxDepth)
+			b = mlo // i.e., quickSortByFreq(data, a, mlo)
+		}
+	}
+	if b-a > 1 {
+		// Do ShellSort pass with gap 6
+		// It could be written in this simplified form because b-a <= 12
+		for i := a + 6; i < b; i++ {
+			if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
+				data[i], data[i-6] = data[i-6], data[i]
+			}
+		}
+		insertionSortByFreq(data, a, b)
+	}
+}
+
+func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
+	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+	if hi-lo > 40 {
+		// Tukey's ``Ninther,'' median of three medians of three.
+		s := (hi - lo) / 8
+		medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
+		medianOfThreeSortByFreq(data, m, m-s, m+s)
+		medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
+	}
+	medianOfThreeSortByFreq(data, lo, m, hi-1)
+
+	// Invariants are:
+	//	data[lo] = pivot (set up by the median-of-three calls above)
+	//	data[lo < i < a] < pivot
+	//	data[a <= i < b] <= pivot
+	//	data[b <= i < c] unexamined
+	//	data[c <= i < hi-1] > pivot
+	//	data[hi-1] >= pivot
+	pivot := lo
+	a, c := lo+1, hi-1
+
+	for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
+	}
+	b := a
+	for {
+		for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
+		}
+		for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
+		}
+		if b >= c {
+			break
+		}
+		// data[b] > pivot; data[c-1] <= pivot
+		data[b], data[c-1] = data[c-1], data[b]
+		b++
+		c--
+	}
+	// If hi-c<3 then there are duplicates (by property of median of nine).
+	// Let's be a bit more conservative, and set border to 5.
+	protect := hi-c < 5
+	if !protect && hi-c < (hi-lo)/4 {
+		// Let's test some points for equality to pivot
+		dups := 0
+		if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
+			data[c], data[hi-1] = data[hi-1], data[c]
+			c++
+			dups++
+		}
+		if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
+			b--
+			dups++
+		}
+		// m-lo = (hi-lo)/2 > 6
+		// b-lo > (hi-lo)*3/4-1 > 8
+		// ==> m < b ==> data[m] <= pivot
+		if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
+			data[m], data[b-1] = data[b-1], data[m]
+			b--
+			dups++
+		}
+		// if at least 2 points are equal to pivot, assume skewed distribution
+		protect = dups > 1
+	}
+	if protect {
+		// Protect against a lot of duplicates
+		// Add invariant:
+		//	data[a <= i < b] unexamined
+		//	data[b <= i < c] = pivot
+		for {
+			for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
+			}
+			for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
+			}
+			if a >= b {
+				break
+			}
+			// data[a] == pivot; data[b-1] < pivot
+			data[a], data[b-1] = data[b-1], data[a]
+			a++
+			b--
+		}
+	}
+	// Swap pivot into middle
+	data[pivot], data[b-1] = data[b-1], data[pivot]
+	return b - 1, c
+}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+	// sort 3 elements
+	if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+		data[m1], data[m0] = data[m0], data[m1]
+	}
+	// data[m0] <= data[m1]
+	if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+		data[m2], data[m1] = data[m1], data[m2]
+		// data[m0] <= data[m2] && data[m1] < data[m2]
+		if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+			data[m1], data[m0] = data[m0], data[m1]
+		}
+	}
+	// now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 0000000..93f1aea
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByLiteral sorts data by literal value.
+// It performs O(n*log(n)) comparisons and swaps. The sort is not
+// guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+	n := len(data)
+	quickSort(data, 0, n, maxDepth(n))
+}
+
+func quickSort(data []literalNode, a, b, maxDepth int) {
+	for b-a > 12 { // Use ShellSort for slices <= 12 elements
+		if maxDepth == 0 {
+			heapSort(data, a, b)
+			return
+		}
+		maxDepth--
+		mlo, mhi := doPivot(data, a, b)
+		// Avoiding recursion on the larger subproblem guarantees
+		// a stack depth of at most lg(b-a).
+		if mlo-a < b-mhi {
+			quickSort(data, a, mlo, maxDepth)
+			a = mhi // i.e., quickSort(data, mhi, b)
+		} else {
+			quickSort(data, mhi, b, maxDepth)
+			b = mlo // i.e., quickSort(data, a, mlo)
+		}
+	}
+	if b-a > 1 {
+		// Do ShellSort pass with gap 6
+		// It could be written in this simplified form because b-a <= 12
+		for i := a + 6; i < b; i++ {
+			if data[i].literal < data[i-6].literal {
+				data[i], data[i-6] = data[i-6], data[i]
+			}
+		}
+		insertionSort(data, a, b)
+	}
+}
+func heapSort(data []literalNode, a, b int) {
+	first := a
+	lo := 0
+	hi := b - a
+
+	// Build heap with greatest element at top.
+	for i := (hi - 1) / 2; i >= 0; i-- {
+		siftDown(data, i, hi, first)
+	}
+
+	// Pop elements, largest first, into end of data.
+	for i := hi - 1; i >= 0; i-- {
+		data[first], data[first+i] = data[first+i], data[first]
+		siftDown(data, lo, i, first)
+	}
+}
+
+// siftDown implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data []literalNode, lo, hi, first int) {
+	root := lo
+	for {
+		child := 2*root + 1
+		if child >= hi {
+			break
+		}
+		if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
+			child++
+		}
+		if data[first+root].literal > data[first+child].literal {
+			return
+		}
+		data[first+root], data[first+child] = data[first+child], data[first+root]
+		root = child
+	}
+}
+func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
+	m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+	if hi-lo > 40 {
+		// Tukey's ``Ninther,'' median of three medians of three.
+		s := (hi - lo) / 8
+		medianOfThree(data, lo, lo+s, lo+2*s)
+		medianOfThree(data, m, m-s, m+s)
+		medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
+	}
+	medianOfThree(data, lo, m, hi-1)
+
+	// Invariants are:
+	//	data[lo] = pivot (set up by the median-of-three calls above)
+	//	data[lo < i < a] < pivot
+	//	data[a <= i < b] <= pivot
+	//	data[b <= i < c] unexamined
+	//	data[c <= i < hi-1] > pivot
+	//	data[hi-1] >= pivot
+	pivot := lo
+	a, c := lo+1, hi-1
+
+	for ; a < c && data[a].literal < data[pivot].literal; a++ {
+	}
+	b := a
+	for {
+		for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
+		}
+		for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
+		}
+		if b >= c {
+			break
+		}
+		// data[b] > pivot; data[c-1] <= pivot
+		data[b], data[c-1] = data[c-1], data[b]
+		b++
+		c--
+	}
+	// If hi-c<3 then there are duplicates (by property of median of nine).
+	// Let's be a bit more conservative, and set border to 5.
+	protect := hi-c < 5
+	if !protect && hi-c < (hi-lo)/4 {
+		// Let's test some points for equality to pivot
+		dups := 0
+		if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
+			data[c], data[hi-1] = data[hi-1], data[c]
+			c++
+			dups++
+		}
+		if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
+			b--
+			dups++
+		}
+		// m-lo = (hi-lo)/2 > 6
+		// b-lo > (hi-lo)*3/4-1 > 8
+		// ==> m < b ==> data[m] <= pivot
+		if data[m].literal > data[pivot].literal { // data[m] = pivot
+			data[m], data[b-1] = data[b-1], data[m]
+			b--
+			dups++
+		}
+		// if at least 2 points are equal to pivot, assume skewed distribution
+		protect = dups > 1
+	}
+	if protect {
+		// Protect against a lot of duplicates
+		// Add invariant:
+		//	data[a <= i < b] unexamined
+		//	data[b <= i < c] = pivot
+		for {
+			for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
+			}
+			for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
+			}
+			if a >= b {
+				break
+			}
+			// data[a] == pivot; data[b-1] < pivot
+			data[a], data[b-1] = data[b-1], data[a]
+			a++
+			b--
+		}
+	}
+	// Swap pivot into middle
+	data[pivot], data[b-1] = data[b-1], data[pivot]
+	return b - 1, c
+}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+	for i := a + 1; i < b; i++ {
+		for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+			data[j], data[j-1] = data[j-1], data[j]
+		}
+	}
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+	var depth int
+	for i := n; i > 0; i >>= 1 {
+		depth++
+	}
+	return depth * 2
+}
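+
+// For example, maxDepth(100) loops seven times (100, 50, 25, 12, 6, 3, 1)
+// and returns 14: quicksort on 100 elements only falls back to heapsort
+// after 14 levels of consistently bad pivots.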
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+	// sort 3 elements
+	if data[m1].literal < data[m0].literal {
+		data[m1], data[m0] = data[m0], data[m1]
+	}
+	// data[m0] <= data[m1]
+	if data[m2].literal < data[m1].literal {
+		data[m2], data[m1] = data[m1], data[m2]
+		// data[m0] <= data[m2] && data[m1] < data[m2]
+		if data[m1].literal < data[m0].literal {
+			data[m1], data[m0] = data[m0], data[m1]
+		}
+	}
+	// now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 0000000..0d7b437
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,865 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951.  The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+	"bufio"
+	"compress/flate"
+	"fmt"
+	"io"
+	"math/bits"
+	"sync"
+)
+
+const (
+	maxCodeLen     = 16 // max length of Huffman code
+	maxCodeLenMask = 15 // mask for max length of Huffman code
+	// The next three numbers come from the RFC section 3.2.7, with the
+	// additional proviso in section 3.2.5 which implies that distance codes
+	// 30 and 31 should never occur in compressed data.
+	maxNumLit  = 286
+	maxNumDist = 30
+	numCodes   = 19 // number of codes in Huffman meta-code
+
+	debugDecode = false
+)
+
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+	length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{{length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0}, {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0}, {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1}, {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2}, {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3}, {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4}, {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5}, {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}}
+
+var bitMask32 = [32]uint32{
+	0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+	0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+	0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+	0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
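+
+// bitMask32[n] is (1<<n)-1, so for example bitMask32[5] == 0x1F keeps the
+// low five bits of the bit buffer.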
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+	// Reset discards any buffered data and resets the Resetter as if it was
+	// newly initialized with the given reader.
+	Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+//	http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+	huffmanChunkBits  = 9
+	huffmanNumChunks  = 1 << huffmanChunkBits
+	huffmanCountMask  = 15
+	huffmanValueShift = 4
+)
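+
+// For example, a symbol with value 65 and a 7-bit code is stored as the
+// chunk 65<<huffmanValueShift | 7, so that:
+//
+//	chunk & huffmanCountMask   // 7, bits consumed
+//	chunk >> huffmanValueShift // 65, decoded value (or link-table index)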
+
+type huffmanDecoder struct {
+	maxRead  int                       // the maximum number of bits we can read and not overread
+	chunks   *[huffmanNumChunks]uint16 // chunks as described above
+	links    [][]uint16                // overflow links
+	linkMask uint32                    // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from an array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+	// Sanity enables additional runtime tests during Huffman
+	// table construction. It's intended to be used during
+	// development to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if h.chunks == nil {
+		h.chunks = new([huffmanNumChunks]uint16)
+	}
+
+	if h.maxRead != 0 {
+		*h = huffmanDecoder{chunks: h.chunks, links: h.links}
+	}
+
+	// Count number of codes of each length,
+	// compute maxRead and max length.
+	var count [maxCodeLen]int
+	var min, max int
+	for _, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		if min == 0 || n < min {
+			min = n
+		}
+		if n > max {
+			max = n
+		}
+		count[n&maxCodeLenMask]++
+	}
+
+	// Empty tree. The decompressor.huffSym function will fail later if the tree
+	// is used. Technically, an empty tree is only valid for the HDIST tree and
+	// not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+	// is guaranteed to fail since it will attempt to use the tree to decode the
+	// codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+	// guaranteed to fail later since the compressed data section must be
+	// composed of at least one symbol (the end-of-block marker).
+	if max == 0 {
+		return true
+	}
+
+	code := 0
+	var nextcode [maxCodeLen]int
+	for i := min; i <= max; i++ {
+		code <<= 1
+		nextcode[i&maxCodeLenMask] = code
+		code += count[i&maxCodeLenMask]
+	}
+
+	// Check that the coding is complete (i.e., that we've
+	// assigned all 2-to-the-max possible bit sequences).
+	// Exception: To be compatible with zlib, we also need to
+	// accept degenerate single-code codings. See also
+	// TestDegenerateHuffmanCoding.
+	if code != 1<<uint(max) && !(code == 1 && max == 1) {
+		if debugDecode {
+			fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
+		}
+		return false
+	}
+
+	h.maxRead = min
+
+	chunks := h.chunks[:]
+	for i := range chunks {
+		chunks[i] = 0
+	}
+
+	if max > huffmanChunkBits {
+		numLinks := 1 << (uint(max) - huffmanChunkBits)
+		h.linkMask = uint32(numLinks - 1)
+
+		// create link tables
+		link := nextcode[huffmanChunkBits+1] >> 1
+		if cap(h.links) < huffmanNumChunks-link {
+			h.links = make([][]uint16, huffmanNumChunks-link)
+		} else {
+			h.links = h.links[:huffmanNumChunks-link]
+		}
+		for j := uint(link); j < huffmanNumChunks; j++ {
+			reverse := int(bits.Reverse16(uint16(j)))
+			reverse >>= uint(16 - huffmanChunkBits)
+			off := j - uint(link)
+			if sanity && h.chunks[reverse] != 0 {
+				panic("impossible: overwriting existing chunk")
+			}
+			h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+			if cap(h.links[off]) < numLinks {
+				h.links[off] = make([]uint16, numLinks)
+			} else {
+				h.links[off] = h.links[off][:numLinks]
+			}
+		}
+	} else {
+		h.links = h.links[:0]
+	}
+
+	for i, n := range lengths {
+		if n == 0 {
+			continue
+		}
+		code := nextcode[n]
+		nextcode[n]++
+		chunk := uint16(i<<huffmanValueShift | n)
+		reverse := int(bits.Reverse16(uint16(code)))
+		reverse >>= uint(16 - n)
+		if n <= huffmanChunkBits {
+			for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+				// We should never need to overwrite
+				// an existing chunk. Also, 0 is
+				// never a valid chunk, because the
+				// lower 4 "count" bits should be
+				// between 1 and 15.
+				if sanity && h.chunks[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				h.chunks[off] = chunk
+			}
+		} else {
+			j := reverse & (huffmanNumChunks - 1)
+			if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+				// Longer codes should have been
+				// associated with a link table above.
+				panic("impossible: not an indirect chunk")
+			}
+			value := h.chunks[j] >> huffmanValueShift
+			linktab := h.links[value]
+			reverse >>= huffmanChunkBits
+			for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+				if sanity && linktab[off] != 0 {
+					panic("impossible: overwriting existing chunk")
+				}
+				linktab[off] = chunk
+			}
+		}
+	}
+
+	if sanity {
+		// Above we've sanity checked that we never overwrote
+		// an existing entry. Here we additionally check that
+		// we filled the tables completely.
+		for i, chunk := range h.chunks {
+			if chunk == 0 {
+				// As an exception, in the degenerate
+				// single-code case, we allow odd
+				// chunks to be missing.
+				if code == 1 && i%2 == 1 {
+					continue
+				}
+				panic("impossible: missing chunk")
+			}
+		}
+		for _, linktab := range h.links {
+			for _, chunk := range linktab {
+				if chunk == 0 {
+					panic("impossible: missing chunk")
+				}
+			}
+		}
+	}
+
+	return true
+}
+
+// Reader is the actual read interface needed by NewReader.
+// If the passed-in io.Reader does not also have ReadByte,
+// NewReader will introduce its own buffering.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+type step uint8
+
+const (
+	copyData step = iota + 1
+	nextBlock
+	huffmanBytesBuffer
+	huffmanBytesReader
+	huffmanBufioReader
+	huffmanStringsReader
+	huffmanGenericReader
+)
+
+// flushMode tells the decompressor when to return data.
+type flushMode uint8
+
+const (
+	syncFlush    flushMode = iota // return data after sync flush block
+	partialFlush                  // return data after each block
+)
+
+// Decompress state.
+type decompressor struct {
+	// Input source.
+	r       Reader
+	roffset int64
+
+	// Huffman decoders for literal/length, distance.
+	h1, h2 huffmanDecoder
+
+	// Length arrays used to define Huffman codes.
+	bits     *[maxNumLit + maxNumDist]int
+	codebits *[numCodes]int
+
+	// Output history, buffer.
+	dict dictDecoder
+
+	// Next step in the decompression,
+	// and decompression state.
+	step      step
+	stepState int
+	err       error
+	toRead    []byte
+	hl, hd    *huffmanDecoder
+	copyLen   int
+	copyDist  int
+
+	// Temporary buffer (avoids repeated allocation).
+	buf [4]byte
+
+	// Input bits, in top of b.
+	b uint32
+
+	nb    uint
+	final bool
+
+	flushMode flushMode
+}
+
+func (f *decompressor) nextBlock() {
+	for f.nb < 1+2 {
+		if f.err = f.moreBits(); f.err != nil {
+			return
+		}
+	}
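+	// The 3-bit block header: bit 0 is BFINAL, bits 1-2 are BTYPE
+	// (RFC 1951 section 3.2.3).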
+	f.final = f.b&1 == 1
+	f.b >>= 1
+	typ := f.b & 3
+	f.b >>= 2
+	f.nb -= 1 + 2
+	switch typ {
+	case 0:
+		f.dataBlock()
+		if debugDecode {
+			fmt.Println("stored block")
+		}
+	case 1:
+		// compressed, fixed Huffman tables
+		f.hl = &fixedHuffmanDecoder
+		f.hd = nil
+		f.huffmanBlockDecoder()
+		if debugDecode {
+			fmt.Println("predefined huffman block")
+		}
+	case 2:
+		// compressed, dynamic Huffman tables
+		if f.err = f.readHuffman(); f.err != nil {
+			break
+		}
+		f.hl = &f.h1
+		f.hd = &f.h2
+		f.huffmanBlockDecoder()
+		if debugDecode {
+			fmt.Println("dynamic huffman block")
+		}
+	default:
+		// 3 is reserved.
+		if debugDecode {
+			fmt.Println("reserved data block encountered")
+		}
+		f.err = CorruptInputError(f.roffset)
+	}
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+	for {
+		if len(f.toRead) > 0 {
+			n := copy(b, f.toRead)
+			f.toRead = f.toRead[n:]
+			if len(f.toRead) == 0 {
+				return n, f.err
+			}
+			return n, nil
+		}
+		if f.err != nil {
+			return 0, f.err
+		}
+
+		f.doStep()
+
+		if f.err != nil && len(f.toRead) == 0 {
+			f.toRead = f.dict.readFlush() // Flush what's left in case of error
+		}
+	}
+}
+
+// WriteTo implements the io.WriterTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	flushed := false
+	for {
+		if len(f.toRead) > 0 {
+			n, err := w.Write(f.toRead)
+			total += int64(n)
+			if err != nil {
+				f.err = err
+				return total, err
+			}
+			if n != len(f.toRead) {
+				return total, io.ErrShortWrite
+			}
+			f.toRead = f.toRead[:0]
+		}
+		if f.err != nil && flushed {
+			if f.err == io.EOF {
+				return total, nil
+			}
+			return total, f.err
+		}
+		if f.err == nil {
+			f.doStep()
+		}
+		if len(f.toRead) == 0 && f.err != nil && !flushed {
+			f.toRead = f.dict.readFlush() // Flush what's left in case of error
+			flushed = true
+		}
+	}
+}
+
+func (f *decompressor) Close() error {
+	if f.err == io.EOF {
+		return nil
+	}
+	return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
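+// codeOrder is the fixed order in which the code-length code lengths
+// are transmitted (RFC 1951 section 3.2.7).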
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+	// HLIT[5], HDIST[5], HCLEN[4].
+	for f.nb < 5+5+4 {
+		if err := f.moreBits(); err != nil {
+			return err
+		}
+	}
+	nlit := int(f.b&0x1F) + 257
+	if nlit > maxNumLit {
+		if debugDecode {
+			fmt.Println("nlit > maxNumLit", nlit)
+		}
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	ndist := int(f.b&0x1F) + 1
+	if ndist > maxNumDist {
+		if debugDecode {
+			fmt.Println("ndist > maxNumDist", ndist)
+		}
+		return CorruptInputError(f.roffset)
+	}
+	f.b >>= 5
+	nclen := int(f.b&0xF) + 4
+	// numCodes is 19, so nclen is always valid.
+	f.b >>= 4
+	f.nb -= 5 + 5 + 4
+
+	// (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+	for i := 0; i < nclen; i++ {
+		for f.nb < 3 {
+			if err := f.moreBits(); err != nil {
+				return err
+			}
+		}
+		f.codebits[codeOrder[i]] = int(f.b & 0x7)
+		f.b >>= 3
+		f.nb -= 3
+	}
+	for i := nclen; i < len(codeOrder); i++ {
+		f.codebits[codeOrder[i]] = 0
+	}
+	if !f.h1.init(f.codebits[0:]) {
+		if debugDecode {
+			fmt.Println("init codebits failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// HLIT + 257 code lengths, HDIST + 1 code lengths,
+	// using the code length Huffman code.
+	for i, n := 0, nlit+ndist; i < n; {
+		x, err := f.huffSym(&f.h1)
+		if err != nil {
+			return err
+		}
+		if x < 16 {
+			// Actual length.
+			f.bits[i] = x
+			i++
+			continue
+		}
+		// Repeat previous length or zero.
+		var rep int
+		var nb uint
+		var b int
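+		// Code 16 repeats the previous length 3-6 times (2 extra bits),
+		// code 17 repeats a zero length 3-10 times (3 extra bits), and
+		// code 18 repeats a zero length 11-138 times (7 extra bits).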
+		switch x {
+		default:
+			return InternalError("unexpected length code")
+		case 16:
+			rep = 3
+			nb = 2
+			if i == 0 {
+				if debugDecode {
+					fmt.Println("i==0")
+				}
+				return CorruptInputError(f.roffset)
+			}
+			b = f.bits[i-1]
+		case 17:
+			rep = 3
+			nb = 3
+			b = 0
+		case 18:
+			rep = 11
+			nb = 7
+			b = 0
+		}
+		for f.nb < nb {
+			if err := f.moreBits(); err != nil {
+				if debugDecode {
+					fmt.Println("morebits:", err)
+				}
+				return err
+			}
+		}
+		rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+		f.b >>= nb & regSizeMaskUint32
+		f.nb -= nb
+		if i+rep > n {
+			if debugDecode {
+				fmt.Println("i+rep > n", i, rep, n)
+			}
+			return CorruptInputError(f.roffset)
+		}
+		for j := 0; j < rep; j++ {
+			f.bits[i] = b
+			i++
+		}
+	}
+
+	if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+		if debugDecode {
+			fmt.Println("init2 failed")
+		}
+		return CorruptInputError(f.roffset)
+	}
+
+	// As an optimization, we can raise maxRead (the number of bits to read
+	// at a time for the HLIT tree) to at least the length of the EOB marker,
+	// since we know every block must terminate with one. This preserves the
+	// property that we never read any extra bytes after the end of the
+	// DEFLATE stream.
+	if f.h1.maxRead < f.bits[endBlockMarker] {
+		f.h1.maxRead = f.bits[endBlockMarker]
+	}
+	if !f.final {
+		// If not the final block, the smallest block possible is
+		// a predefined table, BTYPE=01, with a single EOB marker.
+		// This will take up 3 + 7 bits.
+		f.h1.maxRead += 10
+	}
+
+	return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+	// Uncompressed.
+	// Discard any partial byte: stored blocks start on a byte boundary.
+	left := (f.nb) & 7
+	f.nb -= left
+	f.b >>= left
+
+	offBytes := f.nb >> 3
+	// Unfilled values will be overwritten.
+	f.buf[0] = uint8(f.b)
+	f.buf[1] = uint8(f.b >> 8)
+	f.buf[2] = uint8(f.b >> 16)
+	f.buf[3] = uint8(f.b >> 24)
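+	// Whole bytes already held in the bit buffer become the first
+	// offBytes bytes of the 4-byte LEN/NLEN header; the rest is read
+	// from the underlying reader below.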
+
+	f.roffset += int64(offBytes)
+	f.nb, f.b = 0, 0
+
+	// Length then ones-complement of length.
+	nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+	f.roffset += int64(nr)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+	n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+	nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+	if nn != ^n {
+		if debugDecode {
+			ncomp := ^n
+			fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+		}
+		f.err = CorruptInputError(f.roffset)
+		return
+	}
+
+	if n == 0 {
+		if f.flushMode == syncFlush {
+			f.toRead = f.dict.readFlush()
+		}
+
+		f.finishBlock()
+		return
+	}
+
+	f.copyLen = int(n)
+	f.copyData()
+}
+
+// copyData copies f.copyLen bytes from the underlying reader into the
+// dictionary window (f.dict). It pauses for reads when the window is full.
+func (f *decompressor) copyData() {
+	buf := f.dict.writeSlice()
+	if len(buf) > f.copyLen {
+		buf = buf[:f.copyLen]
+	}
+
+	cnt, err := io.ReadFull(f.r, buf)
+	f.roffset += int64(cnt)
+	f.copyLen -= cnt
+	f.dict.writeMark(cnt)
+	if err != nil {
+		f.err = noEOF(err)
+		return
+	}
+
+	if f.dict.availWrite() == 0 || f.copyLen > 0 {
+		f.toRead = f.dict.readFlush()
+		f.step = copyData
+		return
+	}
+	f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+	if f.final {
+		if f.dict.availRead() > 0 {
+			f.toRead = f.dict.readFlush()
+		}
+
+		f.err = io.EOF
+	} else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
+		f.toRead = f.dict.readFlush()
+	}
+
+	f.step = nextBlock
+}
+
+func (f *decompressor) doStep() {
+	switch f.step {
+	case copyData:
+		f.copyData()
+	case nextBlock:
+		f.nextBlock()
+	case huffmanBytesBuffer:
+		f.huffmanBytesBuffer()
+	case huffmanBytesReader:
+		f.huffmanBytesReader()
+	case huffmanBufioReader:
+		f.huffmanBufioReader()
+	case huffmanStringsReader:
+		f.huffmanStringsReader()
+	case huffmanGenericReader:
+		f.huffmanGenericReader()
+	default:
+		panic("BUG: unexpected step state")
+	}
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+	if e == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return e
+}
+
+func (f *decompressor) moreBits() error {
+	c, err := f.r.ReadByte()
+	if err != nil {
+		return noEOF(err)
+	}
+	f.roffset++
+	f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
+	f.nb += 8
+	return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+	// with a single element, huffSym must error on these two edge cases. In both
+	// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+	// satisfy the n == 0 check below.
+	n := uint(h.maxRead)
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	nb, b := f.nb, f.b
+	for {
+		for nb < n {
+			c, err := f.r.ReadByte()
+			if err != nil {
+				f.b = b
+				f.nb = nb
+				return 0, noEOF(err)
+			}
+			f.roffset++
+			b |= uint32(c) << (nb & regSizeMaskUint32)
+			nb += 8
+		}
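+		// Two-level table lookup: the low huffmanChunkBits bits of the bit
+		// buffer index chunks directly; longer codes read a second-level
+		// links table selected by the chunk's value bits.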
+		chunk := h.chunks[b&(huffmanNumChunks-1)]
+		n = uint(chunk & huffmanCountMask)
+		if n > huffmanChunkBits {
+			chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+			n = uint(chunk & huffmanCountMask)
+		}
+		if n <= nb {
+			if n == 0 {
+				f.b = b
+				f.nb = nb
+				if debugDecode {
+					fmt.Println("huffsym: n==0")
+				}
+				f.err = CorruptInputError(f.roffset)
+				return 0, f.err
+			}
+			f.b = b >> (n & regSizeMaskUint32)
+			f.nb = nb - n
+			return int(chunk >> huffmanValueShift), nil
+		}
+	}
+}
+
+func makeReader(r io.Reader) Reader {
+	if rr, ok := r.(Reader); ok {
+		return rr
+	}
+	return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+	fixedOnce.Do(func() {
+		// These come from RFC 1951 section 3.2.6.
+		var bits [288]int
+		for i := 0; i < 144; i++ {
+			bits[i] = 8
+		}
+		for i := 144; i < 256; i++ {
+			bits[i] = 9
+		}
+		for i := 256; i < 280; i++ {
+			bits[i] = 7
+		}
+		for i := 280; i < 288; i++ {
+			bits[i] = 8
+		}
+		fixedHuffmanDecoder.init(bits[:])
+	})
+}
+
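+// Reset discards the decompressor's state and re-initializes it to
+// read from r, priming the history window with dict (which may be nil).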
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+	*f = decompressor{
+		r:        makeReader(r),
+		bits:     f.bits,
+		codebits: f.codebits,
+		h1:       f.h1,
+		h2:       f.h2,
+		dict:     f.dict,
+		step:     nextBlock,
+	}
+	f.dict.init(maxMatchOffset, dict)
+	return nil
+}
+
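+// A ReaderOpt configures the decompressor returned by NewReaderOpts.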
+type ReaderOpt func(*decompressor)
+
+// WithPartialBlock tells the decompressor to return after each block,
+// so it can read data written with partial flushes.
+func WithPartialBlock() ReaderOpt {
+	return func(f *decompressor) {
+		f.flushMode = partialFlush
+	}
+}
+
+// WithDict initializes the reader with a preset dictionary.
+func WithDict(dict []byte) ReaderOpt {
+	return func(f *decompressor) {
+		f.dict.init(maxMatchOffset, dict)
+	}
+}
+
+// NewReaderOpts returns a new reader with the provided options applied.
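+//
+// For example, to read data written with partial flushes and a preset
+// dictionary (sketch; src and dict are placeholders):
+//
+//	r := NewReaderOpts(src, WithPartialBlock(), WithDict(dict))
+//	defer r.Close()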
+func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
+	fixedHuffmanDecoderInit()
+
+	var f decompressor
+	f.r = makeReader(r)
+	f.bits = new([maxNumLit + maxNumDist]int)
+	f.codebits = new([numCodes]int)
+	f.step = nextBlock
+	f.dict.init(maxMatchOffset, nil)
+
+	for _, opt := range opts {
+		opt(&f)
+	}
+
+	return &f
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+	return NewReaderOpts(r)
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReaderDict also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+	return NewReaderOpts(r, WithDict(dict))
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 0000000..2b2f993
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,1283 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"math/bits"
+	"strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBytesBuffer() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Buffer)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanBytesBuffer
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
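+			// Fixed distance codes are exactly 5 bits, packed MSB-first:
+			// shift the low 5 bits to the top of a byte and reverse them.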
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesBuffer // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBytesReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bytes.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanBytesReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBytesReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBufioReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*bufio.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanBufioReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanBufioReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanStringsReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(*strings.Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanStringsReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanStringsReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanGenericReader() {
+	const (
+		stateInit = iota // Zero value must be stateInit
+		stateDict
+	)
+	fr := f.r.(Reader)
+
+	// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+	// but is smart enough to keep local variables in registers, so use nb and b,
+	// inline call to moreBits and reassign b,nb back to f on return.
+	fnb, fb, dict := f.nb, f.b, &f.dict
+
+	switch f.stepState {
+	case stateInit:
+		goto readLiteral
+	case stateDict:
+		goto copyHistory
+	}
+
+readLiteral:
+	// Read literal and/or (length, distance) according to RFC section 3.2.3.
+	{
+		var v int
+		{
+			// Inlined v, err := f.huffSym(f.hl)
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hl.maxRead)
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					v = int(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		var length int
+		switch {
+		case v < 256:
+			dict.writeByte(byte(v))
+			if dict.availWrite() == 0 {
+				f.toRead = dict.readFlush()
+				f.step = huffmanGenericReader
+				f.stepState = stateInit
+				f.b, f.nb = fb, fnb
+				return
+			}
+			goto readLiteral
+		case v == 256:
+			f.b, f.nb = fb, fnb
+			f.finishBlock()
+			return
+		// otherwise, reference to older data
+		case v < 265:
+			length = v - (257 - 3)
+		case v < maxNumLit:
+			val := decCodeToLen[(v - 257)]
+			length = int(val.length) + 3
+			n := uint(val.extra)
+			for fnb < n {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits n>0:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			length += int(fb & bitMask32[n])
+			fb >>= n & regSizeMaskUint32
+			fnb -= n
+		default:
+			if debugDecode {
+				fmt.Println(v, ">= maxNumLit")
+			}
+			f.err = CorruptInputError(f.roffset)
+			f.b, f.nb = fb, fnb
+			return
+		}
+
+		var dist uint32
+		if f.hd == nil {
+			for fnb < 5 {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<5:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+			fb >>= 5
+			fnb -= 5
+		} else {
+			// Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it to
+			// satisfy the n == 0 check below.
+			n := uint(f.hd.maxRead)
+			// Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+			// but is smart enough to keep local variables in registers, so use nb and b,
+			// inline call to moreBits and reassign b,nb back to f on return.
+			for {
+				for fnb < n {
+					c, err := fr.ReadByte()
+					if err != nil {
+						f.b, f.nb = fb, fnb
+						f.err = noEOF(err)
+						return
+					}
+					f.roffset++
+					fb |= uint32(c) << (fnb & regSizeMaskUint32)
+					fnb += 8
+				}
+				chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+				n = uint(chunk & huffmanCountMask)
+				if n > huffmanChunkBits {
+					chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+					n = uint(chunk & huffmanCountMask)
+				}
+				if n <= fnb {
+					if n == 0 {
+						f.b, f.nb = fb, fnb
+						if debugDecode {
+							fmt.Println("huffsym: n==0")
+						}
+						f.err = CorruptInputError(f.roffset)
+						return
+					}
+					fb = fb >> (n & regSizeMaskUint32)
+					fnb = fnb - n
+					dist = uint32(chunk >> huffmanValueShift)
+					break
+				}
+			}
+		}
+
+		switch {
+		case dist < 4:
+			dist++
+		case dist < maxNumDist:
+			nb := uint(dist-2) >> 1
+			// have 1 bit in bottom of dist, need nb more.
+			extra := (dist & 1) << (nb & regSizeMaskUint32)
+			for fnb < nb {
+				c, err := fr.ReadByte()
+				if err != nil {
+					f.b, f.nb = fb, fnb
+					if debugDecode {
+						fmt.Println("morebits f.nb<nb:", err)
+					}
+					f.err = err
+					return
+				}
+				f.roffset++
+				fb |= uint32(c) << (fnb & regSizeMaskUint32)
+				fnb += 8
+			}
+			extra |= fb & bitMask32[nb]
+			fb >>= nb & regSizeMaskUint32
+			fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+			// slower: dist = bitMask32[nb+1] + 2 + extra
+		default:
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist too big:", dist, maxNumDist)
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		// No check on length; encoding can be prescient.
+		if dist > uint32(dict.histSize()) {
+			f.b, f.nb = fb, fnb
+			if debugDecode {
+				fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+			}
+			f.err = CorruptInputError(f.roffset)
+			return
+		}
+
+		f.copyLen, f.copyDist = length, int(dist)
+		goto copyHistory
+	}
+
+copyHistory:
+	// Perform a backwards copy according to RFC section 3.2.3.
+	{
+		cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+		if cnt == 0 {
+			cnt = dict.writeCopy(f.copyDist, f.copyLen)
+		}
+		f.copyLen -= cnt
+
+		if dict.availWrite() == 0 || f.copyLen > 0 {
+			f.toRead = dict.readFlush()
+			f.step = huffmanGenericReader // We need to continue this work
+			f.stepState = stateDict
+			f.b, f.nb = fb, fnb
+			return
+		}
+		goto readLiteral
+	}
+	// Not reached
+}
+
+func (f *decompressor) huffmanBlockDecoder() {
+	switch f.r.(type) {
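+	// f.r always satisfies Reader, so the Reader case (and the default,
+	// kept for safety) falls back to the generic decoder when no
+	// specialized concrete type matches.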
+	case *bytes.Buffer:
+		f.huffmanBytesBuffer()
+	case *bytes.Reader:
+		f.huffmanBytesReader()
+	case *bufio.Reader:
+		f.huffmanBufioReader()
+	case *strings.Reader:
+		f.huffmanStringsReader()
+	case Reader:
+		f.huffmanGenericReader()
+	default:
+		f.huffmanGenericReader()
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 0000000..c3581a3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,215 @@
+package flate
+
+import (
+	"fmt"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// fastEncL1 is the level 1 encoder: fastGen maintains the previous
+// byte block, and table holds one candidate per hash.
+// This is the generic implementation.
+type fastEncL1 struct {
+	fastGen
+	table [tableSize]tableEntry
+}
+
+// Encode encodes src into dst using the level 1 algorithm.
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
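+		// Skip heuristic: advance doEvery bytes per probe, plus one extra
+		// byte for every 1<<skipLog bytes of unmatched input, so
+		// incompressible data is scanned quickly.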
+
+		nextS := s
+		var candidate tableEntry
+		var t int32
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			candidate = e.table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, tableBits, hashBytes)
+			t = candidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			t = candidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+			cv = now
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) {
+				s--
+				t--
+				l++
+			}
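+			// Emit pending literals. The dead if-false branch keeps the
+			// readable emitLiteral form next to its manual inline, which
+			// also updates the literal histogram.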
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			if false {
+				dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			} else {
+				// Inlined...
+				xoffset := uint32(s - t - baseMatchOffset)
+				xlength := l
+				oc := offsetCode(xoffset)
+				xoffset |= oc << 16
+				for xlength > 0 {
+					xl := xlength
+					if xl > 258 {
+						if xl > 258+baseMatchLength {
+							xl = 258
+						} else {
+							xl = 258 - baseMatchLength
+						}
+					}
+					xlength -= xl
+					xl -= baseMatchLength
+					dst.extraHist[lengthCodes1[uint8(xl)]]++
+					dst.offHist[oc]++
+					dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+					dst.n++
+				}
+			}
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load6432(src, s-2)
+			o := e.cur + s - 2
+			prevHash := hashLen(x, tableBits, hashBytes)
+			e.table[prevHash] = tableEntry{offset: o}
+			x >>= 16
+			currHash := hashLen(x, tableBits, hashBytes)
+			candidate = e.table[currHash]
+			e.table[currHash] = tableEntry{offset: o + 2}
+
+			t = candidate.offset - e.cur
+			if s-t > maxMatchOffset || uint32(x) != load3232(src, t) {
+				cv = x >> 8
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 0000000..c8d047f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,214 @@
+package flate
+
+import "fmt"
+
+// fastEncL2 is the level 2 encoder: fastGen maintains the previous
+// byte block, and table holds one candidate per hash.
+// This is the generic implementation.
+type fastEncL2 struct {
+	fastGen
+	table [bTableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable of matching
+// across blocks, giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashBytes              = 5
+	)
+
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		// When should we start skipping if we haven't found matches in a long while.
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, bTableBits, hashBytes)
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = e.table[nextHash]
+			now := load6432(src, nextS)
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+			nextHash = hashLen(now, bTableBits, hashBytes)
+
+			offset := s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+				break
+			}
+
+			// Do one right away...
+			cv = now
+			s = nextS
+			nextS++
+			candidate = e.table[nextHash]
+			now >>= 8
+			e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+			offset = s - (candidate.offset - e.cur)
+			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				break
+			}
+			cv = now
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset - e.cur
+			l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+
+			if s >= sLimit {
+				// Index first pair after match end.
+				if int(s+l+8) < len(src) {
+					cv := load6432(src, s)
+					e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+				}
+				goto emitRemainder
+			}
+
+			// Store hashes at every second position in-between
+			// (three per seven-byte stride).
+			for i := s - l + 2; i < s-5; i += 7 {
+				x := load6432(src, i)
+				nextHash := hashLen(x, bTableBits, hashBytes)
+				e.table[nextHash] = tableEntry{offset: e.cur + i}
+				// Skip one
+				x >>= 16
+				nextHash = hashLen(x, bTableBits, hashBytes)
+				e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
+				// Skip one
+				x >>= 16
+				nextHash = hashLen(x, bTableBits, hashBytes)
+				e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 to s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load6432(src, s-2)
+			o := e.cur + s - 2
+			prevHash := hashLen(x, bTableBits, hashBytes)
+			prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
+			e.table[prevHash] = tableEntry{offset: o}
+			e.table[prevHash2] = tableEntry{offset: o + 1}
+			currHash := hashLen(x>>16, bTableBits, hashBytes)
+			candidate = e.table[currHash]
+			e.table[currHash] = tableEntry{offset: o + 2}
+
+			offset := s - (candidate.offset - e.cur)
+			if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
+				cv = x >> 24
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
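
The search loops above advance with `nextS = s + doEvery + (s-nextEmit)>>skipLog`, so the stride grows as more bytes go by without a match and incompressible input is skipped progressively faster. A minimal, self-contained sketch of how that stride behaves (the standalone program and function name are illustrative, not part of the package):

```go
package main

import "fmt"

// skipStride mirrors the advance rule used in the encoder's search loops:
// the step starts at doEvery and grows by one for every 2^skipLog bytes
// scanned since the last emitted literal/match.
func skipStride(s, nextEmit int32, doEvery, skipLog uint) int32 {
	return int32(doEvery) + (s-nextEmit)>>skipLog
}

func main() {
	// With the level 2 constants (doEvery=2, skipLog=5), the stride grows
	// by one for every 32 unmatched bytes.
	for _, gap := range []int32{0, 32, 128, 1024} {
		fmt.Printf("gap %4d -> stride %d\n", gap, skipStride(gap, 0, 2, 5))
	}
}
```
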
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 0000000..33f9fb1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,241 @@
+package flate
+
+import "fmt"
+
+// fastEncL3
+type fastEncL3 struct {
+	fastGen
+	table [1 << 16]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, but will check up to two candidates.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		tableBits              = 16
+		tableSize              = 1 << tableBits
+		hashBytes              = 5
+	)
+
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+			}
+			if v.Prev.offset <= minOff {
+				v.Prev.offset = 0
+			} else {
+				v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+			}
+			e.table[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// Skip if too small.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 7
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashLen(cv, tableBits, hashBytes)
+			s = nextS
+			nextS = s + 1 + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidates := e.table[nextHash]
+			now := load6432(src, nextS)
+
+			// Safe offset distance until s + 4...
+			minOffset := e.cur + s - (maxMatchOffset - 4)
+			e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
+
+			// Check both candidates
+			candidate = candidates.Cur
+			if candidate.offset < minOffset {
+				cv = now
+				// Previous will also be invalid, we have nothing.
+				continue
+			}
+
+			if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+				if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
+					break
+				}
+				// Both match and are valid, pick longest.
+				offset := s - (candidate.offset - e.cur)
+				o2 := s - (candidates.Prev.offset - e.cur)
+				l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+				if l2 > l1 {
+					candidate = candidates.Prev
+				}
+				break
+			} else {
+				// We only check if value mismatches.
+				// Offset will always be invalid in other cases.
+				candidate = candidates.Prev
+				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+					break
+				}
+			}
+			cv = now
+		}
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			//
+			t := candidate.offset - e.cur
+			l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+
+			if s >= sLimit {
+				t += l
+				// Index first pair after match end.
+				if int(t+8) < len(src) && t > 0 {
+					cv = load6432(src, t)
+					nextHash := hashLen(cv, tableBits, hashBytes)
+					e.table[nextHash] = tableEntryPrev{
+						Prev: e.table[nextHash].Cur,
+						Cur:  tableEntry{offset: e.cur + t},
+					}
+				}
+				goto emitRemainder
+			}
+
+			// Store a hash every 6 bytes in-between.
+			for i := s - l + 2; i < s-5; i += 6 {
+				nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
+				e.table[nextHash] = tableEntryPrev{
+					Prev: e.table[nextHash].Cur,
+					Cur:  tableEntry{offset: e.cur + i}}
+			}
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 to s.
+			x := load6432(src, s-2)
+			prevHash := hashLen(x, tableBits, hashBytes)
+
+			e.table[prevHash] = tableEntryPrev{
+				Prev: e.table[prevHash].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 2},
+			}
+			x >>= 8
+			prevHash = hashLen(x, tableBits, hashBytes)
+
+			e.table[prevHash] = tableEntryPrev{
+				Prev: e.table[prevHash].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 1},
+			}
+			x >>= 8
+			currHash := hashLen(x, tableBits, hashBytes)
+			candidates := e.table[currHash]
+			cv = x
+			e.table[currHash] = tableEntryPrev{
+				Prev: candidates.Cur,
+				Cur:  tableEntry{offset: s + e.cur},
+			}
+
+			// Check both candidates
+			candidate = candidates.Cur
+			minOffset := e.cur + s - (maxMatchOffset - 4)
+
+			if candidate.offset > minOffset {
+				if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+					// Found a match...
+					continue
+				}
+				candidate = candidates.Prev
+				if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+					// Match at prev...
+					continue
+				}
+			}
+			cv = x >> 8
+			s++
+			break
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
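
Level 3's `tableEntryPrev` buckets keep two candidates per hash: on every insert the old `Cur` is demoted to `Prev`, which is the `tableEntryPrev{Prev: candidates.Cur, Cur: ...}` pattern used throughout the function above. A hedged sketch of that update in isolation (the simplified types are stand-ins for the vendored ones):

```go
// Simplified stand-ins for the vendored tableEntry/tableEntryPrev types.
type entry struct{ offset int32 }
type entryPrev struct{ Cur, Prev entry }

// bucketInsert pushes a new offset into a two-deep bucket: the previous
// most-recent entry becomes Prev, so the match loop can probe both the
// newest and the second-newest position that hashed to this bucket.
func bucketInsert(b *entryPrev, off int32) {
	b.Prev = b.Cur
	b.Cur = entry{offset: off}
}
```
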
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 0000000..88509e1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,221 @@
+package flate
+
+import "fmt"
+
+type fastEncL4 struct {
+	fastGen
+	table  [tableSize]tableEntry
+	bTable [tableSize]tableEntry
+}
+
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntry{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.bTable[i].offset = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 6
+		const doEvery = 1
+
+		nextS := s
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			e.bTable[nextHashL] = entry
+
+			t = lCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// We got a long match. Use that.
+				break
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4-byte match...
+				lCandidate = e.bTable[hash7(next, tableBits)]
+
+				// If the next long is a candidate, check if we should use that instead...
+				lOff := lCandidate.offset - e.cur
+				if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) {
+					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+					if l2 > l1 {
+						s = nextS
+						t = lCandidate.offset - e.cur
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		// Extend the 4-byte match as long as possible.
+		l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if debugDeflate {
+			if t >= s {
+				panic("s-t")
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			// Index first pair after match end.
+			if int(s+8) < len(src) {
+				cv := load6432(src, s)
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
+				e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
+			}
+			goto emitRemainder
+		}
+
+		// Store every 3rd hash in-between
+		if true {
+			i := nextS
+			if i < s-1 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				t2 := tableEntry{offset: t.offset + 1}
+				e.bTable[hash7(cv, tableBits)] = t
+				e.bTable[hash7(cv>>8, tableBits)] = t2
+				e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+
+				i += 3
+				for ; i < s-1; i += 3 {
+					cv := load6432(src, i)
+					t := tableEntry{offset: i + e.cur}
+					t2 := tableEntry{offset: t.offset + 1}
+					e.bTable[hash7(cv, tableBits)] = t
+					e.bTable[hash7(cv>>8, tableBits)] = t2
+					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+				}
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		x := load6432(src, s-1)
+		o := e.cur + s - 1
+		prevHashS := hashLen(x, tableBits, hashShortBytes)
+		prevHashL := hash7(x, tableBits)
+		e.table[prevHashS] = tableEntry{offset: o}
+		e.bTable[prevHashL] = tableEntry{offset: o}
+		cv = x >> 8
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
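
Level 4 indexes every position twice: a 4-byte hash into `table` and a 7-byte hash (`hash7`) into `bTable`, and prefers the long candidate since a 7-byte hash hit is far more likely to be a long real match. A rough sketch of that lookup order (the helper and its closures are illustrative stand-ins for the offset-range and `load3232` checks above):

```go
// pickCandidate illustrates the short+long preference used by level 4:
// take the long-hash candidate if it validates, otherwise fall back to
// the short-hash one. valid checks the offset is within maxMatchOffset;
// matches4 checks that four bytes actually match, as load3232 does above.
func pickCandidate(long, short int32, valid, matches4 func(int32) bool) (t int32, ok bool) {
	if valid(long) && matches4(long) {
		return long, true
	}
	if valid(short) && matches4(short) {
		return short, true
	}
	return 0, false
}
```
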
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 0000000..6e5c215
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,708 @@
+package flate
+
+import "fmt"
+
+type fastEncL5 struct {
+	fastGen
+	table  [tableSize]tableEntry
+	bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+				v.Prev.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+				if v.Prev.offset <= minOff {
+					v.Prev.offset = 0
+				} else {
+					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+				}
+			}
+			e.bTable[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 6
+		const doEvery = 1
+
+		nextS := s
+		var l int32
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			eLong := &e.bTable[nextHashL]
+			eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+			nextHashS = hashLen(next, tableBits, hashShortBytes)
+			nextHashL = hash7(next, tableBits)
+
+			t = lCandidate.Cur.offset - e.cur
+			if s-t < maxMatchOffset {
+				if uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+					t2 := lCandidate.Prev.offset - e.cur
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(int(s+4), int(t+4), src) + 4
+						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
+						if ml1 > l {
+							t = t2
+							l = ml1
+							break
+						}
+					}
+					break
+				}
+				t = lCandidate.Prev.offset - e.cur
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+					break
+				}
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4-byte match...
+				l = e.matchlen(int(s+4), int(t+4), src) + 4
+				lCandidate = e.bTable[nextHashL]
+				// Store the next match
+
+				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+				eLong := &e.bTable[nextHashL]
+				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+				// If the next long is a candidate, use that...
+				t2 := lCandidate.Cur.offset - e.cur
+				if nextS-t2 < maxMatchOffset {
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+					// If the previous long is a candidate, use that...
+					t2 = lCandidate.Prev.offset - e.cur
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		if l == 0 {
+			// Extend the 4-byte match as long as possible.
+			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
+		} else if l == maxMatchLength {
+			l += e.matchlenLong(int(s+l), int(t+l), src)
+		}
+
+		// Try to locate a better match by checking the end of best match...
+		if sAt := s + l; l < 30 && sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is skipping 2 or 3 bytes, depending on input;
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are re-tested by the "Extend backwards" loop
+			// below, and are still picked up as part of the match if they do match.
+			const skipBeginning = 2
+			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+			t2 := eLong - e.cur - l + skipBeginning
+			s2 := s + skipBeginning
+			off := s2 - t2
+			if t2 >= 0 && off < maxMatchOffset && off > 0 {
+				if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+					t = t2
+					l = l2
+					s = s2
+				}
+			}
+		}
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if debugDeflate {
+			if t >= s {
+				panic(fmt.Sprintln("s-t", s, t))
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			goto emitRemainder
+		}
+
+		// Store every 3rd hash in-between.
+		if true {
+			const hashEvery = 3
+			i := s - l + 1
+			if i < s-1 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Do a long entry at i+1
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				eLong = &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// We only have enough bits for a short entry at i+2
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+				// Skip one - otherwise we risk hitting 's'
+				i += 4
+				for ; i < s-1; i += hashEvery {
+					cv := load6432(src, i)
+					t := tableEntry{offset: i + e.cur}
+					t2 := tableEntry{offset: t.offset + 1}
+					eLong := &e.bTable[hash7(cv, tableBits)]
+					eLong.Cur, eLong.Prev = t, eLong.Cur
+					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+				}
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		x := load6432(src, s-1)
+		o := e.cur + s - 1
+		prevHashS := hashLen(x, tableBits, hashShortBytes)
+		prevHashL := hash7(x, tableBits)
+		e.table[prevHashS] = tableEntry{offset: o}
+		eLong := &e.bTable[prevHashL]
+		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+		cv = x >> 8
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
+
+// fastEncL5Window is a level 5 encoder,
+// but with a custom window size.
+type fastEncL5Window struct {
+	hist      []byte
+	cur       int32
+	maxOffset int32
+	table     [tableSize]tableEntry
+	bTable    [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	maxMatchOffset := e.maxOffset
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+				v.Prev.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+				if v.Prev.offset <= minOff {
+					v.Prev.offset = 0
+				} else {
+					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+				}
+			}
+			e.bTable[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	for {
+		const skipLog = 6
+		const doEvery = 1
+
+		nextS := s
+		var l int32
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			eLong := &e.bTable[nextHashL]
+			eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+			nextHashS = hashLen(next, tableBits, hashShortBytes)
+			nextHashL = hash7(next, tableBits)
+
+			t = lCandidate.Cur.offset - e.cur
+			if s-t < maxMatchOffset {
+				if uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+					t2 := lCandidate.Prev.offset - e.cur
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(s+4, t+4, src) + 4
+						ml1 := e.matchlen(s+4, t2+4, src) + 4
+						if ml1 > l {
+							t = t2
+							l = ml1
+							break
+						}
+					}
+					break
+				}
+				t = lCandidate.Prev.offset - e.cur
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+					break
+				}
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4-byte match...
+				l = e.matchlen(s+4, t+4, src) + 4
+				lCandidate = e.bTable[nextHashL]
+				// Store the next match
+
+				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+				eLong := &e.bTable[nextHashL]
+				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+				// If the next long is a candidate, use that...
+				t2 := lCandidate.Cur.offset - e.cur
+				if nextS-t2 < maxMatchOffset {
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(nextS+4, t2+4, src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+					// If the previous long is a candidate, use that...
+					t2 = lCandidate.Prev.offset - e.cur
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(nextS+4, t2+4, src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		if l == 0 {
+			// Extend the 4-byte match as long as possible.
+			l = e.matchlenLong(s+4, t+4, src) + 4
+		} else if l == maxMatchLength {
+			l += e.matchlenLong(s+l, t+l, src)
+		}
+
+		// Try to locate a better match by checking the end of best match...
+		if sAt := s + l; l < 30 && sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is skipping 2 or 3 bytes, depending on input;
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are re-tested by the "Extend backwards" loop
+			// below, and are still picked up as part of the match if they do match.
+			const skipBeginning = 2
+			eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+			t2 := eLong - e.cur - l + skipBeginning
+			s2 := s + skipBeginning
+			off := s2 - t2
+			if t2 >= 0 && off < maxMatchOffset && off > 0 {
+				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+					t = t2
+					l = l2
+					s = s2
+				}
+			}
+		}
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if debugDeflate {
+			if t >= s {
+				panic(fmt.Sprintln("s-t", s, t))
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			goto emitRemainder
+		}
+
+		// Store every 3rd hash in-between.
+		if true {
+			const hashEvery = 3
+			i := s - l + 1
+			if i < s-1 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Do a long entry at i+1
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				eLong = &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// We only have enough bits for a short entry at i+2
+				cv >>= 8
+				t = tableEntry{offset: t.offset + 1}
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+				// Skip one - otherwise we risk hitting 's'
+				i += 4
+				for ; i < s-1; i += hashEvery {
+					cv := load6432(src, i)
+					t := tableEntry{offset: i + e.cur}
+					t2 := tableEntry{offset: t.offset + 1}
+					eLong := &e.bTable[hash7(cv, tableBits)]
+					eLong.Cur, eLong.Prev = t, eLong.Cur
+					e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+				}
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		x := load6432(src, s-1)
+		o := e.cur + s - 1
+		prevHashS := hashLen(x, tableBits, hashShortBytes)
+		prevHashL := hash7(x, tableBits)
+		e.table[prevHashS] = tableEntry{offset: o}
+		eLong := &e.bTable[prevHashL]
+		eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+		cv = x >> 8
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
+
+// Reset the encoding table.
+func (e *fastEncL5Window) Reset() {
+	// We keep the same allocs, since we are compressing the same block sizes.
+	if cap(e.hist) < allocHistory {
+		e.hist = make([]byte, 0, allocHistory)
+	}
+
+	// We offset current position so everything will be out of reach.
+	// If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+	if e.cur <= int32(bufferReset) {
+		e.cur += e.maxOffset + int32(len(e.hist))
+	}
+	e.hist = e.hist[:0]
+}
+
+func (e *fastEncL5Window) addBlock(src []byte) int32 {
+	// check if we have space already
+	maxMatchOffset := e.maxOffset
+
+	if len(e.hist)+len(src) > cap(e.hist) {
+		if cap(e.hist) == 0 {
+			e.hist = make([]byte, 0, allocHistory)
+		} else {
+			if cap(e.hist) < int(maxMatchOffset*2) {
+				panic("unexpected buffer size")
+			}
+			// Move down
+			offset := int32(len(e.hist)) - maxMatchOffset
+			copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+			e.cur += offset
+			e.hist = e.hist[:maxMatchOffset]
+		}
+	}
+	s := int32(len(e.hist))
+	e.hist = append(e.hist, src...)
+	return s
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
+	if debugDecode {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > e.maxOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+		}
+	}
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and that s < len(src).
+func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
+	if debugDeflate {
+		if t >= s {
+			panic(fmt.Sprint("t >=s:", t, s))
+		}
+		if int(s) >= len(src) {
+			panic(fmt.Sprint("s >= len(src):", s, len(src)))
+		}
+		if t < 0 {
+			panic(fmt.Sprint("t < 0:", t))
+		}
+		if s-t > e.maxOffset {
+			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", maxMatchOffset, ")"))
+		}
+	}
+	// Extend the match to be as long as possible.
+	return int32(matchLen(src[s:], src[t:]))
+}
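
Every encoder in these files guards against `e.cur` wraparound with the same rebasing: offsets still reachable within `maxMatchOffset` are shifted so they keep their distance after `e.cur` is reset to `maxMatchOffset`, and everything older is zeroed. A small worked sketch of that arithmetic (the function name is illustrative):

```go
// rebase mirrors the wraparound handling in the Encode methods: offsets
// at or below minOff can never match again and are dropped; live offsets
// are translated so that their distance from the current position is
// preserved once e.cur is reset to maxMatchOffset.
func rebase(off, cur, histLen, maxMatchOffset int32) int32 {
	minOff := cur + histLen - maxMatchOffset
	if off <= minOff {
		return 0
	}
	return off - cur + maxMatchOffset
}
```
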
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 0000000..96f5bb4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,325 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+	fastGen
+	table  [tableSize]tableEntry
+	bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+		hashShortBytes         = 4
+	)
+	if debugDeflate && e.cur < 0 {
+		panic(fmt.Sprint("e.cur < 0: ", e.cur))
+	}
+
+	// Protect against e.cur wraparound.
+	for e.cur >= bufferReset {
+		if len(e.hist) == 0 {
+			for i := range e.table[:] {
+				e.table[i] = tableEntry{}
+			}
+			for i := range e.bTable[:] {
+				e.bTable[i] = tableEntryPrev{}
+			}
+			e.cur = maxMatchOffset
+			break
+		}
+		// Shift down everything in the table that isn't already too far away.
+		minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+		for i := range e.table[:] {
+			v := e.table[i].offset
+			if v <= minOff {
+				v = 0
+			} else {
+				v = v - e.cur + maxMatchOffset
+			}
+			e.table[i].offset = v
+		}
+		for i := range e.bTable[:] {
+			v := e.bTable[i]
+			if v.Cur.offset <= minOff {
+				v.Cur.offset = 0
+				v.Prev.offset = 0
+			} else {
+				v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+				if v.Prev.offset <= minOff {
+					v.Prev.offset = 0
+				} else {
+					v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+				}
+			}
+			e.bTable[i] = v
+		}
+		e.cur = maxMatchOffset
+	}
+
+	s := e.addBlock(src)
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = uint16(len(src))
+		return
+	}
+
+	// Override src
+	src = e.hist
+	nextEmit := s
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int32(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load6432(src, s)
+	// Repeat MUST be > 1 and within range
+	repeat := int32(1)
+	for {
+		const skipLog = 7
+		const doEvery = 1
+
+		nextS := s
+		var l int32
+		var t int32
+		for {
+			nextHashS := hashLen(cv, tableBits, hashShortBytes)
+			nextHashL := hash7(cv, tableBits)
+			s = nextS
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			// Fetch a short+long candidate
+			sCandidate := e.table[nextHashS]
+			lCandidate := e.bTable[nextHashL]
+			next := load6432(src, nextS)
+			entry := tableEntry{offset: s + e.cur}
+			e.table[nextHashS] = entry
+			eLong := &e.bTable[nextHashL]
+			eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+			// Calculate hashes of 'next'
+			nextHashS = hashLen(next, tableBits, hashShortBytes)
+			nextHashL = hash7(next, tableBits)
+
+			t = lCandidate.Cur.offset - e.cur
+			if s-t < maxMatchOffset {
+				if uint32(cv) == load3232(src, t) {
+					// Long candidate matches at least 4 bytes.
+
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+					// Check the previous long candidate as well.
+					t2 := lCandidate.Prev.offset - e.cur
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(int(s+4), int(t+4), src) + 4
+						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
+						if ml1 > l {
+							t = t2
+							l = ml1
+							break
+						}
+					}
+					break
+				}
+				// Current value did not match, but check if previous long value does.
+				t = lCandidate.Prev.offset - e.cur
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+					// Store the next match
+					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+					eLong := &e.bTable[nextHashL]
+					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+					break
+				}
+			}
+
+			t = sCandidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+				// Found a 4-byte match...
+				l = e.matchlen(int(s+4), int(t+4), src) + 4
+
+				// Look up next long candidate (at nextS)
+				lCandidate = e.bTable[nextHashL]
+
+				// Store the next match
+				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+				eLong := &e.bTable[nextHashL]
+				eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+				// Check repeat at s + repOff
+				const repOff = 1
+				t2 := s - repeat + repOff
+				if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+					ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4
+					if ml > l {
+						t = t2
+						l = ml
+						s += repOff
+						// Not worth checking more.
+						break
+					}
+				}
+
+				// If the next long is a candidate, use that...
+				t2 = lCandidate.Cur.offset - e.cur
+				if nextS-t2 < maxMatchOffset {
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							// This is ok, but check previous as well.
+						}
+					}
+					// If the previous long is a candidate, use that...
+					t2 = lCandidate.Prev.offset - e.cur
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+						if ml > l {
+							t = t2
+							s = nextS
+							l = ml
+							break
+						}
+					}
+				}
+				break
+			}
+			cv = next
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+
+		// Extend the 4-byte match as long as possible.
+		if l == 0 {
+			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
+		} else if l == maxMatchLength {
+			l += e.matchlenLong(int(s+l), int(t+l), src)
+		}
+
+		// Try to locate a better match by checking the end-of-match...
+		if sAt := s + l; sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is skipping 2 or 3 bytes, depending on input;
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are re-tested by the "Extend backwards" loop
+			// below, and are still picked up as part of the match if they do match.
+			const skipBeginning = 2
+			eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
+			// Test current
+			t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+			s2 := s + skipBeginning
+			off := s2 - t2
+			if off < maxMatchOffset {
+				if off > 0 && t2 >= 0 {
+					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+						t = t2
+						l = l2
+						s = s2
+					}
+				}
+				// Test the previous candidate:
+				t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+				off := s2 - t2
+				if off > 0 && off < maxMatchOffset && t2 >= 0 {
+					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+						t = t2
+						l = l2
+						s = s2
+					}
+				}
+			}
+		}
+
+		// Extend backwards
+		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+			s--
+			t--
+			l++
+		}
+		if nextEmit < s {
+			if false {
+				emitLiteral(dst, src[nextEmit:s])
+			} else {
+				for _, v := range src[nextEmit:s] {
+					dst.tokens[dst.n] = token(v)
+					dst.litHist[v]++
+					dst.n++
+				}
+			}
+		}
+		if false {
+			if t >= s {
+				panic(fmt.Sprintln("s-t", s, t))
+			}
+			if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+			}
+			if l < baseMatchLength {
+				panic("bml")
+			}
+		}
+
+		dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+		repeat = s - t
+		s += l
+		nextEmit = s
+		if nextS >= s {
+			s = nextS + 1
+		}
+
+		if s >= sLimit {
+			// Index after match end.
+			for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+				cv := load6432(src, i)
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+			}
+			goto emitRemainder
+		}
+
+		// Store every long hash in-between and every second short.
+		if true {
+			for i := nextS + 1; i < s-1; i += 2 {
+				cv := load6432(src, i)
+				t := tableEntry{offset: i + e.cur}
+				t2 := tableEntry{offset: t.offset + 1}
+				eLong := &e.bTable[hash7(cv, tableBits)]
+				eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+				e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+				eLong.Cur, eLong.Prev = t, eLong.Cur
+				eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+			}
+		}
+
+		// We could immediately start working at s now, but to improve
+		// compression we first update the hash table at s-1 and at s.
+		cv = load6432(src, s)
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
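
Level 6 is the only level here that tracks a repeat distance: after each match it records `repeat = s - t`, and the next short-hash hit also probes `s - repeat + repOff`, since real data very often reuses the previous match distance. A hedged sketch of that probe (the closure stands in for the `load3232` comparison against src):

```go
// tryRepeat sketches level 6's repeat check: probe one byte past the
// previous match distance (repOff = 1 above) before trusting only the
// fresh hash candidates. matchesAt reports whether four bytes at the
// candidate offset equal the current bytes, as load3232 does above.
func tryRepeat(s, repeat int32, matchesAt func(int32) bool) (t int32, ok bool) {
	const repOff = 1
	t = s - repeat + repOff
	if t >= 0 && matchesAt(t) {
		return t, true
	}
	return 0, false
}
```
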
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
new file mode 100644
index 0000000..6149384
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
@@ -0,0 +1,34 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package flate
+
+import (
+	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shorter of the two.
+func matchLen(a, b []byte) (n int) {
+	left := len(a)
+	for left >= 8 {
+		diff := le.Load64(a, n) ^ le.Load64(b, n)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+		left -= 8
+	}
+
+	a = a[n:]
+	b = b[n:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
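
The 8-byte step works because the XOR of equal words is zero, and with little-endian loads the first differing byte is the lowest nonzero byte, so `TrailingZeros64(diff)>>3` counts the matching prefix bytes. A standalone worked example, using encoding/binary in place of the internal `le` package:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	// "gophers!" and "gopherZ!" first differ at index 6.
	a := binary.LittleEndian.Uint64([]byte("gophers!"))
	b := binary.LittleEndian.Uint64([]byte("gopherZ!"))
	diff := a ^ b
	// Little-endian loads put earlier bytes in lower bits, so the number
	// of matching bytes is the trailing zero bit count divided by 8.
	fmt.Println(bits.TrailingZeros64(diff) >> 3) // 6
}
```
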
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
new file mode 100644
index 0000000..6ed2806
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
@@ -0,0 +1,37 @@
+package flate
+
+const (
+	// Masks for shifts with register sizes of the shift value.
+	// This can be used to work around the x86 design of shifting by mod register size.
+	// It can be used when a variable shift is always smaller than the register size.
+
+	// reg8SizeMaskX - shift value is 8 bits, shifted is X
+	reg8SizeMask8  = 7
+	reg8SizeMask16 = 15
+	reg8SizeMask32 = 31
+	reg8SizeMask64 = 63
+
+	// reg16SizeMaskX - shift value is 16 bits, shifted is X
+	reg16SizeMask8  = reg8SizeMask8
+	reg16SizeMask16 = reg8SizeMask16
+	reg16SizeMask32 = reg8SizeMask32
+	reg16SizeMask64 = reg8SizeMask64
+
+	// reg32SizeMaskX - shift value is 32 bits, shifted is X
+	reg32SizeMask8  = reg8SizeMask8
+	reg32SizeMask16 = reg8SizeMask16
+	reg32SizeMask32 = reg8SizeMask32
+	reg32SizeMask64 = reg8SizeMask64
+
+	// reg64SizeMaskX - shift value is 64 bits, shifted is X
+	reg64SizeMask8  = reg8SizeMask8
+	reg64SizeMask16 = reg8SizeMask16
+	reg64SizeMask32 = reg8SizeMask32
+	reg64SizeMask64 = reg8SizeMask64
+
+	// regSizeMaskUintX - shift value is uint, shifted is X
+	regSizeMaskUint8  = reg8SizeMask8
+	regSizeMaskUint16 = reg8SizeMask16
+	regSizeMaskUint32 = reg8SizeMask32
+	regSizeMaskUint64 = reg8SizeMask64
+)
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go
new file mode 100644
index 0000000..1b7a2cb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go
@@ -0,0 +1,40 @@
+//go:build !amd64
+// +build !amd64
+
+package flate
+
+const (
+	// Masks for shifts with register sizes of the shift value.
+	// This can be used to work around the x86 design of shifting by mod register size.
+	// It can be used when a variable shift is always smaller than the register size.
+
+	// reg8SizeMaskX - shift value is 8 bits, shifted is X
+	reg8SizeMask8  = 0xff
+	reg8SizeMask16 = 0xff
+	reg8SizeMask32 = 0xff
+	reg8SizeMask64 = 0xff
+
+	// reg16SizeMaskX - shift value is 16 bits, shifted is X
+	reg16SizeMask8  = 0xffff
+	reg16SizeMask16 = 0xffff
+	reg16SizeMask32 = 0xffff
+	reg16SizeMask64 = 0xffff
+
+	// reg32SizeMaskX - shift value is 32 bits, shifted is X
+	reg32SizeMask8  = 0xffffffff
+	reg32SizeMask16 = 0xffffffff
+	reg32SizeMask32 = 0xffffffff
+	reg32SizeMask64 = 0xffffffff
+
+	// reg64SizeMaskX - shift value is 64 bits, shifted is X
+	reg64SizeMask8  = 0xffffffffffffffff
+	reg64SizeMask16 = 0xffffffffffffffff
+	reg64SizeMask32 = 0xffffffffffffffff
+	reg64SizeMask64 = 0xffffffffffffffff
+
+	// regSizeMaskUintX - shift value is uint, shifted is X
+	regSizeMaskUint8  = ^uint(0)
+	regSizeMaskUint16 = ^uint(0)
+	regSizeMaskUint32 = ^uint(0)
+	regSizeMaskUint64 = ^uint(0)
+)
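
The point of these constants: x86 shift instructions already reduce the shift count modulo the register width, so on amd64 masking with 31/63 is free (it matches what the hardware does anyway) and lets the compiler elide its own out-of-range shift handling, while on other architectures the mask is all-ones and compiles away. A sketch of the intended usage, assuming the shift amount is already known to be in range:

```go
// shiftRight32 shows how the masks are meant to be used: n is known to
// be < 32, and masking with regSizeMaskUint32 (31 on amd64, ^uint(0)
// elsewhere) proves that to the compiler without changing the result,
// avoiding a bounds branch on the shift.
func shiftRight32(v uint32, n uint) uint32 {
	return v >> (n & regSizeMaskUint32)
}
```
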
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 0000000..13b9b10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,313 @@
+package flate
+
+import (
+	"io"
+	"math"
+	"sync"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+const (
+	maxStatelessBlock = math.MaxInt16
+	// The dictionary is taken from the tail of the previous (up to maxStatelessBlock) input, so limit its size.
+	maxStatelessDict = 8 << 10
+
+	slTableBits  = 13
+	slTableSize  = 1 << slTableBits
+	slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+	dst    io.Writer
+	closed bool
+}
+
+func (s *statelessWriter) Close() error {
+	if s.closed {
+		return nil
+	}
+	s.closed = true
+	// Emit EOF block
+	return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+	err = StatelessDeflate(s.dst, p, false, nil)
+	if err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+	s.dst = w
+	s.closed = false
+}
+
+// NewStatelessWriter will do compression without maintaining any state
+// between Write calls.
+// No memory is kept between Write calls,
+// but compression and speed will be suboptimal.
+// Because of this, the size of individual Write calls will affect the output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+	return &statelessWriter{dst: dst}
+}
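
A minimal usage sketch (error handling elided); the output is standard DEFLATE, so the stock compress/flate reader can decode it:

```go
package main

import (
	"bytes"
	"compress/flate"
	"io"
	"os"

	kflate "github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	w := kflate.NewStatelessWriter(&buf)
	w.Write([]byte("hello, hello, hello, stateless world"))
	w.Close() // emits the final EOF block

	r := flate.NewReader(&buf)
	io.Copy(os.Stdout, r)
	r.Close()
}
```
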
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+	New: func() interface{} {
+		return newHuffmanBitWriter(nil)
+	},
+}
+
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
+// When the function returns, everything will have been flushed.
+// Up to 8KB of an optional dictionary can be given, which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending a nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+	var dst tokens
+	bw := bitWriterPool.Get().(*huffmanBitWriter)
+	bw.reset(out)
+	defer func() {
+		// don't keep a reference to our output
+		bw.reset(nil)
+		bitWriterPool.Put(bw)
+	}()
+	if eof && len(in) == 0 {
+		// Just write an EOF block.
+		// Could be faster...
+		bw.writeStoredHeader(0, true)
+		bw.flush()
+		return bw.err
+	}
+
+	// Truncate dict
+	if len(dict) > maxStatelessDict {
+		dict = dict[len(dict)-maxStatelessDict:]
+	}
+
+	// For subsequent loops, keep shallow dict reference to avoid alloc+copy.
+	var inDict []byte
+
+	for len(in) > 0 {
+		todo := in
+		if len(inDict) > 0 {
+			if len(todo) > maxStatelessBlock-maxStatelessDict {
+				todo = todo[:maxStatelessBlock-maxStatelessDict]
+			}
+		} else if len(todo) > maxStatelessBlock-len(dict) {
+			todo = todo[:maxStatelessBlock-len(dict)]
+		}
+		inOrg := in
+		in = in[len(todo):]
+		uncompressed := todo
+		if len(dict) > 0 {
+			// combine dict and source
+			bufLen := len(todo) + len(dict)
+			combined := make([]byte, bufLen)
+			copy(combined, dict)
+			copy(combined[len(dict):], todo)
+			todo = combined
+		}
+		// Compress
+		if len(inDict) == 0 {
+			statelessEnc(&dst, todo, int16(len(dict)))
+		} else {
+			statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+		}
+		isEof := eof && len(in) == 0
+
+		if dst.n == 0 {
+			bw.writeStoredHeader(len(uncompressed), isEof)
+			if bw.err != nil {
+				return bw.err
+			}
+			bw.writeBytes(uncompressed)
+		} else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+			// If we removed less than 1/16th, huffman compress the block.
+			bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+		} else {
+			bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+		}
+		if len(in) > 0 {
+			// Retain a dict if we have more
+			inDict = inOrg[len(uncompressed)-maxStatelessDict:]
+			dict = nil
+			dst.Reset()
+		}
+		if bw.err != nil {
+			return bw.err
+		}
+	}
+	if !eof {
+		// Align, only a stored block can do that.
+		bw.writeStoredHeader(0, false)
+	}
+	bw.flush()
+	return bw.err
+}
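
When many small, related payloads are compressed independently, the dict parameter lets each call borrow shared context without keeping a writer alive; the decoder must supply the same dictionary (e.g. via flate.NewReaderDict). A hedged sketch from inside this package (the helper name is illustrative, and a bytes import is assumed):

```go
// compressOne produces a complete DEFLATE stream for msg whose back
// references may reach into dict; decode it with flate.NewReaderDict
// using the same dictionary bytes.
func compressOne(msg, dict []byte) ([]byte, error) {
	var buf bytes.Buffer
	if err := StatelessDeflate(&buf, msg, true, dict); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```
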
+
+func hashSL(u uint32) uint32 {
+	return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+	return le.Load32(b, i)
+}
+
+func load6416(b []byte, i int16) uint64 {
+	return le.Load64(b, i)
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+	const (
+		inputMargin            = 12 - 1
+		minNonLiteralBlockSize = 1 + 1 + inputMargin
+	)
+
+	type tableEntry struct {
+		offset int16
+	}
+
+	var table [slTableSize]tableEntry
+
+	// This check isn't in the Snappy implementation, but there, the caller
+	// instead of the callee handles this case.
+	if len(src)-int(startAt) < minNonLiteralBlockSize {
+		// We do not fill the token table.
+		// This will be picked up by caller.
+		dst.n = 0
+		return
+	}
+	// Index until startAt
+	if startAt > 0 {
+		cv := load3232(src, 0)
+		for i := int16(0); i < startAt; i++ {
+			table[hashSL(cv)] = tableEntry{offset: i}
+			cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+		}
+	}
+
+	s := startAt + 1
+	nextEmit := startAt
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := int16(len(src) - inputMargin)
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	cv := load3216(src, s)
+
+	for {
+		const skipLog = 5
+		const doEvery = 2
+
+		nextS := s
+		var candidate tableEntry
+		for {
+			nextHash := hashSL(cv)
+			candidate = table[nextHash]
+			nextS = s + doEvery + (s-nextEmit)>>skipLog
+			if nextS > sLimit || nextS <= 0 {
+				goto emitRemainder
+			}
+
+			now := load6416(src, nextS)
+			table[nextHash] = tableEntry{offset: s}
+			nextHash = hashSL(uint32(now))
+
+			if cv == load3216(src, candidate.offset) {
+				table[nextHash] = tableEntry{offset: nextS}
+				break
+			}
+
+			// Do one right away...
+			cv = uint32(now)
+			s = nextS
+			nextS++
+			candidate = table[nextHash]
+			now >>= 8
+			table[nextHash] = tableEntry{offset: s}
+
+			if cv == load3216(src, candidate.offset) {
+				table[nextHash] = tableEntry{offset: nextS}
+				break
+			}
+			cv = uint32(now)
+			s = nextS
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+
+			// Extend the 4-byte match as long as possible.
+			t := candidate.offset
+			l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+			// Extend backwards
+			for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+				s--
+				t--
+				l++
+			}
+			if nextEmit < s {
+				if false {
+					emitLiteral(dst, src[nextEmit:s])
+				} else {
+					for _, v := range src[nextEmit:s] {
+						dst.tokens[dst.n] = token(v)
+						dst.litHist[v]++
+						dst.n++
+					}
+				}
+			}
+
+			// Save the match found
+			dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+			s += l
+			nextEmit = s
+			if nextS >= s {
+				s = nextS + 1
+			}
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-2 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load6416(src, s-2)
+			o := s - 2
+			prevHash := hashSL(uint32(x))
+			table[prevHash] = tableEntry{offset: o}
+			x >>= 16
+			currHash := hashSL(uint32(x))
+			candidate = table[currHash]
+			table[currHash] = tableEntry{offset: o + 2}
+
+			if uint32(x) != load3216(src, candidate.offset) {
+				cv = uint32(x >> 8)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		// If nothing was added, don't encode literals.
+		if dst.n == 0 {
+			return
+		}
+		emitLiteral(dst, src[nextEmit:])
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 0000000..d818790
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	// bits 0-16  	xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+	// bits 16-21	offsetcode - 5 bits
+	// bits 22-30   xlength = length - MIN_MATCH_LENGTH - 8 bits
+	// bits 30-32   type   0 = literal  1=EOF  2=Match   3=Unused - 2 bits
+	lengthShift         = 22
+	offsetMask          = 1<<lengthShift - 1
+	typeMask            = 3 << 30
+	literalType         = 0 << 30
+	matchType           = 1 << 30
+	matchOffsetOnlyMask = 0xffff
+)
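
Given that layout, fields can be peeled off a token with the masks and shift above. Illustrative decode helpers (these mirror the documented layout; they are not the package's actual accessors):

```go
// Illustrative field extraction based on the bit layout documented above.
func tokType(t token) uint32   { return uint32(t) & typeMask }   // literalType, matchType, ...
func tokOffset(t token) uint32 { return uint32(t) & offsetMask } // xoffset plus offset code bits
func tokLength(t token) uint32 { return (uint32(t) >> lengthShift) & 0xff } // xlength
```
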
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [256]uint8{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 28,
+}
+
+// lengthCodes1 is length codes, but starting at 1.
+var lengthCodes1 = [256]uint8{
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
+	10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
+	14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
+	16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+	18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
+	19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+	20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 29,
+}
+
+var offsetCodes = [256]uint32{
+	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+// offsetCodes14 are offsetCodes, but with 14 added.
+var offsetCodes14 = [256]uint32{
+	14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+	22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+	29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+}
+
+type token uint32
+
+type tokens struct {
+	extraHist [32]uint16  // codes 256->maxnumlit
+	offHist   [32]uint16  // offset codes
+	litHist   [256]uint16 // codes 0->255
+	nFilled   int
+	n         uint16 // Must be able to contain maxStoreBlockSize
+	tokens    [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+	if t.n == 0 {
+		return
+	}
+	t.n = 0
+	t.nFilled = 0
+	for i := range t.litHist[:] {
+		t.litHist[i] = 0
+	}
+	for i := range t.extraHist[:] {
+		t.extraHist[i] = 0
+	}
+	for i := range t.offHist[:] {
+		t.offHist[i] = 0
+	}
+}
+
+func (t *tokens) Fill() {
+	if t.n == 0 {
+		return
+	}
+	for i, v := range t.litHist[:] {
+		if v == 0 {
+			t.litHist[i] = 1
+			t.nFilled++
+		}
+	}
+	for i, v := range t.extraHist[:literalCount-256] {
+		if v == 0 {
+			t.nFilled++
+			t.extraHist[i] = 1
+		}
+	}
+	for i, v := range t.offHist[:offsetCodeCount] {
+		if v == 0 {
+			t.offHist[i] = 1
+		}
+	}
+}
+
+func indexTokens(in []token) tokens {
+	var t tokens
+	t.indexTokens(in)
+	return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+	t.Reset()
+	for _, tok := range in {
+		if tok < matchType {
+			t.AddLiteral(tok.literal())
+			continue
+		}
+		t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+	}
+}
+
+// emitLiteral writes a literal chunk to dst.
+func emitLiteral(dst *tokens, lit []byte) {
+	for _, v := range lit {
+		dst.tokens[dst.n] = token(v)
+		dst.litHist[v]++
+		dst.n++
+	}
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+	t.tokens[t.n] = token(lit)
+	t.litHist[lit]++
+	t.n++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+	ux := int32(math.Float32bits(val))
+	log2 := (float32)(((ux >> 23) & 255) - 128)
+	ux &= -0x7f800001
+	ux += 127 << 23
+	uval := math.Float32frombits(uint32(ux))
+	log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+	return log2
+}
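As a quick sanity check (standalone sketch, not part of the vendored file; mFastLog2 copied verbatim), the polynomial approximation tracks math.Log2 closely over the range used here:

package main

import (
	"fmt"
	"math"
)

func mFastLog2(val float32) float32 { // copied verbatim from above
	ux := int32(math.Float32bits(val))
	log2 := (float32)(((ux >> 23) & 255) - 128)
	ux &= -0x7f800001
	ux += 127 << 23
	uval := math.Float32frombits(uint32(ux))
	log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
	return log2
}

func main() {
	for _, v := range []float32{0.5, 1, 2, 10, 1000} {
		fmt.Printf("log2(%v): fast=%.4f exact=%.4f\n", v, mFastLog2(v), math.Log2(float64(v)))
	}
}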
+
+// EstimatedBits returns a minimum size, in bits, estimated by an *optimal*
+// compression of the block.
+func (t *tokens) EstimatedBits() int {
+	shannon := float32(0)
+	bits := int(0)
+	nMatches := 0
+	total := int(t.n) + t.nFilled
+	if total > 0 {
+		invTotal := 1.0 / float32(total)
+		for _, v := range t.litHist[:] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+			}
+		}
+		// Just add 15 for EOB
+		shannon += 15
+		for i, v := range t.extraHist[1 : literalCount-256] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+				bits += int(lengthExtraBits[i&31]) * int(v)
+				nMatches += int(v)
+			}
+		}
+	}
+	if nMatches > 0 {
+		invTotal := 1.0 / float32(nMatches)
+		for i, v := range t.offHist[:offsetCodeCount] {
+			if v > 0 {
+				n := float32(v)
+				shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+				bits += int(offsetExtraBits[i&31]) * int(v)
+			}
+		}
+	}
+	return int(shannon) + bits
+}
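The loop above computes a Shannon lower bound: a symbol seen c times out of total contributes c * -log2(c/total) bits, and the length/offset extra bits are added on top. A minimal reference sketch of the entropy part (shannonBits is a hypothetical helper, not from this package):

package main

import (
	"fmt"
	"math"
)

// shannonBits is a hypothetical reference version of the estimate above:
// a symbol seen c times out of total costs c * -log2(c/total) bits.
func shannonBits(hist map[byte]int) float64 {
	total := 0
	for _, c := range hist {
		total += c
	}
	bits := 0.0
	for _, c := range hist {
		p := float64(c) / float64(total)
		bits += float64(c) * -math.Log2(p)
	}
	return bits
}

func main() {
	fmt.Printf("%.2f bits\n", shannonBits(map[byte]int{'a': 6, 'b': 2})) // 6.49 bits
}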
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and is right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+	if debugDeflate {
+		if xlength >= maxMatchLength+baseMatchLength {
+			panic(fmt.Errorf("invalid length: %v", xlength))
+		}
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oCode := offsetCode(xoffset)
+	xoffset |= oCode << 16
+
+	t.extraHist[lengthCodes1[uint8(xlength)]]++
+	t.offHist[oCode&31]++
+	t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+	t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+	if debugDeflate {
+		if xoffset >= maxMatchOffset+baseMatchOffset {
+			panic(fmt.Errorf("invalid offset: %v", xoffset))
+		}
+	}
+	oc := offsetCode(xoffset)
+	xoffset |= oc << 16
+	for xlength > 0 {
+		xl := xlength
+		if xl > 258 {
+			// We need to have at least baseMatchLength left over for next loop.
+			if xl > 258+baseMatchLength {
+				xl = 258
+			} else {
+				xl = 258 - baseMatchLength
+			}
+		}
+		xlength -= xl
+		xl -= baseMatchLength
+		t.extraHist[lengthCodes1[uint8(xl)]]++
+		t.offHist[oc&31]++
+		t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+		t.n++
+	}
+}
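A standalone mirror of the splitting loop above (chunks is a hypothetical helper; baseMatchLength = 3 is assumed, the deflate minimum match length):

package main

import "fmt"

// chunks mirrors AddMatchLong: an over-long match is split into tokens of
// at most 258 bytes, keeping enough left over for a valid final token.
func chunks(xlength int32) (out []int32) {
	const baseMatchLength = 3
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			if xl > 258+baseMatchLength {
				xl = 258
			} else {
				// Leave at least baseMatchLength for the final token.
				xl = 258 - baseMatchLength
			}
		}
		xlength -= xl
		out = append(out, xl)
	}
	return out
}

func main() {
	fmt.Println(chunks(600)) // [258 258 84]
}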
+
+func (t *tokens) AddEOB() {
+	t.tokens[t.n] = token(endBlockMarker)
+	t.extraHist[0]++
+	t.n++
+}
+
+func (t *tokens) Slice() []token {
+	return t.tokens[:t.n]
+}
+
+// VarInt returns the tokens as varint encoded bytes.
+func (t *tokens) VarInt() []byte {
+	var b = make([]byte, binary.MaxVarintLen32*int(t.n))
+	var off int
+	for _, v := range t.tokens[:t.n] {
+		off += binary.PutUvarint(b[off:], uint64(v))
+	}
+	return b[:off]
+}
+
+// FromVarInt restores t to the varint encoded tokens provided.
+// Any data in t is removed.
+func (t *tokens) FromVarInt(b []byte) error {
+	var buf = bytes.NewReader(b)
+	var toks []token
+	for {
+		r, err := binary.ReadUvarint(buf)
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		toks = append(toks, token(r))
+	}
+	t.indexTokens(toks)
+	return nil
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint8 { return uint8(t) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
+
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+	if false {
+		if off < uint32(len(offsetCodes)) {
+			return offsetCodes[off&255]
+		} else if off>>7 < uint32(len(offsetCodes)) {
+			return offsetCodes[(off>>7)&255] + 14
+		} else {
+			return offsetCodes[(off>>14)&255] + 28
+		}
+	}
+	if off < uint32(len(offsetCodes)) {
+		return offsetCodes[uint8(off)]
+	}
+	return offsetCodes14[uint8(off>>7)]
+}
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
index 43e4636..e82fa3b 100644
--- a/vendor/github.com/klauspost/compress/fse/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -152,12 +152,11 @@
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index 6f34191..074018d 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -146,54 +146,51 @@
 		c1.encodeZero(tt[src[ip-2]])
 		ip -= 2
 	}
+	src = src[:ip]
 
 	// Main compression loop.
 	switch {
 	case !s.zeroBits && s.actualTableLog <= 8:
 		// We can encode 4 symbols without requiring a flush.
 		// We do not need to check if any output is 0 bits.
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encode(tt[v0])
 			c1.encode(tt[v1])
 			c2.encode(tt[v2])
 			c1.encode(tt[v3])
-			ip -= 4
 		}
 	case !s.zeroBits:
 		// We do not need to check if any output is 0 bits.
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encode(tt[v0])
 			c1.encode(tt[v1])
 			s.bw.flush32()
 			c2.encode(tt[v2])
 			c1.encode(tt[v3])
-			ip -= 4
 		}
 	case s.actualTableLog <= 8:
 		// We can encode 4 symbols without requiring a flush
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encodeZero(tt[v0])
 			c1.encodeZero(tt[v1])
 			c2.encodeZero(tt[v2])
 			c1.encodeZero(tt[v3])
-			ip -= 4
 		}
 	default:
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encodeZero(tt[v0])
 			c1.encodeZero(tt[v1])
 			s.bw.flush32()
 			c2.encodeZero(tt[v2])
 			c1.encodeZero(tt[v3])
-			ip -= 4
 		}
 	}
 
@@ -202,7 +199,8 @@
 	c2.flush(s.actualTableLog)
 	c1.flush(s.actualTableLog)
 
-	return s.bw.close()
+	s.bw.close()
+	return nil
 }
 
 // writeCount will write the normalized histogram count to header.
@@ -214,7 +212,7 @@
 		previous0 bool
 		charnum   uint16
 
-		maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
 
 		// Write Table Size
 		bitStream = uint32(tableLog - minTablelog)
@@ -459,15 +457,17 @@
 	for _, v := range in {
 		s.count[v]++
 	}
-	m := uint32(0)
+	m, symlen := uint32(0), s.symbolLen
 	for i, v := range s.count[:] {
+		if v == 0 {
+			continue
+		}
 		if v > m {
 			m = v
 		}
-		if v > 0 {
-			s.symbolLen = uint16(i) + 1
-		}
+		symlen = uint16(i) + 1
 	}
+	s.symbolLen = symlen
 	return int(m)
 }
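The rewrite above, from an ip index to re-slicing src, is a bounds-check elimination pattern: indexing relative to len(src) under the len(src) >= 4 guard lets the compiler prove every access is in range. A minimal standalone illustration (sum4 is a hypothetical function, not from this package):

package main

import "fmt"

// sum4 mirrors the loop shape above. Indexing relative to len(src) under
// the len(src) >= 4 guard lets the compiler elide bounds checks
// (inspect with: go build -gcflags=-d=ssa/check_bce).
func sum4(src []byte) (s int) {
	for ; len(src) >= 4; src = src[:len(src)-4] {
		v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
		s += int(v0) + int(v1) + int(v2) + int(v3)
	}
	for _, v := range src { // remainder, fewer than 4 bytes
		s += int(v)
	}
	return s
}

func main() {
	fmt.Println(sum4([]byte{1, 2, 3, 4, 5})) // 15
}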
 
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index 926f5f1..0c7dd4f 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@
 // It is possible, but by no way guaranteed that corrupt data will
 // return an error.
 // It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
 func Decompress(b []byte, s *Scratch) ([]byte, error) {
 	s, err := s.prepare(b)
 	if err != nil {
@@ -260,7 +260,9 @@
 // If the buffer is over-read an error is returned.
 func (s *Scratch) decompress() error {
 	br := &s.bits
-	br.init(s.br.unread())
+	if err := br.init(s.br.unread()); err != nil {
+		return err
+	}
 
 	var s1, s2 decoder
 	// Initialize and decode first state and symbol.
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 0000000..00a0a2c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,380 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+	"bufio"
+	"compress/gzip"
+	"encoding/binary"
+	"hash/crc32"
+	"io"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = gzip.ErrChecksum
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = gzip.ErrHeader
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+	Header       // valid after NewReader or Reader.Reset
+	r            flate.Reader
+	br           *bufio.Reader
+	decompressor io.ReadCloser
+	digest       uint32 // CRC-32, IEEE polynomial (section 8)
+	size         uint32 // Uncompressed size (section 2.3.1)
+	buf          [512]byte
+	err          error
+	multistream  bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+//
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// The Reader.Header fields will be valid in the Reader returned.
+func NewReader(r io.Reader) (*Reader, error) {
+	z := new(Reader)
+	if err := z.Reset(r); err != nil {
+		return nil, err
+	}
+	return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+	*z = Reader{
+		decompressor: z.decompressor,
+		multistream:  true,
+		br:           z.br,
+	}
+	if rr, ok := r.(flate.Reader); ok {
+		z.r = rr
+	} else {
+		// Reuse if we can.
+		if z.br != nil {
+			z.br.Reset(r)
+		} else {
+			z.br = bufio.NewReader(r)
+		}
+		z.r = z.br
+	}
+	z.Header, z.err = z.readHeader()
+	return z.err
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+	z.multistream = ok
+}
+
+// readString reads a NUL-terminated string from z.r.
+// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
+// will output a string encoded using UTF-8.
+// This method always updates z.digest with the data read.
+func (z *Reader) readString() (string, error) {
+	var err error
+	needConv := false
+	for i := 0; ; i++ {
+		if i >= len(z.buf) {
+			return "", ErrHeader
+		}
+		z.buf[i], err = z.r.ReadByte()
+		if err != nil {
+			return "", err
+		}
+		if z.buf[i] > 0x7f {
+			needConv = true
+		}
+		if z.buf[i] == 0 {
+			// Digest covers the NUL terminator.
+			z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
+
+			// Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
+			if needConv {
+				s := make([]rune, 0, i)
+				for _, v := range z.buf[:i] {
+					s = append(s, rune(v))
+				}
+				return string(s), nil
+			}
+			return string(z.buf[:i]), nil
+		}
+	}
+}
+
+// readHeader reads the GZIP header according to section 2.3.1.
+// This method does not set z.err.
+func (z *Reader) readHeader() (hdr Header, err error) {
+	if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
+		// RFC 1952, section 2.2, says the following:
+		//	A gzip file consists of a series of "members" (compressed data sets).
+		//
+		// Other than this, the specification does not clarify whether a
+		// "series" is defined as "one or more" or "zero or more". To err on the
+		// side of caution, Go interprets this to mean "zero or more".
+		// Thus, it is okay to return io.EOF here.
+		return hdr, err
+	}
+	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+		return hdr, ErrHeader
+	}
+	flg := z.buf[3]
+	hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
+	// z.buf[8] is XFL and is currently ignored.
+	hdr.OS = z.buf[9]
+	z.digest = crc32.ChecksumIEEE(z.buf[:10])
+
+	if flg&flagExtra != 0 {
+		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+			return hdr, noEOF(err)
+		}
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
+		data := make([]byte, le.Uint16(z.buf[:2]))
+		if _, err = io.ReadFull(z.r, data); err != nil {
+			return hdr, noEOF(err)
+		}
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
+		hdr.Extra = data
+	}
+
+	var s string
+	if flg&flagName != 0 {
+		if s, err = z.readString(); err != nil {
+			return hdr, err
+		}
+		hdr.Name = s
+	}
+
+	if flg&flagComment != 0 {
+		if s, err = z.readString(); err != nil {
+			return hdr, err
+		}
+		hdr.Comment = s
+	}
+
+	if flg&flagHdrCrc != 0 {
+		if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+			return hdr, noEOF(err)
+		}
+		digest := le.Uint16(z.buf[:2])
+		if digest != uint16(z.digest) {
+			return hdr, ErrHeader
+		}
+	}
+
+	// Reserved FLG bits must be zero.
+	if flg>>5 != 0 {
+		return hdr, ErrHeader
+	}
+
+	z.digest = 0
+	if z.decompressor == nil {
+		z.decompressor = flate.NewReader(z.r)
+	} else {
+		z.decompressor.(flate.Resetter).Reset(z.r, nil)
+	}
+	return hdr, nil
+}
+
+// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+func (z *Reader) Read(p []byte) (n int, err error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+
+	for n == 0 {
+		n, z.err = z.decompressor.Read(p)
+		z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+		z.size += uint32(n)
+		if z.err != io.EOF {
+			// In the normal case we return here.
+			return n, z.err
+		}
+
+		// Finished file; check checksum and size.
+		if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+			z.err = noEOF(err)
+			return n, z.err
+		}
+		digest := le.Uint32(z.buf[:4])
+		size := le.Uint32(z.buf[4:8])
+		if digest != z.digest || size != z.size {
+			z.err = ErrChecksum
+			return n, z.err
+		}
+		z.digest, z.size = 0, 0
+
+		// File is ok; check if there is another.
+		if !z.multistream {
+			return n, io.EOF
+		}
+		z.err = nil // Remove io.EOF
+
+		if _, z.err = z.readHeader(); z.err != nil {
+			return n, z.err
+		}
+	}
+
+	return n, nil
+}
+
+type crcer interface {
+	io.Writer
+	Sum32() uint32
+	Reset()
+}
+type crcUpdater struct {
+	z *Reader
+}
+
+func (c *crcUpdater) Write(p []byte) (int, error) {
+	c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p)
+	return len(p), nil
+}
+
+func (c *crcUpdater) Sum32() uint32 {
+	return c.z.digest
+}
+
+func (c *crcUpdater) Reset() {
+	c.z.digest = 0
+}
+
+// WriteTo supports the io.WriterTo interface for io.Copy and friends.
+func (z *Reader) WriteTo(w io.Writer) (int64, error) {
+	total := int64(0)
+	crcWriter := crcer(crc32.NewIEEE())
+	if z.digest != 0 {
+		crcWriter = &crcUpdater{z: z}
+	}
+	for {
+		if z.err != nil {
+			if z.err == io.EOF {
+				return total, nil
+			}
+			return total, z.err
+		}
+
+		// We write both to output and digest.
+		mw := io.MultiWriter(w, crcWriter)
+		n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
+		total += n
+		z.size += uint32(n)
+		if err != nil {
+			z.err = err
+			return total, z.err
+		}
+
+		// Finished file; check checksum + size.
+		if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+			if err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			z.err = err
+			return total, err
+		}
+		z.digest = crcWriter.Sum32()
+		digest := le.Uint32(z.buf[:4])
+		size := le.Uint32(z.buf[4:8])
+		if digest != z.digest || size != z.size {
+			z.err = ErrChecksum
+			return total, z.err
+		}
+		z.digest, z.size = 0, 0
+
+		// File is ok; check if there is another.
+		if !z.multistream {
+			return total, nil
+		}
+		crcWriter.Reset()
+		z.err = nil // Remove io.EOF
+
+		if _, z.err = z.readHeader(); z.err != nil {
+			if z.err == io.EOF {
+				return total, nil
+			}
+			return total, z.err
+		}
+	}
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+// In order for the GZIP checksum to be verified, the reader must be
+// fully consumed until io.EOF is returned.
+func (z *Reader) Close() error { return z.decompressor.Close() }
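A usage sketch of the multistream behavior documented above (hypothetical example, not part of the vendored file): two independently gzipped members, concatenated, read back as one stream:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip"
)

func main() {
	// Two independently gzipped members, written back to back.
	var buf bytes.Buffer
	for _, s := range []string{"hello ", "world"} {
		w := gzip.NewWriter(&buf)
		if _, err := w.Write([]byte(s)); err != nil {
			panic(err)
		}
		if err := w.Close(); err != nil {
			panic(err)
		}
	}

	// With multistream enabled (the default), both members read back
	// as one concatenated stream.
	r, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // hello world
}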
diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go
new file mode 100644
index 0000000..5bc7205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go
@@ -0,0 +1,290 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gzip
+
+import (
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"io"
+
+	"github.com/klauspost/compress/flate"
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+	NoCompression       = flate.NoCompression
+	BestSpeed           = flate.BestSpeed
+	BestCompression     = flate.BestCompression
+	DefaultCompression  = flate.DefaultCompression
+	ConstantCompression = flate.ConstantCompression
+	HuffmanOnly         = flate.HuffmanOnly
+
+	// StatelessCompression will do compression but without maintaining any state
+	// between Write calls.
+	// No memory is kept between Write calls,
+	// but compression and speed will be suboptimal.
+	// Because of this, the size of each Write call will affect the output size.
+	StatelessCompression = -3
+)
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+	Header      // written at first call to Write, Flush, or Close
+	w           io.Writer
+	level       int
+	err         error
+	compressor  *flate.Writer
+	digest      uint32 // CRC-32, IEEE polynomial (section 8)
+	size        uint32 // Uncompressed size (section 2.3.1)
+	wroteHeader bool
+	closed      bool
+	buf         [10]byte
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write, Flush, or Close.
+func NewWriter(w io.Writer) *Writer {
+	z, _ := NewWriterLevel(w, DefaultCompression)
+	return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+	if level < StatelessCompression || level > BestCompression {
+		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+	}
+	z := new(Writer)
+	z.init(w, level)
+	return z, nil
+}
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = flate.MinCustomWindowSize
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = flate.MaxCustomWindowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+	if windowSize < MinCustomWindowSize {
+		return nil, errors.New("gzip: requested window size less than MinWindowSize")
+	}
+	if windowSize > MaxCustomWindowSize {
+		return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize")
+	}
+
+	z := new(Writer)
+	z.init(w, -windowSize)
+	return z, nil
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+	compressor := z.compressor
+	if level != StatelessCompression {
+		if compressor != nil {
+			compressor.Reset(w)
+		}
+	}
+
+	*z = Writer{
+		Header: Header{
+			OS: 255, // unknown
+		},
+		w:          w,
+		level:      level,
+		compressor: compressor,
+	}
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+	z.init(w, z.level)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+	if len(b) > 0xffff {
+		return errors.New("gzip.Write: Extra data is too large")
+	}
+	le.PutUint16(z.buf[:2], uint16(len(b)))
+	_, err := z.w.Write(z.buf[:2])
+	if err != nil {
+		return err
+	}
+	_, err = z.w.Write(b)
+	return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+	needconv := false
+	for _, v := range s {
+		if v == 0 || v > 0xff {
+			return errors.New("gzip.Write: non-Latin-1 header string")
+		}
+		if v > 0x7f {
+			needconv = true
+		}
+	}
+	if needconv {
+		b := make([]byte, 0, len(s))
+		for _, v := range s {
+			b = append(b, byte(v))
+		}
+		_, err = z.w.Write(b)
+	} else {
+		_, err = io.WriteString(z.w, s)
+	}
+	if err != nil {
+		return err
+	}
+	// GZIP strings are NUL-terminated.
+	z.buf[0] = 0
+	_, err = z.w.Write(z.buf[:1])
+	return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed until the Writer is closed.
+func (z *Writer) Write(p []byte) (int, error) {
+	if z.err != nil {
+		return 0, z.err
+	}
+	var n int
+	// Write the GZIP header lazily.
+	if !z.wroteHeader {
+		z.wroteHeader = true
+		z.buf[0] = gzipID1
+		z.buf[1] = gzipID2
+		z.buf[2] = gzipDeflate
+		z.buf[3] = 0
+		if z.Extra != nil {
+			z.buf[3] |= 0x04
+		}
+		if z.Name != "" {
+			z.buf[3] |= 0x08
+		}
+		if z.Comment != "" {
+			z.buf[3] |= 0x10
+		}
+		le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
+		if z.level == BestCompression {
+			z.buf[8] = 2
+		} else if z.level == BestSpeed {
+			z.buf[8] = 4
+		} else {
+			z.buf[8] = 0
+		}
+		z.buf[9] = z.OS
+		n, z.err = z.w.Write(z.buf[:10])
+		if z.err != nil {
+			return n, z.err
+		}
+		if z.Extra != nil {
+			z.err = z.writeBytes(z.Extra)
+			if z.err != nil {
+				return n, z.err
+			}
+		}
+		if z.Name != "" {
+			z.err = z.writeString(z.Name)
+			if z.err != nil {
+				return n, z.err
+			}
+		}
+		if z.Comment != "" {
+			z.err = z.writeString(z.Comment)
+			if z.err != nil {
+				return n, z.err
+			}
+		}
+
+		if z.compressor == nil && z.level != StatelessCompression {
+			z.compressor, _ = flate.NewWriter(z.w, z.level)
+		}
+	}
+	z.size += uint32(len(p))
+	z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
+	if z.level == StatelessCompression {
+		return len(p), flate.StatelessDeflate(z.w, p, false, nil)
+	}
+	n, z.err = z.compressor.Write(p)
+	return n, z.err
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed || z.level == StatelessCompression {
+		return nil
+	}
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	z.err = z.compressor.Flush()
+	return z.err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+	if z.err != nil {
+		return z.err
+	}
+	if z.closed {
+		return nil
+	}
+	z.closed = true
+	if !z.wroteHeader {
+		z.Write(nil)
+		if z.err != nil {
+			return z.err
+		}
+	}
+	if z.level == StatelessCompression {
+		z.err = flate.StatelessDeflate(z.w, nil, true, nil)
+	} else {
+		z.err = z.compressor.Close()
+	}
+	if z.err != nil {
+		return z.err
+	}
+	le.PutUint32(z.buf[:4], z.digest)
+	le.PutUint32(z.buf[4:8], z.size)
+	_, z.err = z.w.Write(z.buf[:8])
+	return z.err
+}
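And the corresponding writer-side sketch (hypothetical example): header fields are set before the first Write, and Close writes the CRC-32 and size trailer:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	w, err := gzip.NewWriterLevel(&buf, gzip.BestSpeed)
	if err != nil {
		panic(err)
	}
	w.Name = "greeting.txt" // header fields must be set before the first Write
	if _, err := w.Write([]byte("hello gzip")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil { // flushes and writes the CRC-32/size trailer
		panic(err)
	}
	fmt.Println(buf.Len(), "compressed bytes")
}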
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 504a7be..bfc7a52 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,10 +6,11 @@
 package huff0
 
 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@
 	return nil
 }
 
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
 // There are no checks if the buffer is filled.
 func (b *bitReaderBytes) peekByteFast() uint8 {
 	got := uint8(b.value >> 56)
@@ -66,9 +67,7 @@
 	}
 
 	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	low := le.Load32(b.in, b.off-4)
 	b.value |= uint64(low) << (b.bitsRead - 32)
 	b.bitsRead -= 32
 	b.off -= 4
@@ -77,7 +76,7 @@
 // fillFastStart() assumes the bitReaderBytes is empty and there are at least 8 bytes to read.
 func (b *bitReaderBytes) fillFastStart() {
 	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.value = le.Load64(b.in, b.off-8)
 	b.bitsRead = 0
 	b.off -= 8
 }
@@ -87,10 +86,8 @@
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	if b.off >= 4 {
+		low := le.Load32(b.in, b.off-4)
 		b.value |= uint64(low) << (b.bitsRead - 32)
 		b.bitsRead -= 32
 		b.off -= 4
@@ -177,10 +174,7 @@
 		return
 	}
 
-	// 2 bounds checks.
-	v := b.in[b.off-4 : b.off]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+	low := le.Load32(b.in, b.off-4)
 	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 	b.bitsRead -= 32
 	b.off -= 4
@@ -188,8 +182,7 @@
 
 // fillFastStart() assumes the bitReaderShifted is empty and there are at least 8 bytes to read.
 func (b *bitReaderShifted) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.value = le.Load64(b.in, b.off-8)
 	b.bitsRead = 0
 	b.off -= 8
 }
@@ -200,9 +193,7 @@
 		return
 	}
 	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+		low := le.Load32(b.in, b.off-4)
 		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 		b.bitsRead -= 32
 		b.off -= 4
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index ec71f7a..0ebc9aa 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -13,14 +13,6 @@
 	out          []byte
 }
 
-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
-	0, 1, 3, 7, 0xF, 0x1F,
-	0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
-	0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
-	0xFFFF, 0xFFFF} /* up to 16 bits */
-
 // addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
 // It will not check if there is space for them, so the caller must ensure that it has flushed recently.
 func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -60,6 +52,22 @@
 	b.nBits += encA.nBits + encB.nBits
 }
 
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+	bitsA := encA.nBits
+	bitsB := bitsA + encB.nBits
+	bitsC := bitsB + encC.nBits
+	bitsD := bitsC + encD.nBits
+	combined := uint64(encA.val) |
+		(uint64(encB.val) << (bitsA & 63)) |
+		(uint64(encC.val) << (bitsB & 63)) |
+		(uint64(encD.val) << (bitsC & 63))
+	b.bitContainer |= combined << (b.nBits & 63)
+	b.nBits += bitsD
+}
+
 // flush32 will flush out, so there are at least 32 bits available for writing.
 func (b *bitWriter) flush32() {
 	if b.nBits < 32 {
@@ -86,10 +94,9 @@
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
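A standalone sketch of the packing idea in encFourSymbols above: the code lengths are accumulated so each value can be shifted into place, and the whole word is OR'ed into the bit container in one operation:

package main

import "fmt"

func main() {
	type entry struct {
		val   uint16
		nBits uint8
	}
	a, b, c, d := entry{0b1, 1}, entry{0b10, 2}, entry{0b011, 3}, entry{0b0001, 4}

	// Accumulate bit counts so each code lands after the previous one.
	bitsA := a.nBits
	bitsB := bitsA + b.nBits
	bitsC := bitsB + c.nBits
	combined := uint64(a.val) |
		uint64(b.val)<<(bitsA&63) |
		uint64(c.val)<<(bitsB&63) |
		uint64(d.val)<<(bitsC&63)
	fmt.Printf("%010b (%d bits)\n", combined, bitsC+d.nBits) // 0001011101 (10 bits)
}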
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
deleted file mode 100644
index 4dcab8d..0000000
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-// byteReader provides a byte reader that reads
-// little endian values from a byte stream.
-// The input stream is manually advanced.
-// The reader performs no bounds checks.
-type byteReader struct {
-	b   []byte
-	off int
-}
-
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
-	b.b = in
-	b.off = 0
-}
-
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
-	v3 := int32(b.b[b.off+3])
-	v2 := int32(b.b[b.off+2])
-	v1 := int32(b.b[b.off+1])
-	v0 := int32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 {
-	v3 := uint32(b.b[b.off+3])
-	v2 := uint32(b.b[b.off+2])
-	v1 := uint32(b.b[b.off+1])
-	v0 := uint32(b.b[b.off])
-	return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// remain will return the number of bytes remaining.
-func (b byteReader) remain() int {
-	return len(b.b) - b.off
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 4d14542..84aa3d1 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -227,10 +227,10 @@
 }
 
 func (s *Scratch) compress1X(src []byte) ([]byte, error) {
-	return s.compress1xDo(s.Out, src)
+	return s.compress1xDo(s.Out, src), nil
 }
 
-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
 	var bw = bitWriter{out: dst}
 
 	// N is length divisible by 4.
@@ -248,8 +248,7 @@
 			tmp := src[n : n+4]
 			// tmp should be len 4
 			bw.flush32()
-			bw.encTwoSymbols(cTable, tmp[3], tmp[2])
-			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+			bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
 		}
 	} else {
 		for ; n >= 0; n -= 4 {
@@ -261,8 +260,8 @@
 			bw.encTwoSymbols(cTable, tmp[1], tmp[0])
 		}
 	}
-	err := bw.close()
-	return bw.out, err
+	bw.close()
+	return bw.out
 }
 
 var sixZeros [6]byte
@@ -284,12 +283,8 @@
 		}
 		src = src[len(toDo):]
 
-		var err error
 		idx := len(s.Out)
-		s.Out, err = s.compress1xDo(s.Out, toDo)
-		if err != nil {
-			return nil, err
-		}
+		s.Out = s.compress1xDo(s.Out, toDo)
 		if len(s.Out)-idx > math.MaxUint16 {
 			// We cannot store the size in the jump table
 			return nil, ErrIncompressible
@@ -316,7 +311,6 @@
 
 	segmentSize := (len(src) + 3) / 4
 	var wg sync.WaitGroup
-	var errs [4]error
 	wg.Add(4)
 	for i := 0; i < 4; i++ {
 		toDo := src
@@ -327,15 +321,12 @@
 
 		// Separate goroutine for each block.
 		go func(i int) {
-			s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+			s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
 			wg.Done()
 		}(i)
 	}
 	wg.Wait()
 	for i := 0; i < 4; i++ {
-		if errs[i] != nil {
-			return nil, errs[i]
-		}
 		o := s.tmpOut[i]
 		if len(o) > math.MaxUint16 {
 			// We cannot store the size in the jump table
@@ -359,35 +350,36 @@
 // Does not update s.clearCount.
 func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
 	reuse = true
+	_ = s.count // Assert that s != nil to speed up the following loop.
 	for _, v := range in {
 		s.count[v]++
 	}
 	m := uint32(0)
 	if len(s.prevTable) > 0 {
 		for i, v := range s.count[:] {
+			if v == 0 {
+				continue
+			}
 			if v > m {
 				m = v
 			}
-			if v > 0 {
-				s.symbolLen = uint16(i) + 1
-				if i >= len(s.prevTable) {
-					reuse = false
-				} else {
-					if s.prevTable[i].nBits == 0 {
-						reuse = false
-					}
-				}
+			s.symbolLen = uint16(i) + 1
+			if i >= len(s.prevTable) {
+				reuse = false
+			} else if s.prevTable[i].nBits == 0 {
+				reuse = false
 			}
 		}
 		return int(m), reuse
 	}
 	for i, v := range s.count[:] {
+		if v == 0 {
+			continue
+		}
 		if v > m {
 			m = v
 		}
-		if v > 0 {
-			s.symbolLen = uint16(i) + 1
-		}
+		s.symbolLen = uint16(i) + 1
 	}
 	return int(m), false
 }
@@ -424,7 +416,7 @@
 
 // minTableLog provides the minimum logSize to safely represent a distribution.
 func (s *Scratch) minTableLog() uint8 {
-	minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+	minBitsSrc := highBit32(uint32(s.srcLen)) + 1
 	minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
 	if minBitsSrc < minBitsSymbols {
 		return uint8(minBitsSrc)
@@ -436,7 +428,7 @@
 func (s *Scratch) optimalTableLog() {
 	tableLog := s.TableLog
 	minBits := s.minTableLog()
-	maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+	maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
 	if maxBitsSrc < tableLog {
 		// Accuracy can be reduced
 		tableLog = maxBitsSrc
@@ -484,34 +476,35 @@
 	// Different from reference implementation.
 	huffNode0 := s.nodes[0 : huffNodesLen+1]
 
-	for huffNode[nonNullRank].count == 0 {
+	for huffNode[nonNullRank].count() == 0 {
 		nonNullRank--
 	}
 
 	lowS := int16(nonNullRank)
 	nodeRoot := nodeNb + lowS - 1
 	lowN := nodeNb
-	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
-	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+	huffNode[lowS].setParent(nodeNb)
+	huffNode[lowS-1].setParent(nodeNb)
 	nodeNb++
 	lowS -= 2
 	for n := nodeNb; n <= nodeRoot; n++ {
-		huffNode[n].count = 1 << 30
+		huffNode[n].setCount(1 << 30)
 	}
 	// fake entry, strong barrier
-	huffNode0[0].count = 1 << 31
+	huffNode0[0].setCount(1 << 31)
 
 	// create parents
 	for nodeNb <= nodeRoot {
 		var n1, n2 int16
-		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
 			n1 = lowS
 			lowS--
 		} else {
 			n1 = lowN
 			lowN++
 		}
-		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
 			n2 = lowS
 			lowS--
 		} else {
@@ -519,18 +512,19 @@
 			lowN++
 		}
 
-		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
-		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+		huffNode0[n1+1].setParent(nodeNb)
+		huffNode0[n2+1].setParent(nodeNb)
 		nodeNb++
 	}
 
 	// distribute weights (unlimited tree height)
-	huffNode[nodeRoot].nbBits = 0
+	huffNode[nodeRoot].setNbBits(0)
 	for n := nodeRoot - 1; n >= startNode; n-- {
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
 	}
 	for n := uint16(0); n <= nonNullRank; n++ {
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
 	}
 	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
 	maxNbBits := s.actualTableLog
@@ -542,7 +536,7 @@
 	var nbPerRank [tableLogMax + 1]uint16
 	var valPerRank [16]uint16
 	for _, v := range huffNode[:nonNullRank+1] {
-		nbPerRank[v.nbBits]++
+		nbPerRank[v.nbBits()]++
 	}
 	// determine starting value per rank
 	{
@@ -557,7 +551,7 @@
 
 	// push nbBits per symbol, symbol order
 	for _, v := range huffNode[:nonNullRank+1] {
-		s.cTable[v.symbol].nBits = v.nbBits
+		s.cTable[v.symbol()].nBits = v.nbBits()
 	}
 
 	// assign value within rank, symbol order
@@ -603,12 +597,12 @@
 		pos := rank[r].current
 		rank[r].current++
 		prev := nodes[(pos-1)&huffNodesMask]
-		for pos > rank[r].base && c > prev.count {
+		for pos > rank[r].base && c > prev.count() {
 			nodes[pos&huffNodesMask] = prev
 			pos--
 			prev = nodes[(pos-1)&huffNodesMask]
 		}
-		nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+		nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
 	}
 }
 
@@ -617,7 +611,7 @@
 	huffNode := s.nodes[1 : huffNodesLen+1]
 	//huffNode = huffNode[: huffNodesLen]
 
-	largestBits := huffNode[lastNonNull].nbBits
+	largestBits := huffNode[lastNonNull].nbBits()
 
 	// early exit : no elt > maxNbBits
 	if largestBits <= maxNbBits {
@@ -627,14 +621,14 @@
 	baseCost := int(1) << (largestBits - maxNbBits)
 	n := uint32(lastNonNull)
 
-	for huffNode[n].nbBits > maxNbBits {
-		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
-		huffNode[n].nbBits = maxNbBits
+	for huffNode[n].nbBits() > maxNbBits {
+		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
+		huffNode[n].setNbBits(maxNbBits)
 		n--
 	}
 	// n stops at huffNode[n].nbBits <= maxNbBits
 
-	for huffNode[n].nbBits == maxNbBits {
+	for huffNode[n].nbBits() == maxNbBits {
 		n--
 	}
 	// n ends at the index of the smallest symbol using < maxNbBits
@@ -655,10 +649,10 @@
 		{
 			currentNbBits := maxNbBits
 			for pos := int(n); pos >= 0; pos-- {
-				if huffNode[pos].nbBits >= currentNbBits {
+				if huffNode[pos].nbBits() >= currentNbBits {
 					continue
 				}
-				currentNbBits = huffNode[pos].nbBits // < maxNbBits
+				currentNbBits = huffNode[pos].nbBits() // < maxNbBits
 				rankLast[maxNbBits-currentNbBits] = uint32(pos)
 			}
 		}
@@ -675,8 +669,8 @@
 				if lowPos == noSymbol {
 					break
 				}
-				highTotal := huffNode[highPos].count
-				lowTotal := 2 * huffNode[lowPos].count
+				highTotal := huffNode[highPos].count()
+				lowTotal := 2 * huffNode[lowPos].count()
 				if highTotal <= lowTotal {
 					break
 				}
@@ -692,13 +686,14 @@
 				// this rank is no longer empty
 				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
 			}
-			huffNode[rankLast[nBitsToDecrease]].nbBits++
+			huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
+				huffNode[rankLast[nBitsToDecrease]].nbBits())
 			if rankLast[nBitsToDecrease] == 0 {
 				/* special case, reached largest symbol */
 				rankLast[nBitsToDecrease] = noSymbol
 			} else {
 				rankLast[nBitsToDecrease]--
-				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+				if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
 					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
 				}
 			}
@@ -706,15 +701,15 @@
 
 		for totalCost < 0 { /* Sometimes, cost correction overshoot */
 			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
-				for huffNode[n].nbBits == maxNbBits {
+				for huffNode[n].nbBits() == maxNbBits {
 					n--
 				}
-				huffNode[n+1].nbBits--
+				huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
 				rankLast[1] = n + 1
 				totalCost++
 				continue
 			}
-			huffNode[rankLast[1]+1].nbBits--
+			huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
 			rankLast[1]++
 			totalCost++
 		}
@@ -722,9 +717,26 @@
 	return maxNbBits
 }
 
-type nodeElt struct {
-	count  uint32
-	parent uint16
-	symbol byte
-	nbBits uint8
+// A nodeElt is the fields
+//
+//	count  uint32
+//	parent uint16
+//	symbol byte
+//	nbBits uint8
+//
+// in some order, all squashed into an integer so that the compiler
+// always loads and stores entire nodeElts instead of separate fields.
+type nodeElt uint64
+
+func makeNodeElt(count uint32, symbol byte) nodeElt {
+	return nodeElt(count) | nodeElt(symbol)<<48
 }
+
+func (e *nodeElt) count() uint32  { return uint32(*e) }
+func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
+func (e *nodeElt) symbol() byte   { return byte(*e >> 48) }
+func (e *nodeElt) nbBits() uint8  { return uint8(*e >> 56) }
+
+func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
+func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
+func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
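A quick standalone round-trip of the nodeElt packing (accessors copied from above; the assumed layout is count in bits 0-31, parent in 32-47, symbol in 48-55, nbBits in 56-63):

package main

import "fmt"

type nodeElt uint64

func makeNodeElt(count uint32, symbol byte) nodeElt {
	return nodeElt(count) | nodeElt(symbol)<<48
}

func (e *nodeElt) count() uint32  { return uint32(*e) }
func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
func (e *nodeElt) symbol() byte   { return byte(*e >> 48) }
func (e *nodeElt) nbBits() uint8  { return uint8(*e >> 56) }

func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }

func main() {
	e := makeNodeElt(1234, 'x')
	e.setParent(42)
	e.setNbBits(7)
	fmt.Println(e.count(), e.parent(), string(e.symbol()), e.nbBits()) // 1234 42 x 7
}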
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index c0c48bd..0f56b02 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -61,7 +61,7 @@
 		b, err := fse.Decompress(in[:iSize], s.fse)
 		s.fse.Out = nil
 		if err != nil {
-			return s, nil, err
+			return s, nil, fmt.Errorf("fse decompress returned: %w", err)
 		}
 		if len(b) > 255 {
 			return s, nil, errors.New("corrupt input: output table too large")
@@ -253,7 +253,7 @@
 
 	switch d.actualTableLog {
 	case 8:
-		const shift = 8 - 8
+		const shift = 0
 		for br.off >= 4 {
 			br.fillFast()
 			v := dt[uint8(br.value>>(56+shift))]
@@ -763,17 +763,20 @@
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -997,17 +1000,22 @@
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must at least be 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			// copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
@@ -1128,7 +1136,7 @@
 			errs++
 		}
 		if errs > 0 {
-			fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
 			continue
 		}
 		// Ensure that all combinations are covered.
@@ -1144,7 +1152,7 @@
 				errs++
 			}
 			if errs > 20 {
-				fmt.Fprintf(w, "%d errros, stopping\n", errs)
+				fmt.Fprintf(w, "%d errors, stopping\n", errs)
 				break
 			}
 		}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 9f3e9f7..ba7e8e6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -14,12 +14,14 @@
 
 // decompress4x_main_loop_amd64 is an x86 assembler implementation
 // of Decompress4X when tablelog > 8.
+//
 //go:noescape
 func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 
 // decompress4x_8b_main_loop_amd64 is an x86 assembler implementation
 // of Decompress4X when tablelog <= 8 which decodes 4 entries
 // per loop.
+//
 //go:noescape
 func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 
@@ -145,11 +147,13 @@
 
 // decompress1x_main_loop_amd64 is an x86 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_amd64(ctx *decompress1xContext)
 
 // decompress1x_main_loop_bmi2 is an x86 with BMI2 assembler implementation
 // of Decompress1X when tablelog > 8.
+//
 //go:noescape
 func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
 
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index dd1a5ae..c4c7ab2 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -1,364 +1,352 @@
 // Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
 
 //go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
 
 // func decompress4x_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_main_loop_amd64(SB), $0-8
-	XORQ DX, DX
-
 	// Preload values
 	MOVQ    ctx+0(FP), AX
 	MOVBQZX 8(AX), DI
-	MOVQ    16(AX), SI
-	MOVQ    48(AX), BX
-	MOVQ    24(AX), R9
-	MOVQ    32(AX), R10
-	MOVQ    (AX), R11
+	MOVQ    16(AX), BX
+	MOVQ    48(AX), SI
+	MOVQ    24(AX), R8
+	MOVQ    32(AX), R9
+	MOVQ    (AX), R10
 
 	// Main loop
 main_loop:
-	MOVQ  SI, R8
-	CMPQ  R8, BX
+	XORL  DX, DX
+	CMPQ  BX, SI
 	SETGE DL
 
 	// br0.fillFast32()
-	MOVQ    32(R11), R12
-	MOVBQZX 40(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    32(R10), R11
+	MOVBQZX 40(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill0
-	MOVQ    24(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    24(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    (R11), R14
+	MOVQ    (R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 24(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 24(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br0.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br0.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill0:
 	// val0 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br0.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	MOVW AX, (BX)
 
 	// update the bitreader structure
-	MOVQ R12, 32(R11)
-	MOVB R13, 40(R11)
-	ADDQ R9, R8
+	MOVQ R11, 32(R10)
+	MOVB R12, 40(R10)
 
 	// br1.fillFast32()
-	MOVQ    80(R11), R12
-	MOVBQZX 88(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    80(R10), R11
+	MOVBQZX 88(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill1
-	MOVQ    72(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    72(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    48(R11), R14
+	MOVQ    48(R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 72(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 72(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br1.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br1.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill1:
 	// val0 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br1.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	MOVW AX, (BX)(R8*1)
 
 	// update the bitreader structure
-	MOVQ R12, 80(R11)
-	MOVB R13, 88(R11)
-	ADDQ R9, R8
+	MOVQ R11, 80(R10)
+	MOVB R12, 88(R10)
 
 	// br2.fillFast32()
-	MOVQ    128(R11), R12
-	MOVBQZX 136(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    128(R10), R11
+	MOVBQZX 136(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill2
-	MOVQ    120(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    120(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    96(R11), R14
+	MOVQ    96(R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 120(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 120(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br2.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br2.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill2:
 	// val0 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br2.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	MOVW AX, (BX)(R8*2)
 
 	// update the bitreader structure
-	MOVQ R12, 128(R11)
-	MOVB R13, 136(R11)
-	ADDQ R9, R8
+	MOVQ R11, 128(R10)
+	MOVB R12, 136(R10)
 
 	// br3.fillFast32()
-	MOVQ    176(R11), R12
-	MOVBQZX 184(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    176(R10), R11
+	MOVBQZX 184(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill3
-	MOVQ    168(R11), AX
-	SUBQ    $0x20, R13
+	MOVQ    168(R10), AX
+	SUBQ    $0x20, R12
 	SUBQ    $0x04, AX
-	MOVQ    144(R11), R14
+	MOVQ    144(R10), R13
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (AX)(R14*1), R14
-	MOVQ R13, CX
-	SHLQ CL, R14
-	MOVQ AX, 168(R11)
-	ORQ  R14, R12
+	MOVL (AX)(R13*1), R13
+	MOVQ R12, CX
+	SHLQ CL, R13
+	MOVQ AX, 168(R10)
+	ORQ  R13, R11
 
-	// exhausted = exhausted || (br3.off < 4)
-	CMPQ  AX, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br3.off < 4)
+	CMPQ AX, $0x04
+	ADCB $+0, DL
 
 skip_fill3:
 	// val0 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br3.peekTopBits(peekBits)
 	MOVQ DI, CX
-	MOVQ R12, R14
-	SHRQ CL, R14
+	MOVQ R11, R13
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v1.entry))
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// these two writes get coalesced
 	// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
-	MOVW AX, (R8)
+	LEAQ (R8)(R8*2), CX
+	MOVW AX, (BX)(CX*1)
 
 	// update the bitreader structure
-	MOVQ  R12, 176(R11)
-	MOVB  R13, 184(R11)
-	ADDQ  $0x02, SI
+	MOVQ  R11, 176(R10)
+	MOVB  R12, 184(R10)
+	ADDQ  $0x02, BX
 	TESTB DL, DL
 	JZ    main_loop
 	MOVQ  ctx+0(FP), AX
-	SUBQ  16(AX), SI
-	SHLQ  $0x02, SI
-	MOVQ  SI, 40(AX)
+	SUBQ  16(AX), BX
+	SHLQ  $0x02, BX
+	MOVQ  BX, 40(AX)
 	RET
 
 // func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
 TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
-	XORQ DX, DX
-
 	// Preload values
 	MOVQ    ctx+0(FP), CX
 	MOVBQZX 8(CX), DI
 	MOVQ    16(CX), BX
 	MOVQ    48(CX), SI
-	MOVQ    24(CX), R9
-	MOVQ    32(CX), R10
-	MOVQ    (CX), R11
+	MOVQ    24(CX), R8
+	MOVQ    32(CX), R9
+	MOVQ    (CX), R10
 
 	// Main loop
 main_loop:
-	MOVQ  BX, R8
-	CMPQ  R8, SI
+	XORL  DX, DX
+	CMPQ  BX, SI
 	SETGE DL
 
 	// br0.fillFast32()
-	MOVQ    32(R11), R12
-	MOVBQZX 40(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    32(R10), R11
+	MOVBQZX 40(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill0
-	MOVQ    24(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    (R11), R15
+	MOVQ    24(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    (R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 24(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 24(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br0.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br0.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill0:
 	// val0 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val2&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br0.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val3&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br0.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -366,88 +354,86 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	MOVL AX, (BX)
 
 	// update the bitreader structure
-	MOVQ R12, 32(R11)
-	MOVB R13, 40(R11)
-	ADDQ R9, R8
+	MOVQ R11, 32(R10)
+	MOVB R12, 40(R10)
 
 	// br1.fillFast32()
-	MOVQ    80(R11), R12
-	MOVBQZX 88(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    80(R10), R11
+	MOVBQZX 88(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill1
-	MOVQ    72(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    48(R11), R15
+	MOVQ    72(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    48(R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 72(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 72(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br1.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br1.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill1:
 	// val0 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val2&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br1.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val3&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br1.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -455,88 +441,86 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	MOVL AX, (BX)(R8*1)
 
 	// update the bitreader structure
-	MOVQ R12, 80(R11)
-	MOVB R13, 88(R11)
-	ADDQ R9, R8
+	MOVQ R11, 80(R10)
+	MOVB R12, 88(R10)
 
 	// br2.fillFast32()
-	MOVQ    128(R11), R12
-	MOVBQZX 136(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    128(R10), R11
+	MOVBQZX 136(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill2
-	MOVQ    120(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    96(R11), R15
+	MOVQ    120(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    96(R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 120(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 120(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br2.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br2.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill2:
 	// val0 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val2&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br2.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val3&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br2.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -544,88 +528,86 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	MOVL AX, (BX)(R8*2)
 
 	// update the bitreader structure
-	MOVQ R12, 128(R11)
-	MOVB R13, 136(R11)
-	ADDQ R9, R8
+	MOVQ R11, 128(R10)
+	MOVB R12, 136(R10)
 
 	// br3.fillFast32()
-	MOVQ    176(R11), R12
-	MOVBQZX 184(R11), R13
-	CMPQ    R13, $0x20
+	MOVQ    176(R10), R11
+	MOVBQZX 184(R10), R12
+	CMPQ    R12, $0x20
 	JBE     skip_fill3
-	MOVQ    168(R11), R14
-	SUBQ    $0x20, R13
-	SUBQ    $0x04, R14
-	MOVQ    144(R11), R15
+	MOVQ    168(R10), R13
+	SUBQ    $0x20, R12
+	SUBQ    $0x04, R13
+	MOVQ    144(R10), R14
 
 	// b.value |= uint64(low) << (b.bitsRead & 63)
-	MOVL (R14)(R15*1), R15
-	MOVQ R13, CX
-	SHLQ CL, R15
-	MOVQ R14, 168(R11)
-	ORQ  R15, R12
+	MOVL (R13)(R14*1), R14
+	MOVQ R12, CX
+	SHLQ CL, R14
+	MOVQ R13, 168(R10)
+	ORQ  R14, R11
 
-	// exhausted = exhausted || (br3.off < 4)
-	CMPQ  R14, $0x04
-	SETLT AL
-	ORB   AL, DL
+	// exhausted += (br3.off < 4)
+	CMPQ R13, $0x04
+	ADCB $+0, DL
 
 skip_fill3:
 	// val0 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v0 := table[val0&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v0.entry)
 	MOVB CH, AL
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val1 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v1 := table[val1&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v1.entry)
 	MOVB   CH, AH
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// val2 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v2 := table[val2&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v2.entry)
 	MOVB CH, AH
-	SHLQ CL, R12
-	ADDB CL, R13
+	SHLQ CL, R11
+	ADDB CL, R12
 
 	// val3 := br3.peekTopBits(peekBits)
-	MOVQ R12, R14
+	MOVQ R11, R13
 	MOVQ DI, CX
-	SHRQ CL, R14
+	SHRQ CL, R13
 
 	// v3 := table[val3&mask]
-	MOVW (R10)(R14*2), CX
+	MOVW (R9)(R13*2), CX
 
 	// br3.advance(uint8(v3.entry)
 	MOVB   CH, AL
-	SHLQ   CL, R12
-	ADDB   CL, R13
+	SHLQ   CL, R11
+	ADDB   CL, R12
 	BSWAPL AX
 
 	// these four writes get coalesced
@@ -633,11 +615,12 @@
 	// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
 	// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
 	// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
-	MOVL AX, (R8)
+	LEAQ (R8)(R8*2), CX
+	MOVL AX, (BX)(CX*1)
 
 	// update the bitreader structure
-	MOVQ  R12, 176(R11)
-	MOVB  R13, 184(R11)
+	MOVQ  R11, 176(R10)
+	MOVB  R12, 184(R10)
 	ADDQ  $0x04, BX
 	TESTB DL, DL
 	JZ    main_loop
@@ -653,7 +636,7 @@
 	MOVQ    16(CX), DX
 	MOVQ    24(CX), BX
 	CMPQ    BX, $0x04
-	JB      error_max_decoded_size_exeeded
+	JB      error_max_decoded_size_exceeded
 	LEAQ    (DX)(BX*1), BX
 	MOVQ    (CX), SI
 	MOVQ    (SI), R8
@@ -668,7 +651,7 @@
 	// Check if we have room for 4 bytes in the output buffer
 	LEAQ 4(DX), CX
 	CMPQ CX, BX
-	JGE  error_max_decoded_size_exeeded
+	JGE  error_max_decoded_size_exceeded
 
 	// Decode 4 values
 	CMPQ R11, $0x20
@@ -745,7 +728,7 @@
 	RET
 
 	// Report error
-error_max_decoded_size_exeeded:
+error_max_decoded_size_exceeded:
 	MOVQ ctx+0(FP), AX
 	MOVQ $-1, CX
 	MOVQ CX, 40(AX)
@@ -758,7 +741,7 @@
 	MOVQ    16(CX), DX
 	MOVQ    24(CX), BX
 	CMPQ    BX, $0x04
-	JB      error_max_decoded_size_exeeded
+	JB      error_max_decoded_size_exceeded
 	LEAQ    (DX)(BX*1), BX
 	MOVQ    (CX), SI
 	MOVQ    (SI), R8
@@ -773,7 +756,7 @@
 	// Check if we have room for 4 bytes in the output buffer
 	LEAQ 4(DX), CX
 	CMPQ CX, BX
-	JGE  error_max_decoded_size_exeeded
+	JGE  error_max_decoded_size_exceeded
 
 	// Decode 4 values
 	CMPQ  R11, $0x20
@@ -840,7 +823,7 @@
 	RET
 
 	// Report error
-error_max_decoded_size_exeeded:
+error_max_decoded_size_exceeded:
 	MOVQ ctx+0(FP), AX
 	MOVQ $-1, CX
 	MOVQ CX, 40(AX)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 4f6f37c..908c17d 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -122,17 +122,21 @@
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 1")
 			}
-			copy(out, buf[0][:])
-			copy(out[dstEvery:], buf[1][:])
-			copy(out[dstEvery*2:], buf[2][:])
-			copy(out[dstEvery*3:], buf[3][:])
-			out = out[bufoff:]
-			decoded += bufoff * 4
 			// There must be at least 3 buffers left.
-			if len(out) < dstEvery*3 {
+			if len(out)-bufoff < dstEvery*3 {
 				d.bufs.Put(buf)
 				return nil, errors.New("corruption detected: stream overrun 2")
 			}
+			//copy(out, buf[0][:])
+			//copy(out[dstEvery:], buf[1][:])
+			//copy(out[dstEvery*2:], buf[2][:])
+			//copy(out[dstEvery*3:], buf[3][:])
+			*(*[bufoff]byte)(out) = buf[0]
+			*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+			*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+			*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+			out = out[bufoff:]
+			decoded += bufoff * 4
 		}
 	}
 	if off > 0 {
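
The reordered bounds check above is what allows the four `copy` calls to become slice-to-array-pointer conversions (Go 1.17+), which compile to fixed-size copies without a per-call bounds check. A minimal sketch of the conversion, with `copyChunk` and `bufoff` as stand-in names; the conversion panics if the slice is shorter than the array, which is exactly what the overrun check guards against:

```go
package main

import "fmt"

const bufoff = 256 // stand-in for huff0's buffer size

// copyChunk writes a fixed-size chunk into out using a
// slice-to-array-pointer conversion (Go 1.17+). The conversion
// panics if len(out) < bufoff, so callers must check bounds first.
func copyChunk(out []byte, buf *[bufoff]byte) {
	*(*[bufoff]byte)(out) = *buf
}

func main() {
	var src [bufoff]byte
	for i := range src {
		src[i] = byte(i)
	}
	dst := make([]byte, bufoff)
	copyChunk(dst, &src)
	fmt.Println(dst[:4]) // [0 1 2 3]
}
```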
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index e8ad17a..77ecd68 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -88,7 +88,7 @@
 	// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
 	MaxDecodedSize int
 
-	br byteReader
+	srcLen int
 
 	// MaxSymbolValue will override the maximum symbol value of the next block.
 	MaxSymbolValue uint8
@@ -170,7 +170,7 @@
 	if s.fse == nil {
 		s.fse = &fse.Scratch{}
 	}
-	s.br.init(in)
+	s.srcLen = len(in)
 
 	return s, nil
 }
diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go
new file mode 100644
index 0000000..e54909e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/le.go
@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
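
The new `Indexer` constraint lets the load/store helpers accept any fixed-size integer as a slice index without conversions at each call site. A self-contained sketch of the same idea (names are illustrative, not from the package):

```go
package main

import "fmt"

// Indexer mirrors le.Indexer: a type set of all integer types,
// so a generic function can index a slice with any of them.
type Indexer interface {
	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}

// load8 indexes b directly with a type parameter; Go permits this
// because every type in the constraint's type set is an integer.
func load8[I Indexer](b []byte, i I) byte { return b[i] }

func main() {
	b := []byte{10, 20, 30}
	fmt.Println(load8(b, uint8(2)), load8(b, int64(0))) // 30 10
}
```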
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
new file mode 100644
index 0000000..0cfb5c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+	"encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+	return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+	return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+	return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+	binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+	binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+	binary.LittleEndian.PutUint64(b, v)
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
new file mode 100644
index 0000000..ada45cd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
@@ -0,0 +1,55 @@
+// We enable 64 bit LE platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+	"unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+	//return b[i]
+	return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+	//return binary.LittleEndian.Uint16(b[i:])
+	//return *(*uint16)(unsafe.Pointer(&b[i]))
+	return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+	//return binary.LittleEndian.Uint32(b[i:])
+	//return *(*uint32)(unsafe.Pointer(&b[i]))
+	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+	//return binary.LittleEndian.Uint64(b[i:])
+	//return *(*uint64)(unsafe.Pointer(&b[i]))
+	return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+	//binary.LittleEndian.PutUint16(b, v)
+	*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+	//binary.LittleEndian.PutUint32(b, v)
+	*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+	//binary.LittleEndian.PutUint64(b, v)
+	*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
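
On the listed 64-bit little-endian targets these helpers replace `encoding/binary` calls with a single pointer add via `unsafe.SliceData` (Go 1.20+), avoiding slice-header construction and bounds checks. Since `le` is an internal package, here is a self-contained sketch of the same trick with a stand-in `load32`; the caller must guarantee `i+4 <= len(b)`, since nothing here checks it:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// load32 mirrors le.Load32 on unsafe-enabled builds: one pointer add
// instead of a bounds-checked reslice. Caller guarantees i+4 <= len(b).
func load32(b []byte, i int) uint32 {
	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12, 0xff}
	// Equal to the encoding/binary result only on little-endian
	// hardware, which is exactly what the build tag restricts to.
	fmt.Printf("%#x %v\n", load32(b, 0), load32(b, 1) == binary.LittleEndian.Uint32(b[1:]))
}
```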
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba6..2754bac 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@
 // emitLiteral writes a literal chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= len(lit) && len(lit) <= 65536
 func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@
 // emitCopy writes a copy chunk and returns the number of bytes written.
 //
 // It assumes that:
+//
 //	dst is long enough to hold the encoded bytes
 //	1 <= offset && offset <= 65535
 //	4 <= length && length <= 65535
@@ -49,7 +51,7 @@
 	i := 0
 	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
 	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
-	// length emitted down below is is a little lower (at 60 = 64 - 4), because
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
 	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
 	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
 	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
@@ -85,28 +87,40 @@
 	return i + 2
 }
 
-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-//	0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
-	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
-	}
-	return j
-}
-
 func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }
 
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+	if MaxEncodedLen(len(src)) > len(dst) {
+		return 0
+	}
+
+	// encodeBlock breaks on too big blocks, so split.
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return d
+}
+
 // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
 // assumes that the varint-encoded length of the decompressed bytes has already
 // been written.
 //
 // It also assumes that:
+//
 //	len(dst) >= MaxEncodedLen(len(src)) &&
-// 	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
 func encodeBlock(dst, src []byte) (d int) {
 	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
 	// The table element type is uint16, as s < sLimit and sLimit < len(src)
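
`EncodeBlockInto` is new: it validates `dst` against `MaxEncodedLen`, then splits oversized input because `encodeBlock` only accepts up to `maxBlockSize` bytes, handing tails too short to match to `emitLiteral`. A self-contained model of just the splitting loop; the constants mirror snapref's values, hedged here since the package is internal:

```go
package main

import "fmt"

const (
	maxBlockSize           = 65536 // snapref's per-block limit
	minNonLiteralBlockSize = 17    // below this, matching can't win
)

// splitBlocks models the chunking loop in EncodeBlockInto: peel off
// maxBlockSize pieces and count tails too short to be worth matching,
// which the real code hands to emitLiteral.
func splitBlocks(src []byte) (blocks, literals int) {
	for len(src) > 0 {
		p := src
		src = nil
		if len(p) > maxBlockSize {
			p, src = p[:maxBlockSize], p[maxBlockSize:]
		}
		if len(p) < minNonLiteralBlockSize {
			literals++
		} else {
			blocks++
		}
	}
	return
}

func main() {
	fmt.Println(splitBlocks(make([]byte, 2*maxBlockSize+5))) // 2 1
}
```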
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 2263853..81bda5e 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,3 @@
 module github.com/klauspost/compress
 
-go 1.16
-
+go 1.22
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index beb7fa8..c11d7fa 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -6,12 +6,14 @@
 
 This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. 
 
-This package is pure Go and without use of "unsafe". 
+This package is pure Go. Use the `noasm` and `nounsafe` build tags to disable the relevant features.
 
 The `zstd` package is provided as open source software using a Go standard license.
 
 Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
 
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
 ## Installation
 
 Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -257,7 +259,7 @@
 
 ## Decompressor
 
-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
 
 This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
 kindly supplied by [fuzzit.dev](https://fuzzit.dev/). 
@@ -302,7 +304,7 @@
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
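
The corrected snippet now qualifies `WithDecoderConcurrency` with the `zstd` package name, as required outside the package. For context, a runnable round-trip using the same stateless API (a sketch; output comment is what I'd expect, not taken from the README):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("hello zstd"), nil)
	_ = enc.Close()

	// Concurrency 0 caches decompressors for stateless DecodeAll use.
	dec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
	defer dec.Close()

	out, err := dec.DecodeAll(frame, nil)
	fmt.Println(string(out), err) // hello zstd <nil>
}
```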
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 97299d4..d41e3e1 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,11 +5,12 @@
 package zstd
 
 import (
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io"
 	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // bitReader reads a bitstream in reverse.
@@ -17,8 +18,8 @@
 // for aligning the input.
 type bitReader struct {
 	in       []byte
-	off      uint   // next byte to read is at in[off - 1]
 	value    uint64 // Maybe use [16]byte, but shifting is awkward.
+	cursor   int    // offset where next read should end
 	bitsRead uint8
 }
 
@@ -28,12 +29,12 @@
 		return errors.New("corrupt stream: too short")
 	}
 	b.in = in
-	b.off = uint(len(in))
 	// The highest bit of the last byte indicates where to start
 	v := in[len(in)-1]
 	if v == 0 {
 		return errors.New("corrupt stream, did not find end of stream")
 	}
+	b.cursor = len(in)
 	b.bitsRead = 64
 	b.value = 0
 	if len(in) >= 8 {
@@ -69,21 +70,16 @@
 	if b.bitsRead < 32 {
 		return
 	}
-	// 2 bounds checks.
-	v := b.in[b.off-4:]
-	v = v[:4]
-	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-	b.value = (b.value << 32) | uint64(low)
+	b.cursor -= 4
+	b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
 	b.bitsRead -= 32
-	b.off -= 4
 }
 
 // fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
 func (b *bitReader) fillFastStart() {
-	// Do single re-slice to avoid bounds checks.
-	b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+	b.cursor -= 8
+	b.value = le.Load64(b.in, b.cursor)
 	b.bitsRead = 0
-	b.off -= 8
 }
 
 // fill() will make sure at least 32 bits are available.
@@ -91,25 +87,23 @@
 	if b.bitsRead < 32 {
 		return
 	}
-	if b.off >= 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
-		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-		b.value = (b.value << 32) | uint64(low)
+	if b.cursor >= 4 {
+		b.cursor -= 4
+		b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
 		b.bitsRead -= 32
-		b.off -= 4
 		return
 	}
-	for b.off > 0 {
-		b.value = (b.value << 8) | uint64(b.in[b.off-1])
-		b.bitsRead -= 8
-		b.off--
+
+	b.bitsRead -= uint8(8 * b.cursor)
+	for b.cursor > 0 {
+		b.cursor -= 1
+		b.value = (b.value << 8) | uint64(b.in[b.cursor])
 	}
 }
 
 // finished returns true if all bits have been read from the bit stream.
 func (b *bitReader) finished() bool {
-	return b.off == 0 && b.bitsRead >= 64
+	return b.cursor == 0 && b.bitsRead >= 64
 }
 
 // overread returns true if more bits have been requested than is on the stream.
@@ -119,13 +113,14 @@
 
 // remain returns the number of bits remaining.
 func (b *bitReader) remain() uint {
-	return b.off*8 + 64 - uint(b.bitsRead)
+	return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
 }
 
 // close the bitstream and returns an error if out-of-buffer reads occurred.
 func (b *bitReader) close() error {
 	// Release reference.
 	b.in = nil
+	b.cursor = 0
 	if !b.finished() {
 		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
 	}
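
The rewritten `fill` now accounts for all remaining bytes in `bitsRead` before the byte-wise tail loop runs, rather than adjusting it inside the loop. A standalone model of that tail path, using a hypothetical `fillTail` helper and assuming fewer than 4 bytes remain:

```go
package main

import "fmt"

// fillTail models the tail of bitReader.fill when fewer than 4 bytes
// remain: charge bitsRead for all remaining cursor bytes up front,
// then fold the bytes into value most-significant-last, as the
// patched version does.
func fillTail(value uint64, bitsRead uint8, in []byte, cursor int) (uint64, uint8, int) {
	bitsRead -= uint8(8 * cursor)
	for cursor > 0 {
		cursor--
		value = (value << 8) | uint64(in[cursor])
	}
	return value, bitsRead, cursor
}

func main() {
	v, br, cur := fillTail(0, 64, []byte{0xab, 0xcd}, 2)
	fmt.Printf("%#x %d %d\n", v, br, cur) // 0xcdab 48 0
}
```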
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 78b3c61..1952f17 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -97,12 +97,11 @@
 
 // close will write the alignment bit and write the final byte(s)
 // to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
 	// End mark
 	b.addBits16Clean(1, 1)
 	// flush until next byte.
 	b.flushAlign()
-	return nil
 }
 
 // reset and continue writing by appending to out.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7eed729..0dd742f 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,14 +5,10 @@
 package zstd
 
 import (
-	"bytes"
-	"encoding/binary"
 	"errors"
 	"fmt"
+	"hash/crc32"
 	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
 	"sync"
 
 	"github.com/klauspost/compress/huff0"
@@ -83,8 +79,9 @@
 
 	err error
 
-	// Check against this crc
-	checkCRC []byte
+	// Check against this crc, if hasCRC is true.
+	checkCRC uint32
+	hasCRC   bool
 
 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.
@@ -192,16 +189,14 @@
 	}
 
 	// Read block data.
-	if cap(b.dataStorage) < cSize {
+	if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+		// byteBuf doesn't need a destination buffer.
 		if b.lowMem || cSize > maxCompressedBlockSize {
 			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
 		} else {
 			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
 		}
 	}
-	if cap(b.dst) <= maxSize {
-		b.dst = make([]byte, 0, maxSize+1)
-	}
 	b.data, err = br.readBig(cSize, b.dataStorage)
 	if err != nil {
 		if debugDecoder {
@@ -210,6 +205,9 @@
 		}
 		return err
 	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
 	return nil
 }
 
@@ -233,7 +231,7 @@
 			if b.lowMem {
 				b.dst = make([]byte, b.RLESize)
 			} else {
-				b.dst = make([]byte, maxBlockSize)
+				b.dst = make([]byte, maxCompressedBlockSize)
 			}
 		}
 		b.dst = b.dst[:b.RLESize]
@@ -441,6 +439,9 @@
 			}
 		}
 		var err error
+		if debugDecoder {
+			println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+		}
 		huff, literals, err = huff0.ReadTable(literals, huff)
 		if err != nil {
 			println("reading huffman table:", err)
@@ -549,6 +550,9 @@
 		if debugDecoder {
 			printf("Compression modes: 0b%b", compMode)
 		}
+		if compMode&3 != 0 {
+			return errors.New("corrupt block: reserved bits not zero")
+		}
 		for i := uint(0); i < 3; i++ {
 			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
 			if debugDecoder {
@@ -587,10 +591,12 @@
 				}
 				seq.fse.setRLE(symb)
 				if debugDecoder {
-					printf("RLE set to %+v, code: %v", symb, v)
+					printf("RLE set to 0x%x, code: %v", symb, v)
 				}
 			case compModeFSE:
-				println("Reading table for", tableIndex(i))
+				if debugDecoder {
+					println("Reading table for", tableIndex(i))
+				}
 				if seq.fse == nil || seq.fse.preDefined {
 					seq.fse = fseDecoderPool.Get().(*fseDecoder)
 				}
@@ -638,21 +644,6 @@
 		println("initializing sequences:", err)
 		return err
 	}
-	// Extract blocks...
-	if false && hist.dict == nil {
-		fatalErr := func(err error) {
-			if err != nil {
-				panic(err)
-			}
-		}
-		fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
-		var buf bytes.Buffer
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
-		fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
-		buf.Write(in)
-		ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
-	}
 
 	return nil
 }
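
The new `compMode&3 != 0` guard rejects the two reserved low bits of the sequences-section compression-modes byte before the mode loop runs. A self-contained sketch of the byte layout and the check, with `decodeCompModes` as a hypothetical helper mirroring the loop above:

```go
package main

import "fmt"

type seqCompMode uint8

// decodeCompModes splits the sequences-section byte into its three
// 2-bit mode fields (literal lengths, offsets, match lengths),
// rejecting the reserved low bits as the patched decoder now does.
func decodeCompModes(compMode byte) ([3]seqCompMode, error) {
	if compMode&3 != 0 {
		return [3]seqCompMode{}, fmt.Errorf("corrupt block: reserved bits not zero")
	}
	var m [3]seqCompMode
	for i := uint(0); i < 3; i++ {
		m[i] = seqCompMode((compMode >> (6 - i*2)) & 3)
	}
	return m, nil
}

func main() {
	fmt.Println(decodeCompModes(0b10_01_00_00)) // [2 1 0] <nil>
	fmt.Println(decodeCompModes(0b00_00_00_01)) // reserved bits set: error
}
```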
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 12e8f6f..fd35ea1 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -9,6 +9,7 @@
 	"fmt"
 	"math"
 	"math/bits"
+	"slices"
 
 	"github.com/klauspost/compress/huff0"
 )
@@ -361,14 +362,21 @@
 	if len(lits) >= 1024 {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
-	} else if len(lits) > 32 {
+	} else if len(lits) > 16 {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
 	} else {
 		err = huff0.ErrIncompressible
 	}
-
+	if err == nil && len(out)+5 > len(lits) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSizes(len(out), len(lits), single)
+		if len(out)+lh.size() >= len(lits) {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		if debugEncoder {
@@ -420,6 +428,16 @@
 	return nil
 }
 
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+	var bh blockHeader
+	bh.setLast(b.last)
+	bh.setSize(length)
+	bh.setType(blockTypeRLE)
+	b.output = bh.appendTo(b.output)
+	b.output = append(b.output, val)
+}
+
 // fuzzFseEncoder can be used to fuzz the FSE encoder.
 func fuzzFseEncoder(data []byte) int {
 	if len(data) > maxSequences || len(data) < 2 {
@@ -440,16 +458,7 @@
 		// All 0
 		return 0
 	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
-	cnt := maxCount(hist[:maxSym])
+	cnt := int(slices.Max(hist[:maxSym]))
 	if cnt == len(data) {
 		// RLE
 		return 0
@@ -472,8 +481,18 @@
 	if len(b.sequences) == 0 {
 		return b.encodeLits(b.literals, rawAllLits)
 	}
+	if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+		// Check common RLE cases.
+		seq := b.sequences[0]
+		if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+			// Offset == 1 and 0 or 1 literals.
+			b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+			return nil
+		}
+	}
+
 	// We want some difference to at least account for the headers.
-	saved := b.size - len(b.literals) - (b.size >> 5)
+	saved := b.size - len(b.literals) - (b.size >> 6)
 	if saved < 16 {
 		if org == nil {
 			return errIncompressible
@@ -503,7 +522,7 @@
 	if len(b.literals) >= 1024 && !raw {
 		// Use 4 Streams.
 		out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
-	} else if len(b.literals) > 32 && !raw {
+	} else if len(b.literals) > 16 && !raw {
 		// Use 1 stream
 		single = true
 		out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +530,17 @@
 		err = huff0.ErrIncompressible
 	}
 
+	if err == nil && len(out)+5 > len(b.literals) {
+		// If we are close, we may still be worse or equal to raw.
+		var lh literalsHeader
+		lh.setSize(len(b.literals))
+		szRaw := lh.size()
+		lh.setSizes(len(out), len(b.literals), single)
+		szComp := lh.size()
+		if len(out)+szComp >= len(b.literals)+szRaw {
+			err = huff0.ErrIncompressible
+		}
+	}
 	switch err {
 	case huff0.ErrIncompressible:
 		lh.setType(literalsBlockRaw)
@@ -773,16 +803,16 @@
 	ml.flush(mlEnc.actualTableLog)
 	of.flush(ofEnc.actualTableLog)
 	ll.flush(llEnc.actualTableLog)
-	err = wr.close()
-	if err != nil {
-		return err
-	}
+	wr.close()
 	b.output = wr.out
 
+	// Maybe even add a bigger margin.
 	if len(b.output)-3-bhOffset >= b.size {
-		// Maybe even add a bigger margin.
+		// Discard and encode as raw block.
+		b.output = b.encodeRawTo(b.output[:bhOffset], org)
+		b.popOffsets()
 		b.litEnc.Reuse = huff0.ReusePolicyNone
-		return errIncompressible
+		return nil
 	}
 
 	// Size is output minus block header.
@@ -846,15 +876,6 @@
 			}
 		}
 	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
 	if debugAsserts && mlMax > maxMatchLengthSymbol {
 		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
 	}
@@ -865,7 +886,7 @@
 		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
 	}
 
-	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
-	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
-	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+	b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+	b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+	b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
 }
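
`slices.Max` (Go 1.21) replaces the hand-rolled `maxCount` closures removed above. One behavioral note worth a sketch: it panics on an empty slice, which these call sites never pass since the histograms are always sliced to at least one element:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	hist := []uint32{3, 9, 1, 9, 4}
	// Drop-in for the removed maxCount helper. slices.Max panics on an
	// empty slice, which these call sites avoid by construction
	// (e.g. mlH[:mlMax+1] always has at least one element).
	fmt.Println(int(slices.Max(hist))) // 9
}
```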
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 2ad0207..55a3885 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 )
 
 type byteBuffer interface {
@@ -55,7 +54,7 @@
 func (b *byteBuf) readByte() (byte, error) {
 	bb := *b
 	if len(bb) < 1 {
-		return 0, nil
+		return 0, io.ErrUnexpectedEOF
 	}
 	r := bb[0]
 	*b = bb[1:]
@@ -110,7 +109,7 @@
 }
 
 func (r *readerWrapper) readByte() (byte, error) {
-	n2, err := r.r.Read(r.tmp[:1])
+	n2, err := io.ReadFull(r.r, r.tmp[:1])
 	if err != nil {
 		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
@@ -124,7 +123,7 @@
 }
 
 func (r *readerWrapper) skipN(n int64) error {
-	n2, err := io.CopyN(ioutil.Discard, r.r, n)
+	n2, err := io.CopyN(io.Discard, r.r, n)
 	if n2 != n {
 		err = io.ErrUnexpectedEOF
 	}
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 5022e71..6a5a298 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -4,7 +4,6 @@
 package zstd
 
 import (
-	"bytes"
 	"encoding/binary"
 	"errors"
 	"io"
@@ -96,42 +95,54 @@
 // If there isn't enough input, io.ErrUnexpectedEOF is returned.
 // The FirstBlock.OK will indicate if enough information was available to decode the first block header.
 func (h *Header) Decode(in []byte) error {
+	_, err := h.DecodeAndStrip(in)
+	return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
 	*h = Header{}
 	if len(in) < 4 {
-		return io.ErrUnexpectedEOF
+		return nil, io.ErrUnexpectedEOF
 	}
 	h.HeaderSize += 4
 	b, in := in[:4], in[4:]
-	if !bytes.Equal(b, frameMagic) {
-		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
-			return ErrMagicMismatch
+	if string(b) != frameMagic {
+		if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
+			return nil, ErrMagicMismatch
 		}
 		if len(in) < 4 {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		h.HeaderSize += 4
 		h.Skippable = true
 		h.SkippableID = int(b[0] & 0xf)
 		h.SkippableSize = binary.LittleEndian.Uint32(in)
-		return nil
+		return in[4:], nil
 	}
 
 	// Read Window_Descriptor
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
 	if len(in) < 1 {
-		return io.ErrUnexpectedEOF
+		return nil, io.ErrUnexpectedEOF
 	}
 	fhd, in := in[0], in[1:]
 	h.HeaderSize++
 	h.SingleSegment = fhd&(1<<5) != 0
 	h.HasCheckSum = fhd&(1<<2) != 0
 	if fhd&(1<<3) != 0 {
-		return errors.New("reserved bit set on frame header")
+		return nil, errors.New("reserved bit set on frame header")
 	}
 
 	if !h.SingleSegment {
 		if len(in) < 1 {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		var wd byte
 		wd, in = in[0], in[1:]
@@ -149,11 +160,11 @@
 			size = 4
 		}
 		if len(in) < int(size) {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		b, in = in[:size], in[size:]
 		h.HeaderSize += int(size)
-		switch size {
+		switch len(b) {
 		case 1:
 			h.DictionaryID = uint32(b[0])
 		case 2:
@@ -179,11 +190,11 @@
 	if fcsSize > 0 {
 		h.HasFCS = true
 		if len(in) < fcsSize {
-			return io.ErrUnexpectedEOF
+			return nil, io.ErrUnexpectedEOF
 		}
 		b, in = in[:fcsSize], in[fcsSize:]
 		h.HeaderSize += int(fcsSize)
-		switch fcsSize {
+		switch len(b) {
 		case 1:
 			h.FrameContentSize = uint64(b[0])
 		case 2:
@@ -200,7 +211,7 @@
 
 	// Frame Header done, we will not fail from now on.
 	if len(in) < 3 {
-		return nil
+		return in, nil
 	}
 	tmp := in[:3]
 	bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
@@ -210,7 +221,7 @@
 	cSize := int(bh >> 3)
 	switch blockType {
 	case blockTypeReserved:
-		return nil
+		return in, nil
 	case blockTypeRLE:
 		h.FirstBlock.Compressed = true
 		h.FirstBlock.DecompressedSize = cSize
@@ -226,5 +237,25 @@
 	}
 
 	h.FirstBlock.OK = true
-	return nil
+	return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+	if h.Skippable {
+		magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+		magic[0] |= byte(h.SkippableID & 0xf)
+		dst = append(dst, magic[:]...)
+		f := h.SkippableSize
+		return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+	}
+	f := frameHeader{
+		ContentSize:   h.FrameContentSize,
+		WindowSize:    uint32(h.WindowSize),
+		SingleSegment: h.SingleSegment,
+		Checksum:      h.HasCheckSum,
+		DictID:        h.DictionaryID,
+	}
+	return f.appendTo(dst), nil
 }
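
`DecodeAndStrip` is now the primitive and `Decode` wraps it, discarding the remainder. A hedged round-trip sketch; the printed values depend on encoder defaults:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("payload"), nil)
	_ = enc.Close()

	var h zstd.Header
	remain, err := h.DecodeAndStrip(frame)
	if err != nil {
		panic(err)
	}
	// remain points just past the frame header; the first block
	// header (if present) has been parsed into h.FirstBlock.
	fmt.Println(h.HasCheckSum, h.FirstBlock.OK, len(remain))
}
```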
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index d212f47..ea2a193 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,7 +5,6 @@
 package zstd
 
 import (
-	"bytes"
 	"context"
 	"encoding/binary"
 	"io"
@@ -35,13 +34,13 @@
 		br           readerWrapper
 		enabled      bool
 		inFrame      bool
+		dstBuf       []byte
 	}
 
 	frame *frameDec
 
 	// Custom dictionaries.
-	// Always uses copies.
-	dicts map[uint32]dict
+	dicts map[uint32]*dict
 
 	// streamWg is the waitgroup for all streams
 	streamWg sync.WaitGroup
@@ -83,7 +82,7 @@
 // can run multiple concurrent stateless decodes. It is even possible to
 // use stateless decodes while a stream is being decoded.
 //
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
 // reduce the allocations normally caused by NewReader.
 func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
 	initPredefined()
@@ -103,7 +102,7 @@
 	}
 
 	// Transfer option dicts.
-	d.dicts = make(map[uint32]dict, len(d.o.dicts))
+	d.dicts = make(map[uint32]*dict, len(d.o.dicts))
 	for _, dc := range d.o.dicts {
 		d.dicts[dc.id] = dc
 	}
@@ -124,7 +123,7 @@
 }
 
 // Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
 // When the stream is done, io.EOF will be returned.
 func (d *Decoder) Read(p []byte) (int, error) {
 	var n int
@@ -187,21 +186,23 @@
 	}
 
 	// If bytes buffer and < 5MB, do sync decoding anyway.
-	if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+	if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
 		bb2 := bb
 		if debugDecoder {
 			println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
 		}
 		b := bb2.Bytes()
 		var dst []byte
-		if cap(d.current.b) > 0 {
-			dst = d.current.b
+		if cap(d.syncStream.dstBuf) > 0 {
+			dst = d.syncStream.dstBuf[:0]
 		}
 
-		dst, err := d.DecodeAll(b, dst[:0])
+		dst, err := d.DecodeAll(b, dst)
 		if err == nil {
 			err = io.EOF
 		}
+		// Save output buffer
+		d.syncStream.dstBuf = dst
 		d.current.b = dst
 		d.current.err = err
 		d.current.flushed = true
@@ -216,6 +217,7 @@
 	d.current.err = nil
 	d.current.flushed = false
 	d.current.d = nil
+	d.syncStream.dstBuf = nil
 
 	// Ensure no-one else is still running...
 	d.streamWg.Wait()
@@ -312,6 +314,7 @@
 	// Grab a block decoder and frame decoder.
 	block := <-d.decoders
 	frame := block.localFrame
+	initialSize := len(dst)
 	defer func() {
 		if debugDecoder {
 			printf("re-adding decoder: %p", block)
@@ -320,6 +323,7 @@
 		frame.bBuf = nil
 		if frame.history.decoders.br != nil {
 			frame.history.decoders.br.in = nil
+			frame.history.decoders.br.cursor = 0
 		}
 		d.decoders <- block
 	}()
@@ -337,15 +341,8 @@
 			}
 			return dst, err
 		}
-		if frame.DictionaryID != nil {
-			dict, ok := d.dicts[*frame.DictionaryID]
-			if !ok {
-				return nil, ErrUnknownDictionary
-			}
-			if debugDecoder {
-				println("setting dict", frame.DictionaryID)
-			}
-			frame.history.setDict(&dict)
+		if err = d.setDict(frame); err != nil {
+			return nil, err
 		}
 		if frame.WindowSize > d.o.maxWindowSize {
 			if debugDecoder {
@@ -354,7 +351,16 @@
 			return dst, ErrWindowSizeExceeded
 		}
 		if frame.FrameContentSize != fcsUnknown {
-			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+			if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+				}
+				return dst, ErrDecoderSizeExceeded
+			}
+			if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+				if debugDecoder {
+					println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+				}
 				return dst, ErrDecoderSizeExceeded
 			}
 			if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +370,7 @@
 			}
 		}
 
-		if cap(dst) == 0 {
+		if cap(dst) == 0 && !d.o.limitToCap {
 			// Allocate len(input) * 2 by default if nothing is provided
 			// and we didn't get frame content size.
 			size := len(input) * 2
@@ -382,6 +388,9 @@
 		if err != nil {
 			return dst, err
 		}
+		if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+			return dst, ErrDecoderSizeExceeded
+		}
 		if len(frame.bBuf) == 0 {
 			if debugDecoder {
 				println("frame dbuf empty")
@@ -442,26 +451,23 @@
 		println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
 	}
 
-	if !d.o.ignoreChecksum && len(next.b) > 0 {
-		n, err := d.current.crc.Write(next.b)
-		if err == nil {
-			if n != len(next.b) {
-				d.current.err = io.ErrShortWrite
-			}
-		}
+	if d.o.ignoreChecksum {
+		return true
 	}
-	if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
-		got := d.current.crc.Sum64()
-		var tmp [4]byte
-		binary.LittleEndian.PutUint32(tmp[:], uint32(got))
-		if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
+
+	if len(next.b) > 0 {
+		d.current.crc.Write(next.b)
+	}
+	if next.err == nil && next.d != nil && next.d.hasCRC {
+		got := uint32(d.current.crc.Sum64())
+		if got != next.d.checkCRC {
 			if debugDecoder {
-				println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+				printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
 			}
 			d.current.err = ErrCRCMismatch
 		} else {
 			if debugDecoder {
-				println("CRC ok", tmp[:])
+				printf("CRC ok %08x\n", got)
 			}
 		}
 	}
@@ -477,18 +483,12 @@
 		if !d.syncStream.inFrame {
 			d.frame.history.reset()
 			d.current.err = d.frame.reset(&d.syncStream.br)
+			if d.current.err == nil {
+				d.current.err = d.setDict(d.frame)
+			}
 			if d.current.err != nil {
 				return false
 			}
-			if d.frame.DictionaryID != nil {
-				dict, ok := d.dicts[*d.frame.DictionaryID]
-				if !ok {
-					d.current.err = ErrUnknownDictionary
-					return false
-				} else {
-					d.frame.history.setDict(&dict)
-				}
-			}
 			if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
 				d.current.err = ErrDecoderSizeExceeded
 				return false
@@ -667,6 +667,7 @@
 				if debugDecoder {
 					println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
 				}
+				hist.reset()
 				hist.decoders = block.async.newHist.decoders
 				hist.recentOffsets = block.async.newHist.recentOffsets
 				hist.windowSize = block.async.newHist.windowSize
@@ -698,6 +699,7 @@
 			seqExecute <- block
 		}
 		close(seqExecute)
+		hist.reset()
 	}()
 
 	var wg sync.WaitGroup
@@ -721,6 +723,7 @@
 				if debugDecoder {
 					println("Async 2: new history")
 				}
+				hist.reset()
 				hist.windowSize = block.async.newHist.windowSize
 				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
 				if block.async.newHist.dict != nil {
@@ -750,7 +753,7 @@
 					if block.lowMem {
 						block.dst = make([]byte, block.RLESize)
 					} else {
-						block.dst = make([]byte, maxBlockSize)
+						block.dst = make([]byte, maxCompressedBlockSize)
 					}
 				}
 				block.dst = block.dst[:block.RLESize]
@@ -802,13 +805,14 @@
 		if debugDecoder {
 			println("decoder goroutines finished")
 		}
+		hist.reset()
 	}()
 
+	var hist history
 decodeStream:
 	for {
-		var hist history
 		var hasErr bool
-
+		hist.reset()
 		decodeBlock := func(block *blockDec) {
 			if hasErr {
 				if block != nil {
@@ -843,15 +847,14 @@
 		if debugDecoder && err != nil {
 			println("Frame decoder returned", err)
 		}
-		if err == nil && frame.DictionaryID != nil {
-			dict, ok := d.dicts[*frame.DictionaryID]
-			if !ok {
-				err = ErrUnknownDictionary
-			} else {
-				frame.history.setDict(&dict)
-			}
+		if err == nil {
+			err = d.setDict(frame)
 		}
 		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+			if debugDecoder {
+				println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+			}
+
 			err = ErrDecoderSizeExceeded
 		}
 		if err != nil {
@@ -893,18 +896,22 @@
 				println("next block returned error:", err)
 			}
 			dec.err = err
-			dec.checkCRC = nil
+			dec.hasCRC = false
 			if dec.Last && frame.HasCheckSum && err == nil {
 				crc, err := frame.rawInput.readSmall(4)
-				if err != nil {
+				if len(crc) < 4 {
+					if err == nil {
+						err = io.ErrUnexpectedEOF
+					}
 					println("CRC missing?", err)
 					dec.err = err
-				}
-				var tmp [4]byte
-				copy(tmp[:], crc)
-				dec.checkCRC = tmp[:]
-				if debugDecoder {
-					println("found crc to check:", dec.checkCRC)
+				} else {
+					dec.checkCRC = binary.LittleEndian.Uint32(crc)
+					dec.hasCRC = true
+					if debugDecoder {
+						printf("found crc to check: %08x\n", dec.checkCRC)
+					}
 				}
 			}
 			err = dec.err
@@ -920,5 +927,23 @@
 	}
 	close(seqDecode)
 	wg.Wait()
+	hist.reset()
 	d.frame.history.b = frameHistCache
 }
+
+func (d *Decoder) setDict(frame *frameDec) (err error) {
+	dict, ok := d.dicts[frame.DictionaryID]
+	if ok {
+		if debugDecoder {
+			println("setting dict", frame.DictionaryID)
+		}
+		frame.history.setDict(dict)
+	} else if frame.DictionaryID != 0 {
+		// A zero or missing dictionary id is ambiguous:
+		// either dictionary zero, or no dictionary. In particular,
+		// zstd --patch-from uses this id for the source file,
+		// so only return an error if the dictionary id is not zero.
+		err = ErrUnknownDictionary
+	}
+	return err
+}
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index c70e6fa..774c5f0 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -6,6 +6,8 @@
 
 import (
 	"errors"
+	"fmt"
+	"math/bits"
 	"runtime"
 )
 
@@ -14,20 +16,23 @@
 
 // options retains accumulated state of multiple options.
 type decoderOptions struct {
-	lowMem         bool
-	concurrent     int
-	maxDecodedSize uint64
-	maxWindowSize  uint64
-	dicts          []dict
-	ignoreChecksum bool
+	lowMem          bool
+	concurrent      int
+	maxDecodedSize  uint64
+	maxWindowSize   uint64
+	dicts           []*dict
+	ignoreChecksum  bool
+	limitToCap      bool
+	decodeBufsBelow int
 }
 
 func (o *decoderOptions) setDefault() {
 	*o = decoderOptions{
 		// use less ram: true for now, but may change.
-		lowMem:        true,
-		concurrent:    runtime.GOMAXPROCS(0),
-		maxWindowSize: MaxWindowSize,
+		lowMem:          true,
+		concurrent:      runtime.GOMAXPROCS(0),
+		maxWindowSize:   MaxWindowSize,
+		decodeBufsBelow: 128 << 10,
 	}
 	if o.concurrent > 4 {
 		o.concurrent = 4
@@ -82,7 +87,13 @@
 }
 
 // WithDecoderDicts allows registering one or more dictionaries for the decoder.
-// If several dictionaries with the same ID is provided the last one will be used.
+//
+// Each slice in dicts must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithDecoderDicts(dicts ...[]byte) DOption {
 	return func(o *decoderOptions) error {
 		for _, b := range dicts {
@@ -90,12 +101,24 @@
 			if err != nil {
 				return err
 			}
-			o.dicts = append(o.dicts, *d)
+			o.dicts = append(o.dicts, d)
 		}
 		return nil
 	}
 }
 
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+	return func(o *decoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+		return nil
+	}
+}
+
+// WithDecoderMaxWindow allows setting a maximum window size for decodes.
 // This allows rejecting packets that will cause big memory usage.
 // The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
@@ -114,6 +137,29 @@
 	}
 }
 
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+	return func(o *decoderOptions) error {
+		o.limitToCap = b
+		return nil
+	}
+}
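A usage sketch, assuming the caller owns the destination buffer:

package zstddemo

import "github.com/klauspost/compress/zstd"

// decodeInto caps output at cap(dst)-len(dst); frames that would exceed the
// cap are expected to fail (ErrDecoderSizeExceeded in current releases).
func decodeInto(dst, compressed []byte) ([]byte, error) {
	dec, err := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	if err != nil {
		return nil, err
	}
	defer dec.Close()
	return dec.DecodeAll(compressed, dst)
}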
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses fewer allocations, but keeps the full decompressed object in memory.
+// Note that enabling WithDecodeAllCapLimit disables this, as does setting a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+	return func(o *decoderOptions) error {
+		o.decodeBufsBelow = size
+		return nil
+	}
+}
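The threshold is tunable; a short sketch lowering it from the 128KiB default:

package zstddemo

import "github.com/klauspost/compress/zstd"

// newSmallBufDecoder fully decodes buffer-backed readers only below 64KiB.
func newSmallBufDecoder() (*zstd.Decoder, error) {
	return zstd.NewReader(nil, zstd.WithDecodeBuffersBelow(64<<10))
}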
+
 // IgnoreChecksum allows forcibly ignoring checksum checking.
 func IgnoreChecksum(b bool) DOption {
 	return func(o *decoderOptions) error {
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index a36ae83..b7b8316 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -6,6 +6,8 @@
 	"errors"
 	"fmt"
 	"io"
+	"math"
+	"sort"
 
 	"github.com/klauspost/compress/huff0"
 )
@@ -15,12 +17,14 @@
 
 	litEnc              *huff0.Scratch
 	llDec, ofDec, mlDec sequenceDec
-	//llEnc, ofEnc, mlEnc []*fseEncoder
-	offsets [3]int
-	content []byte
+	offsets             [3]int
+	content             []byte
 }
 
-var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+const dictMagic = "\x37\xa4\x30\xec"
+
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
 
 // ID returns the dictionary id or 0 if d is nil.
 func (d *dict) ID() uint32 {
@@ -30,14 +34,38 @@
 	return d.id
 }
 
-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
 	if d == nil {
 		return 0
 	}
 	return len(d.content)
 }
 
+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+	if d == nil {
+		return nil
+	}
+	return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+	if d == nil {
+		return [3]int{}
+	}
+	return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+	if d == nil {
+		return nil
+	}
+	return d.litEnc
+}
+
 // Load a dictionary as described in
 // https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
 func loadDict(b []byte) (*dict, error) {
@@ -50,7 +78,7 @@
 		ofDec: sequenceDec{fse: &fseDecoder{}},
 		mlDec: sequenceDec{fse: &fseDecoder{}},
 	}
-	if !bytes.Equal(b[:4], dictMagic[:]) {
+	if string(b[:4]) != dictMagic {
 		return nil, ErrMagicMismatch
 	}
 	d.id = binary.LittleEndian.Uint32(b[4:8])
@@ -62,7 +90,7 @@
 	var err error
 	d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("loading literal table: %w", err)
 	}
 	d.litEnc.Reuse = huff0.ReusePolicyMust
 
@@ -120,3 +148,418 @@
 
 	return &d, nil
 }
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+	ID() uint32
+	ContentSize() int
+	Content() []byte
+	Offsets() [3]int
+	LitEncoder() *huff0.Scratch
+}, error) {
+	initPredefined()
+	d, err := loadDict(b)
+	return d, err
+}
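A sketch of inspecting a trained dictionary via the interface above (dictBytes is an assumed input):

package zstddemo

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

// describeDict prints the fields exposed by InspectDictionary.
func describeDict(dictBytes []byte) error {
	info, err := zstd.InspectDictionary(dictBytes)
	if err != nil {
		return err
	}
	fmt.Println("ID:", info.ID(), "content bytes:", info.ContentSize(), "offsets:", info.Offsets())
	return nil
}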
+
+type BuildDictOptions struct {
+	// Dictionary ID.
+	ID uint32
+
+	// Content to use to create dictionary tables.
+	Contents [][]byte
+
+	// History to use for all blocks.
+	History []byte
+
+	// Offsets to use.
+	Offsets [3]int
+
+	// CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
+	// See https://github.com/facebook/zstd/issues/3724
+	CompatV155 bool
+
+	// Use the specified encoder level.
+	// The dictionary will be built using the specified encoder level,
+	// so its tables are tailored to the matches that level tends to emit.
+	// If not set, SpeedBestCompression will be used.
+	Level EncoderLevel
+
+	// DebugOut, if set, will receive stats and other details.
+	DebugOut io.Writer
+}
+
+func BuildDict(o BuildDictOptions) ([]byte, error) {
+	initPredefined()
+	hist := o.History
+	contents := o.Contents
+	debug := o.DebugOut != nil
+	println := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintln(o.DebugOut, args...)
+		}
+	}
+	printf := func(s string, args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprintf(o.DebugOut, s, args...)
+		}
+	}
+	print := func(args ...interface{}) {
+		if o.DebugOut != nil {
+			fmt.Fprint(o.DebugOut, args...)
+		}
+	}
+
+	if int64(len(hist)) > dictMaxLength {
+		return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+	}
+	if len(hist) < 8 {
+		return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+	}
+	if len(contents) == 0 {
+		return nil, errors.New("no content provided")
+	}
+	d := dict{
+		id:      o.ID,
+		litEnc:  nil,
+		llDec:   sequenceDec{},
+		ofDec:   sequenceDec{},
+		mlDec:   sequenceDec{},
+		offsets: o.Offsets,
+		content: hist,
+	}
+	block := blockEnc{lowMem: false}
+	block.init()
+	enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+	if o.Level != 0 {
+		eOpts := encoderOptions{
+			level:      o.Level,
+			blockSize:  maxMatchLen,
+			windowSize: maxMatchLen,
+			dict:       &d,
+			lowMem:     false,
+		}
+		enc = eOpts.encoder()
+	} else {
+		o.Level = SpeedBestCompression
+	}
+	var (
+		remain [256]int
+		ll     [256]int
+		ml     [256]int
+		of     [256]int
+	)
+	addValues := func(dst *[256]int, src []byte) {
+		for _, v := range src {
+			dst[v]++
+		}
+	}
+	addHist := func(dst *[256]int, src *[256]uint32) {
+		for i, v := range src {
+			dst[i] += int(v)
+		}
+	}
+	seqs := 0
+	nUsed := 0
+	litTotal := 0
+	newOffsets := make(map[uint32]int, 1000)
+	for _, b := range contents {
+		block.reset(nil)
+		if len(b) < 8 {
+			continue
+		}
+		nUsed++
+		enc.Reset(&d, true)
+		enc.Encode(&block, b)
+		addValues(&remain, block.literals)
+		litTotal += len(block.literals)
+		if len(block.sequences) == 0 {
+			continue
+		}
+		seqs += len(block.sequences)
+		block.genCodes()
+		addHist(&ll, block.coders.llEnc.Histogram())
+		addHist(&ml, block.coders.mlEnc.Histogram())
+		addHist(&of, block.coders.ofEnc.Histogram())
+		for i, seq := range block.sequences {
+			if i > 3 {
+				break
+			}
+			offset := seq.offset
+			if offset == 0 {
+				continue
+			}
+			if int(offset) >= len(o.History) {
+				continue
+			}
+			if offset > 3 {
+				newOffsets[offset-3]++
+			} else {
+				newOffsets[uint32(o.Offsets[offset-1])]++
+			}
+		}
+	}
+	// Find most used offsets.
+	var sortedOffsets []uint32
+	for k := range newOffsets {
+		sortedOffsets = append(sortedOffsets, k)
+	}
+	sort.Slice(sortedOffsets, func(i, j int) bool {
+		a, b := sortedOffsets[i], sortedOffsets[j]
+		if newOffsets[a] == newOffsets[b] {
+			// Prefer the longer offset on equal counts.
+			return a > b
+		}
+		return newOffsets[a] > newOffsets[b]
+	})
+	if len(sortedOffsets) > 3 {
+		if debug {
+			print("Offsets:")
+			for i, v := range sortedOffsets {
+				if i > 20 {
+					break
+				}
+				printf("[%d: %d],", v, newOffsets[v])
+			}
+			println("")
+		}
+
+		sortedOffsets = sortedOffsets[:3]
+	}
+	for i, v := range sortedOffsets {
+		o.Offsets[i] = int(v)
+	}
+	if debug {
+		println("New repeat offsets", o.Offsets)
+	}
+
+	if nUsed == 0 || seqs == 0 {
+		return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+	}
+	if debug {
+		println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+	}
+	if seqs/nUsed < 512 {
+		// Use 512 as minimum.
+		nUsed = seqs / 512
+		if nUsed == 0 {
+			nUsed = 1
+		}
+	}
+	copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+		hist := dst.Histogram()
+		var maxSym uint8
+		var maxCount int
+		var fakeLength int
+		for i, v := range src {
+			if v > 0 {
+				v = v / nUsed
+				if v == 0 {
+					v = 1
+				}
+			}
+			if v > maxCount {
+				maxCount = v
+			}
+			if v != 0 {
+				maxSym = uint8(i)
+			}
+			fakeLength += v
+			hist[i] = uint32(v)
+		}
+
+		// Ensure we aren't trying to represent RLE.
+		if maxCount == fakeLength {
+			for i := range hist {
+				if uint8(i) == maxSym {
+					fakeLength++
+					maxSym++
+					hist[i+1] = 1
+					if maxSym > 1 {
+						break
+					}
+				}
+				if hist[0] == 0 {
+					fakeLength++
+					hist[i] = 1
+					if maxSym > 1 {
+						break
+					}
+				}
+			}
+		}
+
+		dst.HistogramFinished(maxSym, maxCount)
+		dst.reUsed = false
+		dst.useRLE = false
+		err := dst.normalizeCount(fakeLength)
+		if err != nil {
+			return nil, err
+		}
+		if debug {
+			println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
+		}
+		return dst.writeCount(nil)
+	}
+	if debug {
+		print("Literal lengths: ")
+	}
+	llTable, err := copyHist(block.coders.llEnc, &ll)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Match lengths: ")
+	}
+	mlTable, err := copyHist(block.coders.mlEnc, &ml)
+	if err != nil {
+		return nil, err
+	}
+	if debug {
+		print("Offsets: ")
+	}
+	ofTable, err := copyHist(block.coders.ofEnc, &of)
+	if err != nil {
+		return nil, err
+	}
+
+	// Literal table
+	avgSize := litTotal
+	if avgSize > huff0.BlockSizeMax/2 {
+		avgSize = huff0.BlockSizeMax / 2
+	}
+	huffBuff := make([]byte, 0, avgSize)
+	// Target size
+	div := litTotal / avgSize
+	if div < 1 {
+		div = 1
+	}
+	if debug {
+		println("Huffman weights:")
+	}
+	for i, n := range remain[:] {
+		if n > 0 {
+			n = n / div
+			// Allow all entries to be represented.
+			if n == 0 {
+				n = 1
+			}
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			if debug {
+				printf("[%d: %d], ", i, n)
+			}
+		}
+	}
+	if o.CompatV155 && remain[255]/div == 0 {
+		huffBuff = append(huffBuff, 255)
+	}
+	scratch := &huff0.Scratch{TableLog: 11}
+	for tries := 0; tries < 255; tries++ {
+		scratch = &huff0.Scratch{TableLog: 11}
+		_, _, err = huff0.Compress1X(huffBuff, scratch)
+		if err == nil {
+			break
+		}
+		if debug {
+			printf("Try %d: Huffman error: %v\n", tries+1, err)
+		}
+		huffBuff = huffBuff[:0]
+		if tries == 250 {
+			if debug {
+				println("Huffman: Bailing out with predefined table")
+			}
+
+			// Bail out... just generate something.
+			huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
+			for i := 0; i < 128; i++ {
+				huffBuff = append(huffBuff, byte(i))
+			}
+			continue
+		}
+		if errors.Is(err, huff0.ErrIncompressible) {
+			// Try truncating least common.
+			for i, n := range remain[:] {
+				if n > 0 {
+					n = n / (div * (i + 1))
+					if n > 0 {
+						huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+					}
+				}
+			}
+			if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
+				huffBuff = append(huffBuff, 255)
+			}
+			if len(huffBuff) == 0 {
+				huffBuff = append(huffBuff, 0, 255)
+			}
+		}
+		if errors.Is(err, huff0.ErrUseRLE) {
+			for i, n := range remain[:] {
+				n = n / (div * (i + 1))
+				// Allow all entries to be represented.
+				if n == 0 {
+					n = 1
+				}
+				huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+			}
+		}
+	}
+
+	var out bytes.Buffer
+	out.Write([]byte(dictMagic))
+	out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+	out.Write(scratch.OutTable)
+	if debug {
+		println("huff table:", len(scratch.OutTable), "bytes")
+		println("of table:", len(ofTable), "bytes")
+		println("ml table:", len(mlTable), "bytes")
+		println("ll table:", len(llTable), "bytes")
+	}
+	out.Write(ofTable)
+	out.Write(mlTable)
+	out.Write(llTable)
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+	out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+	out.Write(hist)
+	if debug {
+		_, err := loadDict(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		i, err := InspectDictionary(out.Bytes())
+		if err != nil {
+			panic(err)
+		}
+		println("ID:", i.ID())
+		println("Content size:", i.ContentSize())
+		println("Encoder:", i.LitEncoder() != nil)
+		println("Offsets:", i.Offsets())
+		var totalSize int
+		for _, b := range contents {
+			totalSize += len(b)
+		}
+
+		encWith := func(opts ...EOption) int {
+			enc, err := NewWriter(nil, opts...)
+			if err != nil {
+				panic(err)
+			}
+			defer enc.Close()
+			var dst []byte
+			var totalSize int
+			for _, b := range contents {
+				dst = enc.EncodeAll(b, dst[:0])
+				totalSize += len(dst)
+			}
+			return totalSize
+		}
+		plain := encWith(WithEncoderLevel(o.Level))
+		withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+		println("Input size:", totalSize)
+		println("Plain Compressed:", plain)
+		println("Dict Compressed:", withDict)
+		println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+	}
+	return out.Bytes(), nil
+}
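A sketch of driving BuildDict; the ID and Offsets values are illustrative assumptions, not recommendations:

package zstddemo

import "github.com/klauspost/compress/zstd"

// trainDict builds a serialized dictionary from representative samples.
func trainDict(samples [][]byte, history []byte) ([]byte, error) {
	return zstd.BuildDict(zstd.BuildDictOptions{
		ID:       1234,
		Contents: samples,
		History:  history, // must be at least 8 bytes
		Offsets:  [3]int{1, 4, 8},
		Level:    zstd.SpeedBestCompression,
	})
}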
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 15ae8ee..7d250c6 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -16,6 +16,7 @@
 	cur int32
 	// maximum offset. Should be at least 2x block size.
 	maxMatchOff int32
+	bufferReset int32
 	hist        []byte
 	crc         *xxhash.Digest
 	tmp         [8]byte
@@ -56,8 +57,8 @@
 }
 
 func (e *fastBase) addBlock(src []byte) int32 {
-	if debugAsserts && e.cur > bufferReset {
-		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+	if debugAsserts && e.cur > e.bufferReset {
+		panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
 	}
 	// check if we have space already
 	if len(e.hist)+len(src) > cap(e.hist) {
@@ -115,7 +116,7 @@
 			panic(err)
 		}
 		if t < 0 {
-			err := fmt.Sprintf("s (%d) < 0", s)
+			err := fmt.Sprintf("t (%d) < 0", t)
 			panic(err)
 		}
 		if s-t > e.maxMatchOff {
@@ -126,24 +127,7 @@
 			panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
 		}
 	}
-	a := src[s:]
-	b := src[t:]
-	b = b[:len(a)]
-	end := int32((len(a) >> 3) << 3)
-	for i := int32(0); i < end; i += 8 {
-		if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
-			return i + int32(bits.TrailingZeros64(diff)>>3)
-		}
-	}
-
-	a = a[end:]
-	b = b[end:]
-	for i := range a {
-		if a[i] != b[i] {
-			return int32(i) + end
-		}
-	}
-	return int32(len(a)) + end
+	return int32(matchLen(src[s:], src[t:]))
 }
 
 // Reset the encoding table.
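For reference, a plain-Go sketch of what the shared matchLen helper adopted above computes; the vendored version performs the same comparison 8 bytes at a time:

package zstddemo

// matchLenRef returns the length of the common prefix of a and b.
func matchLenRef(a, b []byte) int {
	if len(b) < len(a) {
		a = a[:len(b)]
	}
	for i := range a {
		if a[i] != b[i] {
			return i
		}
	}
	return len(a)
}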
@@ -160,18 +144,19 @@
 	} else {
 		e.crc.Reset()
 	}
+	e.blk.dictLitEnc = nil
 	if d != nil {
 		low := e.lowMem
 		if singleBlock {
 			e.lowMem = true
 		}
-		e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+		e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
 		e.lowMem = low
 	}
 
 	// We offset current position so everything will be out of reach.
 	// If above reset line, history will be purged.
-	if e.cur < bufferReset {
+	if e.cur < e.bufferReset {
 		e.cur += e.maxMatchOff + int32(len(e.hist))
 	}
 	e.hist = e.hist[:0]
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 96028ec..4613724 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -34,7 +34,7 @@
 	est    int32
 }
 
-const highScore = 25000
+const highScore = maxMatchLen * 8
 
 // estBits will estimate output bits from predefined tables.
 func (m *match) estBits(bitsPerByte int32) {
@@ -43,7 +43,7 @@
 	if m.rep < 0 {
 		ofc = ofCode(uint32(m.s-m.offset) + 3)
 	} else {
-		ofc = ofCode(uint32(m.rep))
+		ofc = ofCode(uint32(m.rep) & 3)
 	}
 	// Cost, excluding
 	ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -84,14 +84,10 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = prevEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = prevEntry{}
-			}
+			e.table = [bestShortTableSize]prevEntry{}
+			e.longTable = [bestLongTableSize]prevEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -139,8 +135,20 @@
 		break
 	}
 
+	// Add block to history
 	s := e.addBlock(src)
 	blk.size = len(src)
+
+	// Check RLE first
+	if len(src) > zstdMinMatch {
+		ml := matchLen(src[1:], src)
+		if ml == len(src)-1 {
+			blk.literals = append(blk.literals, src[0])
+			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+			return
+		}
+	}
+
 	if len(src) < minNonLiteralBlockSize {
 		blk.extraLits = len(src)
 		blk.literals = blk.literals[:len(src)]
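The new RLE check relies on a self-overlap trick: matchLen(src[1:], src) equals len(src)-1 exactly when every byte repeats the previous one. A plain sketch of the equivalent test:

package zstddemo

// isRun reports whether src is a single repeated byte, the case the encoder
// above now emits as one literal plus a repeat sequence.
func isRun(src []byte) bool {
	for i := 1; i < len(src); i++ {
		if src[i] != src[0] {
			return false
		}
	}
	return len(src) > 1
}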
@@ -163,7 +171,6 @@
 
 	// nextEmit is where in src the next emitLiteral should start from.
 	nextEmit := s
-	cv := load6432(src, s)
 
 	// Relative offsets
 	offset1 := int32(blk.recentOffsets[0])
@@ -177,7 +184,6 @@
 		blk.literals = append(blk.literals, src[nextEmit:until]...)
 		s.litLen = uint32(until - nextEmit)
 	}
-	_ = addLiterals
 
 	if debugEncoder {
 		println("recent offsets:", blk.recentOffsets)
@@ -192,54 +198,104 @@
 			panic("offset0 was 0")
 		}
 
-		bestOf := func(a, b match) match {
-			if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
-				return a
-			}
-			return b
-		}
-		const goodEnough = 100
+		const goodEnough = 250
+
+		cv := load6432(src, s)
 
 		nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
 		nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
 		candidateL := e.longTable[nextHashL]
 		candidateS := e.table[nextHashS]
 
-		matchAt := func(offset int32, s int32, first uint32, rep int32) match {
-			if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
-				return match{s: s, est: highScore}
+		// Set m to a match at offset if it looks like that will improve compression.
+		improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
+			delta := s - offset
+			if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
+				return
 			}
-			if debugAsserts {
-				if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
-					panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+			// Try to quickly reject if we already have a long match.
+			if m.length > 16 {
+				left := len(src) - int(m.s+m.length)
+				// If we are too close to the end, keep as is.
+				if left <= 0 {
+					return
+				}
+				checkLen := m.length - (s - m.s) - 8
+				if left > 2 && checkLen > 4 {
+					// Check 4 bytes, 4 bytes from the end of the current match.
+					a := load3232(src, offset+checkLen)
+					b := load3232(src, s+checkLen)
+					if a != b {
+						return
+					}
 				}
 			}
-			m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
-			m.estBits(bitsPerByte)
-			return m
+			l := 4 + e.matchlen(s+4, offset+4, src)
+			if m.rep <= 0 {
+				// Extend candidate match backwards as far as possible.
+				// Do not extend repeats as we can assume they are optimal
+				// and offsets change if s == nextEmit.
+				tMin := s - e.maxMatchOff
+				if tMin < 0 {
+					tMin = 0
+				}
+				for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
+					s--
+					offset--
+					l++
+				}
+			}
+			if debugAsserts {
+				if offset >= s {
+					panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+				}
+				if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+					panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+				}
+			}
+			cand := match{offset: offset, s: s, length: l, rep: rep}
+			cand.estBits(bitsPerByte)
+			if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+				*m = cand
+			}
 		}
 
-		best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
-		best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
-		best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+		best := match{s: s, est: highScore}
+		improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+		improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
 
 		if canRepeat && best.length < goodEnough {
-			cv32 := uint32(cv >> 8)
-			spp := s + 1
-			best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
-			best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
-			best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
-			if best.length > 0 {
-				cv32 = uint32(cv >> 24)
-				spp += 2
-				best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
-				best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
-				best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
+			if s == nextEmit {
+				// Check repeats straight after a match.
+				improve(&best, s-offset2, s, uint32(cv), 1|4)
+				improve(&best, s-offset3, s, uint32(cv), 2|4)
+				if offset1 > 1 {
+					improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
+				}
+			}
+
+			// If either no match or a non-repeat match, check at + 1
+			if best.rep <= 0 {
+				cv32 := uint32(cv >> 8)
+				spp := s + 1
+				improve(&best, spp-offset1, spp, cv32, 1)
+				improve(&best, spp-offset2, spp, cv32, 2)
+				improve(&best, spp-offset3, spp, cv32, 3)
+				if best.rep < 0 {
+					cv32 = uint32(cv >> 24)
+					spp += 2
+					improve(&best, spp-offset1, spp, cv32, 1)
+					improve(&best, spp-offset2, spp, cv32, 2)
+					improve(&best, spp-offset3, spp, cv32, 3)
+				}
 			}
 		}
 		// Load next and check...
 		e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
 		e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+		index0 := s + 1
 
 		// Look far ahead, unless we have a really long match already...
 		if best.length < goodEnough {
@@ -249,97 +305,89 @@
 				if s >= sLimit {
 					break encodeLoop
 				}
-				cv = load6432(src, s)
 				continue
 			}
 
-			s++
 			candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
-			cv = load6432(src, s)
-			cv2 := load6432(src, s+1)
+			cv = load6432(src, s+1)
+			cv2 := load6432(src, s+2)
 			candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
 			candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
 
 			// Short at s+1
-			best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+			improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
 			// Long at s+1, s+2
-			best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
-			best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
-			best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
-			best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+			improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
+			improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
+			improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
+			improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
 			if false {
 				// Short at s+3.
 				// Too often worse...
-				best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+				improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
 			}
-			// See if we can find a better match by checking where the current best ends.
-			// Use that offset to see if we can find a better full match.
-			if sAt := best.s + best.length; sAt < sLimit {
-				nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
-				candidateEnd := e.longTable[nextHashL]
-				if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
-					bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
-					if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
-						bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+
+			// Start check at a fixed offset to allow for a few mismatches.
+			// For this compression level, a skip of 2 yields the best results.
+			// We cannot do this if we have already indexed this position.
+			const skipBeginning = 2
+			if best.s > s-skipBeginning {
+				// See if we can find a better match by checking where the current best ends.
+				// Use that offset to see if we can find a better full match.
+				if sAt := best.s + best.length; sAt < sLimit {
+					nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+					candidateEnd := e.longTable[nextHashL]
+
+					if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
+						improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+						if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
+							improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+						}
 					}
-					best = bestEnd
 				}
 			}
 		}
 
 		if debugAsserts {
+			if best.offset >= best.s {
+				panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
+			}
+			if best.s < nextEmit {
+				panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+			}
+			if best.offset < s-e.maxMatchOff {
+				panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+			}
 			if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
 				panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
 			}
 		}
 
 		// We have a match, we can store the forward value
+		s = best.s
 		if best.rep > 0 {
-			s = best.s
 			var seq seq
 			seq.matchLen = uint32(best.length - zstdMinMatch)
+			addLiterals(&seq, best.s)
 
-			// We might be able to match backwards.
-			// Extend as long as we can.
-			start := best.s
-			// We end the search early, so we don't risk 0 literals
-			// and have to do special offset treatment.
-			startLimit := nextEmit + 1
-
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
-			repIndex := best.offset
-			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
-				repIndex--
-				start--
-				seq.matchLen++
-			}
-			addLiterals(&seq, start)
-
-			// rep 0
-			seq.offset = uint32(best.rep)
+			// Repeat. If bit 4 is set, this is a non-lit repeat.
+			seq.offset = uint32(best.rep & 3)
 			if debugSequences {
-				println("repeat sequence", seq, "next s:", s)
+				println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
 			}
 			blk.sequences = append(blk.sequences, seq)
 
-			// Index match start+1 (long) -> s - 1
-			index0 := s
+			// Index old s + 1 -> s - 1
 			s = best.s + best.length
-
 			nextEmit = s
-			if s >= sLimit {
-				if debugEncoder {
-					println("repeat ended", s, best.length)
 
-				}
-				break encodeLoop
-			}
 			// Index skipped...
+			end := s
+			if s > sLimit+4 {
+				end = sLimit + 4
+			}
 			off := index0 + e.cur
-			for index0 < s-1 {
+			for index0 < end {
 				cv0 := load6432(src, index0)
 				h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 				h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -348,19 +396,26 @@
 				off++
 				index0++
 			}
+
 			switch best.rep {
-			case 2:
+			case 2, 4 | 1:
 				offset1, offset2 = offset2, offset1
-			case 3:
+			case 3, 4 | 2:
 				offset1, offset2, offset3 = offset3, offset1, offset2
+			case 4 | 3:
+				offset1, offset2, offset3 = offset1-1, offset1, offset2
 			}
-			cv = load6432(src, s)
+			if s >= sLimit {
+				if debugEncoder {
+					println("repeat ended", s, best.length)
+				}
+				break encodeLoop
+			}
 			continue
 		}
 
 		// A 4-byte match has been found. Update recent offsets.
 		// We'll later see if more than 4 bytes.
-		s = best.s
 		t := best.offset
 		offset1, offset2, offset3 = s-t, offset1, offset2
 
@@ -372,22 +427,9 @@
 			panic("invalid offset")
 		}
 
-		// Extend the n-byte match as long as possible.
-		l := best.length
-
-		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
-		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
-			s--
-			t--
-			l++
-		}
-
 		// Write our sequence
 		var seq seq
+		l := best.length
 		seq.litLen = uint32(s - nextEmit)
 		seq.matchLen = uint32(l - zstdMinMatch)
 		if seq.litLen > 0 {
@@ -400,65 +442,25 @@
 		}
 		blk.sequences = append(blk.sequences, seq)
 		nextEmit = s
-		if s >= sLimit {
-			break encodeLoop
+
+		// Index old s + 1 -> s - 1 or sLimit
+		end := s
+		if s > sLimit-4 {
+			end = sLimit - 4
 		}
 
-		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
-		// every entry
-		for index0 < s-1 {
+		off := index0 + e.cur
+		for index0 < end {
 			cv0 := load6432(src, index0)
 			h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
 			h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
 			index0++
+			off++
 		}
-
-		cv = load6432(src, s)
-		if !canRepeat {
-			continue
-		}
-
-		// Check offset 2
-		for {
-			o2 := s - offset2
-			if load3232(src, o2) != uint32(cv) {
-				// Do regular search
-				break
-			}
-
-			// Store this, since we have it.
-			nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
-			nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
-
-			// We have at least 4 byte match.
-			// No need to check backwards. We come straight from a match
-			l := 4 + e.matchlen(s+4, o2+4, src)
-
-			e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
-			e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
-			seq.matchLen = uint32(l) - zstdMinMatch
-			seq.litLen = 0
-
-			// Since litlen is always 0, this is offset 1.
-			seq.offset = 1
-			s += l
-			nextEmit = s
-			if debugSequences {
-				println("sequence", seq, "next s:", s)
-			}
-			blk.sequences = append(blk.sequences, seq)
-
-			// Swap offset 1 and 2.
-			offset1, offset2 = offset2, offset1
-			if s >= sLimit {
-				// Finished
-				break encodeLoop
-			}
-			cv = load6432(src, s)
+		if s >= sLimit {
+			break encodeLoop
 		}
 	}
 
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index c769f69..84a79fd 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -62,14 +62,10 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = prevEntry{}
-			}
+			e.table = [betterShortTableSize]tableEntry{}
+			e.longTable = [betterLongTableSize]prevEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -106,9 +102,20 @@
 		e.cur = e.maxMatchOff
 		break
 	}
-
+	// Add block to history
 	s := e.addBlock(src)
 	blk.size = len(src)
+
+	// Check RLE first
+	if len(src) > zstdMinMatch {
+		ml := matchLen(src[1:], src)
+		if ml == len(src)-1 {
+			blk.literals = append(blk.literals, src[0])
+			blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+			return
+		}
+	}
+
 	if len(src) < minNonLiteralBlockSize {
 		blk.extraLits = len(src)
 		blk.literals = blk.literals[:len(src)]
@@ -149,7 +156,7 @@
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -166,14 +173,15 @@
 			off := s + e.cur
 			e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -202,12 +210,12 @@
 
 					// Index match start+1 (long) -> s - 1
 					index0 := s + repOff
-					s += lenght + repOff
+					s += length + repOff
 
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -233,9 +241,9 @@
 				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 					// Consider history as well.
 					var seq seq
-					lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -262,12 +270,11 @@
 					}
 					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
-					s += lenght + repOff2
+					s += length + repOff2
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -416,15 +423,23 @@
 
 		// Try to find a better match by searching for a long match at the end of the current best match
 		if s+matched < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// Sweet spot is around 3 bytes, but depends on input.
+			// The skipped bytes are tested when the match is extended backwards,
+			// and are still picked up as part of the match if they turn out to match.
+			const skipBeginning = 3
+
 			nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
-			cv := load3232(src, s)
+			s2 := s + skipBeginning
+			cv := load3232(src, s2)
 			candidateL := e.longTable[nextHashL]
-			coffsetL := candidateL.offset - e.cur - matched
-			if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+			coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+			if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 				// Found a long match, at least 4 bytes.
-				matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+				matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 				if matchedNext > matched {
 					t = coffsetL
+					s = s2
 					matched = matchedNext
 					if debugMatches {
 						println("long match at end-of-match")
@@ -434,12 +449,13 @@
 
 			// Check prev long...
 			if true {
-				coffsetL = candidateL.prev - e.cur - matched
-				if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+				coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+				if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
 					// Found a long match, at least 4 bytes.
-					matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+					matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
 					if matchedNext > matched {
 						t = coffsetL
+						s = s2
 						matched = matchedNext
 						if debugMatches {
 							println("prev long match at end-of-match")
@@ -493,15 +509,15 @@
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
@@ -578,7 +594,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
 			for i := range e.table[:] {
 				e.table[i] = tableEntry{}
@@ -667,7 +683,7 @@
 		var t int32
 		// We allow the encoder to optionally turn off repeat offsets across blocks
 		canRepeat := len(blk.sequences) > 2
-		var matched int32
+		var matched, index0 int32
 
 		for {
 			if debugAsserts && canRepeat && offset1 == 0 {
@@ -686,14 +702,15 @@
 			e.markLongShardDirty(nextHashL)
 			e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
 			e.markShortShardDirty(nextHashS)
+			index0 = s + 1
 
 			if canRepeat {
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -721,13 +738,12 @@
 					blk.sequences = append(blk.sequences, seq)
 
 					// Index match start+1 (long) -> s - 1
-					index0 := s + repOff
-					s += lenght + repOff
+					s += length + repOff
 
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -756,9 +772,9 @@
 				if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
 					// Consider history as well.
 					var seq seq
-					lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+					length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -785,12 +801,11 @@
 					}
 					blk.sequences = append(blk.sequences, seq)
 
-					index0 := s + repOff2
-					s += lenght + repOff2
+					s += length + repOff2
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -1019,18 +1034,18 @@
 		}
 
 		// Index match start+1 (long) -> s - 1
-		index0 := s - l + 1
+		off := index0 + e.cur
 		for index0 < s-1 {
 			cv0 := load6432(src, index0)
 			cv1 := cv0 >> 8
 			h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
-			off := index0 + e.cur
 			e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
 			e.markLongShardDirty(h0)
 			h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
 			e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
 			e.markShortShardDirty(h1)
 			index0 += 2
+			off += 2
 		}
 
 		cv = load6432(src, s)
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 7ff0c64..d36be7b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -44,14 +44,10 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			for i := range e.longTable[:] {
-				e.longTable[i] = tableEntry{}
-			}
+			e.table = [dFastShortTableSize]tableEntry{}
+			e.longTable = [dFastLongTableSize]tableEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -142,9 +138,9 @@
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -170,11 +166,11 @@
 						println("repeat sequence", seq, "next s:", s)
 					}
 					blk.sequences = append(blk.sequences, seq)
-					s += lenght + repOff
+					s += length + repOff
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -388,7 +384,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	if e.cur >= bufferReset {
+	if e.cur >= e.bufferReset {
 		for i := range e.table[:] {
 			e.table[i] = tableEntry{}
 		}
@@ -685,7 +681,7 @@
 	}
 
 	// We do not store history, so we must offset e.cur to avoid false matches for next user.
-	if e.cur < bufferReset {
+	if e.cur < e.bufferReset {
 		e.cur += int32(len(src))
 	}
 }
@@ -700,7 +696,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
 			for i := range e.table[:] {
 				e.table[i] = tableEntry{}
@@ -802,9 +798,9 @@
 				if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
 					// Consider history as well.
 					var seq seq
-					lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+					length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
 
-					seq.matchLen = uint32(lenght - zstdMinMatch)
+					seq.matchLen = uint32(length - zstdMinMatch)
 
 					// We might be able to match backwards.
 					// Extend as long as we can.
@@ -830,11 +826,11 @@
 						println("repeat sequence", seq, "next s:", s)
 					}
 					blk.sequences = append(blk.sequences, seq)
-					s += lenght + repOff
+					s += length + repOff
 					nextEmit = s
 					if s >= sLimit {
 						if debugEncoder {
-							println("repeat ended", s, lenght)
+							println("repeat ended", s, length)
 
 						}
 						break encodeLoop
@@ -1088,7 +1084,7 @@
 			}
 		}
 		e.lastDictID = d.id
-		e.allDirty = true
+		allDirty = true
 	}
 	// Reset table to initial state
 	e.cur = e.maxMatchOff
@@ -1103,7 +1099,8 @@
 	}
 
 	if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
-		copy(e.longTable[:], e.dictLongTable)
+		//copy(e.longTable[:], e.dictLongTable)
+		e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
 		for i := range e.longTableShardDirty {
 			e.longTableShardDirty[i] = false
 		}
@@ -1114,7 +1111,9 @@
 			continue
 		}
 
-		copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+		*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
 		e.longTableShardDirty[i] = false
 	}
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f51ab52..f45a3da 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -43,7 +43,7 @@
 	)
 
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
 			for i := range e.table[:] {
 				e.table[i] = tableEntry{}
@@ -133,8 +133,7 @@
 			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 				// Consider history as well.
 				var seq seq
-				var length int32
-				length = 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
 				seq.matchLen = uint32(length - zstdMinMatch)
 
 				// We might be able to match backwards.
@@ -304,13 +303,13 @@
 		minNonLiteralBlockSize = 1 + 1 + inputMargin
 	)
 	if debugEncoder {
-		if len(src) > maxBlockSize {
+		if len(src) > maxCompressedBlockSize {
 			panic("src too big")
 		}
 	}
 
 	// Protect against e.cur wraparound.
-	if e.cur >= bufferReset {
+	if e.cur >= e.bufferReset {
 		for i := range e.table[:] {
 			e.table[i] = tableEntry{}
 		}
@@ -538,7 +537,7 @@
 		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
 	}
 	// We do not store history, so we must offset e.cur to avoid false matches for next user.
-	if e.cur < bufferReset {
+	if e.cur < e.bufferReset {
 		e.cur += int32(len(src))
 	}
 }
@@ -555,11 +554,9 @@
 		return
 	}
 	// Protect against e.cur wraparound.
-	for e.cur >= bufferReset {
+	for e.cur >= e.bufferReset-int32(len(e.hist)) {
 		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
+			e.table = [tableSize]tableEntry{}
 			e.cur = e.maxMatchOff
 			break
 		}
@@ -647,8 +644,7 @@
 			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
 				// Consider history as well.
 				var seq seq
-				var length int32
-				length = 4 + e.matchlen(s+6, repIndex+4, src)
+				length := 4 + e.matchlen(s+6, repIndex+4, src)
 
 				seq.matchLen = uint32(length - zstdMinMatch)
 
@@ -833,13 +829,12 @@
 		}
 		if true {
 			end := e.maxMatchOff + int32(len(d.content)) - 8
-			for i := e.maxMatchOff; i < end; i += 3 {
+			for i := e.maxMatchOff; i < end; i += 2 {
 				const hashLog = tableBits
 
 				cv := load6432(d.content, i-e.maxMatchOff)
-				nextHash := hashLen(cv, hashLog, tableFastHashLen)      // 0 -> 5
-				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen)  // 1 -> 6
-				nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+				nextHash := hashLen(cv, hashLog, tableFastHashLen)     // 0 -> 6
+				nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
 				e.dictTable[nextHash] = tableEntry{
 					val:    uint32(cv),
 					offset: i,
@@ -848,10 +843,6 @@
 					val:    uint32(cv >> 8),
 					offset: i + 1,
 				}
-				e.dictTable[nextHash2] = tableEntry{
-					val:    uint32(cv >> 16),
-					offset: i + 2,
-				}
 			}
 		}
 		e.lastDictID = d.id
@@ -871,7 +862,8 @@
 	const shardCnt = tableShardCnt
 	const shardSize = tableShardSize
 	if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
-		copy(e.table[:], e.dictTable)
+		//copy(e.table[:], e.dictTable)
+		e.table = *(*[tableSize]tableEntry)(e.dictTable)
 		for i := range e.tableShardDirty {
 			e.tableShardDirty[i] = false
 		}
@@ -883,7 +875,8 @@
 			continue
 		}
 
-		copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+		*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
 		e.tableShardDirty[i] = false
 	}
 	e.allDirty = false
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 7aaaedb..8f8223c 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,8 +6,10 @@
 
 import (
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
+	"math"
 	rdebug "runtime/debug"
 	"sync"
 
@@ -148,6 +150,9 @@
 // and write CRC if requested.
 func (e *Encoder) Write(p []byte) (n int, err error) {
 	s := &e.state
+	if s.eofWritten {
+		return 0, ErrEncoderClosed
+	}
 	for len(p) > 0 {
 		if len(p)+len(s.filling) < e.o.blockSize {
 			if e.o.crc {
@@ -201,7 +206,7 @@
 			return nil
 		}
 		if final && len(s.filling) > 0 {
-			s.current = e.EncodeAll(s.filling, s.current[:0])
+			s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
 			var n2 int
 			n2, s.err = s.w.Write(s.current)
 			if s.err != nil {
@@ -226,10 +231,7 @@
 			DictID:        e.o.dict.ID(),
 		}
 
-		dst, err := fh.appendTo(tmp[:0])
-		if err != nil {
-			return err
-		}
+		dst := fh.appendTo(tmp[:0])
 		s.headerWritten = true
 		s.wWg.Wait()
 		var n2 int
@@ -276,23 +278,9 @@
 			s.eofWritten = true
 		}
 
-		err := errIncompressible
-		// If we got the exact same number of literals as input,
-		// assume the literals cannot be compressed.
-		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			blk.encodeRaw(src)
-			// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
-		case nil:
-		default:
-			s.err = err
-			return err
+		s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if s.err != nil {
+			return s.err
 		}
 		_, s.err = s.w.Write(blk.output)
 		s.nWritten += int64(len(blk.output))
@@ -304,6 +292,9 @@
 	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
 	s.nInput += int64(len(s.current))
 	s.wg.Add(1)
+	if final {
+		s.eofWritten = true
+	}
 	go func(src []byte) {
 		if debugEncoder {
 			println("Adding block,", len(src), "bytes, final:", final)
@@ -319,9 +310,6 @@
 		blk := enc.Block()
 		enc.Encode(blk, src)
 		blk.last = final
-		if final {
-			s.eofWritten = true
-		}
 		// Wait for pending writes.
 		s.wWg.Wait()
 		if s.writeErr != nil {
@@ -342,22 +330,8 @@
 				}
 				s.wWg.Done()
 			}()
-			err := errIncompressible
-			// If we got the exact same number of literals as input,
-			// assume the literals cannot be compressed.
-			if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
-				err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-			}
-			switch err {
-			case errIncompressible:
-				if debugEncoder {
-					println("Storing incompressible block as raw")
-				}
-				blk.encodeRaw(src)
-				// In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
-			case nil:
-			default:
-				s.writeErr = err
+			s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+			if s.writeErr != nil {
 				return
 			}
 			_, s.writeErr = s.w.Write(blk.output)
@@ -431,12 +405,20 @@
 	if len(s.filling) > 0 {
 		err := e.nextBlock(false)
 		if err != nil {
+			// Ignore Flush after Close.
+			if errors.Is(s.err, ErrEncoderClosed) {
+				return nil
+			}
 			return err
 		}
 	}
 	s.wg.Wait()
 	s.wWg.Wait()
 	if s.err != nil {
+		// Ignore Flush after Close.
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return s.err
 	}
 	return s.writeErr
@@ -452,6 +434,9 @@
 	}
 	err := e.nextBlock(true)
 	if err != nil {
+		if errors.Is(s.err, ErrEncoderClosed) {
+			return nil
+		}
 		return err
 	}
 	if s.frameContentSize > 0 {
@@ -489,6 +474,11 @@
 		}
 		_, s.err = s.w.Write(frame)
 	}
+	if s.err == nil {
+		s.err = ErrEncoderClosed
+		return nil
+	}
+
 	return s.err
 }
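A behavior sketch of the ErrEncoderClosed handling introduced above, assuming the error is exported by the package:

package zstddemo

import (
	"errors"
	"io"

	"github.com/klauspost/compress/zstd"
)

// closedBehavior shows that Flush after Close is a no-op while Write after
// Close reports ErrEncoderClosed.
func closedBehavior() error {
	w, err := zstd.NewWriter(io.Discard)
	if err != nil {
		return err
	}
	if _, err := w.Write([]byte("data")); err != nil {
		return err
	}
	if err := w.Close(); err != nil {
		return err
	}
	if err := w.Flush(); err != nil { // ignored after Close
		return err
	}
	if _, err := w.Write([]byte("more")); !errors.Is(err, zstd.ErrEncoderClosed) {
		return err
	}
	return nil
}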
 
@@ -499,6 +489,15 @@
 // Data compressed with EncodeAll can be decoded with the Decoder,
 // using either a stream or DecodeAll.
 func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+	e.init.Do(e.initialize)
+	enc := <-e.encoders
+	defer func() {
+		e.encoders <- enc
+	}()
+	return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
 	if len(src) == 0 {
 		if e.o.fullZero {
 			// Add frame header.
@@ -510,7 +509,7 @@
 				Checksum: false,
 				DictID:   0,
 			}
-			dst, _ = fh.appendTo(dst)
+			dst = fh.appendTo(dst)
 
 			// Write raw block as last one only.
 			var blk blockHeader
@@ -521,13 +520,7 @@
 		}
 		return dst
 	}
-	e.init.Do(e.initialize)
-	enc := <-e.encoders
-	defer func() {
-		// Release encoder reference to last block.
-		// If a non-single block is needed the encoder will reset again.
-		e.encoders <- enc
-	}()
+
 	// Use single segments when above minimum window and below window size.
 	single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
 	if e.o.single != nil {
@@ -545,10 +538,7 @@
 	if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
 		dst = make([]byte, 0, len(src))
 	}
-	dst, err := fh.appendTo(dst)
-	if err != nil {
-		panic(err)
-	}
+	dst = fh.appendTo(dst)
 
 	// If we can do everything in one block, prefer that.
 	if len(src) <= e.o.blockSize {
@@ -567,25 +557,15 @@
 
 		// If we got the exact same number of literals as input,
 		// assume the literals cannot be compressed.
-		err := errIncompressible
 		oldout := blk.output
-		if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
-			// Output directly to dst
-			blk.output = dst
-			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
-		}
+		// Output directly to dst
+		blk.output = dst
 
-		switch err {
-		case errIncompressible:
-			if debugEncoder {
-				println("Storing incompressible block as raw")
-			}
-			dst = blk.encodeRawTo(dst, src)
-		case nil:
-			dst = blk.output
-		default:
+		err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+		if err != nil {
 			panic(err)
 		}
+		dst = blk.output
 		blk.output = oldout
 	} else {
 		enc.Reset(e.o.dict, false)
@@ -604,25 +584,11 @@
 			if len(src) == 0 {
 				blk.last = true
 			}
-			err := errIncompressible
-			// If we got the exact same number of literals as input,
-			// assume the literals cannot be compressed.
-			if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
-				err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
-			}
-
-			switch err {
-			case errIncompressible:
-				if debugEncoder {
-					println("Storing incompressible block as raw")
-				}
-				dst = blk.encodeRawTo(dst, todo)
-				blk.popOffsets()
-			case nil:
-				dst = append(dst, blk.output...)
-			default:
+			err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+			if err != nil {
 				panic(err)
 			}
+			dst = append(dst, blk.output...)
 			blk.reset(nil)
 		}
 	}
@@ -632,6 +598,7 @@
 	// Add padding with content from crypto/rand.Reader
 	if e.o.pad > 0 {
 		add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+		var err error
 		dst, err = skippableFrame(dst, add, rand.Reader)
 		if err != nil {
 			panic(err)
@@ -639,3 +606,37 @@
 	}
 	return dst
 }
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+	frameHeader := 4 + 2 // magic + frame header & window descriptor
+	if e.o.dict != nil {
+		frameHeader += 4
+	}
+	// Frame content size:
+	if size < 256 {
+		frameHeader++
+	} else if size < 65536+256 {
+		frameHeader += 2
+	} else if size < math.MaxInt32 {
+		frameHeader += 4
+	} else {
+		frameHeader += 8
+	}
+	// Final crc
+	if e.o.crc {
+		frameHeader += 4
+	}
+
+	// Max overhead is 3 bytes/block.
+	// There cannot be 0 blocks.
+	blocks := (size + e.o.blockSize) / e.o.blockSize
+
+	// Combine, add padding.
+	maxSz := frameHeader + 3*blocks + size
+	if e.o.pad > 1 {
+		maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+	}
+	return maxSz
+}
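A sketch of the intended use, preallocating so EncodeAll never grows dst:

package zstddemo

import "github.com/klauspost/compress/zstd"

// compressPrealloc sizes dst up front using the bound computed above.
func compressPrealloc(enc *zstd.Encoder, src []byte) []byte {
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	return enc.EncodeAll(src, dst)
}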
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index a7c5e1a..20671dc 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -3,6 +3,8 @@
 import (
 	"errors"
 	"fmt"
+	"math"
+	"math/bits"
 	"runtime"
 	"strings"
 )
@@ -37,7 +39,7 @@
 		blockSize:     maxCompressedBlockSize,
 		windowSize:    8 << 20,
 		level:         SpeedDefault,
-		allLitEntropy: true,
+		allLitEntropy: false,
 		lowMem:        false,
 	}
 }
@@ -47,22 +49,22 @@
 	switch o.level {
 	case SpeedFastest:
 		if o.dict != nil {
-			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+			return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
 		}
-		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+		return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
 
 	case SpeedDefault:
 		if o.dict != nil {
-			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+			return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
 		}
-		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+		return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
 	case SpeedBetterCompression:
 		if o.dict != nil {
-			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+			return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
 		}
-		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+		return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
 	case SpeedBestCompression:
-		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+		return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
 	}
 	panic("unknown compression level")
 }
@@ -92,7 +94,7 @@
 // The value must be a power of two between MinWindowSize and MaxWindowSize.
 // A larger value will enable better compression but allocate more memory and,
 // for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level, capped at 8MB.
 func WithWindowSize(n int) EOption {
 	return func(o *encoderOptions) error {
 		switch {
@@ -127,7 +129,7 @@
 		}
 		// No need to waste our time.
 		if n == 1 {
-			o.pad = 0
+			n = 0
 		}
 		if n > 1<<30 {
 			return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
@@ -230,13 +232,13 @@
 			case SpeedDefault:
 				o.windowSize = 8 << 20
 			case SpeedBetterCompression:
-				o.windowSize = 16 << 20
+				o.windowSize = 8 << 20
 			case SpeedBestCompression:
-				o.windowSize = 32 << 20
+				o.windowSize = 8 << 20
 			}
 		}
 		if !o.customALEntropy {
-			o.allLitEntropy = l > SpeedFastest
+			o.allLitEntropy = l > SpeedDefault
 		}
 
 		return nil
@@ -304,7 +306,13 @@
 }
 
 // WithEncoderDict allows to register a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
 // The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
 func WithEncoderDict(dict []byte) EOption {
 	return func(o *encoderOptions) error {
 		d, err := loadDict(dict)
@@ -315,3 +323,17 @@
 		return nil
 	}
 }
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+	return func(o *encoderOptions) error {
+		if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+			return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+		}
+		o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+		return nil
+	}
+}
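
WithEncoderDictRaw pairs with the decoder-side WithDecoderDictRaw; both sides must register the same id and content. A hedged round-trip sketch (the history bytes and id are illustrative):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	history := []byte("shared preamble that many messages start with")

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(1, history))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll([]byte("shared preamble that many messages start with: payload"), nil)

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(1, history))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("compressed %d bytes, decoded %d bytes\n", len(compressed), len(plain))
}
```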
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 9568a4b..e47af66 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -5,7 +5,7 @@
 package zstd
 
 import (
-	"bytes"
+	"encoding/binary"
 	"encoding/hex"
 	"errors"
 	"io"
@@ -29,7 +29,7 @@
 
 	FrameContentSize uint64
 
-	DictionaryID  *uint32
+	DictionaryID  uint32
 	HasCheckSum   bool
 	SingleSegment bool
 }
@@ -43,9 +43,9 @@
 	MaxWindowSize = 1 << 29
 )
 
-var (
-	frameMagic          = []byte{0x28, 0xb5, 0x2f, 0xfd}
-	skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+const (
+	frameMagic          = "\x28\xb5\x2f\xfd"
+	skippableFrameMagic = "\x2a\x4d\x18"
 )
 
 func newFrameDec(o decoderOptions) *frameDec {
@@ -73,25 +73,25 @@
 		switch err {
 		case io.EOF, io.ErrUnexpectedEOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			signature[0] = b[0]
+		default:
+			return err
 		}
 		// Read the rest, don't allow io.ErrUnexpectedEOF
 		b, err = br.readSmall(3)
 		switch err {
 		case io.EOF:
 			return io.EOF
-		default:
-			return err
 		case nil:
 			copy(signature[1:], b)
+		default:
+			return err
 		}
 
-		if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
+		if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
 			if debugDecoder {
-				println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
+				println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
 			}
 			// Break if not skippable frame.
 			break
@@ -114,9 +114,9 @@
 			return err
 		}
 	}
-	if !bytes.Equal(signature[:], frameMagic) {
+	if string(signature[:]) != frameMagic {
 		if debugDecoder {
-			println("Got magic numbers: ", signature, "want:", frameMagic)
+			println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
 		}
 		return ErrMagicMismatch
 	}
@@ -146,7 +146,9 @@
 			}
 			return err
 		}
-		printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		if debugDecoder {
+			printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+		}
 		windowLog := 10 + (wd >> 3)
 		windowBase := uint64(1) << windowLog
 		windowAdd := (windowBase / 8) * uint64(wd&0x7)
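
For reference, the window-descriptor math in this hunk as a standalone sketch (package and function names are illustrative, not part of the vendored code):

```go
package zstdnotes // illustrative

// windowSizeFromDescriptor mirrors the hunk above: the top five bits of
// the descriptor byte are an exponent, the low three bits a mantissa in
// eighths of the base window.
func windowSizeFromDescriptor(wd byte) uint64 {
	windowLog := 10 + uint64(wd>>3)
	windowBase := uint64(1) << windowLog
	windowAdd := (windowBase / 8) * uint64(wd&0x7)
	return windowBase + windowAdd
}
```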
@@ -155,7 +157,7 @@
 
 	// Read Dictionary_ID
 	// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
-	d.DictionaryID = nil
+	d.DictionaryID = 0
 	if size := fhd & 3; size != 0 {
 		if size == 3 {
 			size = 4
@@ -167,7 +169,7 @@
 			return err
 		}
 		var id uint32
-		switch size {
+		switch len(b) {
 		case 1:
 			id = uint32(b[0])
 		case 2:
@@ -178,11 +180,7 @@
 		if debugDecoder {
 			println("Dict size", size, "ID:", id)
 		}
-		if id > 0 {
-			// ID 0 means "sorry, no dictionary anyway".
-			// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
-			d.DictionaryID = &id
-		}
+		d.DictionaryID = id
 	}
 
 	// Read Frame_Content_Size
@@ -204,7 +202,7 @@
 			println("Reading Frame content", err)
 			return err
 		}
-		switch fcsSize {
+		switch len(b) {
 		case 1:
 			d.FrameContentSize = uint64(b[0])
 		case 2:
@@ -261,11 +259,16 @@
 	}
 	d.history.windowSize = int(d.WindowSize)
 	if !d.o.lowMem || d.history.windowSize < maxBlockSize {
-		// Alloc 2x window size if not low-mem, or very small window size.
+		// Alloc 2x window size if not low-mem, or window size below 2MB.
 		d.history.allocFrameBuffer = d.history.windowSize * 2
 	} else {
-		// Alloc with one additional block
-		d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		if d.o.lowMem {
+			// Alloc with 1MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+		} else {
+			// Alloc with 2MB extra.
+			d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+		}
 	}
 
 	if debugDecoder {
@@ -292,58 +295,41 @@
 	return nil
 }
 
-// checkCRC will check the checksum if the frame has one.
+// checkCRC will check the checksum, assuming the frame has one.
 // Will return ErrCRCMismatch if crc check failed, otherwise nil.
 func (d *frameDec) checkCRC() error {
-	if !d.HasCheckSum {
-		return nil
-	}
-
 	// We can overwrite upper tmp now
-	want, err := d.rawInput.readSmall(4)
+	buf, err := d.rawInput.readSmall(4)
 	if err != nil {
 		println("CRC missing?", err)
 		return err
 	}
 
-	if d.o.ignoreChecksum {
-		return nil
-	}
+	want := binary.LittleEndian.Uint32(buf[:4])
+	got := uint32(d.crc.Sum64())
 
-	var tmp [4]byte
-	got := d.crc.Sum64()
-	// Flip to match file order.
-	tmp[0] = byte(got >> 0)
-	tmp[1] = byte(got >> 8)
-	tmp[2] = byte(got >> 16)
-	tmp[3] = byte(got >> 24)
-
-	if !bytes.Equal(tmp[:], want) {
+	if got != want {
 		if debugDecoder {
-			println("CRC Check Failed:", tmp[:], "!=", want)
+			printf("CRC check failed: got %08x, want %08x\n", got, want)
 		}
 		return ErrCRCMismatch
 	}
 	if debugDecoder {
-		println("CRC ok", tmp[:])
+		printf("CRC ok %08x\n", got)
 	}
 	return nil
 }
 
-// consumeCRC reads the checksum data if the frame has one.
+// consumeCRC skips over the checksum, assuming the frame has one.
 func (d *frameDec) consumeCRC() error {
-	if d.HasCheckSum {
-		_, err := d.rawInput.readSmall(4)
-		if err != nil {
-			println("CRC missing?", err)
-			return err
-		}
+	_, err := d.rawInput.readSmall(4)
+	if err != nil {
+		println("CRC missing?", err)
 	}
-
-	return nil
+	return err
 }
 
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
 func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
 	saved := d.history.b
 
@@ -353,12 +339,23 @@
 	// Store input length, so we only check new data.
 	crcStart := len(dst)
 	d.history.decoders.maxSyncLen = 0
+	if d.o.limitToCap {
+		d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+	}
 	if d.FrameContentSize != fcsUnknown {
-		d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+			d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+		}
 		if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+			if debugDecoder {
+				println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+			}
 			return dst, ErrDecoderSizeExceeded
 		}
-		if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+		if debugDecoder {
+			println("maxSyncLen:", d.history.decoders.maxSyncLen)
+		}
+		if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
 			// Alloc for output
 			dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
 			copy(dst2, dst)
@@ -378,7 +375,13 @@
 		if err != nil {
 			break
 		}
-		if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+		if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+			println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+			err = ErrDecoderSizeExceeded
+			break
+		}
+		if d.o.limitToCap && len(d.history.b) > cap(dst) {
+			println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
 			err = ErrDecoderSizeExceeded
 			break
 		}
@@ -402,15 +405,8 @@
 			if d.o.ignoreChecksum {
 				err = d.consumeCRC()
 			} else {
-				var n int
-				n, err = d.crc.Write(dst[crcStart:])
-				if err == nil {
-					if n != len(dst)-crcStart {
-						err = io.ErrShortWrite
-					} else {
-						err = d.checkCRC()
-					}
-				}
+				d.crc.Write(dst[crcStart:])
+				err = d.checkCRC()
 			}
 		}
 	}
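
The rewritten checkCRC encodes the zstd content-checksum rule directly: the stored value is the low 32 bits of the frame's XXH64 digest, little-endian. A standalone sketch of that comparison, assuming the public cespare/xxhash package (function names are illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// verifyContentChecksum compares the low 32 bits of the XXH64 digest of
// the decoded frame content against the 4-byte little-endian checksum
// stored at the end of the frame.
func verifyContentChecksum(decoded, stored []byte) error {
	want := binary.LittleEndian.Uint32(stored[:4])
	got := uint32(xxhash.Sum64(decoded))
	if got != want {
		return fmt.Errorf("crc mismatch: got %08x, want %08x", got, want)
	}
	return nil
}

func main() {
	data := []byte("decoded frame content")
	var tail [4]byte
	binary.LittleEndian.PutUint32(tail[:], uint32(xxhash.Sum64(data)))
	fmt.Println(verifyContentChecksum(data, tail[:])) // <nil>
}
```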
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 4ef7f5a..667ca06 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -22,7 +22,7 @@
 
 const maxHeaderSize = 14
 
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
 	dst = append(dst, frameMagic...)
 	var fhd uint8
 	if f.Checksum {
@@ -76,7 +76,7 @@
 		if f.SingleSegment {
 			dst = append(dst, uint8(f.ContentSize))
 		}
-		// Unless SingleSegment is set, framessizes < 256 are nto stored.
+		// Unless SingleSegment is set, frame sizes < 256 are not stored.
 	case 1:
 		f.ContentSize -= 256
 		dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
@@ -88,7 +88,7 @@
 	default:
 		panic("invalid fcs")
 	}
-	return dst, nil
+	return dst
 }
 
 const skippableFrameHeader = 4 + 4
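
The frame-content-size cases in appendTo follow the spec's field widths, and they line up with the header estimate in MaxEncodedSize. A sketch of the width selection (package and function names are illustrative):

```go
package zstdnotes // illustrative

import "math"

// fcsFieldSize returns how many bytes the frame-content-size field
// occupies: 0 or 1 byte below 256 (1 only in single-segment frames),
// 2 bytes stored minus 256 up to 65791, then 4 or 8 bytes.
func fcsFieldSize(contentSize uint64, singleSegment bool) int {
	switch {
	case contentSize < 256:
		if singleSegment {
			return 1
		}
		return 0 // not stored unless single-segment
	case contentSize < 65536+256:
		return 2
	case contentSize <= math.MaxUint32:
		return 4
	default:
		return 8
	}
}
```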
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
index c881d28..d04a829 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -21,7 +21,8 @@
 
 // buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
 // Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
 func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 
 // please keep in sync with _generate/gen_fse.go
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
index da32b44..bcde398 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -1,7 +1,6 @@
 // Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 // func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
 TEXT ·buildDtable_asm(SB), $0-24
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
index 332e51f..8adfebb 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -20,10 +20,9 @@
 			if v == -1 {
 				s.dt[highThreshold].setAddBits(uint8(i))
 				highThreshold--
-				symbolNext[i] = 1
-			} else {
-				symbolNext[i] = uint16(v)
+				v = 1
 			}
+			symbolNext[i] = uint16(v)
 		}
 	}
 
@@ -35,10 +34,12 @@
 		for ss, v := range s.norm[:s.symbolLen] {
 			for i := 0; i < int(v); i++ {
 				s.dt[position].setAddBits(uint8(ss))
-				position = (position + step) & tableMask
-				for position > highThreshold {
+				for {
 					// lowprob area
 					position = (position + step) & tableMask
+					if position <= highThreshold {
+						break
+					}
 				}
 			}
 		}
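
The restructured loop keeps stepping until the position lands at or below highThreshold, since slots above it are reserved for low-probability symbols. A standalone sketch of this spread step (not the vendored code; names and the int-based table are illustrative):

```go
package zstdnotes // illustrative

// spreadSymbols fills a decoding table the way the loop above does:
// place each symbol count times, advancing by a fixed step modulo the
// table size and skipping the low-probability area above highThreshold.
func spreadSymbols(norm []int, tableLog uint, highThreshold int) []int {
	tableSize := 1 << tableLog
	tableMask := tableSize - 1
	step := (tableSize >> 1) + (tableSize >> 3) + 3
	table := make([]int, tableSize)
	position := 0
	for sym, count := range norm {
		for i := 0; i < count; i++ {
			table[position] = sym
			for {
				position = (position + step) & tableMask
				if position <= highThreshold {
					break
				}
			}
		}
	}
	return table
}
```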
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index 28b4015..0916485 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -37,26 +37,23 @@
 	h.ignoreBuffer = 0
 	h.error = false
 	h.recentOffsets = [3]int{1, 4, 8}
-	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
-	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
-		fseDecoderPool.Put(f)
-	}
+	h.decoders.freeDecoders()
 	h.decoders = sequenceDecs{br: h.decoders.br}
-	if h.huffTree != nil {
-		if h.dict == nil || h.dict.litEnc != h.huffTree {
-			huffDecoderPool.Put(h.huffTree)
-		}
-	}
+	h.freeHuffDecoder()
 	h.huffTree = nil
 	h.dict = nil
 	//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
 }
 
+func (h *history) freeHuffDecoder() {
+	if h.huffTree != nil {
+		if h.dict == nil || h.dict.litEnc != h.huffTree {
+			huffDecoderPool.Put(h.huffTree)
+			h.huffTree = nil
+		}
+	}
+}
+
 func (h *history) setDict(dict *dict) {
 	if dict == nil {
 		return
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
index 69aa3bb..777290d 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -2,12 +2,7 @@
 
 VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
 
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
 high-quality hashing algorithm that is much faster than anything in the Go
 standard library.
 
@@ -28,31 +23,49 @@
 func (*Digest) Sum64() uint64
 ```
 
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
 
 ## Benchmarks
 
 Here are some quick benchmarks comparing the pure-Go and assembly
 implementations of Sum64.
 
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
-| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego    | asm       |
+| ---------- | --------- | --------- |
+| 4 B        |  1.3 GB/s |  1.2 GB/s |
+| 16 B       |  2.9 GB/s |  3.5 GB/s |
+| 100 B      |  6.9 GB/s |  8.1 GB/s |
+| 4 KB       | 11.7 GB/s | 16.7 GB/s |
+| 10 MB      | 12.0 GB/s | 17.3 GB/s |
 
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
 
 ```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
 ```
 
 ## Projects using this package
 
 - [InfluxDB](https://github.com/influxdata/influxdb)
 - [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
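
For completeness, the API the README lists, in one-shot and streaming form against the upstream github.com/cespare/xxhash/v2 package (the printed values depend on the input):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot.
	fmt.Printf("%016x\n", xxhash.Sum64String("hello"))

	// Streaming; Digest implements hash.Hash64.
	d := xxhash.New()
	d.WriteString("he")
	d.WriteString("llo")
	fmt.Printf("%016x\n", d.Sum64()) // same as the one-shot value
}
```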
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 2c112a0..fc40c82 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -18,19 +18,11 @@
 	prime5 uint64 = 2870177450012600261
 )
 
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array for the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
 type Digest struct {
@@ -52,10 +44,10 @@
 
 // Reset clears the Digest's state so that it can be reused.
 func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
+	d.v1 = primes[0] + prime2
 	d.v2 = prime2
 	d.v3 = 0
-	d.v4 = -prime1v
+	d.v4 = -primes[0]
 	d.total = 0
 	d.n = 0
 }
@@ -71,21 +63,23 @@
 	n = len(b)
 	d.total += uint64(n)
 
+	memleft := d.mem[d.n&(len(d.mem)-1):]
+
 	if d.n+n < 32 {
 		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
+		copy(memleft, b)
 		d.n += n
 		return
 	}
 
 	if d.n > 0 {
 		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
+		c := copy(memleft, b)
 		d.v1 = round(d.v1, u64(d.mem[0:8]))
 		d.v2 = round(d.v2, u64(d.mem[8:16]))
 		d.v3 = round(d.v3, u64(d.mem[16:24]))
 		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
+		b = b[c:]
 		d.n = 0
 	}
 
@@ -135,21 +129,20 @@
 
 	h += d.total
 
-	i, end := 0, d.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(d.mem[i:i+8]))
+	b := d.mem[:d.n&(len(d.mem)-1)]
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for i < end {
-		h ^= uint64(d.mem[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
-		i++
 	}
 
 	h ^= h >> 33
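
The tail handling above feeds into XXH64's standard finalizer, which the code enters at `h ^= h >> 33`. As a reference, the avalanche step in isolation (constants are the XXH64 primes from this file; the package name is illustrative):

```go
package zstdnotes // illustrative

const (
	prime2 uint64 = 14029467366897019727
	prime3 uint64 = 1609587929392839161
)

// avalanche is the XXH64 finalization: three xor-shift/multiply rounds
// that mix every input bit into every output bit.
func avalanche(h uint64) uint64 {
	h ^= h >> 33
	h *= prime2
	h ^= h >> 29
	h *= prime3
	h ^= h >> 32
	return h
}
```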
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index cea1785..ddb63aa 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,3 +1,4 @@
+//go:build !appengine && gc && !purego && !noasm
 // +build !appengine
 // +build gc
 // +build !purego
@@ -5,212 +6,205 @@
 
 #include "textflag.h"
 
-// Register allocation:
-// AX	h
-// SI	pointer to advance through b
-// DX	n
-// BX	loop end
-// R8	v1, k1
-// R9	v2
-// R10	v3
-// R11	v4
-// R12	tmp
-// R13	prime1v
-// R14	prime2v
-// DI	prime4v
+// Registers:
+#define h      AX
+#define d      AX
+#define p      SI // pointer to advance through b
+#define n      DX
+#define end    BX // loop end
+#define v1     R8
+#define v2     R9
+#define v3     R10
+#define v4     R11
+#define x      R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
 
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
-	MOVQ  (SI), R12 \
-	ADDQ  $8, SI    \
-	IMULQ R14, R12  \
-	ADDQ  R12, r    \
-	ROLQ  $31, r    \
-	IMULQ R13, r
+#define round(acc, x) \
+	IMULQ prime2, x   \
+	ADDQ  x, acc      \
+	ROLQ  $31, acc    \
+	IMULQ prime1, acc
 
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
-	IMULQ R14, val \
-	ROLQ  $31, val \
-	IMULQ R13, val \
-	XORQ  val, acc \
-	IMULQ R13, acc \
-	ADDQ  DI, acc
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+	IMULQ prime2, x \
+	ROLQ  $31, x    \
+	IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+	round0(x)         \
+	XORQ  x, acc      \
+	IMULQ prime1, acc \
+	ADDQ  prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop:  \
+	MOVQ +0(p), x  \
+	round(v1, x)   \
+	MOVQ +8(p), x  \
+	round(v2, x)   \
+	MOVQ +16(p), x \
+	round(v3, x)   \
+	MOVQ +24(p), x \
+	round(v4, x)   \
+	ADDQ $32, p    \
+	CMPQ p, end    \
+	JLE  loop
 
 // func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
 	// Load fixed primes.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), DI
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
+	MOVQ ·primes+24(SB), prime4
 
 	// Load slice.
-	MOVQ b_base+0(FP), SI
-	MOVQ b_len+8(FP), DX
-	LEAQ (SI)(DX*1), BX
+	MOVQ b_base+0(FP), p
+	MOVQ b_len+8(FP), n
+	LEAQ (p)(n*1), end
 
 	// The first loop limit will be len(b)-32.
-	SUBQ $32, BX
+	SUBQ $32, end
 
 	// Check whether we have at least one block.
-	CMPQ DX, $32
+	CMPQ n, $32
 	JLT  noBlocks
 
 	// Set up initial state (v1, v2, v3, v4).
-	MOVQ R13, R8
-	ADDQ R14, R8
-	MOVQ R14, R9
-	XORQ R10, R10
-	XORQ R11, R11
-	SUBQ R13, R11
+	MOVQ prime1, v1
+	ADDQ prime2, v1
+	MOVQ prime2, v2
+	XORQ v3, v3
+	XORQ v4, v4
+	SUBQ prime1, v4
 
-	// Loop until SI > BX.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
+	blockLoop()
 
-	CMPQ SI, BX
-	JLE  blockLoop
+	MOVQ v1, h
+	ROLQ $1, h
+	MOVQ v2, x
+	ROLQ $7, x
+	ADDQ x, h
+	MOVQ v3, x
+	ROLQ $12, x
+	ADDQ x, h
+	MOVQ v4, x
+	ROLQ $18, x
+	ADDQ x, h
 
-	MOVQ R8, AX
-	ROLQ $1, AX
-	MOVQ R9, R12
-	ROLQ $7, R12
-	ADDQ R12, AX
-	MOVQ R10, R12
-	ROLQ $12, R12
-	ADDQ R12, AX
-	MOVQ R11, R12
-	ROLQ $18, R12
-	ADDQ R12, AX
-
-	mergeRound(AX, R8)
-	mergeRound(AX, R9)
-	mergeRound(AX, R10)
-	mergeRound(AX, R11)
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
 
 	JMP afterBlocks
 
 noBlocks:
-	MOVQ ·prime5v(SB), AX
+	MOVQ ·primes+32(SB), h
 
 afterBlocks:
-	ADDQ DX, AX
+	ADDQ n, h
 
-	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
-	ADDQ $24, BX
+	ADDQ $24, end
+	CMPQ p, end
+	JG   try4
 
-	CMPQ SI, BX
-	JG   fourByte
+loop8:
+	MOVQ  (p), x
+	ADDQ  $8, p
+	round0(x)
+	XORQ  x, h
+	ROLQ  $27, h
+	IMULQ prime1, h
+	ADDQ  prime4, h
 
-wordLoop:
-	// Calculate k1.
-	MOVQ  (SI), R8
-	ADDQ  $8, SI
-	IMULQ R14, R8
-	ROLQ  $31, R8
-	IMULQ R13, R8
+	CMPQ p, end
+	JLE  loop8
 
-	XORQ  R8, AX
-	ROLQ  $27, AX
-	IMULQ R13, AX
-	ADDQ  DI, AX
+try4:
+	ADDQ $4, end
+	CMPQ p, end
+	JG   try1
 
-	CMPQ SI, BX
-	JLE  wordLoop
+	MOVL  (p), x
+	ADDQ  $4, p
+	IMULQ prime1, x
+	XORQ  x, h
 
-fourByte:
-	ADDQ $4, BX
-	CMPQ SI, BX
-	JG   singles
+	ROLQ  $23, h
+	IMULQ prime2, h
+	ADDQ  ·primes+16(SB), h
 
-	MOVL  (SI), R8
-	ADDQ  $4, SI
-	IMULQ R13, R8
-	XORQ  R8, AX
-
-	ROLQ  $23, AX
-	IMULQ R14, AX
-	ADDQ  ·prime3v(SB), AX
-
-singles:
-	ADDQ $4, BX
-	CMPQ SI, BX
+try1:
+	ADDQ $4, end
+	CMPQ p, end
 	JGE  finalize
 
-singlesLoop:
-	MOVBQZX (SI), R12
-	ADDQ    $1, SI
-	IMULQ   ·prime5v(SB), R12
-	XORQ    R12, AX
+loop1:
+	MOVBQZX (p), x
+	ADDQ    $1, p
+	IMULQ   ·primes+32(SB), x
+	XORQ    x, h
+	ROLQ    $11, h
+	IMULQ   prime1, h
 
-	ROLQ  $11, AX
-	IMULQ R13, AX
-
-	CMPQ SI, BX
-	JL   singlesLoop
+	CMPQ p, end
+	JL   loop1
 
 finalize:
-	MOVQ  AX, R12
-	SHRQ  $33, R12
-	XORQ  R12, AX
-	IMULQ R14, AX
-	MOVQ  AX, R12
-	SHRQ  $29, R12
-	XORQ  R12, AX
-	IMULQ ·prime3v(SB), AX
-	MOVQ  AX, R12
-	SHRQ  $32, R12
-	XORQ  R12, AX
+	MOVQ  h, x
+	SHRQ  $33, x
+	XORQ  x, h
+	IMULQ prime2, h
+	MOVQ  h, x
+	SHRQ  $29, x
+	XORQ  x, h
+	IMULQ ·primes+16(SB), h
+	MOVQ  h, x
+	SHRQ  $32, x
+	XORQ  x, h
 
-	MOVQ AX, ret+24(FP)
+	MOVQ h, ret+24(FP)
 	RET
 
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
 // func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
 	// Load fixed primes needed for round.
-	MOVQ ·prime1v(SB), R13
-	MOVQ ·prime2v(SB), R14
+	MOVQ ·primes+0(SB), prime1
+	MOVQ ·primes+8(SB), prime2
 
 	// Load slice.
-	MOVQ b_base+8(FP), SI
-	MOVQ b_len+16(FP), DX
-	LEAQ (SI)(DX*1), BX
-	SUBQ $32, BX
+	MOVQ b_base+8(FP), p
+	MOVQ b_len+16(FP), n
+	LEAQ (p)(n*1), end
+	SUBQ $32, end
 
 	// Load vN from d.
-	MOVQ d+0(FP), AX
-	MOVQ 0(AX), R8   // v1
-	MOVQ 8(AX), R9   // v2
-	MOVQ 16(AX), R10 // v3
-	MOVQ 24(AX), R11 // v4
+	MOVQ s+0(FP), d
+	MOVQ 0(d), v1
+	MOVQ 8(d), v2
+	MOVQ 16(d), v3
+	MOVQ 24(d), v4
 
 	// We don't need to check the loop condition here; this function is
 	// always called with at least one block of data to process.
-blockLoop:
-	round(R8)
-	round(R9)
-	round(R10)
-	round(R11)
-
-	CMPQ SI, BX
-	JLE  blockLoop
+	blockLoop()
 
 	// Copy vN back to d.
-	MOVQ R8, 0(AX)
-	MOVQ R9, 8(AX)
-	MOVQ R10, 16(AX)
-	MOVQ R11, 24(AX)
+	MOVQ v1, 0(d)
+	MOVQ v2, 8(d)
+	MOVQ v3, 16(d)
+	MOVQ v4, 24(d)
 
-	// The number of bytes written is SI minus the old base pointer.
-	SUBQ b_base+8(FP), SI
-	MOVQ SI, ret+32(FP)
+	// The number of bytes written is p minus the old base pointer.
+	SUBQ b_base+8(FP), p
+	MOVQ p, ret+32(FP)
 
 	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 4d64a17..ae7d4d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -1,13 +1,17 @@
-// +build gc,!purego,!noasm
+//go:build !appengine && gc && !purego && !noasm
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
 
 #include "textflag.h"
 
-// Register allocation.
+// Registers:
 #define digest	R1
-#define h	R2 // Return value.
-#define p	R3 // Input pointer.
-#define len	R4
-#define nblocks	R5 // len / 32.
+#define h	R2 // return value
+#define p	R3 // input pointer
+#define n	R4 // input length
+#define nblocks	R5 // n / 32
 #define prime1	R7
 #define prime2	R8
 #define prime3	R9
@@ -25,60 +29,52 @@
 #define round(acc, x) \
 	MADD prime2, acc, x, acc \
 	ROR  $64-31, acc         \
-	MUL  prime1, acc         \
+	MUL  prime1, acc
 
-// x = round(0, x).
+// round0 performs the operation x = round(0, x).
 #define round0(x) \
 	MUL prime2, x \
 	ROR $64-31, x \
-	MUL prime1, x \
+	MUL prime1, x
 
-#define mergeRound(x) \
-	round0(x)                 \
-	EOR  x, h                 \
-	MADD h, prime4, prime1, h \
+#define mergeRound(acc, x) \
+	round0(x)                     \
+	EOR  x, acc                   \
+	MADD acc, prime4, prime1, acc
 
-// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
-#define blocksLoop() \
-	LSR     $5, len, nblocks \
-	PCALIGN $16              \
-	loop:                    \
-	LDP.P   32(p), (x1, x2)  \
-	round(v1, x1)            \
-	LDP     -16(p), (x3, x4) \
-	round(v2, x2)            \
-	SUB     $1, nblocks      \
-	round(v3, x3)            \
-	round(v4, x4)            \
-	CBNZ    nblocks, loop    \
-
-// The primes are repeated here to ensure that they're stored
-// in a contiguous array, so we can load them with LDP.
-DATA primes<> +0(SB)/8, $11400714785074694791
-DATA primes<> +8(SB)/8, $14029467366897019727
-DATA primes<>+16(SB)/8, $1609587929392839161
-DATA primes<>+24(SB)/8, $9650029242287828579
-DATA primes<>+32(SB)/8, $2870177450012600261
-GLOBL primes<>(SB), NOPTR+RODATA, $40
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+	LSR     $5, n, nblocks  \
+	PCALIGN $16             \
+	loop:                   \
+	LDP.P   16(p), (x1, x2) \
+	LDP.P   16(p), (x3, x4) \
+	round(v1, x1)           \
+	round(v2, x2)           \
+	round(v3, x3)           \
+	round(v4, x4)           \
+	SUB     $1, nblocks     \
+	CBNZ    nblocks, loop
 
 // func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
-	LDP b_base+0(FP), (p, len)
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+	LDP b_base+0(FP), (p, n)
 
-	LDP  primes<> +0(SB), (prime1, prime2)
-	LDP  primes<>+16(SB), (prime3, prime4)
-	MOVD primes<>+32(SB), prime5
+	LDP  ·primes+0(SB), (prime1, prime2)
+	LDP  ·primes+16(SB), (prime3, prime4)
+	MOVD ·primes+32(SB), prime5
 
-	CMP  $32, len
-	CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
-	BLO  afterLoop
+	CMP  $32, n
+	CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+	BLT  afterLoop
 
 	ADD  prime1, prime2, v1
 	MOVD prime2, v2
 	MOVD $0, v3
 	NEG  prime1, v4
 
-	blocksLoop()
+	blockLoop()
 
 	ROR $64-1, v1, x1
 	ROR $64-7, v2, x2
@@ -88,71 +84,75 @@
 	ADD x3, x4
 	ADD x2, x4, h
 
-	mergeRound(v1)
-	mergeRound(v2)
-	mergeRound(v3)
-	mergeRound(v4)
+	mergeRound(h, v1)
+	mergeRound(h, v2)
+	mergeRound(h, v3)
+	mergeRound(h, v4)
 
 afterLoop:
-	ADD len, h
+	ADD n, h
 
-	TBZ   $4, len, try8
+	TBZ   $4, n, try8
 	LDP.P 16(p), (x1, x2)
 
 	round0(x1)
+
+	// NOTE: here and below, sequencing the EOR after the ROR (using a
+	// rotated register) is worth a small but measurable speedup for small
+	// inputs.
 	ROR  $64-27, h
 	EOR  x1 @> 64-27, h, h
 	MADD h, prime4, prime1, h
 
 	round0(x2)
 	ROR  $64-27, h
-	EOR  x2 @> 64-27, h
+	EOR  x2 @> 64-27, h, h
 	MADD h, prime4, prime1, h
 
 try8:
-	TBZ    $3, len, try4
+	TBZ    $3, n, try4
 	MOVD.P 8(p), x1
 
 	round0(x1)
 	ROR  $64-27, h
-	EOR  x1 @> 64-27, h
+	EOR  x1 @> 64-27, h, h
 	MADD h, prime4, prime1, h
 
 try4:
-	TBZ     $2, len, try2
+	TBZ     $2, n, try2
 	MOVWU.P 4(p), x2
 
 	MUL  prime1, x2
 	ROR  $64-23, h
-	EOR  x2 @> 64-23, h
+	EOR  x2 @> 64-23, h, h
 	MADD h, prime3, prime2, h
 
 try2:
-	TBZ     $1, len, try1
+	TBZ     $1, n, try1
 	MOVHU.P 2(p), x3
 	AND     $255, x3, x1
 	LSR     $8, x3, x2
 
 	MUL prime5, x1
 	ROR $64-11, h
-	EOR x1 @> 64-11, h
+	EOR x1 @> 64-11, h, h
 	MUL prime1, h
 
 	MUL prime5, x2
 	ROR $64-11, h
-	EOR x2 @> 64-11, h
+	EOR x2 @> 64-11, h, h
 	MUL prime1, h
 
 try1:
-	TBZ   $0, len, end
+	TBZ   $0, n, finalize
 	MOVBU (p), x4
 
 	MUL prime5, x4
 	ROR $64-11, h
-	EOR x4 @> 64-11, h
+	EOR x4 @> 64-11, h, h
 	MUL prime1, h
 
-end:
+finalize:
 	EOR h >> 33, h
 	MUL prime2, h
 	EOR h >> 29, h
@@ -162,25 +162,23 @@
 	MOVD h, ret+24(FP)
 	RET
 
-// func writeBlocks(d *Digest, b []byte) int
-//
-// Assumes len(b) >= 32.
-TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
-	LDP primes<>(SB), (prime1, prime2)
+// func writeBlocks(s *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+	LDP ·primes+0(SB), (prime1, prime2)
 
 	// Load state. Assume v[1-4] are stored contiguously.
-	MOVD d+0(FP), digest
+	MOVD s+0(FP), digest
 	LDP  0(digest), (v1, v2)
 	LDP  16(digest), (v3, v4)
 
-	LDP b_base+8(FP), (p, len)
+	LDP b_base+8(FP), (p, n)
 
-	blocksLoop()
+	blockLoop()
 
 	// Store updated state.
 	STP (v1, v2), 0(digest)
 	STP (v3, v4), 16(digest)
 
-	BIC  $31, len
-	MOVD len, ret+32(FP)
+	BIC  $31, n
+	MOVD n, ret+32(FP)
 	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 1a1fac9..d4221ed 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -13,4 +13,4 @@
 func Sum64(b []byte) uint64
 
 //go:noescape
-func writeBlocks(d *Digest, b []byte) int
+func writeBlocks(s *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 209cb4a..0be16ce 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -15,10 +15,10 @@
 	var h uint64
 
 	if n >= 32 {
-		v1 := prime1v + prime2
+		v1 := primes[0] + prime2
 		v2 := prime2
 		v3 := uint64(0)
-		v4 := -prime1v
+		v4 := -primes[0]
 		for len(b) >= 32 {
 			v1 = round(v1, u64(b[0:8:len(b)]))
 			v2 = round(v2, u64(b[8:16:len(b)]))
@@ -37,19 +37,18 @@
 
 	h += uint64(n)
 
-	i, end := 0, len(b)
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(b[i:i+8:len(b)]))
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for ; i < end; i++ {
-		h ^= uint64(b[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
 	}
 
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
new file mode 100644
index 0000000..f41932b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+//	len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
new file mode 100644
index 0000000..0782b86
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -0,0 +1,66 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+	MOVQ a_base+0(FP), AX
+	MOVQ b_base+24(FP), CX
+	MOVQ a_len+8(FP), DX
+
+	// matchLen
+	XORL SI, SI
+	CMPL DX, $0x08
+	JB   matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+	MOVQ (AX)(SI*1), BX
+	XORQ (CX)(SI*1), BX
+	JZ   matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+	TZCNTQ BX, BX
+#else
+	BSFQ BX, BX
+#endif
+	SHRL $0x03, BX
+	LEAL (SI)(BX*1), SI
+	JMP  gen_match_len_end
+
+matchlen_loop_standalone:
+	LEAL -8(DX), DX
+	LEAL 8(SI), SI
+	CMPL DX, $0x08
+	JAE  matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+	CMPL DX, $0x04
+	JB   matchlen_match2_standalone
+	MOVL (AX)(SI*1), BX
+	CMPL (CX)(SI*1), BX
+	JNE  matchlen_match2_standalone
+	LEAL -4(DX), DX
+	LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+	CMPL DX, $0x02
+	JB   matchlen_match1_standalone
+	MOVW (AX)(SI*1), BX
+	CMPW (CX)(SI*1), BX
+	JNE  matchlen_match1_standalone
+	LEAL -2(DX), DX
+	LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+	CMPL DX, $0x01
+	JB   gen_match_len_end
+	MOVB (AX)(SI*1), BL
+	CMPB (CX)(SI*1), BL
+	JNE  gen_match_len_end
+	INCL SI
+
+gen_match_len_end:
+	MOVQ SI, ret+48(FP)
+	RET
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
new file mode 100644
index 0000000..bea1779
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -0,0 +1,38 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+	left := len(a)
+	for left >= 8 {
+		diff := le.Load64(a, n) ^ le.Load64(b, n)
+		if diff != 0 {
+			return n + bits.TrailingZeros64(diff)>>3
+		}
+		n += 8
+		left -= 8
+	}
+	a = a[n:]
+	b = b[n:]
+
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
+}
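
The generic matchLen relies on the XOR trick: the first differing byte index is the trailing-zero count of the XOR of two 8-byte words, divided by 8. A portable, self-contained restatement using encoding/binary instead of the internal le helper (the function name is illustrative):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLenRef restates the generic implementation above. It assumes
// len(a) <= len(b), as the vendored contract requires.
func matchLenRef(a, b []byte) (n int) {
	for len(a) >= 8 {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// Each trailing zero byte of the XOR is one matching byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLenRef([]byte("abcdefgh12"), []byte("abcdefgh34"))) // 8
}
```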
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index df04472..9a7de82 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -99,6 +99,21 @@
 	return nil
 }
 
+func (s *sequenceDecs) freeDecoders() {
+	if f := s.litLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.litLengths.fse = nil
+	}
+	if f := s.offsets.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.offsets.fse = nil
+	}
+	if f := s.matchLengths.fse; f != nil && !f.preDefined {
+		fseDecoderPool.Put(f)
+		s.matchLengths.fse = nil
+	}
+}
+
 // execute will execute the decoded sequence with the provided history.
 // The sequence must be evaluated before being sent.
 func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -221,13 +236,16 @@
 		maxBlockSize = s.windowSize
 	}
 
+	if debugDecoder {
+		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+	}
 	for i := seqs - 1; i >= 0; i-- {
 		if br.overread() {
-			printf("reading sequence %d, exceeded available data\n", seqs-i)
+			printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
 			return io.ErrUnexpectedEOF
 		}
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 
@@ -299,7 +317,7 @@
 		}
 		size := ll + ml + len(out)
 		if size-startSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		if size > cap(out) {
 			// Not enough size, which can happen under high volume block streaming conditions
@@ -409,9 +427,8 @@
 		}
 	}
 
-	// Check if space for literals
-	if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 
 	// Add final literals
@@ -435,18 +452,13 @@
 
 	// extra bits are stored in reverse order.
 	br.fill()
-	if s.maxBits <= 32 {
-		mo += br.getBits(moB)
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-	} else {
-		mo += br.getBits(moB)
+	mo += br.getBits(moB)
+	if s.maxBits > 32 {
 		br.fill()
-		// matchlength+literal length, max 32 bits
-		ml += br.getBits(mlB)
-		ll += br.getBits(llB)
-
 	}
+	// matchlength+literal length, max 32 bits
+	ml += br.getBits(mlB)
+	ll += br.getBits(llB)
 	mo = s.adjustOffset(mo, ll, moB)
 	return
 }
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 7598c10..c59f17e 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -5,6 +5,7 @@
 
 import (
 	"fmt"
+	"io"
 
 	"github.com/klauspost/compress/internal/cpuinfo"
 )
@@ -32,18 +33,22 @@
 // sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
 // sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
+//
 //go:noescape
 func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 
@@ -130,20 +135,23 @@
 		return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
 			ctx.ll, ctx.litRemain+ctx.ll)
 
+	case errorOverread:
+		return true, io.ErrUnexpectedEOF
+
 	case errorNotEnoughSpace:
 		size := ctx.outPosition + ctx.ll + ctx.ml
 		if debugDecoder {
 			println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
 		}
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 
 	default:
-		return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+		return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
 	}
 
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
@@ -198,23 +206,30 @@
 // error reported when capacity of `out` is too small
 const errorNotEnoughSpace = 5
 
+// error reported when bits are overread.
+const errorOverread = 6
+
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
 // sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
 //go:noescape
 func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 
@@ -239,6 +254,10 @@
 		litRemain: len(s.literals),
 	}
 
+	if debugDecoder {
+		println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+	}
+
 	s.seqSize = 0
 	lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
 	var errCode int
@@ -269,9 +288,11 @@
 		case errorNotEnoughLiterals:
 			ll := ctx.seqs[i].ll
 			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+		case errorOverread:
+			return io.ErrUnexpectedEOF
 		}
 
-		return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+		return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
 	}
 
 	if ctx.litRemain < 0 {
@@ -281,7 +302,10 @@
 
 	s.seqSize += ctx.litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+	}
+	if debugDecoder {
+		println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
 	}
 	err := br.close()
 	if err != nil {
@@ -308,10 +332,12 @@
 // Returns false if a match offset is too big.
 //
 // Please refer to seqdec_generic.go for the reference implementation.
+//
 //go:noescape
 func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
 
 // Same as above, but with safe memcopies
+//
 //go:noescape
 func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
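
The Go wrappers translate these integer codes into errors. A compact sketch of that mapping, using only the codes visible in this file (package and function names are illustrative; the real wrappers also format counts from the decoding context):

```go
package zstdnotes // illustrative

import (
	"fmt"
	"io"
)

// Return codes produced by the assembly decoders, as defined above.
const (
	okCode              = 0
	errorNotEnoughSpace = 5
	errorOverread       = 6
)

// asmCodeToError sketches how the wrappers surface the codes.
func asmCodeToError(code, maxBlockSize int) error {
	switch code {
	case okCode:
		return nil
	case errorOverread:
		return io.ErrUnexpectedEOF
	case errorNotEnoughSpace:
		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
	default:
		return fmt.Errorf("sequenceDecs_decode returned erroneous code %d", code)
	}
}
```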
 
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 27e7677..a708ca6 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1,16 +1,15 @@
 // Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
 
 //go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
 
 // func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -39,7 +38,7 @@
 
 sequenceDecs_decode_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decode_amd64_fill_end
+	JLE     sequenceDecs_decode_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decode_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -50,6 +49,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decode_amd64_fill_byte_by_byte
 
+sequenceDecs_decode_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -106,7 +109,7 @@
 
 sequenceDecs_decode_amd64_fill_2_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decode_amd64_fill_2_end
+	JLE     sequenceDecs_decode_amd64_fill_2_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decode_amd64_fill_2_end
 	SHLQ    $0x08, DX
@@ -117,6 +120,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decode_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decode_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_amd64_fill_2_end:
 	// Update literal length
 	MOVQ  DI, AX
@@ -150,8 +157,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R14
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -170,8 +176,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R14
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -190,8 +195,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R14
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -294,9 +298,9 @@
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -321,18 +325,19 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: CMOV
 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -361,7 +366,7 @@
 
 sequenceDecs_decode_56_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decode_56_amd64_fill_end
+	JLE     sequenceDecs_decode_56_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decode_56_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -372,6 +377,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decode_56_amd64_fill_byte_by_byte
 
+sequenceDecs_decode_56_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_56_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -447,8 +456,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R14
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -467,8 +475,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R14
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -487,8 +494,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R14
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R14*1), CX
 	MOVQ    DX, R15
 	MOVQ    CX, BX
@@ -591,9 +597,9 @@
 	MOVQ R12, 152(AX)
 	MOVQ R13, 160(AX)
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -618,18 +624,19 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -658,7 +665,7 @@
 
 sequenceDecs_decode_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decode_bmi2_fill_end
+	JLE     sequenceDecs_decode_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decode_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -669,6 +676,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decode_bmi2_fill_byte_by_byte
 
+sequenceDecs_decode_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -709,7 +720,7 @@
 
 sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decode_bmi2_fill_2_end
+	JLE     sequenceDecs_decode_bmi2_fill_2_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decode_bmi2_fill_2_end
 	SHLQ    $0x08, AX
@@ -720,6 +731,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decode_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decode_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ   $0x00000808, CX
@@ -751,11 +766,10 @@
 	BZHIQ   R14, R15, R15
 
 	// Update Offset State
-	BZHIQ  R8, R15, CX
-	SHRXQ  R8, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -763,11 +777,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R15, CX
-	SHRXQ  DI, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -775,10 +788,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R15, CX
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R15, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -871,9 +883,9 @@
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -898,18 +910,19 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 // Requires: BMI, BMI2, CMOV
 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -938,7 +951,7 @@
 
 sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decode_56_bmi2_fill_end
+	JLE     sequenceDecs_decode_56_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decode_56_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -949,6 +962,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decode_56_bmi2_fill_byte_by_byte
 
+sequenceDecs_decode_56_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decode_56_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -1006,11 +1023,10 @@
 	BZHIQ   R14, R15, R15
 
 	// Update Offset State
-	BZHIQ  R8, R15, CX
-	SHRXQ  R8, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R15, CX
+	SHRXQ R8, R15, R15
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -1018,11 +1034,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R15, CX
-	SHRXQ  DI, R15, R15
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R15, CX
+	SHRXQ DI, R15, R15
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -1030,10 +1045,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R15, CX
-	MOVQ   $0x00001010, R14
-	BEXTRQ R14, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R15, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -1126,9 +1140,9 @@
 	MOVQ R11, 152(CX)
 	MOVQ R12, 160(CX)
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Return success
 	MOVQ $0x00000000, ret+24(FP)
@@ -1153,8 +1167,9 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
-	// Return with not enough output space error
-	MOVQ $0x00000005, ret+24(FP)
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
 	RET
 
 // func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
@@ -1390,8 +1405,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1403,8 +1417,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1748,8 +1761,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1761,8 +1773,7 @@
 	MOVQ ctx+0(FP), AX
 	MOVQ DX, 24(AX)
 	MOVQ DI, 104(AX)
-	MOVQ 80(AX), CX
-	SUBQ CX, SI
+	SUBQ 80(AX), SI
 	MOVQ SI, 112(AX)
 	RET
 
@@ -1774,11 +1785,11 @@
 // func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -1803,7 +1814,7 @@
 	MOVQ    40(SP), AX
 	ADDQ    AX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)
 
 	// outBase += outPosition
@@ -1825,7 +1836,7 @@
 
 sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_amd64_fill_end
+	JLE     sequenceDecs_decodeSync_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -1836,6 +1847,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_amd64_fill_byte_by_byte
 
+sequenceDecs_decodeSync_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -1892,7 +1907,7 @@
 
 sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_amd64_fill_2_end
+	JLE     sequenceDecs_decodeSync_amd64_fill_2_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_amd64_fill_2_end
 	SHLQ    $0x08, DX
@@ -1903,6 +1918,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_amd64_fill_2_end:
 	// Update literal length
 	MOVQ  DI, AX
@@ -1936,8 +1955,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R13
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -1956,8 +1974,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R13
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -1976,8 +1993,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R13
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -2264,9 +2280,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2312,6 +2328,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -2326,11 +2347,11 @@
 // func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -2355,7 +2376,7 @@
 	MOVQ    40(SP), CX
 	ADDQ    CX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)
 
 	// outBase += outPosition
@@ -2377,7 +2398,7 @@
 
 sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_bmi2_fill_end
+	JLE     sequenceDecs_decodeSync_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -2388,6 +2409,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
 
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -2428,7 +2453,7 @@
 
 sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_bmi2_fill_2_end
+	JLE     sequenceDecs_decodeSync_bmi2_fill_2_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_bmi2_fill_2_end
 	SHLQ    $0x08, AX
@@ -2439,6 +2464,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ   $0x00000808, CX
@@ -2470,11 +2499,10 @@
 	BZHIQ   R13, R14, R14
 
 	// Update Offset State
-	BZHIQ  R8, R14, CX
-	SHRXQ  R8, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -2482,11 +2510,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R14, CX
-	SHRXQ  DI, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -2494,10 +2521,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R14, CX
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R14, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -2774,9 +2800,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -2822,6 +2848,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -2836,11 +2867,11 @@
 // func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
-	MOVQ    br+8(FP), AX
-	MOVQ    32(AX), DX
-	MOVBQZX 40(AX), BX
-	MOVQ    24(AX), SI
-	MOVQ    (AX), AX
+	MOVQ    br+8(FP), CX
+	MOVQ    24(CX), DX
+	MOVBQZX 40(CX), BX
+	MOVQ    (CX), AX
+	MOVQ    32(CX), SI
 	ADDQ    SI, AX
 	MOVQ    AX, (SP)
 	MOVQ    ctx+16(FP), AX
@@ -2865,7 +2896,7 @@
 	MOVQ    40(SP), AX
 	ADDQ    AX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R10, 32(SP)
 
 	// outBase += outPosition
@@ -2887,7 +2918,7 @@
 
 sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_safe_amd64_fill_end
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_amd64_fill_end
 	SHLQ    $0x08, DX
@@ -2898,6 +2929,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
 
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_amd64_fill_end:
 	// Update offset
 	MOVQ  R9, AX
@@ -2954,7 +2989,7 @@
 
 sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
 	CMPQ    SI, $0x00
-	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_end
+	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
 	CMPQ    BX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_amd64_fill_2_end
 	SHLQ    $0x08, DX
@@ -2965,6 +3000,10 @@
 	ORQ     AX, DX
 	JMP     sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+	CMPQ BX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_amd64_fill_2_end:
 	// Update literal length
 	MOVQ  DI, AX
@@ -2998,8 +3037,7 @@
 
 	// Update Literal Length State
 	MOVBQZX DI, R13
-	SHRQ    $0x10, DI
-	MOVWQZX DI, DI
+	SHRL    $0x10, DI
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3018,8 +3056,7 @@
 
 	// Update Match Length State
 	MOVBQZX R8, R13
-	SHRQ    $0x10, R8
-	MOVWQZX R8, R8
+	SHRL    $0x10, R8
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3038,8 +3075,7 @@
 
 	// Update Offset State
 	MOVBQZX R9, R13
-	SHRQ    $0x10, R9
-	MOVWQZX R9, R9
+	SHRL    $0x10, R9
 	LEAQ    (BX)(R13*1), CX
 	MOVQ    DX, R14
 	MOVQ    CX, BX
@@ -3428,9 +3464,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), AX
-	MOVQ DX, 32(AX)
+	MOVQ DX, 24(AX)
 	MOVB BL, 40(AX)
-	MOVQ SI, 24(AX)
+	MOVQ SI, 32(AX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -3476,6 +3512,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
@@ -3490,11 +3531,11 @@
 // func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
 // Requires: BMI, BMI2, CMOV, SSE
 TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
-	MOVQ    br+8(FP), CX
-	MOVQ    32(CX), AX
-	MOVBQZX 40(CX), DX
-	MOVQ    24(CX), BX
-	MOVQ    (CX), CX
+	MOVQ    br+8(FP), BX
+	MOVQ    24(BX), AX
+	MOVBQZX 40(BX), DX
+	MOVQ    (BX), CX
+	MOVQ    32(BX), BX
 	ADDQ    BX, CX
 	MOVQ    CX, (SP)
 	MOVQ    ctx+16(FP), CX
@@ -3519,7 +3560,7 @@
 	MOVQ    40(SP), CX
 	ADDQ    CX, 48(SP)
 
-	// Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+	// Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
 	ADDQ R9, 32(SP)
 
 	// outBase += outPosition
@@ -3541,7 +3582,7 @@
 
 sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_end
+	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_end
 	SHLQ    $0x08, AX
@@ -3552,6 +3593,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
 
+sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_bmi2_fill_end:
 	// Update offset
 	MOVQ   $0x00000808, CX
@@ -3592,7 +3637,7 @@
 
 sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
 	CMPQ    BX, $0x00
-	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
 	CMPQ    DX, $0x07
 	JLE     sequenceDecs_decodeSync_safe_bmi2_fill_2_end
 	SHLQ    $0x08, AX
@@ -3603,6 +3648,10 @@
 	ORQ     CX, AX
 	JMP     sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
 
+sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
+	CMPQ DX, $0x40
+	JA   error_overread
+
 sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
 	// Update literal length
 	MOVQ   $0x00000808, CX
@@ -3634,11 +3683,10 @@
 	BZHIQ   R13, R14, R14
 
 	// Update Offset State
-	BZHIQ  R8, R14, CX
-	SHRXQ  R8, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, R8, R8
-	ADDQ   CX, R8
+	BZHIQ R8, R14, CX
+	SHRXQ R8, R14, R14
+	SHRL  $0x10, R8
+	ADDQ  CX, R8
 
 	// Load ctx.ofTable
 	MOVQ ctx+16(FP), CX
@@ -3646,11 +3694,10 @@
 	MOVQ (CX)(R8*8), R8
 
 	// Update Match Length State
-	BZHIQ  DI, R14, CX
-	SHRXQ  DI, R14, R14
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, DI, DI
-	ADDQ   CX, DI
+	BZHIQ DI, R14, CX
+	SHRXQ DI, R14, R14
+	SHRL  $0x10, DI
+	ADDQ  CX, DI
 
 	// Load ctx.mlTable
 	MOVQ ctx+16(FP), CX
@@ -3658,10 +3705,9 @@
 	MOVQ (CX)(DI*8), DI
 
 	// Update Literal Length State
-	BZHIQ  SI, R14, CX
-	MOVQ   $0x00001010, R13
-	BEXTRQ R13, SI, SI
-	ADDQ   CX, SI
+	BZHIQ SI, R14, CX
+	SHRL  $0x10, SI
+	ADDQ  CX, SI
 
 	// Load ctx.llTable
 	MOVQ ctx+16(FP), CX
@@ -4040,9 +4086,9 @@
 
 loop_finished:
 	MOVQ br+8(FP), CX
-	MOVQ AX, 32(CX)
+	MOVQ AX, 24(CX)
 	MOVB DL, 40(CX)
-	MOVQ BX, 24(CX)
+	MOVQ BX, 32(CX)
 
 	// Update the context
 	MOVQ ctx+16(FP), AX
@@ -4088,6 +4134,11 @@
 	MOVQ $0x00000004, ret+24(FP)
 	RET
 
+	// Return with overread error
+error_overread:
+	MOVQ $0x00000006, ret+24(FP)
+	RET
+
 	// Return with not enough output space error
 error_not_enough_space:
 	MOVQ ctx+16(FP), AX
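
The new fill_check_overread blocks encode a simple invariant: the DX/BX counters track how many bits of the 64-bit buffer have been consumed, and once that count exceeds $0x40 (64) after the input bytes run out, the stream has been overread and the routine returns the new error code 6. A minimal Go sketch of the same guard, with illustrative names rather than the package's internals:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // bitReader is an illustrative stand-in: value buffers bits, bitsRead
    // counts bits already consumed, cursor indexes the next input byte.
    type bitReader struct {
    	in       []byte
    	value    uint64
    	bitsRead uint
    	cursor   int
    }

    var errOverread = errors.New("overread: consumed more bits than the input held")

    // fill mirrors the assembly's fill_byte_by_byte loop plus the new
    // trailing overread check (CMPQ DX, $0x40; JA error_overread).
    func (b *bitReader) fill() error {
    	for b.cursor > 0 && b.bitsRead >= 8 {
    		b.cursor--
    		b.value = b.value<<8 | uint64(b.in[b.cursor])
    		b.bitsRead -= 8
    	}
    	if b.cursor == 0 && b.bitsRead > 64 {
    		return errOverread
    	}
    	return nil
    }

    func main() {
    	br := &bitReader{bitsRead: 72} // input exhausted, 72 bits consumed
    	fmt.Println(br.fill())         // overread
    }

The swapped 24/32 offsets in each prologue and epilogue track the same bitReader layout change that shows up below as the br.off to br.cursor rename.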
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
index c3452bc..7cec219 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -29,7 +29,7 @@
 	}
 	for i := range seqs {
 		var ll, mo, ml int
-		if br.off > 4+((maxOffsetBits+16+16)>>3) {
+		if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
 			// inlined function:
 			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
 
@@ -111,7 +111,7 @@
 		}
 		s.seqSize += ll + ml
 		if s.seqSize > maxBlockSize {
-			return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+			return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 		}
 		litRemain -= ll
 		if litRemain < 0 {
@@ -149,7 +149,7 @@
 	}
 	s.seqSize += litRemain
 	if s.seqSize > maxBlockSize {
-		return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
 	}
 	err := br.close()
 	if err != nil {
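
The fast-path gate is worth unpacking: one sequence can draw at most maxOffsetBits plus 16+16 extra bits for the literal-length and match-length codes, so with a 31-bit offset bound (our reading of the zstd format, not quoted from the package) the guard reserves (31+16+16)>>3 = 7 bytes of payload plus 4 bytes of bit-buffer slack. A sketch of the arithmetic:

    package main

    import "fmt"

    func main() {
    	// One sequence's worst case in the fast path: up to 31 offset bits
    	// plus 16 extra bits each for literal length and match length.
    	const maxOffsetBits = 31 // assumed bound, per the zstd format
    	threshold := 4 + ((maxOffsetBits + 16 + 16) >> 3)
    	fmt.Println(threshold) // 11: the fast path needs at least 12 unread bytes
    }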
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
index 8014174..65045ea 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -69,7 +69,6 @@
 func llCode(litLength uint32) uint8 {
 	const llDeltaCode = 19
 	if litLength <= 63 {
-		// Compiler insists on bounds check (Go 1.12)
 		return llCodeTable[litLength&63]
 	}
 	return uint8(highBit(litLength)) + llDeltaCode
@@ -102,7 +101,6 @@
 func mlCode(mlBase uint32) uint8 {
 	const mlDeltaCode = 36
 	if mlBase <= 127 {
-		// Compiler insists on bounds check (Go 1.12)
 		return mlCodeTable[mlBase&127]
 	}
 	return uint8(highBit(mlBase)) + mlDeltaCode
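
The deleted comments were stale: current compilers prove litLength <= 63 on that branch and elide the bounds check, so the &63 mask is now purely cosmetic. For lengths past the table, the code is the index of the highest set bit plus a fixed delta; a sketch with an assumed highBit helper matching the package's usage:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // highBit is the helper llCode relies on: the index of the highest
    // set bit, for v > 0 (assumed, matching the package's usage).
    func highBit(v uint32) uint32 { return uint32(bits.Len32(v) - 1) }

    // llCodeSketch reproduces the out-of-table branch: lengths above 63
    // map to highBit(litLength) + llDeltaCode.
    func llCodeSketch(litLength uint32) uint8 {
    	const llDeltaCode = 19
    	return uint8(highBit(litLength)) + llDeltaCode // lengths > 63 only
    }

    func main() {
    	fmt.Println(llCodeSketch(1024)) // highest bit index 10 + 19 = 29
    }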
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index 9e1baad..a17381b 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -95,10 +95,9 @@
 	var written int64
 	var readHeader bool
 	{
-		var header []byte
-		var n int
-		header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+		header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
 
+		var n int
 		n, r.err = w.Write(header)
 		if r.err != nil {
 			return written, r.err
@@ -198,7 +197,7 @@
 
 			n, r.err = w.Write(r.block.output)
 			if r.err != nil {
-				return written, err
+				return written, r.err
 			}
 			written += int64(n)
 			continue
@@ -240,7 +239,7 @@
 			}
 			n, r.err = w.Write(r.block.output)
 			if r.err != nil {
-				return written, err
+				return written, r.err
 			}
 			written += int64(n)
 			continue
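
The two return written, r.err hunks fix a classic shadowing bug: the write error is stored in r.err, while the function kept returning a stale err from an enclosing scope, so a failed write looked like success. A reduced sketch of the failure mode:

    package main

    import (
    	"errors"
    	"fmt"
    )

    type snappyConverter struct{ err error }

    // flush reproduces the bug: the failure lands in r.err, but a stale
    // outer err gets returned, so the caller sees success.
    func (r *snappyConverter) flush(written int64) (int64, error) {
    	var err error                      // never assigned below
    	r.err = errors.New("write failed") // the real failure
    	if r.err != nil {
    		return written, err // BUG: nil; the patched code returns r.err
    	}
    	return written, nil
    }

    func main() {
    	var r snappyConverter
    	_, err := r.flush(0)
    	fmt.Println(err == nil) // true: the failure was silently dropped
    }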
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 3eb3f1c..6252b46 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -5,11 +5,11 @@
 
 import (
 	"bytes"
-	"encoding/binary"
 	"errors"
 	"log"
 	"math"
-	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // enable debug printing
@@ -36,9 +36,6 @@
 // zstdMinMatch is the minimum zstd match length.
 const zstdMinMatch = 3
 
-// Reset the buffer offset when reaching this.
-const bufferReset = math.MaxInt32 - MaxWindowSize
-
 // fcsUnknown is used for unknown frame content size.
 const fcsUnknown = math.MaxUint64
 
@@ -75,7 +72,6 @@
 	ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
 
 	// ErrUnknownDictionary is returned if the dictionary ID is unknown.
-	// For the time being dictionaries are not supported.
 	ErrUnknownDictionary = errors.New("unknown dictionary")
 
 	// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
@@ -93,6 +89,10 @@
 	// Close has been called.
 	ErrDecoderClosed = errors.New("decoder used after Close")
 
+	// ErrEncoderClosed will be returned if the Encoder was used after
+	// Close has been called.
+	ErrEncoderClosed = errors.New("encoder used after Close")
+
 	// ErrDecoderNilInput is returned when a nil Reader was provided
 	// and an operation other than Reset/DecodeAll/Close was attempted.
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
@@ -110,38 +110,12 @@
 	}
 }
 
-// matchLen returns the maximum length.
-// a must be the shortest of the two.
-// The function also returns whether all bytes matched.
-func matchLen(a, b []byte) int {
-	b = b[:len(a)]
-	for i := 0; i < len(a)-7; i += 8 {
-		if diff := load64(a, i) ^ load64(b, i); diff != 0 {
-			return i + (bits.TrailingZeros64(diff) >> 3)
-		}
-	}
-
-	checked := (len(a) >> 3) << 3
-	a = a[checked:]
-	b = b[checked:]
-	for i := range a {
-		if a[i] != b[i] {
-			return i + checked
-		}
-	}
-	return len(a) + checked
-}
-
 func load3232(b []byte, i int32) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
+	return le.Load32(b, i)
 }
 
 func load6432(b []byte, i int32) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
-}
-
-func load64(b []byte, i int) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
+	return le.Load64(b, i)
 }
 
 type byter interface {
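
matchLen and load64 leave zstd.go (the package now sources them elsewhere, with loads going through the internal le package), but the removed code illustrates a useful technique: XOR eight bytes at a time and let TrailingZeros64 locate the first differing byte. An illustrative reimplementation:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"math/bits"
    )

    // matchLenSketch compares 8 bytes at a time; on the first mismatch the
    // trailing zero count of the XOR pinpoints the differing byte.
    func matchLenSketch(a, b []byte) int {
    	n := 0
    	for len(a) >= 8 && len(b) >= 8 {
    		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
    		if diff != 0 {
    			return n + bits.TrailingZeros64(diff)>>3
    		}
    		a, b, n = a[8:], b[8:], n+8
    	}
    	for i := range a { // byte-by-byte tail
    		if i >= len(b) || a[i] != b[i] {
    			return n + i
    		}
    	}
    	return n + len(a)
    }

    func main() {
    	fmt.Println(matchLenSketch([]byte("zstd sequence"), []byte("zstd sentence"))) // 7
    }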
diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE
new file mode 100644
index 0000000..bbc7b89
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile
new file mode 100644
index 0000000..e33ee17
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+	gofmt -w *.go
+
+docs:
+	gomake clean
+	godoc ${TARG} > README.txt
diff --git a/vendor/github.com/munnerz/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt
new file mode 100644
index 0000000..7723656
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/README.txt
@@ -0,0 +1,67 @@
+PACKAGE
+
+package goautoneg
+import "bitbucket.org/ww/goautoneg"
+
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+FUNCTIONS
+
+func Negotiate(header string, alternatives []string) (content_type string)
+Negotiate the most appropriate content_type given the accept header
+and a list of alternatives.
+
+func ParseAccept(header string) (accept []Accept)
+Parse an Accept Header string returning a sorted list
+of clauses
+
+
+TYPES
+
+type Accept struct {
+    Type, SubType string
+    Q             float32
+    Params        map[string]string
+}
+Structure to represent a clause in an HTTP Accept Header
+
+
+SUBDIRECTORIES
+
+	.hg
diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go
new file mode 100644
index 0000000..1dd1cad
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/autoneg.go
@@ -0,0 +1,189 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+    Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in
+    the documentation and/or other materials provided with the
+    distribution.
+
+    Neither the name of the Open Knowledge Foundation Ltd. nor the
+    names of its contributors may be used to endorse or promote
+    products derived from this software without specific prior written
+    permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package goautoneg
+
+import (
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+	Type, SubType string
+	Q             float64
+	Params        map[string]string
+}
+
+// acceptSlice is defined to implement sort interface.
+type acceptSlice []Accept
+
+func (slice acceptSlice) Len() int {
+	return len(slice)
+}
+
+func (slice acceptSlice) Less(i, j int) bool {
+	ai, aj := slice[i], slice[j]
+	if ai.Q > aj.Q {
+		return true
+	}
+	if ai.Type != "*" && aj.Type == "*" {
+		return true
+	}
+	if ai.SubType != "*" && aj.SubType == "*" {
+		return true
+	}
+	return false
+}
+
+func (slice acceptSlice) Swap(i, j int) {
+	slice[i], slice[j] = slice[j], slice[i]
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+	return r == ' '
+}
+
+func nextSplitElement(s, sep string) (item string, remaining string) {
+	if index := strings.Index(s, sep); index != -1 {
+		return s[:index], s[index+1:]
+	}
+	return s, ""
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) acceptSlice {
+	partsCount := 0
+	remaining := header
+	for len(remaining) > 0 {
+		partsCount++
+		_, remaining = nextSplitElement(remaining, ",")
+	}
+	accept := make(acceptSlice, 0, partsCount)
+
+	remaining = header
+	var part string
+	for len(remaining) > 0 {
+		part, remaining = nextSplitElement(remaining, ",")
+		part = strings.TrimFunc(part, stringTrimSpaceCutset)
+
+		a := Accept{
+			Q: 1.0,
+		}
+
+		sp, remainingPart := nextSplitElement(part, ";")
+
+		sp0, spRemaining := nextSplitElement(sp, "/")
+		a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
+
+		switch {
+		case len(spRemaining) == 0:
+			if a.Type == "*" {
+				a.SubType = "*"
+			} else {
+				continue
+			}
+		default:
+			var sp1 string
+			sp1, spRemaining = nextSplitElement(spRemaining, "/")
+			if len(spRemaining) > 0 {
+				continue
+			}
+			a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+		}
+
+		if len(remainingPart) == 0 {
+			accept = append(accept, a)
+			continue
+		}
+
+		a.Params = make(map[string]string)
+		for len(remainingPart) > 0 {
+			sp, remainingPart = nextSplitElement(remainingPart, ";")
+			sp0, spRemaining = nextSplitElement(sp, "=")
+			if len(spRemaining) == 0 {
+				continue
+			}
+			var sp1 string
+			sp1, spRemaining = nextSplitElement(spRemaining, "=")
+			if len(spRemaining) != 0 {
+				continue
+			}
+			token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
+			if token == "q" {
+				a.Q, _ = strconv.ParseFloat(sp1, 32)
+			} else {
+				a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+			}
+		}
+
+		accept = append(accept, a)
+	}
+
+	sort.Sort(accept)
+	return accept
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+	asp := make([][]string, 0, len(alternatives))
+	for _, ctype := range alternatives {
+		asp = append(asp, strings.SplitN(ctype, "/", 2))
+	}
+	for _, clause := range ParseAccept(header) {
+		for i, ctsp := range asp {
+			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == ctsp[0] && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+			if clause.Type == "*" && clause.SubType == "*" {
+				content_type = alternatives[i]
+				return
+			}
+		}
+	}
+	return
+}
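
A quick usage sketch of the newly vendored package: ParseAccept sorts clauses by q-value with concrete types beating wildcards, and Negotiate returns the first alternative the best clause admits.

    package main

    import (
    	"fmt"

    	"github.com/munnerz/goautoneg"
    )

    func main() {
    	// Clauses sort by q-value, wildcards losing ties, so text/html
    	// wins here despite appearing last in the header.
    	header := "application/json;q=0.5, */*;q=0.1, text/html"
    	alternatives := []string{"application/json", "text/html"}
    	fmt.Println(goautoneg.Negotiate(header, alternatives)) // text/html
    }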
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
index 8cc4e52..8d0fab8 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
@@ -56,7 +56,7 @@
 	logger.Debug(ctx, "creating-new-component-log-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+		return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
 	}
 
 	var defaultLogLevel string
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
index 8f4131e..6b5ce53 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
@@ -44,7 +44,7 @@
 	logger.Debug(ctx, "creating-new-component-log-features-controller")
 	componentName := os.Getenv("COMPONENT_NAME")
 	if componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+		return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
 	}
 
 	tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
index bb99970..8c31295 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
@@ -179,6 +179,21 @@
 	return alive
 }
 
+// KeyExists checks whether the specified key exists in the kv-store
+func (b *Backend) KeyExists(ctx context.Context, key string) (bool, error) {
+	span, ctx := log.CreateChildSpan(ctx, "kvs-key-exists")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, key)
+	logger.Debugw(ctx, "checking-key-exists", log.Fields{"key": key, "path": formattedPath})
+
+	keyExists, err := b.Client.KeyExists(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(err))
+
+	return keyExists, err
+}
+
 // List retrieves one or more items that match the specified key
 func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
 	span, ctx := log.CreateChildSpan(ctx, "kvs-list")
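
KeyExists lets callers test for presence without transferring the value, while still feeding the round-trip result into the backend's liveness tracking. A minimal illustrative caller (the devices/ key layout is hypothetical):

    package main

    import (
    	"context"

    	"github.com/opencord/voltha-lib-go/v7/pkg/db"
    )

    // deviceExists shows the intended call pattern; b must already be
    // wired to a live kv-store client.
    func deviceExists(ctx context.Context, b *db.Backend, deviceID string) (bool, error) {
    	return b.KeyExists(ctx, "devices/"+deviceID)
    }

    func main() {}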
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
index 85bc5f5..c84eacf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
@@ -75,6 +75,7 @@
 // Client represents the set of APIs a KV Client must implement
 type Client interface {
 	List(ctx context.Context, key string) (map[string]*KVPair, error)
+	KeyExists(ctx context.Context, key string) (bool, error)
 	Get(ctx context.Context, key string) (*KVPair, error)
 	GetWithPrefix(ctx context.Context, prefixKey string) (map[string]*KVPair, error)
 	GetWithPrefixKeysOnly(ctx context.Context, prefixKey string) ([]string, error)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
index c5df1d8..b1080ef 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
@@ -25,8 +25,9 @@
 	"time"
 
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
-	v3Client "go.etcd.io/etcd/clientv3"
-	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+	v3rpcTypes "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
 )
 
 const (
@@ -35,15 +36,17 @@
 )
 
 const (
-	defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
-	defaultMaxPoolUsage    = 100  // Maximum concurrent request an Etcd Client is allowed to process
+	defaultMaxPoolCapacity         = 1000            // Default size of an Etcd Client pool
+	defaultMaxPoolUsage            = 100             // Maximum concurrent request an Etcd Client is allowed to process
+	defaultMaxAttempts             = 10              // Default number of attempts to retry an operation
+	defaultOperationContextTimeout = 3 * time.Second // Default context timeout for operations
 )
 
 // EtcdClient represents the Etcd KV store client
 type EtcdClient struct {
 	pool               EtcdClientAllocator
 	watchedChannels    sync.Map
-	watchedClients     map[string]*v3Client.Client
+	watchedClients     map[string]*clientv3.Client
 	watchedClientsLock sync.RWMutex
 }
 
@@ -82,7 +85,7 @@
 	logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
 
 	return &EtcdClient{pool: pool,
-		watchedClients: make(map[string]*v3Client.Client),
+		watchedClients: make(map[string]*clientv3.Client),
 	}, nil
 }
 
@@ -101,6 +104,25 @@
 	return true
 }
 
+// KeyExists reports whether the key exists in the kv-store. The context deadline defines how long the function
+// will wait for a response
+func (c *EtcdClient) KeyExists(ctx context.Context, key string) (bool, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return false, err
+	}
+	defer c.pool.Put(client)
+	resp, err := client.Get(ctx, key, clientv3.WithKeysOnly(), clientv3.WithCountOnly())
+	if err != nil {
+		logger.Error(ctx, err)
+		return false, err
+	}
+	if resp.Count > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
 // List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
 // wait for a response
 func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
@@ -109,7 +131,7 @@
 		return nil, err
 	}
 	defer c.pool.Put(client)
-	resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+	resp, err := client.Get(ctx, key, clientv3.WithPrefix())
 
 	if err != nil {
 		logger.Error(ctx, err)
@@ -132,35 +154,49 @@
 	defer c.pool.Put(client)
 
 	attempt := 0
-
 startLoop:
 	for {
-		resp, err := client.Get(ctx, key)
+		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+		resp, err := client.Get(retryCtx, key)
+		cancel()
+		if attempt >= defaultMaxAttempts {
+			logger.Warnw(ctx, "get-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+			return nil, err
+		}
 		if err != nil {
 			switch err {
 			case context.Canceled:
+				// Check if the parent context was cancelled; if so, don't retry
+				if ctx.Err() != nil {
+					logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+					return nil, err
+				}
+				// Otherwise retry
 				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 			case context.DeadlineExceeded:
-				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
 			case v3rpcTypes.ErrEmptyKey:
 				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+				return nil, err
 			case v3rpcTypes.ErrLeaderChanged,
 				v3rpcTypes.ErrGRPCNoLeader,
 				v3rpcTypes.ErrTimeout,
 				v3rpcTypes.ErrTimeoutDueToLeaderFail,
 				v3rpcTypes.ErrTimeoutDueToConnectionLost:
 				// Retry for these server errors
-				attempt += 1
-				if er := backoff(ctx, attempt); er != nil {
-					logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
-					return nil, err
-				}
-				logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
-				goto startLoop
-			default:
 				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			default:
+				logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
 			}
-			return nil, err
+
+			// Common retry logic for all error cases
+			attempt++
+			if er := backoff(ctx, attempt); er != nil {
+				logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+				return nil, err
+			}
+			logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+			goto startLoop
 		}
 
 		for _, ev := range resp.Kvs {
@@ -182,7 +218,7 @@
 	defer c.pool.Put(client)
 
 	// Fetch keys with the prefix
-	resp, err := client.Get(ctx, prefixKey, v3Client.WithPrefix())
+	resp, err := client.Get(ctx, prefixKey, clientv3.WithPrefix())
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch entries for prefix %s: %w", prefixKey, err)
 	}
@@ -208,7 +244,7 @@
 	defer c.pool.Put(client)
 
 	// Fetch keys with the prefix
-	resp, err := client.Get(ctx, prefixKey, v3Client.WithPrefix(), v3Client.WithKeysOnly())
+	resp, err := client.Get(ctx, prefixKey, clientv3.WithPrefix(), clientv3.WithKeysOnly())
 	if err != nil {
 		return nil, fmt.Errorf("failed to fetch entries for prefix %s: %w", prefixKey, err)
 	}
@@ -226,7 +262,6 @@
 // accepts only a string as a value for a put operation. Timeout defines how long the function will
 // wait for a response
 func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
-
 	// Validate that we can convert value to a string as etcd API expects a string
 	var val string
 	var err error
@@ -243,32 +278,47 @@
 	attempt := 0
 startLoop:
 	for {
-		_, err = client.Put(ctx, key, val)
+		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+		_, err = client.Put(retryCtx, key, val)
+		cancel()
+		if attempt >= defaultMaxAttempts {
+			logger.Warnw(ctx, "put-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+			return err
+		}
 		if err != nil {
 			switch err {
 			case context.Canceled:
+				// Check if the parent context was cancelled; if so, don't retry
+				if ctx.Err() != nil {
+					logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+					return err
+				}
+				// Otherwise retry
 				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 			case context.DeadlineExceeded:
-				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
 			case v3rpcTypes.ErrEmptyKey:
 				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+				return err
 			case v3rpcTypes.ErrLeaderChanged,
 				v3rpcTypes.ErrGRPCNoLeader,
 				v3rpcTypes.ErrTimeout,
 				v3rpcTypes.ErrTimeoutDueToLeaderFail,
 				v3rpcTypes.ErrTimeoutDueToConnectionLost:
 				// Retry for these server errors
-				attempt += 1
-				if er := backoff(ctx, attempt); er != nil {
-					logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
-					return err
-				}
-				logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
-				goto startLoop
-			default:
 				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			default:
+				logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
 			}
-			return err
+
+			// Common retry logic for all error cases
+			attempt++
+			if er := backoff(ctx, attempt); er != nil {
+				logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+				return err
+			}
+			logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+			goto startLoop
 		}
 		return nil
 	}
@@ -286,32 +336,47 @@
 	attempt := 0
 startLoop:
 	for {
-		_, err = client.Delete(ctx, key)
+		retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+		_, err = client.Delete(retryCtx, key)
+		cancel()
+		if attempt >= defaultMaxAttempts {
+			logger.Warnw(ctx, "delete-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+			return err
+		}
 		if err != nil {
 			switch err {
 			case context.Canceled:
+				// Check if the parent context was cancelled; if so, don't retry
+				if ctx.Err() != nil {
+					logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+					return err
+				}
+				// Otherwise retry
 				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
 			case context.DeadlineExceeded:
-				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
 			case v3rpcTypes.ErrEmptyKey:
 				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+				return err
 			case v3rpcTypes.ErrLeaderChanged,
 				v3rpcTypes.ErrGRPCNoLeader,
 				v3rpcTypes.ErrTimeout,
 				v3rpcTypes.ErrTimeoutDueToLeaderFail,
 				v3rpcTypes.ErrTimeoutDueToConnectionLost:
 				// Retry for these server errors
-				attempt += 1
-				if er := backoff(ctx, attempt); er != nil {
-					logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
-					return err
-				}
-				logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
-				goto startLoop
-			default:
 				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			default:
+				logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
 			}
-			return err
+
+			// Common retry logic for all error cases
+			attempt++
+			if er := backoff(ctx, attempt); er != nil {
+				logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+				return err
+			}
+			logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+			goto startLoop
 		}
 		logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
 		return nil
@@ -327,7 +392,7 @@
 	defer c.pool.Put(client)
 
 	//delete the prefix
-	if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+	if _, err := client.Delete(ctx, prefixKey, clientv3.WithPrefix()); err != nil {
 		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
 		return err
 	}
@@ -353,11 +418,11 @@
 	}
 	c.watchedClientsLock.Unlock()
 
-	w := v3Client.NewWatcher(client)
+	w := clientv3.NewWatcher(client)
 	ctx, cancel := context.WithCancel(ctx)
-	var channel v3Client.WatchChan
+	var channel clientv3.WatchChan
 	if withPrefix {
-		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+		channel = w.Watch(ctx, key, clientv3.WithPrefix())
 	} else {
 		channel = w.Watch(ctx, key)
 	}
@@ -366,7 +431,7 @@
 	ch := make(chan *Event, maxClientChannelBufferSize)
 
 	// Keep track of the created channels so they can be closed when required
-	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap := make(map[chan *Event]clientv3.Watcher)
 	channelMap[ch] = w
 	channelMaps := c.addChannelMap(key, channelMap)
 
@@ -380,33 +445,33 @@
 
 }
 
-func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]clientv3.Watcher) []map[chan *Event]clientv3.Watcher {
 	var channels interface{}
 	var exists bool
 
 	if channels, exists = c.watchedChannels.Load(key); exists {
-		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+		channels = append(channels.([]map[chan *Event]clientv3.Watcher), channelMap)
 	} else {
-		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+		channels = []map[chan *Event]clientv3.Watcher{channelMap}
 	}
 	c.watchedChannels.Store(key, channels)
 
-	return channels.([]map[chan *Event]v3Client.Watcher)
+	return channels.([]map[chan *Event]clientv3.Watcher)
 }
 
-func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]clientv3.Watcher {
 	var channels interface{}
 	var exists bool
 
 	if channels, exists = c.watchedChannels.Load(key); exists {
-		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		channels = append(channels.([]map[chan *Event]clientv3.Watcher)[:pos], channels.([]map[chan *Event]clientv3.Watcher)[pos+1:]...)
 		c.watchedChannels.Store(key, channels)
 	}
 
-	return channels.([]map[chan *Event]v3Client.Watcher)
+	return channels.([]map[chan *Event]clientv3.Watcher)
 }
 
-func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]clientv3.Watcher, bool) {
 	var channels interface{}
 	var exists bool
 
@@ -416,14 +481,14 @@
 		return nil, exists
 	}
 
-	return channels.([]map[chan *Event]v3Client.Watcher), exists
+	return channels.([]map[chan *Event]clientv3.Watcher), exists
 }
 
 // CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
 // may be multiple listeners on the same key.  The previously created channel serves as a key
 func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
 	// Get the array of channels mapping
-	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var watchedChannels []map[chan *Event]clientv3.Watcher
 	var ok bool
 
 	if watchedChannels, ok = c.getChannelMaps(key); !ok {
@@ -463,7 +528,7 @@
 	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
 }
 
-func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel clientv3.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
 	logger.Debug(ctx, "start-listening-on-channel ...")
 	defer cancel()
 	defer close(ch)
@@ -475,11 +540,11 @@
 	logger.Debug(ctx, "stop-listening-on-channel ...")
 }
 
-func getEventType(event *v3Client.Event) int {
+func getEventType(event *clientv3.Event) int {
 	switch event.Type {
-	case v3Client.EventTypePut:
+	case clientv3.EventTypePut:
 		return PUT
-	case v3Client.EventTypeDelete:
+	case clientv3.EventTypeDelete:
 		return DELETE
 	}
 	return UNKNOWN
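
The restructured Get/Put/Delete loops all follow one shape: each attempt runs under its own 3-second deadline, a cancelled parent context aborts immediately, fatal client errors such as ErrEmptyKey return at once, and everything else retries through backoff up to 10 attempts. A cleaned-up sketch of that shape, with a stand-in backoff rather than the package's helper:

    package main

    import (
    	"context"
    	"errors"
    	"fmt"
    	"time"
    )

    const (
    	maxAttempts    = 10
    	perCallTimeout = 3 * time.Second
    )

    // retry captures the loop shape used above for Get/Put/Delete.
    func retry(ctx context.Context, op func(context.Context) error) error {
    	backoff := func(ctx context.Context, attempt int) error {
    		select { // stand-in for the package's backoff helper
    		case <-time.After(time.Duration(attempt) * 10 * time.Millisecond):
    			return nil
    		case <-ctx.Done():
    			return ctx.Err()
    		}
    	}
    	for attempt := 0; ; attempt++ {
    		callCtx, cancel := context.WithTimeout(ctx, perCallTimeout)
    		err := op(callCtx)
    		cancel()
    		if err == nil {
    			return nil
    		}
    		if errors.Is(err, context.Canceled) && ctx.Err() != nil {
    			return err // parent cancelled: give up immediately
    		}
    		if attempt+1 >= maxAttempts {
    			return err
    		}
    		if er := backoff(ctx, attempt+1); er != nil {
    			return err
    		}
    	}
    }

    func main() {
    	err := retry(context.Background(), func(context.Context) error {
    		return errors.New("leader changed")
    	})
    	fmt.Println(err) // fails after maxAttempts paced retries
    }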
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
index 6c7c838..68095c4 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
@@ -23,7 +23,7 @@
 	"time"
 
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
-	"go.etcd.io/etcd/clientv3"
+	clientv3 "go.etcd.io/etcd/client/v3"
 )
 
 // EtcdClientAllocator represents a generic interface to allocate an Etcd Client
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
index 7ed4159..3a7e512 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
@@ -120,6 +120,19 @@
 	return allkeys, nil
 }
 
+func (c *RedisClient) KeyExists(ctx context.Context, key string) (bool, error) {
+	var err error
+	var keyCount int64
+
+	if keyCount, err = c.redisAPI.Exists(ctx, key).Result(); err != nil {
+		return false, err
+	}
+	if keyCount > 0 {
+		return true, nil
+	}
+	return false, nil
+}
+
 func (c *RedisClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
 	var err error
 	var keys []string
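
The Redis variant leans on EXISTS, which returns how many of the named keys exist (counting repeats), hence the count > 0 test above. A standalone sketch, assuming the go-redis v8 client this package appears to wrap:

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/go-redis/redis/v8"
    )

    func main() {
    	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
    	ctx := context.Background()

    	// EXISTS counts matching keys, duplicates included, which is why
    	// KeyExists treats any count > 0 as "present".
    	n, err := rdb.Exists(ctx, "a", "b", "a").Result()
    	fmt.Println(n, err)
    }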
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
index a265392..1eb2409 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
@@ -128,8 +128,8 @@
 /* Send out device events with key*/
 func (ep *EventProxy) SendDeviceEventWithKey(ctx context.Context, deviceEvent *voltha.DeviceEvent, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64, key string) error {
 	if deviceEvent == nil {
-		logger.Error(ctx, "Recieved empty device event")
-		return errors.New("Device event nil")
+		logger.Error(ctx, "recieved empty device event")
+		return errors.New("device event nil")
 	}
 	var event voltha.Event
 	var de voltha.Event_DeviceEvent
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
index 04c248c..8a6333f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
@@ -13,6 +13,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+//nolint:staticcheck
 package flows
 
 import (
@@ -347,7 +348,14 @@
 	if flow == nil {
 		return nil
 	}
-	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	flowData, err := proto.Marshal(flow)
+	if err != nil {
+		return nil // marshal failed; the flow cannot be cloned
+	}
+	nFlow := &ofp.OfpFlowStats{}
+	if err := proto.Unmarshal(flowData, nFlow); err != nil {
+		return nil // unmarshal failed; the flow cannot be cloned
+	}
 	nFlow.Instructions = nil
 	nInsts := make([]*ofp.OfpInstruction, 0)
 	for _, instruction := range flow.Instructions {
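
Replacing proto.Clone with a marshal/unmarshal round trip keeps the deep-copy semantics while sidestepping the deprecated call; the cost is one encode/decode pass per clone. A sketch of the same pattern (the voltha-protos import path is assumed):

    package main

    import (
    	"fmt"

    	"github.com/golang/protobuf/proto"
    	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
    )

    // deepCopyFlow is the round trip used above: encode, then decode into
    // a fresh message, yielding an independent copy.
    func deepCopyFlow(flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
    	data, err := proto.Marshal(flow)
    	if err != nil {
    		return nil
    	}
    	out := &ofp.OfpFlowStats{}
    	if err := proto.Unmarshal(data, out); err != nil {
    		return nil
    	}
    	return out
    }

    func main() {
    	src := &ofp.OfpFlowStats{Id: 42}
    	cp := deepCopyFlow(src)
    	cp.Id = 7
    	fmt.Println(src.Id, cp.Id) // 42 7: the copy is independent
    }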
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
index e8a0832..6d8a93c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
@@ -25,6 +25,7 @@
 
 	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
 	grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
 	"github.com/jhump/protoreflect/grpcreflect"
 	"github.com/opencord/voltha-lib-go/v7/pkg/log"
@@ -60,6 +61,12 @@
 )
 
 const (
+	// [VOL-5434] Set the max receive message size to 20 MB;
+	// the default 'defaultServerMaxReceiveMessageSize' is only 4 MB
+	grpcRecvMsgSizeLimit = 20
+)
+
+const (
 	eventConnecting = event(iota)
 	eventValidatingConnection
 	eventConnected
@@ -605,15 +612,21 @@
 	// 1. automatically inject
 	// 2. publish Open Tracing Spans by this GRPC Client
 	// 3. detect connection failure on client calls such that the reconnection process can begin
-	interceptor_opts := []grpc.UnaryClientInterceptor{grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{}))}
+	interceptor_opts := []grpc.UnaryClientInterceptor{
+		grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+		grpc_prometheus.UnaryClientInterceptor,
+	}
 
+	grpc_prometheus.EnableClientHandlingTimeHistogram()
 	if len(retry_interceptor) > 0 {
 		interceptor_opts = append(interceptor_opts, retry_interceptor...)
 	}
 	conn, err := grpc.Dial(c.serverEndPoint,
 		grpc.WithInsecure(),
+		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcRecvMsgSizeLimit*1024*1024)),
 		grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(
 			grpc_opentracing.StreamClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+			grpc_prometheus.StreamClientInterceptor,
 		)),
 		grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(interceptor_opts...)),
 	)
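
The size limit is easy to misread: grpcRecvMsgSizeLimit * 1024 * 1024 = 20,971,520 bytes, replacing gRPC's 4 MB default for every response received through this connection. The same dial option in isolation:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc"
    )

    func main() {
    	const grpcRecvMsgSizeLimit = 20 // MB, as in the client above

    	// Lifts the 4 MB default on the receiving side of every call made
    	// through a connection dialed with this option.
    	opt := grpc.WithDefaultCallOptions(
    		grpc.MaxCallRecvMsgSize(grpcRecvMsgSizeLimit * 1024 * 1024),
    	)
    	_ = opt
    	fmt.Println(grpcRecvMsgSizeLimit * 1024 * 1024) // 20971520 bytes
    }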
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
index c2f4aed..815990c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
@@ -94,7 +94,7 @@
 /*
 Start prepares the GRPC server and starts servicing requests
 */
-func (s *GrpcServer) Start(ctx context.Context) {
+func (s *GrpcServer) Start(ctx context.Context, opts ...grpc.ServerOption) {
 
 	lis, err := net.Listen("tcp", s.address)
 	if err != nil {
@@ -118,10 +118,10 @@
 		}
 
 		serverOptions = append(serverOptions, grpc.Creds(creds))
-		s.gs = grpc.NewServer(serverOptions...)
+		s.gs = grpc.NewServer(append(serverOptions, opts...)...)
 	} else {
 		logger.Info(ctx, "starting-insecure-grpc-server")
-		s.gs = grpc.NewServer(serverOptions...)
+		s.gs = grpc.NewServer(append(serverOptions, opts...)...)
 	}
 
 	// Register all required services
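
Start now accepts variadic grpc.ServerOption values and appends them after its own TLS/tracing options, so callers can inject interceptors or size limits without touching this file. A sketch of the append-and-construct pattern:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc"
    )

    // startOptions mimics what Start now does: caller-supplied opts are
    // appended to the server's own baseline before grpc.NewServer.
    func startOptions(baseline []grpc.ServerOption, opts ...grpc.ServerOption) *grpc.Server {
    	return grpc.NewServer(append(baseline, opts...)...)
    }

    func main() {
    	gs := startOptions(nil, grpc.MaxRecvMsgSize(20*1024*1024))
    	fmt.Println(gs != nil) // a server carrying the extra option
    }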
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
index b3e20a3..77c529b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
@@ -18,6 +18,7 @@
 * release 2.9.  It is no longer used for inter voltha container
 * communication.
  */
+//nolint:staticcheck
 package kafka
 
 import (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
index 99168dc..75025f2 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
@@ -13,6 +13,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
  */
+//nolint:staticcheck
 package kafka
 
 import (
@@ -23,8 +24,7 @@
 	"sync"
 	"time"
 
-	"github.com/Shopify/sarama"
-	scc "github.com/bsm/sarama-cluster"
+	"github.com/IBM/sarama"
 	"github.com/eapache/go-resiliency/breaker"
 	"github.com/golang/protobuf/proto"
 	"github.com/google/uuid"
@@ -48,7 +48,7 @@
 	KafkaAddress                  string
 	producer                      sarama.AsyncProducer
 	consumer                      sarama.Consumer
-	groupConsumers                map[string]*scc.Consumer
+	groupConsumers                map[string]sarama.ConsumerGroup
 	lockOfGroupConsumers          sync.RWMutex
 	consumerGroupPrefix           string
 	consumerType                  int
@@ -217,7 +217,7 @@
 		option(client)
 	}
 
-	client.groupConsumers = make(map[string]*scc.Consumer)
+	client.groupConsumers = make(map[string]sarama.ConsumerGroup)
 
 	client.lockTopicToConsumerChannelMap = sync.RWMutex{}
 	client.topicLockMap = make(map[string]*sync.RWMutex)
@@ -775,7 +775,7 @@
 					err = errTemp
 				}
 			}
-		} else if groupConsumer, ok := consumer.(*scc.Consumer); ok {
+		} else if groupConsumer, ok := consumer.(sarama.ConsumerGroup); ok {
 			if errTemp := groupConsumer.Close(); errTemp != nil {
 				if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
 					// This can occur on race condition
@@ -878,33 +878,28 @@
 }
 
 // createGroupConsumer creates a consumer group
-func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
-	config := scc.NewConfig()
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (sarama.ConsumerGroup, error) {
+	config := sarama.NewConfig()
 	config.Version = sarama.V1_0_0_0
 	config.ClientID = uuid.New().String()
-	config.Group.Mode = scc.ConsumerModeMultiplex
+	config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
 	config.Consumer.Group.Heartbeat.Interval, _ = time.ParseDuration("1s")
 	config.Consumer.Return.Errors = true
-	//config.Group.Return.Notifications = false
-	//config.Consumer.MaxWaitTime = time.Duration(DefaultConsumerMaxwait) * time.Millisecond
-	//config.Consumer.MaxProcessingTime = time.Duration(DefaultMaxProcessingTime) * time.Millisecond
 	config.Consumer.Offsets.Initial = initialOffset
-	//config.Consumer.Offsets.Initial = sarama.OffsetOldest
+
 	brokers := []string{sc.KafkaAddress}
-
-	topics := []string{topic.Name}
-	var consumer *scc.Consumer
+	// topics := []string{topic.Name}
+	var consumerGroup sarama.ConsumerGroup
 	var err error
-
-	if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
-		logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+	if consumerGroup, err = sarama.NewConsumerGroup(brokers, groupId, config); err != nil {
+		logger.Errorw(ctx, "create-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
 		return nil, err
 	}
-	logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
 
-	//sc.groupConsumers[topic.Name] = consumer
-	sc.addToGroupConsumers(topic.Name, consumer)
-	return consumer, nil
+	logger.Debugw(ctx, "create-group-consumer-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+	//sc.groupConsumers[topic.Name] = consumerGroup
+	sc.addToGroupConsumers(topic.Name, consumerGroup)
+	return consumerGroup, nil
 }
 
 // dispatchToConsumers sends the inter-container message received on a given topic to all subscribers for that
@@ -963,48 +958,59 @@
 	sc.setUnhealthy(ctx)
 }
 
-func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumerGroup sarama.ConsumerGroup, consumerChnls *consumerChannels) {
 	logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
 
-startloop:
-	for {
-		select {
-		case err, ok := <-consumer.Errors():
-			if ok {
+	go func() {
+		for {
+			err := consumerGroup.Consume(ctx, []string{topic.Name}, &groupConsumerHandler{
+				consumerChnls: consumerChnls,
+				sc:            sc,
+				topic:         topic,
+			})
+			if err != nil {
+				logger.Warnw(ctx, "group-consumer-error", log.Fields{"topic": topic.Name, "error": err})
 				if sc.isLivenessError(ctx, err) {
 					sc.updateLiveness(ctx, false)
 				}
-				logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
-			} else {
-				logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
-				// channel is closed
-				break startloop
 			}
-		case msg, ok := <-consumer.Messages():
-			if !ok {
-				logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
-				// Channel closed
-				break startloop
+			select {
+			case <-sc.doneCh:
+				logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
+				return
+			default:
 			}
-			sc.updateLiveness(ctx, true)
-			logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
-			msgBody := msg.Value
-			var protoMsg proto.Message
-			if err := proto.Unmarshal(msgBody, protoMsg); err != nil {
-				logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
-				continue
-			}
-			go sc.dispatchToConsumers(consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
-			consumer.MarkOffset(msg, "")
-		case ntf := <-consumer.Notifications():
-			logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
-		case <-sc.doneCh:
-			logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
-			break startloop
 		}
+	}()
+}
+
+type groupConsumerHandler struct {
+	consumerChnls *consumerChannels
+	sc            *SaramaClient
+	topic         *Topic
+}
+
+func (h *groupConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error {
+	return nil
+}
+
+func (h *groupConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error {
+	return nil
+}
+
+func (h *groupConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+	for msg := range claim.Messages() {
+		h.sc.updateLiveness(context.Background(), true)
+		logger.Debugw(context.Background(), "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+		var protoMsg proto.Message
+		if err := proto.Unmarshal(msg.Value, protoMsg); err != nil {
+			logger.Warnw(context.Background(), "invalid-message", log.Fields{"error": err})
+			continue
+		}
+		go h.sc.dispatchToConsumers(h.consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
+		session.MarkMessage(msg, "")
 	}
-	logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
-	sc.setUnhealthy(ctx)
+	return nil
 }
 
 func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
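
For readers coming from bsm/sarama-cluster, here is a self-contained sketch of the IBM/sarama consumer-group API that the hunks above adopt, with the same Setup/Cleanup/ConsumeClaim shape; the broker address, topic, and group id are illustrative values.

```
package main

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

// echoHandler mirrors groupConsumerHandler above in its minimal form.
type echoHandler struct{}

func (echoHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (echoHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (echoHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%s partition=%d offset=%d", msg.Topic, msg.Partition, msg.Offset)
		sess.MarkMessage(msg, "") // commit progress, as ConsumeClaim above does
	}
	return nil // the channel closes on rebalance; Consume is then called again
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0
	cfg.Consumer.Offsets.Initial = sarama.OffsetOldest

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx := context.Background()
	for {
		// Consume runs one session and returns on rebalance or error, so it
		// is looped, exactly as consumeGroupMessages above loops it.
		if err := group.Consume(ctx, []string{"example-topic"}, echoHandler{}); err != nil {
			log.Printf("consume error: %v", err)
		}
		if ctx.Err() != nil {
			return
		}
	}
}
```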
@@ -1018,7 +1024,7 @@
 	for _, consumer := range consumerCh.consumers {
 		if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
 			go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
-		} else if gConsumer, ok := consumer.(*scc.Consumer); ok {
+		} else if gConsumer, ok := consumer.(sarama.ConsumerGroup); ok {
 			go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
 		} else {
 			logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
@@ -1070,18 +1076,16 @@
 // setupGroupConsumerChannel creates a consumerChannels object for that topic and adds it to the consumerChannels map
 // for that topic.  It also starts the routine that listens for messages on that topic.
 func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan proto.Message, error) {
-	// TODO:  Replace this development partition consumers with a group consumers
-	var pConsumer *scc.Consumer
+	var consumerGroup sarama.ConsumerGroup
 	var err error
-	if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
-		logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+	if consumerGroup, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset); err != nil {
+		logger.Errorw(ctx, "creating-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name})
 		return nil, err
 	}
-	// Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
-	// unbuffered to verify race conditions.
+
 	consumerListeningChannel := make(chan proto.Message)
 	cc := &consumerChannels{
-		consumers: []interface{}{pConsumer},
+		consumers: []interface{}{consumerGroup},
 		channels:  []chan proto.Message{consumerListeningChannel},
 	}
 
@@ -1134,22 +1138,21 @@
 	return channels
 }
 
-func (sc *SaramaClient) addToGroupConsumers(topic string, consumer *scc.Consumer) {
+func (sc *SaramaClient) addToGroupConsumers(topic string, consumerGroup sarama.ConsumerGroup) {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
 	if _, exist := sc.groupConsumers[topic]; !exist {
-		sc.groupConsumers[topic] = consumer
+		sc.groupConsumers[topic] = consumerGroup
 	}
 }
 
 func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
 	sc.lockOfGroupConsumers.Lock()
 	defer sc.lockOfGroupConsumers.Unlock()
-	if _, exist := sc.groupConsumers[topic]; exist {
-		consumer := sc.groupConsumers[topic]
+	if consumerGroup, exist := sc.groupConsumers[topic]; exist {
 		delete(sc.groupConsumers, topic)
-		if err := consumer.Close(); err != nil {
-			logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
+		if err := consumerGroup.Close(); err != nil {
+			logger.Errorw(ctx, "failure-closing-consumer-group", log.Fields{"error": err})
 			return err
 		}
 	}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
index af5821a..4e42c26 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
@@ -202,7 +202,7 @@
 	case "FATAL":
 		return FatalLevel, nil
 	}
-	return 0, errors.New("Given LogLevel is invalid : " + l)
+	return 0, errors.New("given LogLevel is invalid : " + l)
 }
 
 func LogLevelToString(l LogLevel) (string, error) {
@@ -218,7 +218,7 @@
 	case FatalLevel:
 		return "FATAL", nil
 	}
-	return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+	return "", fmt.Errorf("given LogLevel is invalid %d", l)
 }
 
 func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
@@ -503,13 +503,9 @@
 		}
 	}
 
-	if strings.HasSuffix(packageName, ".init") {
-		packageName = strings.TrimSuffix(packageName, ".init")
-	}
+	packageName = strings.TrimSuffix(packageName, ".init")
 
-	if strings.HasSuffix(fileName, ".go") {
-		fileName = strings.TrimSuffix(fileName, ".go")
-	}
+	fileName = strings.TrimSuffix(fileName, ".go")
 
 	return packageName, fileName, funcName, foundFrame.Line
 }
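
Dropping the HasSuffix guards is behavior-preserving, since strings.TrimSuffix already returns its input unchanged when the suffix is absent:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.TrimSuffix("log.go", ".go")) // log
	fmt.Println(strings.TrimSuffix("log", ".go"))    // log (no-op, no guard needed)
}
```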
@@ -678,7 +674,7 @@
 		return loggers[pkgName], nil
 	}
 
-	return loggers[pkgName], errors.New("Package Not Found")
+	return loggers[pkgName], errors.New("package not found")
 }
 
 // UpdateAllCallerSkipLevel creates new loggers for all registered packages with the updated default caller skip level.
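
The lower-cased messages in this file follow the Go convention (staticcheck ST1005) that error strings are not capitalized, because they are usually wrapped mid-sentence:

```
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("package not found")
	// Wrapped output reads as one sentence: "loading logger: package not found"
	fmt.Println(fmt.Errorf("loading logger: %w", err))
}
```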
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
index e21ce0c..292a1cf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
@@ -23,13 +23,14 @@
 	"context"
 	"errors"
 	"fmt"
-	"github.com/opentracing/opentracing-go"
-	jtracing "github.com/uber/jaeger-client-go"
-	jcfg "github.com/uber/jaeger-client-go/config"
 	"io"
 	"os"
 	"strings"
 	"sync"
+
+	"github.com/opentracing/opentracing-go"
+	jtracing "github.com/uber/jaeger-client-go"
+	jcfg "github.com/uber/jaeger-client-go/config"
 )
 
 const (
@@ -103,7 +104,7 @@
 func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
 	lfm.componentName = os.Getenv("COMPONENT_NAME")
 	if lfm.componentName == "" {
-		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+		return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
 	}
 
 	lfm.lock.Lock()
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
index 6dfb0c8..4e84126 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/meters/meter_utils.go
@@ -26,14 +26,15 @@
 
 // GetTrafficShapingInfo returns CIR,PIR and GIR values
 func GetTrafficShapingInfo(ctx context.Context, meterConfig *ofp.OfpMeterConfig) (*tp_pb.TrafficShapingInfo, error) {
-	switch meterBandSize := len(meterConfig.Bands); {
-	case meterBandSize == 1:
+	meterBandSize := len(meterConfig.Bands)
+	switch meterBandSize {
+	case 1:
 		band := meterConfig.Bands[0]
 		if band.BurstSize == 0 { // GIR = PIR, Burst Size = 0, tcont type 1
 			return &tp_pb.TrafficShapingInfo{Pir: band.Rate, Gir: band.Rate}, nil
 		}
 		return &tp_pb.TrafficShapingInfo{Pir: band.Rate, Pbs: band.BurstSize}, nil // PIR, tcont type 4
-	case meterBandSize == 2:
+	case 2:
 		firstBand, secondBand := meterConfig.Bands[0], meterConfig.Bands[1]
 		if firstBand.BurstSize == 0 && secondBand.BurstSize == 0 &&
 			firstBand.Rate == secondBand.Rate { // PIR = GIR, tcont type 1
@@ -45,7 +46,7 @@
 			}
 			return &tp_pb.TrafficShapingInfo{Pir: secondBand.Rate, Pbs: secondBand.BurstSize, Cir: firstBand.Rate, Cbs: firstBand.BurstSize}, nil
 		}
-	case meterBandSize == 3: // PIR,CIR,GIR, tcont type 5
+	case 3: // PIR,CIR,GIR, tcont type 5
 		var count, girIndex int
 		for i, band := range meterConfig.Bands {
 			if band.BurstSize == 0 { // find GIR
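
The rewrite replaces a tagless switch over boolean comparisons with a tagged switch on the band count, which reads more directly; a stand-alone illustration with stand-in values:

```
package main

import "fmt"

func main() {
	bands := []int{100, 200} // stand-in for meterConfig.Bands
	switch len(bands) {      // tagged switch instead of `case n == 1:` comparisons
	case 1:
		fmt.Println("one band: PIR only (tcont type 1 or 4)")
	case 2:
		fmt.Println("two bands: CIR and PIR")
	case 3:
		fmt.Println("three bands: CIR, PIR and GIR (tcont type 5)")
	}
}
```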
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
index 4b4bb1f..dba7ffa 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -240,7 +240,7 @@
 	}
 	comma := ""
 	for c, s := range p.status {
-		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+		if _, err := fmt.Fprintf(w, "%s\"%s\": \"%s\"", comma, c, s.String()); err != nil {
 			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
 			w.WriteHeader(http.StatusInternalServerError)
 			return
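
fmt.Fprintf writes the formatted bytes straight to the io.Writer, avoiding the intermediate string and []byte allocation of w.Write([]byte(fmt.Sprintf(...))); a small stand-alone equivalent:

```
package main

import (
	"fmt"
	"os"
)

func main() {
	comma, name, status := "", "kafka", "Running"
	// One call formats and writes; no temporary string is built first.
	if _, err := fmt.Fprintf(os.Stdout, "%s\"%s\": \"%s\"", comma, name, status); err != nil {
		fmt.Fprintln(os.Stderr, "write-response:", err)
	}
}
```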
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
deleted file mode 100644
index 5e98735..0000000
--- a/vendor/github.com/pierrec/lz4/.gitignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Created by https://www.gitignore.io/api/macos
-
-### macOS ###
-*.DS_Store
-.AppleDouble
-.LSOverride
-
-# Icon must end with two \r
-Icon
-
-
-# Thumbnails
-._*
-
-# Files that might appear in the root of a volume
-.DocumentRevisions-V100
-.fseventsd
-.Spotlight-V100
-.TemporaryItems
-.Trashes
-.VolumeIcon.icns
-.com.apple.timemachine.donotpresent
-
-# Directories potentially created on remote AFP share
-.AppleDB
-.AppleDesktop
-Network Trash Folder
-Temporary Items
-.apdisk
-
-# End of https://www.gitignore.io/api/macos
-
-cmd/*/*exe
-.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
deleted file mode 100644
index fd6c6db..0000000
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-env:
-  - GO111MODULE=off
-
-go:
-  - 1.9.x
-  - 1.10.x
-  - 1.11.x
-  - 1.12.x
-  - master
-
-matrix:
- fast_finish: true
- allow_failures:
-   - go: master
-
-sudo: false
-
-script: 
- - go test -v -cpu=2
- - go test -v -cpu=2 -race
- - go test -v -cpu=2 -tags noasm
- - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
deleted file mode 100644
index 4ee388e..0000000
--- a/vendor/github.com/pierrec/lz4/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# lz4 : LZ4 compression in pure Go
-
-[![GoDoc](https://godoc.org/github.com/pierrec/lz4?status.svg)](https://godoc.org/github.com/pierrec/lz4)
-[![Build Status](https://travis-ci.org/pierrec/lz4.svg?branch=master)](https://travis-ci.org/pierrec/lz4)
-[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
-[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
-
-## Overview
-
-This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
-The implementation is based on the reference C [one](https://github.com/lz4/lz4).
-
-## Install
-
-Assuming you have the go toolchain installed:
-
-```
-go get github.com/pierrec/lz4
-```
-
-There is a command line interface tool to compress and decompress LZ4 files.
-
-```
-go install github.com/pierrec/lz4/cmd/lz4c
-```
-
-Usage
-
-```
-Usage of lz4c:
-  -version
-        print the program version
-
-Subcommands:
-Compress the given files or from stdin to stdout.
-compress [arguments] [<file name> ...]
-  -bc
-        enable block checksum
-  -l int
-        compression level (0=fastest)
-  -sc
-        disable stream checksum
-  -size string
-        block max size [64K,256K,1M,4M] (default "4M")
-
-Uncompress the given files or from stdin to stdout.
-uncompress [arguments] [<file name> ...]
-
-```
-
-
-## Example
-
-```
-// Compress and uncompress an input string.
-s := "hello world"
-r := strings.NewReader(s)
-
-// The pipe will uncompress the data from the writer.
-pr, pw := io.Pipe()
-zw := lz4.NewWriter(pw)
-zr := lz4.NewReader(pr)
-
-go func() {
-	// Compress the input string.
-	_, _ = io.Copy(zw, r)
-	_ = zw.Close() // Make sure the writer is closed
-	_ = pw.Close() // Terminate the pipe
-}()
-
-_, _ = io.Copy(os.Stdout, zr)
-
-// Output:
-// hello world
-```
-
-## Contributing
-
-Contributions are very welcome for bug fixing, performance improvements...!
-
-- Open an issue with a proper description
-- Send a pull request with appropriate test case(s)
-
-## Contributors
-
-Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors)  so far!
-
-Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
-
-Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
deleted file mode 100644
index 664d9be..0000000
--- a/vendor/github.com/pierrec/lz4/block.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"math/bits"
-	"sync"
-)
-
-// blockHash hashes the lower 6 bytes into a value < htSize.
-func blockHash(x uint64) uint32 {
-	const prime6bytes = 227718039650203
-	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
-}
-
-// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
-func CompressBlockBound(n int) int {
-	return n + n/255 + 16
-}
-
-// UncompressBlock uncompresses the source buffer into the destination one,
-// and returns the uncompressed size.
-//
-// The destination buffer must be sized appropriately.
-//
-// An error is returned if the source data is invalid or the destination buffer is too small.
-func UncompressBlock(src, dst []byte) (int, error) {
-	if len(src) == 0 {
-		return 0, nil
-	}
-	if di := decodeBlock(dst, src); di >= 0 {
-		return di, nil
-	}
-	return 0, ErrInvalidSourceShortBuffer
-}
-
-// CompressBlock compresses the source buffer into the destination one.
-// This is the fast version of LZ4 compression and also the default one.
-//
-// The argument hashTable is scratch space for a hash table used by the
-// compressor. If provided, it should have length at least 1<<16. If it is
-// shorter (or nil), CompressBlock allocates its own hash table.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
-	defer recoverBlock(&err)
-
-	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
-	isNotCompressible := len(dst) < CompressBlockBound(len(src))
-
-	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-	// This significantly speeds up incompressible data and usually has very small impact on compression.
-	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
-	const adaptSkipLog = 7
-	if len(hashTable) < htSize {
-		htIface := htPool.Get()
-		defer htPool.Put(htIface)
-		hashTable = (*(htIface).(*[htSize]int))[:]
-	}
-	// Prove to the compiler the table has at least htSize elements.
-	// The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
-	hashTable = hashTable[:htSize]
-
-	// si: Current position of the search.
-	// anchor: Position of the current literals.
-	var si, di, anchor int
-	sn := len(src) - mfLimit
-	if sn <= 0 {
-		goto lastLiterals
-	}
-
-	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
-	for si < sn {
-		// Hash the next 6 bytes (sequence)...
-		match := binary.LittleEndian.Uint64(src[si:])
-		h := blockHash(match)
-		h2 := blockHash(match >> 8)
-
-		// We check a match at s, s+1 and s+2 and pick the first one we get.
-		// Checking 3 only requires us to load the source one.
-		ref := hashTable[h]
-		ref2 := hashTable[h2]
-		hashTable[h] = si
-		hashTable[h2] = si + 1
-		offset := si - ref
-
-		// If offset <= 0 we got an old entry in the hash table.
-		if offset <= 0 || offset >= winSize || // Out of window.
-			uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
-			// No match. Start calculating another hash.
-			// The processor can usually do this out-of-order.
-			h = blockHash(match >> 16)
-			ref = hashTable[h]
-
-			// Check the second match at si+1
-			si += 1
-			offset = si - ref2
-
-			if offset <= 0 || offset >= winSize ||
-				uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
-				// No match. Check the third match at si+2
-				si += 1
-				offset = si - ref
-				hashTable[h] = si
-
-				if offset <= 0 || offset >= winSize ||
-					uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
-					// Skip one extra byte (at si+3) before we check 3 matches again.
-					si += 2 + (si-anchor)>>adaptSkipLog
-					continue
-				}
-			}
-		}
-
-		// Match found.
-		lLen := si - anchor // Literal length.
-		// We already matched 4 bytes.
-		mLen := 4
-
-		// Extend backwards if we can, reducing literals.
-		tOff := si - offset - 1
-		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
-			si--
-			tOff--
-			lLen--
-			mLen++
-		}
-
-		// Add the match length, so we continue search at the end.
-		// Use mLen to store the offset base.
-		si, mLen = si+mLen, si+minMatch
-
-		// Find the longest match by looking by batches of 8 bytes.
-		for si+8 < sn {
-			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
-			if x == 0 {
-				si += 8
-			} else {
-				// Stop at the first non-zero byte.
-				si += bits.TrailingZeros64(x) >> 3
-				break
-			}
-		}
-
-		mLen = si - mLen
-		if mLen < 0xF {
-			dst[di] = byte(mLen)
-		} else {
-			dst[di] = 0xF
-		}
-
-		// Encode literals length.
-		if lLen < 0xF {
-			dst[di] |= byte(lLen << 4)
-		} else {
-			dst[di] |= 0xF0
-			di++
-			l := lLen - 0xF
-			for ; l >= 0xFF; l -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(l)
-		}
-		di++
-
-		// Literals.
-		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
-		di += lLen + 2
-		anchor = si
-
-		// Encode offset.
-		_ = dst[di] // Bound check elimination.
-		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
-		// Encode match length part 2.
-		if mLen >= 0xF {
-			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(mLen)
-			di++
-		}
-		// Check if we can load next values.
-		if si >= sn {
-			break
-		}
-		// Hash match end-2
-		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
-		hashTable[h] = si - 2
-	}
-
-lastLiterals:
-	if isNotCompressible && anchor == 0 {
-		// Incompressible.
-		return 0, nil
-	}
-
-	// Last literals.
-	lLen := len(src) - anchor
-	if lLen < 0xF {
-		dst[di] = byte(lLen << 4)
-	} else {
-		dst[di] = 0xF0
-		di++
-		for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
-			dst[di] = 0xFF
-			di++
-		}
-		dst[di] = byte(lLen)
-	}
-	di++
-
-	// Write the last literals.
-	if isNotCompressible && di >= anchor {
-		// Incompressible.
-		return 0, nil
-	}
-	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
-	return di, nil
-}
-
-// Pool of hash tables for CompressBlock.
-var htPool = sync.Pool{
-	New: func() interface{} {
-		return new([htSize]int)
-	},
-}
-
-// blockHash hashes 4 bytes into a value < winSize.
-func blockHashHC(x uint32) uint32 {
-	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
-	return x * hasher >> (32 - winSizeLog)
-}
-
-// CompressBlockHC compresses the source buffer src into the destination dst
-// with max search depth (use 0 or negative value for no max).
-//
-// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
-	defer recoverBlock(&err)
-
-	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
-	isNotCompressible := len(dst) < CompressBlockBound(len(src))
-
-	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
-	// This significantly speeds up incompressible data and usually has very small impact on compression.
-	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
-	const adaptSkipLog = 7
-
-	var si, di, anchor int
-
-	// hashTable: stores the last position found for a given hash
-	// chainTable: stores previous positions for a given hash
-	var hashTable, chainTable [winSize]int
-
-	if depth <= 0 {
-		depth = winSize
-	}
-
-	sn := len(src) - mfLimit
-	if sn <= 0 {
-		goto lastLiterals
-	}
-
-	for si < sn {
-		// Hash the next 4 bytes (sequence).
-		match := binary.LittleEndian.Uint32(src[si:])
-		h := blockHashHC(match)
-
-		// Follow the chain until out of window and give the longest match.
-		mLen := 0
-		offset := 0
-		for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
-			// The first (mLen==0) or next byte (mLen>=minMatch) at current match length
-			// must match to improve on the match length.
-			if src[next+mLen] != src[si+mLen] {
-				continue
-			}
-			ml := 0
-			// Compare the current position with a previous with the same hash.
-			for ml < sn-si {
-				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
-				if x == 0 {
-					ml += 8
-				} else {
-					// Stop at the first non-zero byte.
-					ml += bits.TrailingZeros64(x) >> 3
-					break
-				}
-			}
-			if ml < minMatch || ml <= mLen {
-				// Match too small (<minMatch) or smaller than the current match.
-				continue
-			}
-			// Found a longer match, keep its position and length.
-			mLen = ml
-			offset = si - next
-			// Try another previous position with the same hash.
-			try--
-		}
-		chainTable[si&winMask] = hashTable[h]
-		hashTable[h] = si
-
-		// No match found.
-		if mLen == 0 {
-			si += 1 + (si-anchor)>>adaptSkipLog
-			continue
-		}
-
-		// Match found.
-		// Update hash/chain tables with overlapping bytes:
-		// si already hashed, add everything from si+1 up to the match length.
-		winStart := si + 1
-		if ws := si + mLen - winSize; ws > winStart {
-			winStart = ws
-		}
-		for si, ml := winStart, si+mLen; si < ml; {
-			match >>= 8
-			match |= uint32(src[si+3]) << 24
-			h := blockHashHC(match)
-			chainTable[si&winMask] = hashTable[h]
-			hashTable[h] = si
-			si++
-		}
-
-		lLen := si - anchor
-		si += mLen
-		mLen -= minMatch // Match length does not include minMatch.
-
-		if mLen < 0xF {
-			dst[di] = byte(mLen)
-		} else {
-			dst[di] = 0xF
-		}
-
-		// Encode literals length.
-		if lLen < 0xF {
-			dst[di] |= byte(lLen << 4)
-		} else {
-			dst[di] |= 0xF0
-			di++
-			l := lLen - 0xF
-			for ; l >= 0xFF; l -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(l)
-		}
-		di++
-
-		// Literals.
-		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
-		di += lLen
-		anchor = si
-
-		// Encode offset.
-		di += 2
-		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
-		// Encode match length part 2.
-		if mLen >= 0xF {
-			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
-				dst[di] = 0xFF
-				di++
-			}
-			dst[di] = byte(mLen)
-			di++
-		}
-	}
-
-	if isNotCompressible && anchor == 0 {
-		// Incompressible.
-		return 0, nil
-	}
-
-	// Last literals.
-lastLiterals:
-	lLen := len(src) - anchor
-	if lLen < 0xF {
-		dst[di] = byte(lLen << 4)
-	} else {
-		dst[di] = 0xF0
-		di++
-		lLen -= 0xF
-		for ; lLen >= 0xFF; lLen -= 0xFF {
-			dst[di] = 0xFF
-			di++
-		}
-		dst[di] = byte(lLen)
-	}
-	di++
-
-	// Write the last literals.
-	if isNotCompressible && di >= anchor {
-		// Incompressible.
-		return 0, nil
-	}
-	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
-	return di, nil
-}
diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
deleted file mode 100644
index bc5e78d..0000000
--- a/vendor/github.com/pierrec/lz4/debug.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build lz4debug
-
-package lz4
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"runtime"
-)
-
-const debugFlag = true
-
-func debug(args ...interface{}) {
-	_, file, line, _ := runtime.Caller(1)
-	file = filepath.Base(file)
-
-	f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
-	if f[len(f)-1] != '\n' {
-		f += "\n"
-	}
-	fmt.Fprintf(os.Stderr, f, args[1:]...)
-}
diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
deleted file mode 100644
index 44211ad..0000000
--- a/vendor/github.com/pierrec/lz4/debug_stub.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !lz4debug
-
-package lz4
-
-const debugFlag = false
-
-func debug(args ...interface{}) {}
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
deleted file mode 100644
index 43cc14f..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-package lz4
-
-//go:noescape
-func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
deleted file mode 100644
index 20fef39..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.s
+++ /dev/null
@@ -1,375 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-
-// AX scratch
-// BX scratch
-// CX scratch
-// DX token
-//
-// DI &dst
-// SI &src
-// R8 &dst + len(dst)
-// R9 &src + len(src)
-// R11 &dst
-// R12 short output end
-// R13 short input end
-// func decodeBlock(dst, src []byte) int
-// using 50 bytes of stack currently
-TEXT ·decodeBlock(SB), NOSPLIT, $64-56
-	MOVQ dst_base+0(FP), DI
-	MOVQ DI, R11
-	MOVQ dst_len+8(FP), R8
-	ADDQ DI, R8
-
-	MOVQ src_base+24(FP), SI
-	MOVQ src_len+32(FP), R9
-	ADDQ SI, R9
-
-	// shortcut ends
-	// short output end
-	MOVQ R8, R12
-	SUBQ $32, R12
-	// short input end
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-loop:
-	// for si < len(src)
-	CMPQ SI, R9
-	JGE end
-
-	// token := uint32(src[si])
-	MOVBQZX (SI), DX
-	INCQ SI
-
-	// lit_len = token >> 4
-	// if lit_len > 0
-	// CX = lit_len
-	MOVQ DX, CX
-	SHRQ $4, CX
-
-	// if lit_len != 0xF
-	CMPQ CX, $0xF
-	JEQ lit_len_loop_pre
-	CMPQ DI, R12
-	JGE lit_len_loop_pre
-	CMPQ SI, R13
-	JGE lit_len_loop_pre
-
-	// copy shortcut
-
-	// A two-stage shortcut for the most common case:
-	// 1) If the literal length is 0..14, and there is enough space,
-	// enter the shortcut and copy 16 bytes on behalf of the literals
-	// (in the fast mode, only 8 bytes can be safely copied this way).
-	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
-	// manner; but we ensure that there's enough space in the output for
-	// those 18 bytes earlier, upon entering the shortcut (in other words,
-	// there is a combined check for both stages).
-
-	// copy literal
-	MOVOU (SI), X0
-	MOVOU X0, (DI)
-	ADDQ CX, DI
-	ADDQ CX, SI
-
-	MOVQ DX, CX
-	ANDQ $0xF, CX
-
-	// The second stage: prepare for match copying, decode full info.
-	// If it doesn't work out, the info won't be wasted.
-	// offset := uint16(data[:2])
-	MOVWQZX (SI), DX
-	ADDQ $2, SI
-
-	MOVQ DI, AX
-	SUBQ DX, AX
-	CMPQ AX, DI
-	JGT err_short_buf
-
-	// if we can't do the second stage then jump straight to read the
-	// match length, we already have the offset.
-	CMPQ CX, $0xF
-	JEQ match_len_loop_pre
-	CMPQ DX, $8
-	JLT match_len_loop_pre
-	CMPQ AX, R11
-	JLT err_short_buf
-
-	// memcpy(op + 0, match + 0, 8);
-	MOVQ (AX), BX
-	MOVQ BX, (DI)
-	// memcpy(op + 8, match + 8, 8);
-	MOVQ 8(AX), BX
-	MOVQ BX, 8(DI)
-	// memcpy(op +16, match +16, 2);
-	MOVW 16(AX), BX
-	MOVW BX, 16(DI)
-
-	ADDQ $4, DI // minmatch
-	ADDQ CX, DI
-
-	// shortcut complete, load next token
-	JMP loop
-
-lit_len_loop_pre:
-	// if lit_len > 0
-	CMPQ CX, $0
-	JEQ offset
-	CMPQ CX, $0xF
-	JNE copy_literal
-
-lit_len_loop:
-	// for src[si] == 0xFF
-	CMPB (SI), $0xFF
-	JNE lit_len_finalise
-
-	// bounds check src[si+1]
-	MOVQ SI, AX
-	ADDQ $1, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	// lit_len += 0xFF
-	ADDQ $0xFF, CX
-	INCQ SI
-	JMP lit_len_loop
-
-lit_len_finalise:
-	// lit_len += int(src[si])
-	// si++
-	MOVBQZX (SI), AX
-	ADDQ AX, CX
-	INCQ SI
-
-copy_literal:
-	// bounds check src and dst
-	MOVQ SI, AX
-	ADDQ CX, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	MOVQ DI, AX
-	ADDQ CX, AX
-	CMPQ AX, R8
-	JGT err_short_buf
-
-	// whats a good cut off to call memmove?
-	CMPQ CX, $16
-	JGT memmove_lit
-
-	// if len(dst[di:]) < 16
-	MOVQ R8, AX
-	SUBQ DI, AX
-	CMPQ AX, $16
-	JLT memmove_lit
-
-	// if len(src[si:]) < 16
-	MOVQ R9, AX
-	SUBQ SI, AX
-	CMPQ AX, $16
-	JLT memmove_lit
-
-	MOVOU (SI), X0
-	MOVOU X0, (DI)
-
-	JMP finish_lit_copy
-
-memmove_lit:
-	// memmove(to, from, len)
-	MOVQ DI, 0(SP)
-	MOVQ SI, 8(SP)
-	MOVQ CX, 16(SP)
-	// spill
-	MOVQ DI, 24(SP)
-	MOVQ SI, 32(SP)
-	MOVQ CX, 40(SP) // need len to inc SI, DI after
-	MOVB DX, 48(SP)
-	CALL runtime·memmove(SB)
-
-	// restore registers
-	MOVQ 24(SP), DI
-	MOVQ 32(SP), SI
-	MOVQ 40(SP), CX
-	MOVB 48(SP), DX
-
-	// recalc initial values
-	MOVQ dst_base+0(FP), R8
-	MOVQ R8, R11
-	ADDQ dst_len+8(FP), R8
-	MOVQ src_base+24(FP), R9
-	ADDQ src_len+32(FP), R9
-	MOVQ R8, R12
-	SUBQ $32, R12
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-finish_lit_copy:
-	ADDQ CX, SI
-	ADDQ CX, DI
-
-	CMPQ SI, R9
-	JGE end
-
-offset:
-	// CX := mLen
-	// free up DX to use for offset
-	MOVQ DX, CX
-
-	MOVQ SI, AX
-	ADDQ $2, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	// offset
-	// DX := int(src[si]) | int(src[si+1])<<8
-	MOVWQZX (SI), DX
-	ADDQ $2, SI
-
-	// 0 offset is invalid
-	CMPQ DX, $0
-	JEQ err_corrupt
-
-	ANDB $0xF, CX
-
-match_len_loop_pre:
-	// if mlen != 0xF
-	CMPB CX, $0xF
-	JNE copy_match
-
-match_len_loop:
-	// for src[si] == 0xFF
-	// lit_len += 0xFF
-	CMPB (SI), $0xFF
-	JNE match_len_finalise
-
-	// bounds check src[si+1]
-	MOVQ SI, AX
-	ADDQ $1, AX
-	CMPQ AX, R9
-	JGT err_short_buf
-
-	ADDQ $0xFF, CX
-	INCQ SI
-	JMP match_len_loop
-
-match_len_finalise:
-	// lit_len += int(src[si])
-	// si++
-	MOVBQZX (SI), AX
-	ADDQ AX, CX
-	INCQ SI
-
-copy_match:
-	// mLen += minMatch
-	ADDQ $4, CX
-
-	// check we have match_len bytes left in dst
-	// di+match_len < len(dst)
-	MOVQ DI, AX
-	ADDQ CX, AX
-	CMPQ AX, R8
-	JGT err_short_buf
-
-	// DX = offset
-	// CX = match_len
-	// BX = &dst + (di - offset)
-	MOVQ DI, BX
-	SUBQ DX, BX
-
-	// check BX is within dst
-	// if BX < &dst
-	CMPQ BX, R11
-	JLT err_short_buf
-
-	// if offset + match_len < di
-	MOVQ BX, AX
-	ADDQ CX, AX
-	CMPQ DI, AX
-	JGT copy_interior_match
-
-	// AX := len(dst[:di])
-	// MOVQ DI, AX
-	// SUBQ R11, AX
-
-	// copy 16 bytes at a time
-	// if di-offset < 16 copy 16-(di-offset) bytes to di
-	// then do the remaining
-
-copy_match_loop:
-	// for match_len >= 0
-	// dst[di] = dst[i]
-	// di++
-	// i++
-	MOVB (BX), AX
-	MOVB AX, (DI)
-	INCQ DI
-	INCQ BX
-	DECQ CX
-
-	CMPQ CX, $0
-	JGT copy_match_loop
-
-	JMP loop
-
-copy_interior_match:
-	CMPQ CX, $16
-	JGT memmove_match
-
-	// if len(dst[di:]) < 16
-	MOVQ R8, AX
-	SUBQ DI, AX
-	CMPQ AX, $16
-	JLT memmove_match
-
-	MOVOU (BX), X0
-	MOVOU X0, (DI)
-
-	ADDQ CX, DI
-	JMP loop
-
-memmove_match:
-	// memmove(to, from, len)
-	MOVQ DI, 0(SP)
-	MOVQ BX, 8(SP)
-	MOVQ CX, 16(SP)
-	// spill
-	MOVQ DI, 24(SP)
-	MOVQ SI, 32(SP)
-	MOVQ CX, 40(SP) // need len to inc SI, DI after
-	CALL runtime·memmove(SB)
-
-	// restore registers
-	MOVQ 24(SP), DI
-	MOVQ 32(SP), SI
-	MOVQ 40(SP), CX
-
-	// recalc initial values
-	MOVQ dst_base+0(FP), R8
-	MOVQ R8, R11 // TODO: make these sensible numbers
-	ADDQ dst_len+8(FP), R8
-	MOVQ src_base+24(FP), R9
-	ADDQ src_len+32(FP), R9
-	MOVQ R8, R12
-	SUBQ $32, R12
-	MOVQ R9, R13
-	SUBQ $16, R13
-
-	ADDQ CX, DI
-	JMP loop
-
-err_corrupt:
-	MOVQ $-1, ret+48(FP)
-	RET
-
-err_short_buf:
-	MOVQ $-2, ret+48(FP)
-	RET
-
-end:
-	SUBQ R11, DI
-	MOVQ DI, ret+48(FP)
-	RET
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
deleted file mode 100644
index 919888e..0000000
--- a/vendor/github.com/pierrec/lz4/decode_other.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// +build !amd64 appengine !gc noasm
-
-package lz4
-
-func decodeBlock(dst, src []byte) (ret int) {
-	const hasError = -2
-	defer func() {
-		if recover() != nil {
-			ret = hasError
-		}
-	}()
-
-	var si, di int
-	for {
-		// Literals and match lengths (token).
-		b := int(src[si])
-		si++
-
-		// Literals.
-		if lLen := b >> 4; lLen > 0 {
-			switch {
-			case lLen < 0xF && si+16 < len(src):
-				// Shortcut 1
-				// if we have enough room in src and dst, and the literals length
-				// is small enough (0..14) then copy all 16 bytes, even if not all
-				// are part of the literals.
-				copy(dst[di:], src[si:si+16])
-				si += lLen
-				di += lLen
-				if mLen := b & 0xF; mLen < 0xF {
-					// Shortcut 2
-					// if the match length (4..18) fits within the literals, then copy
-					// all 18 bytes, even if not all are part of the literals.
-					mLen += 4
-					if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
-						i := di - offset
-						end := i + 18
-						if end > len(dst) {
-							// The remaining buffer may not hold 18 bytes.
-							// See https://github.com/pierrec/lz4/issues/51.
-							end = len(dst)
-						}
-						copy(dst[di:], dst[i:end])
-						si += 2
-						di += mLen
-						continue
-					}
-				}
-			case lLen == 0xF:
-				for src[si] == 0xFF {
-					lLen += 0xFF
-					si++
-				}
-				lLen += int(src[si])
-				si++
-				fallthrough
-			default:
-				copy(dst[di:di+lLen], src[si:si+lLen])
-				si += lLen
-				di += lLen
-			}
-		}
-		if si >= len(src) {
-			return di
-		}
-
-		offset := int(src[si]) | int(src[si+1])<<8
-		if offset == 0 {
-			return hasError
-		}
-		si += 2
-
-		// Match.
-		mLen := b & 0xF
-		if mLen == 0xF {
-			for src[si] == 0xFF {
-				mLen += 0xFF
-				si++
-			}
-			mLen += int(src[si])
-			si++
-		}
-		mLen += minMatch
-
-		// Copy the match.
-		expanded := dst[di-offset:]
-		if mLen > offset {
-			// Efficiently copy the match dst[di-offset:di] into the dst slice.
-			bytesToCopy := offset * (mLen / offset)
-			for n := offset; n <= bytesToCopy+offset; n *= 2 {
-				copy(expanded[n:], expanded[:n])
-			}
-			di += bytesToCopy
-			mLen -= bytesToCopy
-		}
-		di += copy(dst[di:di+mLen], expanded[:mLen])
-	}
-}
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
deleted file mode 100644
index 1c45d18..0000000
--- a/vendor/github.com/pierrec/lz4/errors.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package lz4
-
-import (
-	"errors"
-	"fmt"
-	"os"
-	rdebug "runtime/debug"
-)
-
-var (
-	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
-	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
-	ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
-	// ErrInvalid is returned when reading an invalid LZ4 archive.
-	ErrInvalid = errors.New("lz4: bad magic number")
-	// ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
-	ErrBlockDependency = errors.New("lz4: block dependency not supported")
-	// ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
-	ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
-)
-
-func recoverBlock(e *error) {
-	if r := recover(); r != nil && *e == nil {
-		if debugFlag {
-			fmt.Fprintln(os.Stderr, r)
-			rdebug.PrintStack()
-		}
-		*e = ErrInvalidSourceShortBuffer
-	}
-}
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
deleted file mode 100644
index 7a76a6b..0000000
--- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
-// (https://github.com/Cyan4973/XXH/)
-package xxh32
-
-import (
-	"encoding/binary"
-)
-
-const (
-	prime1 uint32 = 2654435761
-	prime2 uint32 = 2246822519
-	prime3 uint32 = 3266489917
-	prime4 uint32 = 668265263
-	prime5 uint32 = 374761393
-
-	primeMask   = 0xFFFFFFFF
-	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
-	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
-)
-
-// XXHZero represents an xxhash32 object with seed 0.
-type XXHZero struct {
-	v1       uint32
-	v2       uint32
-	v3       uint32
-	v4       uint32
-	totalLen uint64
-	buf      [16]byte
-	bufused  int
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xxh XXHZero) Sum(b []byte) []byte {
-	h32 := xxh.Sum32()
-	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
-}
-
-// Reset resets the Hash to its initial state.
-func (xxh *XXHZero) Reset() {
-	xxh.v1 = prime1plus2
-	xxh.v2 = prime2
-	xxh.v3 = 0
-	xxh.v4 = prime1minus
-	xxh.totalLen = 0
-	xxh.bufused = 0
-}
-
-// Size returns the number of bytes returned by Sum().
-func (xxh *XXHZero) Size() int {
-	return 4
-}
-
-// BlockSize gives the minimum number of bytes accepted by Write().
-func (xxh *XXHZero) BlockSize() int {
-	return 1
-}
-
-// Write adds input bytes to the Hash.
-// It never returns an error.
-func (xxh *XXHZero) Write(input []byte) (int, error) {
-	if xxh.totalLen == 0 {
-		xxh.Reset()
-	}
-	n := len(input)
-	m := xxh.bufused
-
-	xxh.totalLen += uint64(n)
-
-	r := len(xxh.buf) - m
-	if n < r {
-		copy(xxh.buf[m:], input)
-		xxh.bufused += len(input)
-		return n, nil
-	}
-
-	p := 0
-	// Causes compiler to work directly from registers instead of stack:
-	v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
-	if m > 0 {
-		// some data left from previous update
-		copy(xxh.buf[xxh.bufused:], input[:r])
-		xxh.bufused += len(input) - r
-
-		// fast rotl(13)
-		buf := xxh.buf[:16] // BCE hint.
-		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
-		p = r
-		xxh.bufused = 0
-	}
-
-	for n := n - 16; p <= n; p += 16 {
-		sub := input[p:][:16] //BCE hint for compiler
-		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-	}
-	xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
-
-	copy(xxh.buf[xxh.bufused:], input[p:])
-	xxh.bufused += len(input) - p
-
-	return n, nil
-}
-
-// Sum32 returns the 32 bits Hash value.
-func (xxh *XXHZero) Sum32() uint32 {
-	h32 := uint32(xxh.totalLen)
-	if h32 >= 16 {
-		h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
-	} else {
-		h32 += prime5
-	}
-
-	p := 0
-	n := xxh.bufused
-	buf := xxh.buf
-	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
-		h32 = rol17(h32) * prime4
-	}
-	for ; p < n; p++ {
-		h32 += uint32(buf[p]) * prime5
-		h32 = rol11(h32) * prime1
-	}
-
-	h32 ^= h32 >> 15
-	h32 *= prime2
-	h32 ^= h32 >> 13
-	h32 *= prime3
-	h32 ^= h32 >> 16
-
-	return h32
-}
-
-// ChecksumZero returns the 32bits Hash value.
-func ChecksumZero(input []byte) uint32 {
-	n := len(input)
-	h32 := uint32(n)
-
-	if n < 16 {
-		h32 += prime5
-	} else {
-		v1 := prime1plus2
-		v2 := prime2
-		v3 := uint32(0)
-		v4 := prime1minus
-		p := 0
-		for n := n - 16; p <= n; p += 16 {
-			sub := input[p:][:16] //BCE hint for compiler
-			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
-			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
-			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
-			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
-		}
-		input = input[p:]
-		n -= p
-		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
-	}
-
-	p := 0
-	for n := n - 4; p <= n; p += 4 {
-		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
-		h32 = rol17(h32) * prime4
-	}
-	for p < n {
-		h32 += uint32(input[p]) * prime5
-		h32 = rol11(h32) * prime1
-		p++
-	}
-
-	h32 ^= h32 >> 15
-	h32 *= prime2
-	h32 ^= h32 >> 13
-	h32 *= prime3
-	h32 ^= h32 >> 16
-
-	return h32
-}
-
-// Uint32Zero hashes x with seed 0.
-func Uint32Zero(x uint32) uint32 {
-	h := prime5 + 4 + x*prime3
-	h = rol17(h) * prime4
-	h ^= h >> 15
-	h *= prime2
-	h ^= h >> 13
-	h *= prime3
-	h ^= h >> 16
-	return h
-}
-
-func rol1(u uint32) uint32 {
-	return u<<1 | u>>31
-}
-
-func rol7(u uint32) uint32 {
-	return u<<7 | u>>25
-}
-
-func rol11(u uint32) uint32 {
-	return u<<11 | u>>21
-}
-
-func rol12(u uint32) uint32 {
-	return u<<12 | u>>20
-}
-
-func rol13(u uint32) uint32 {
-	return u<<13 | u>>19
-}
-
-func rol17(u uint32) uint32 {
-	return u<<17 | u>>15
-}
-
-func rol18(u uint32) uint32 {
-	return u<<18 | u>>14
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
deleted file mode 100644
index a3284bd..0000000
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Package lz4 implements reading and writing lz4 compressed data (a frame),
-// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
-//
-// Although the block level compression and decompression functions are exposed and are fully compatible
-// with the lz4 block format definition, they are low level and should not be used directly.
-// For a complete description of an lz4 compressed block, see:
-// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
-//
-// See https://github.com/Cyan4973/lz4 for the reference C implementation.
-//
-package lz4
-
-import (
-	"math/bits"
-	"sync"
-)
-
-const (
-	// Extension is the LZ4 frame file name extension
-	Extension = ".lz4"
-	// Version is the LZ4 frame format version
-	Version = 1
-
-	frameMagic       uint32 = 0x184D2204
-	frameSkipMagic   uint32 = 0x184D2A50
-	frameMagicLegacy uint32 = 0x184C2102
-
-	// The following constants are used to setup the compression algorithm.
-	minMatch            = 4  // the minimum size of the match sequence size (4 bytes)
-	winSizeLog          = 16 // LZ4 64Kb window size limit
-	winSize             = 1 << winSizeLog
-	winMask             = winSize - 1 // 64Kb window of previous data for dependent blocks
-	compressedBlockFlag = 1 << 31
-	compressedBlockMask = compressedBlockFlag - 1
-
-	// hashLog determines the size of the hash table used to quickly find a previous match position.
-	// Its value influences the compression speed and memory usage, the lower the faster,
-	// but at the expense of the compression ratio.
-	// 16 seems to be the best compromise for fast compression.
-	hashLog = 16
-	htSize  = 1 << hashLog
-
-	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
-)
-
-// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
-const (
-	blockSize64K = 1 << (16 + 2*iota)
-	blockSize256K
-	blockSize1M
-	blockSize4M
-)
-
-var (
-	// Keep a pool of buffers for each valid block sizes.
-	bsMapValue = [...]*sync.Pool{
-		newBufferPool(2 * blockSize64K),
-		newBufferPool(2 * blockSize256K),
-		newBufferPool(2 * blockSize1M),
-		newBufferPool(2 * blockSize4M),
-	}
-)
-
-// newBufferPool returns a pool for buffers of the given size.
-func newBufferPool(size int) *sync.Pool {
-	return &sync.Pool{
-		New: func() interface{} {
-			return make([]byte, size)
-		},
-	}
-}
-
-// getBuffer gets a buffer of the given size from its pool.
-func getBuffer(size int) []byte {
-	idx := blockSizeValueToIndex(size) - 4
-	return bsMapValue[idx].Get().([]byte)
-}
-
-// putBuffer returns a buffer to its pool.
-func putBuffer(size int, buf []byte) {
-	if cap(buf) > 0 {
-		idx := blockSizeValueToIndex(size) - 4
-		bsMapValue[idx].Put(buf[:cap(buf)])
-	}
-}
-func blockSizeIndexToValue(i byte) int {
-	return 1 << (16 + 2*uint(i))
-}
-func isValidBlockSize(size int) bool {
-	const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
-
-	return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
-}
-func blockSizeValueToIndex(size int) byte {
-	return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
-}
-
-// Header describes the various flags that can be set on a Writer or obtained from a Reader.
-// The default values match those of the LZ4 frame format definition
-// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
-//
-// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
-// It is the caller's responsibility to check them if necessary.
-type Header struct {
-	BlockChecksum    bool   // Compressed blocks checksum flag.
-	NoChecksum       bool   // Frame checksum flag.
-	BlockMaxSize     int    // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
-	Size             uint64 // Frame total size. It is _not_ computed by the Writer.
-	CompressionLevel int    // Compression level (higher is better, use 0 for fastest compression).
-	done             bool   // Header processed flag (Read or Write and checked).
-}
-
-// Reset resets the internal status.
-func (h *Header) Reset() {
-	h.done = false
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
deleted file mode 100644
index 9a0fb00..0000000
--- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//+build go1.10
-
-package lz4
-
-import (
-	"fmt"
-	"strings"
-)
-
-func (h Header) String() string {
-	var s strings.Builder
-
-	s.WriteString(fmt.Sprintf("%T{", h))
-	if h.BlockChecksum {
-		s.WriteString("BlockChecksum: true ")
-	}
-	if h.NoChecksum {
-		s.WriteString("NoChecksum: true ")
-	}
-	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
-		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
-	}
-	if l := h.CompressionLevel; l != 0 {
-		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
-	}
-	s.WriteByte('}')
-
-	return s.String()
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
deleted file mode 100644
index 12c761a..0000000
--- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//+build !go1.10
-
-package lz4
-
-import (
-	"bytes"
-	"fmt"
-)
-
-func (h Header) String() string {
-	var s bytes.Buffer
-
-	s.WriteString(fmt.Sprintf("%T{", h))
-	if h.BlockChecksum {
-		s.WriteString("BlockChecksum: true ")
-	}
-	if h.NoChecksum {
-		s.WriteString("NoChecksum: true ")
-	}
-	if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
-		s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
-	}
-	if l := h.CompressionLevel; l != 0 {
-		s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
-	}
-	s.WriteByte('}')
-
-	return s.String()
-}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
deleted file mode 100644
index 87dd72b..0000000
--- a/vendor/github.com/pierrec/lz4/reader.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"io/ioutil"
-
-	"github.com/pierrec/lz4/internal/xxh32"
-)
-
-// Reader implements the LZ4 frame decoder.
-// The Header is set after the first call to Read().
-// The Header may change between Read() calls in case of concatenated frames.
-type Reader struct {
-	Header
-	// Handler called when a block has been successfully read.
-	// It provides the number of bytes read.
-	OnBlockDone func(size int)
-
-	buf      [8]byte       // Scrap buffer.
-	pos      int64         // Current position in src.
-	src      io.Reader     // Source.
-	zdata    []byte        // Compressed data.
-	data     []byte        // Uncompressed data.
-	idx      int           // Index of unread bytes into data.
-	checksum xxh32.XXHZero // Frame hash.
-	skip     int64         // Bytes to skip before next read.
-	dpos     int64         // Position in dest
-}
-
-// NewReader returns a new LZ4 frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReader(src io.Reader) *Reader {
-	r := &Reader{src: src}
-	return r
-}
-
-// readHeader checks the frame magic number and parses the frame descriptor.
-// Skippable frames are supported even as a first frame although the LZ4
-// specification recommends skippable frames not to be used as first frames.
-func (z *Reader) readHeader(first bool) error {
-	defer z.checksum.Reset()
-
-	buf := z.buf[:]
-	for {
-		magic, err := z.readUint32()
-		if err != nil {
-			z.pos += 4
-			if !first && err == io.ErrUnexpectedEOF {
-				return io.EOF
-			}
-			return err
-		}
-		if magic == frameMagic {
-			break
-		}
-		if magic>>8 != frameSkipMagic>>8 {
-			return ErrInvalid
-		}
-		skipSize, err := z.readUint32()
-		if err != nil {
-			return err
-		}
-		z.pos += 4
-		m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
-		if err != nil {
-			return err
-		}
-		z.pos += m
-	}
-
-	// Header.
-	if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
-		return err
-	}
-	z.pos += 8
-
-	b := buf[0]
-	if v := b >> 6; v != Version {
-		return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
-	}
-	if b>>5&1 == 0 {
-		return ErrBlockDependency
-	}
-	z.BlockChecksum = b>>4&1 > 0
-	frameSize := b>>3&1 > 0
-	z.NoChecksum = b>>2&1 == 0
-
-	bmsID := buf[1] >> 4 & 0x7
-	if bmsID < 4 || bmsID > 7 {
-		return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
-	}
-	bSize := blockSizeIndexToValue(bmsID - 4)
-	z.BlockMaxSize = bSize
-
-	// Allocate the compressed/uncompressed buffers.
-	// The compressed buffer cannot exceed the uncompressed one.
-	if n := 2 * bSize; cap(z.zdata) < n {
-		z.zdata = make([]byte, n, n)
-	}
-	if debugFlag {
-		debug("header block max size id=%d size=%d", bmsID, bSize)
-	}
-	z.zdata = z.zdata[:bSize]
-	z.data = z.zdata[:cap(z.zdata)][bSize:]
-	z.idx = len(z.data)
-
-	_, _ = z.checksum.Write(buf[0:2])
-
-	if frameSize {
-		buf := buf[:8]
-		if _, err := io.ReadFull(z.src, buf); err != nil {
-			return err
-		}
-		z.Size = binary.LittleEndian.Uint64(buf)
-		z.pos += 8
-		_, _ = z.checksum.Write(buf)
-	}
-
-	// Header checksum.
-	if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
-		return err
-	}
-	z.pos++
-	if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
-		return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
-	}
-
-	z.Header.done = true
-	if debugFlag {
-		debug("header read: %v", z.Header)
-	}
-
-	return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-func (z *Reader) Read(buf []byte) (int, error) {
-	if debugFlag {
-		debug("Read buf len=%d", len(buf))
-	}
-	if !z.Header.done {
-		if err := z.readHeader(true); err != nil {
-			return 0, err
-		}
-		if debugFlag {
-			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
-				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
-		}
-	}
-
-	if len(buf) == 0 {
-		return 0, nil
-	}
-
-	if z.idx == len(z.data) {
-		// No data ready for reading, process the next block.
-		if debugFlag {
-			debug("reading block from writer")
-		}
-		// Reset uncompressed buffer
-		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-
-		// Block length: 0 = end of frame, highest bit set: uncompressed.
-		bLen, err := z.readUint32()
-		if err != nil {
-			return 0, err
-		}
-		z.pos += 4
-
-		if bLen == 0 {
-			// End of frame reached.
-			if !z.NoChecksum {
-				// Validate the frame checksum.
-				checksum, err := z.readUint32()
-				if err != nil {
-					return 0, err
-				}
-				if debugFlag {
-					debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
-				}
-				z.pos += 4
-				if h := z.checksum.Sum32(); checksum != h {
-					return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
-				}
-			}
-
-			// Get ready for the next concatenated frame and keep the position.
-			pos := z.pos
-			z.Reset(z.src)
-			z.pos = pos
-
-			// Since multiple frames can be concatenated, check for more.
-			return 0, z.readHeader(false)
-		}
-
-		if debugFlag {
-			debug("raw block size %d", bLen)
-		}
-		if bLen&compressedBlockFlag > 0 {
-			// Uncompressed block.
-			bLen &= compressedBlockMask
-			if debugFlag {
-				debug("uncompressed block size %d", bLen)
-			}
-			if int(bLen) > cap(z.data) {
-				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
-			}
-			z.data = z.data[:bLen]
-			if _, err := io.ReadFull(z.src, z.data); err != nil {
-				return 0, err
-			}
-			z.pos += int64(bLen)
-			if z.OnBlockDone != nil {
-				z.OnBlockDone(int(bLen))
-			}
-
-			if z.BlockChecksum {
-				checksum, err := z.readUint32()
-				if err != nil {
-					return 0, err
-				}
-				z.pos += 4
-
-				if h := xxh32.ChecksumZero(z.data); h != checksum {
-					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
-				}
-			}
-
-		} else {
-			// Compressed block.
-			if debugFlag {
-				debug("compressed block size %d", bLen)
-			}
-			if int(bLen) > cap(z.data) {
-				return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
-			}
-			zdata := z.zdata[:bLen]
-			if _, err := io.ReadFull(z.src, zdata); err != nil {
-				return 0, err
-			}
-			z.pos += int64(bLen)
-
-			if z.BlockChecksum {
-				checksum, err := z.readUint32()
-				if err != nil {
-					return 0, err
-				}
-				z.pos += 4
-
-				if h := xxh32.ChecksumZero(zdata); h != checksum {
-					return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
-				}
-			}
-
-			n, err := UncompressBlock(zdata, z.data)
-			if err != nil {
-				return 0, err
-			}
-			z.data = z.data[:n]
-			if z.OnBlockDone != nil {
-				z.OnBlockDone(n)
-			}
-		}
-
-		if !z.NoChecksum {
-			_, _ = z.checksum.Write(z.data)
-			if debugFlag {
-				debug("current frame checksum %x", z.checksum.Sum32())
-			}
-		}
-		z.idx = 0
-	}
-
-	if z.skip > int64(len(z.data[z.idx:])) {
-		z.skip -= int64(len(z.data[z.idx:]))
-		z.dpos += int64(len(z.data[z.idx:]))
-		z.idx = len(z.data)
-		return 0, nil
-	}
-
-	z.idx += int(z.skip)
-	z.dpos += z.skip
-	z.skip = 0
-
-	n := copy(buf, z.data[z.idx:])
-	z.idx += n
-	z.dpos += int64(n)
-	if debugFlag {
-		debug("copied %d bytes to input", n)
-	}
-
-	return n, nil
-}
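Because the decoder restarts on the next frame header after a frame ends, concatenated frames decode as one continuous stream. A small sketch against this (v1) API:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pierrec/lz4"
)

func main() {
	// Write two independent frames back to back into one buffer.
	var stream bytes.Buffer
	for _, s := range []string{"first frame. ", "second frame."} {
		zw := lz4.NewWriter(&stream)
		_, _ = io.WriteString(zw, s)
		_ = zw.Close()
	}

	// A single Reader decodes both frames before returning io.EOF.
	out, _ := io.ReadAll(lz4.NewReader(&stream))
	fmt.Println(string(out)) // first frame. second frame.
}
```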
-
-// Seek implements io.Seeker, but supports seeking forward from the current
-// position only; any other seek returns an error. It allows skipping output
-// bytes which aren't needed, which in some scenarios is faster than reading
-// and discarding them.
-// Note that this may cause future calls to Read() to return 0 bytes if all
-// of the data they would have returned has been skipped.
-func (z *Reader) Seek(offset int64, whence int) (int64, error) {
-	if offset < 0 || whence != io.SeekCurrent {
-		return z.dpos + z.skip, ErrUnsupportedSeek
-	}
-	z.skip += offset
-	return z.dpos + z.skip, nil
-}
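A sketch of the forward-seek use case, assuming an lz4 file on disk (the file name is hypothetical):

```go
package main

import (
	"io"
	"log"
	"os"

	"github.com/pierrec/lz4"
)

func main() {
	f, err := os.Open("data.lz4") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	zr := lz4.NewReader(f)
	// Skip the first MiB of decompressed output without copying it out.
	if _, err := zr.Seek(1<<20, io.SeekCurrent); err != nil {
		log.Fatal(err)
	}
	_, _ = io.Copy(os.Stdout, zr) // stream the remainder
}
```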
-
-// Reset discards the Reader's state and makes it equivalent to its
-// initial state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *Reader) Reset(r io.Reader) {
-	z.Header = Header{}
-	z.pos = 0
-	z.src = r
-	z.zdata = z.zdata[:0]
-	z.data = z.data[:0]
-	z.idx = 0
-	z.checksum.Reset()
-}
-
-// readUint32 reads a uint32 from the underlying source.
-// It reuses the already allocated scratch buffer to avoid additional allocations.
-func (z *Reader) readUint32() (uint32, error) {
-	buf := z.buf[:4]
-	_, err := io.ReadFull(z.src, buf)
-	x := binary.LittleEndian.Uint32(buf)
-	return x, err
-}
diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go
deleted file mode 100644
index 1670a77..0000000
--- a/vendor/github.com/pierrec/lz4/reader_legacy.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-// ReaderLegacy implements the LZ4Demo frame decoder.
-// The Header is set after the first call to Read().
-type ReaderLegacy struct {
-	Header
-	// Handler called when a block has been successfully read.
-	// It provides the number of bytes read.
-	OnBlockDone func(size int)
-
-	lastBlock bool
-	buf       [8]byte   // Scrap buffer.
-	pos       int64     // Current position in src.
-	src       io.Reader // Source.
-	zdata     []byte    // Compressed data.
-	data      []byte    // Uncompressed data.
-	idx       int       // Index of unread bytes into data.
-	skip      int64     // Bytes to skip before next read.
-	dpos      int64     // Position in dest
-}
-
-// NewReaderLegacy returns a new LZ4Demo frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReaderLegacy(src io.Reader) *ReaderLegacy {
-	r := &ReaderLegacy{src: src}
-	return r
-}
-
-// readLegacyHeader checks the legacy frame magic number and sets up the
-// block buffers. Unlike readHeader, it does not handle skippable frames.
-func (z *ReaderLegacy) readLegacyHeader() error {
-	z.lastBlock = false
-	magic, err := z.readUint32()
-	if err != nil {
-		z.pos += 4
-		if err == io.ErrUnexpectedEOF {
-			return io.EOF
-		}
-		return err
-	}
-	if magic != frameMagicLegacy {
-		return ErrInvalid
-	}
-	z.pos += 4
-
-	// The legacy format has a fixed 8MB block size.
-	// https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
-	bSize := blockSize4M * 2
-
-	// Allocate the compressed/uncompressed buffers.
-	// The compressed buffer cannot exceed the uncompressed one.
-	if n := 2 * bSize; cap(z.zdata) < n {
-		z.zdata = make([]byte, n, n)
-	}
-	if debugFlag {
-		debug("header block max size size=%d", bSize)
-	}
-	z.zdata = z.zdata[:bSize]
-	z.data = z.zdata[:cap(z.zdata)][bSize:]
-	z.idx = len(z.data)
-
-	z.Header.done = true
-	if debugFlag {
-		debug("header read: %v", z.Header)
-	}
-
-	return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-func (z *ReaderLegacy) Read(buf []byte) (int, error) {
-	if debugFlag {
-		debug("Read buf len=%d", len(buf))
-	}
-	if !z.Header.done {
-		if err := z.readLegacyHeader(); err != nil {
-			return 0, err
-		}
-		if debugFlag {
-			debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
-				len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
-		}
-	}
-
-	if len(buf) == 0 {
-		return 0, nil
-	}
-
-	if z.idx == len(z.data) {
-		// No data ready for reading, process the next block.
-		if debugFlag {
-			debug("  reading block from writer %d %d", z.idx, blockSize4M*2)
-		}
-
-		// Reset uncompressed buffer
-		z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-
-		bLen, err := z.readUint32()
-		if err != nil {
-			return 0, err
-		}
-		if debugFlag {
-			debug("   bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
-		}
-		z.pos += 4
-
-		// Legacy blocks are always compressed, even when detrimental
-		if debugFlag {
-			debug("   compressed block size %d", bLen)
-		}
-
-		if int(bLen) > cap(z.data) {
-			return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
-		}
-		zdata := z.zdata[:bLen]
-		if _, err := io.ReadFull(z.src, zdata); err != nil {
-			return 0, err
-		}
-		z.pos += int64(bLen)
-
-		n, err := UncompressBlock(zdata, z.data)
-		if err != nil {
-			return 0, err
-		}
-
-		z.data = z.data[:n]
-		if z.OnBlockDone != nil {
-			z.OnBlockDone(n)
-		}
-
-		z.idx = 0
-
-		// Legacy blocks are fixed at 8MB; if we read a decompressed block
-		// smaller than this, we have reached the end of the stream.
-		if n < blockSize4M*2 {
-			z.lastBlock = true
-		}
-	}
-
-	if z.skip > int64(len(z.data[z.idx:])) {
-		z.skip -= int64(len(z.data[z.idx:]))
-		z.dpos += int64(len(z.data[z.idx:]))
-		z.idx = len(z.data)
-		return 0, nil
-	}
-
-	z.idx += int(z.skip)
-	z.dpos += z.skip
-	z.skip = 0
-
-	n := copy(buf, z.data[z.idx:])
-	z.idx += n
-	z.dpos += int64(n)
-	if debugFlag {
-		debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
-	}
-	if z.lastBlock && len(z.data) == z.idx {
-		return n, io.EOF
-	}
-	return n, nil
-}
-
-// Seek implements io.Seeker, but supports seeking forward from the current
-// position only; any other seek returns an error. It allows skipping output
-// bytes which aren't needed, which in some scenarios is faster than reading
-// and discarding them.
-// Note that this may cause future calls to Read() to return 0 bytes if all
-// of the data they would have returned has been skipped.
-func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
-	if offset < 0 || whence != io.SeekCurrent {
-		return z.dpos + z.skip, ErrUnsupportedSeek
-	}
-	z.skip += offset
-	return z.dpos + z.skip, nil
-}
-
-// Reset discards the Reader's state and makes it equivalent to its
-// initial state from NewReaderLegacy, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *ReaderLegacy) Reset(r io.Reader) {
-	z.Header = Header{}
-	z.pos = 0
-	z.src = r
-	z.zdata = z.zdata[:0]
-	z.data = z.data[:0]
-	z.idx = 0
-}
-
-// readUint32 reads a uint32 from the underlying source.
-// It reuses the already allocated scratch buffer to avoid additional allocations.
-func (z *ReaderLegacy) readUint32() (uint32, error) {
-	buf := z.buf[:4]
-	_, err := io.ReadFull(z.src, buf)
-	x := binary.LittleEndian.Uint32(buf)
-	return x, err
-}
diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore
new file mode 100644
index 0000000..5d7e88d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.gitignore
@@ -0,0 +1,36 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
+
+fuzz/*.zip
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE
similarity index 100%
rename from vendor/github.com/pierrec/lz4/LICENSE
rename to vendor/github.com/pierrec/lz4/v4/LICENSE
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 0000000..dee7754
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,92 @@
+# lz4 : LZ4 compression in pure Go
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/pierrec/lz4/v4.svg)](https://pkg.go.dev/github.com/pierrec/lz4/v4)
+[![CI](https://github.com/pierrec/lz4/workflows/ci/badge.svg)](https://github.com/pierrec/lz4/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/pierrec/lz4)](https://goreportcard.com/report/github.com/pierrec/lz4)
+[![GitHub tag (latest SemVer)](https://img.shields.io/github/tag/pierrec/lz4.svg?style=social)](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low-level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [implementation](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4/v4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/v4/cmd/lz4c@latest
+```
+
+Usage
+
+```
+Usage of lz4c:
+  -version
+        print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+  -bc
+        enable block checksum
+  -l int
+        compression level (0=fastest)
+  -sc
+        disable stream checksum
+  -size string
+        block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+	// Compress the input string.
+	_, _ = io.Copy(zw, r)
+	_ = zw.Close() // Make sure the writer is closed
+	_ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
+
+## Contributing
+
+Contributions are very welcome for bug fixes, performance improvements, and more!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
new file mode 100644
index 0000000..8df0dc7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
@@ -0,0 +1,222 @@
+package lz4
+
+import (
+	"errors"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+type crState int
+
+const (
+	crStateInitial crState = iota
+	crStateReading
+	crStateFlushing
+	crStateDone
+)
+
+type CompressingReader struct {
+	state   crState
+	src     io.ReadCloser             // source reader
+	level   lz4block.CompressionLevel // how hard to try
+	frame   *lz4stream.Frame          // frame being built
+	in      []byte
+	out     ovWriter
+	handler func(int)
+}
+
+// NewCompressingReader creates a reader which reads compressed data from
+// a raw stream. This makes it the logical opposite of a normal lz4.Reader.
+// We require an io.ReadCloser as the underlying source for compatibility
+// with Go's http.Request.
+func NewCompressingReader(src io.ReadCloser) *CompressingReader {
+	zrd := &CompressingReader{
+		frame: lz4stream.NewFrame(),
+	}
+
+	_ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone)
+	zrd.Reset(src)
+
+	return zrd
+}
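A sketch of the intended http use, compressing a request body on the fly as the transport pulls from it (the URL, file name, and header handling are assumptions, not part of this package):

```go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	f, err := os.Open("payload.bin") // hypothetical input file
	if err != nil {
		log.Fatal(err)
	}

	// The body is compressed lazily, block by block, as it is read.
	zrd := lz4.NewCompressingReader(f)
	req, err := http.NewRequest(http.MethodPost, "https://example.com/upload", zrd)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Encoding", "lz4") // server-side support assumed

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```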
+
+// Source exposes the underlying source stream for introspection and control.
+func (zrd *CompressingReader) Source() io.ReadCloser {
+	return zrd.src
+}
+
+// Close simply invokes the underlying stream Close method. This method is
+// provided for the benefit of Go http client/server, which relies on Close
+// for goroutine termination.
+func (zrd *CompressingReader) Close() error {
+	return zrd.src.Close()
+}
+
+// Apply applies useful options to the lz4 encoder.
+func (zrd *CompressingReader) Apply(options ...Option) (err error) {
+	if zrd.state != crStateInitial {
+		return lz4errors.ErrOptionClosedOrError
+	}
+
+	zrd.Reset(zrd.src)
+
+	for _, o := range options {
+		if err = o(zrd); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (*CompressingReader) private() {}
+
+func (zrd *CompressingReader) init() error {
+	zrd.frame.InitW(&zrd.out, 1, false)
+	size := zrd.frame.Descriptor.Flags.BlockSizeIndex()
+	zrd.in = size.Get()
+	return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out)
+}
+
+// Read returns lz4 compressed data, compressing the underlying raw stream on the fly.
+func (zrd *CompressingReader) Read(p []byte) (n int, err error) {
+	defer func() {
+		if err != nil {
+			zrd.state = crStateDone
+		}
+	}()
+
+	if !zrd.out.reset(p) {
+		return len(p), nil
+	}
+
+	switch zrd.state {
+	case crStateInitial:
+		err = zrd.init()
+		if err != nil {
+			return
+		}
+		zrd.state = crStateReading
+	case crStateDone:
+		return 0, errors.New("this reader is done")
+	case crStateFlushing:
+		if zrd.out.dataPos > 0 {
+			n = zrd.out.dataPos
+			zrd.out.data = nil
+			zrd.out.dataPos = 0
+			return
+		}
+		zrd.state = crStateDone
+		return 0, io.EOF
+	}
+
+	for zrd.state == crStateReading {
+		block := zrd.frame.Blocks.Block
+
+		var rCount int
+		rCount, err = io.ReadFull(zrd.src, zrd.in)
+		switch err {
+		case nil:
+			err = block.Compress(
+				zrd.frame, zrd.in[:rCount], zrd.level,
+			).Write(zrd.frame, &zrd.out)
+			zrd.handler(len(block.Data))
+			if err != nil {
+				return
+			}
+
+			if zrd.out.dataPos == len(zrd.out.data) {
+				n = zrd.out.dataPos
+				zrd.out.dataPos = 0
+				zrd.out.data = nil
+				return
+			}
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			if rCount > 0 {
+				err = block.Compress(
+					zrd.frame, zrd.in[:rCount], zrd.level,
+				).Write(zrd.frame, &zrd.out)
+				zrd.handler(len(block.Data))
+				if err != nil {
+					return
+				}
+			}
+
+			err = zrd.frame.CloseW(&zrd.out, 1)
+			if err != nil {
+				return
+			}
+			zrd.state = crStateFlushing
+
+			n = zrd.out.dataPos
+			zrd.out.dataPos = 0
+			zrd.out.data = nil
+			return
+		default:
+			return
+		}
+	}
+
+	err = lz4errors.ErrInternalUnhandledState
+	return
+}
+
+// Reset makes the stream usable again; mostly handy to reuse lz4 encoder
+// instances.
+func (zrd *CompressingReader) Reset(src io.ReadCloser) {
+	zrd.frame.Reset(1)
+	zrd.state = crStateInitial
+	zrd.src = src
+	zrd.out.clear()
+}
+
+type ovWriter struct {
+	data    []byte
+	ov      []byte
+	dataPos int
+	ovPos   int
+}
+
+func (wr *ovWriter) Write(p []byte) (n int, err error) {
+	count := copy(wr.data[wr.dataPos:], p)
+	wr.dataPos += count
+
+	if count < len(p) {
+		wr.ov = append(wr.ov, p[count:]...)
+	}
+
+	return len(p), nil
+}
+
+func (wr *ovWriter) reset(out []byte) bool {
+	ovRem := len(wr.ov) - wr.ovPos
+
+	if ovRem >= len(out) {
+		wr.ovPos += copy(out, wr.ov[wr.ovPos:])
+		return false
+	}
+
+	if ovRem > 0 {
+		copy(out, wr.ov[wr.ovPos:])
+		wr.ov = wr.ov[:0]
+		wr.ovPos = 0
+		wr.dataPos = ovRem
+	} else if wr.ovPos > 0 {
+		wr.ov = wr.ov[:0]
+		wr.ovPos = 0
+		wr.dataPos = 0
+	}
+
+	wr.data = out
+	return true
+}
+
+func (wr *ovWriter) clear() {
+	wr.data = nil
+	wr.dataPos = 0
+	wr.ov = wr.ov[:0]
+	wr.ovPos = 0
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
new file mode 100644
index 0000000..fec8adb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
@@ -0,0 +1,481 @@
+package lz4block
+
+import (
+	"encoding/binary"
+	"math/bits"
+	"sync"
+
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+const (
+	// The following constants are used to setup the compression algorithm.
+	minMatch   = 4  // the minimum length of a match (4 bytes)
+	winSizeLog = 16 // LZ4 64Kb window size limit
+	winSize    = 1 << winSizeLog
+	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise for fast compression.
+	hashLog = 16
+	htSize  = 1 << hashLog
+
+	mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+func recoverBlock(e *error) {
+	if r := recover(); r != nil && *e == nil {
+		*e = lz4errors.ErrInvalidSourceShortBuffer
+	}
+}
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+	const prime6bytes = 227718039650203
+	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
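The shift by 64-48 drops the two high bytes before the multiply, and the final shift keeps only the top hashLog bits, so the result always fits the table. A quick self-contained check, using a local copy of the function:

```go
package main

import "fmt"

const hashLog = 16

func blockHash(x uint64) uint32 {
	const prime6bytes = 227718039650203
	return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
}

func main() {
	// The two high bytes never influence the result...
	a := blockHash(0x0000_4141_4141_4141)
	b := blockHash(0xFFFF_4141_4141_4141)
	// ...and the result is always a valid table index.
	fmt.Println(a == b, a < 1<<hashLog) // true true
}
```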
+
+func CompressBlockBound(n int) int {
+	return n + n/255 + 16
+}
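A worked check of the bound: a 1000-byte input can grow to at most 1000 + 1000/255 + 16 = 1019 bytes, so a destination sized with this formula can never be too small. Sketch with a local copy of the function:

```go
package main

import "fmt"

// compressBlockBound mirrors CompressBlockBound above: n + n/255 + 16.
func compressBlockBound(n int) int { return n + n/255 + 16 }

func main() {
	fmt.Println(compressBlockBound(1000))    // 1019
	fmt.Println(compressBlockBound(1 << 16)) // 65809
}
```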
+
+func UncompressBlock(src, dst, dict []byte) (int, error) {
+	if len(src) == 0 {
+		return 0, nil
+	}
+	if di := decodeBlock(dst, src, dict); di >= 0 {
+		return di, nil
+	}
+	return 0, lz4errors.ErrInvalidSourceShortBuffer
+}
+
+type Compressor struct {
+	// Offsets are at most 64kiB, so we can store only the lower 16 bits of
+	// match positions: effectively, an offset from some 64kiB block boundary.
+	//
+	// When we retrieve such an offset, we interpret it as relative to the last
+	// block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
+	// depending on which of these is inside the current window. If a table
+	// entry was generated more than 64kiB back in the input, we find out by
+	// inspecting the input stream.
+	table [htSize]uint16
+
+	// Bitmap indicating which positions in the table are in use.
+	// This allows us to quickly reset the table for reuse,
+	// without having to zero everything.
+	inUse [htSize / 32]uint32
+}
+
+// get returns the position of a presumptive match for the hash h.
+// The match may be a false positive due to a hash collision or an old entry.
+// If si < winSize, the return value may be negative.
+func (c *Compressor) get(h uint32, si int) int {
+	h &= htSize - 1
+	i := 0
+	if c.inUse[h/32]&(1<<(h%32)) != 0 {
+		i = int(c.table[h])
+	}
+	i += si &^ winMask
+	if i >= si {
+		// Try previous 64kiB block (negative when in first block).
+		i -= winSize
+	}
+	return i
+}
+
+func (c *Compressor) put(h uint32, si int) {
+	h &= htSize - 1
+	c.table[h] = uint16(si)
+	c.inUse[h/32] |= 1 << (h % 32)
+}
+
+func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
+
+var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
+
+func CompressBlock(src, dst []byte) (int, error) {
+	c := compressorPool.Get().(*Compressor)
+	n, err := c.CompressBlock(src, dst)
+	compressorPool.Put(c)
+	return n, err
+}
+
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	// Zero out reused table to avoid non-deterministic output (issue #65).
+	c.reset()
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	// si: Current position of the search.
+	// anchor: Position of the current literals.
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	// Fast scan strategy: the hash table only stores the last 4 bytes sequences.
+	for si < sn {
+		// Hash the next 6 bytes (sequence)...
+		match := binary.LittleEndian.Uint64(src[si:])
+		h := blockHash(match)
+		h2 := blockHash(match >> 8)
+
+		// We check for a match at si, si+1 and si+2 and pick the first one we get.
+		// Checking all three only requires loading the source once.
+		ref := c.get(h, si)
+		ref2 := c.get(h2, si+1)
+		c.put(h, si)
+		c.put(h2, si+1)
+
+		offset := si - ref
+
+		if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
+			// No match. Start calculating another hash.
+			// The processor can usually do this out-of-order.
+			h = blockHash(match >> 16)
+			ref3 := c.get(h, si+2)
+
+			// Check the second match at si+1
+			si += 1
+			offset = si - ref2
+
+			if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+				// No match. Check the third match at si+2
+				si += 1
+				offset = si - ref3
+				c.put(h, si)
+
+				if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
+					// Skip one extra byte (at si+3) before we check 3 matches again.
+					si += 2 + (si-anchor)>>adaptSkipLog
+					continue
+				}
+			}
+		}
+
+		// Match found.
+		lLen := si - anchor // Literal length.
+		// We already matched 4 bytes.
+		mLen := 4
+
+		// Extend backwards if we can, reducing literals.
+		tOff := si - offset - 1
+		for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+			si--
+			tOff--
+			lLen--
+			mLen++
+		}
+
+		// Add the match length, so we continue search at the end.
+		// Use mLen to store the offset base.
+		si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by looking in batches of 8 bytes.
+		for si+8 <= sn {
+			x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+			if x == 0 {
+				si += 8
+			} else {
+				// Stop at the first non-zero byte.
+				si += bits.TrailingZeros64(x) >> 3
+				break
+			}
+		}
+
+		mLen = si - mLen
+		if di >= len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF && di < len(dst); l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			if di >= len(dst) {
+				return 0, lz4errors.ErrInvalidSourceShortBuffer
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		if di+lLen > len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen + 2
+		anchor = si
+
+		// Encode offset.
+		if di > len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			if di >= len(dst) {
+				return 0, lz4errors.ErrInvalidSourceShortBuffer
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+		// Check if we can load next values.
+		if si >= sn {
+			break
+		}
+		// Hash match end-2
+		h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+		c.put(h, si-2)
+	}
+
+lastLiterals:
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+	if di >= len(dst) {
+		return 0, lz4errors.ErrInvalidSourceShortBuffer
+	}
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		if di >= len(dst) {
+			return 0, lz4errors.ErrInvalidSourceShortBuffer
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	if di+len(src)-anchor > len(dst) {
+		return 0, lz4errors.ErrInvalidSourceShortBuffer
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
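Block-level callers pair this with UncompressBlock and must track the decompressed size themselves, since raw blocks carry no length header. A minimal round trip via the public v4 wrappers, which are assumed to forward to the functions above:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := []byte("raw blocks carry no header, so the caller tracks sizes, sizes, sizes")
	comp := make([]byte, lz4.CompressBlockBound(len(src)))

	n, err := lz4.CompressBlock(src, comp, nil)
	if err != nil {
		log.Fatal(err)
	}
	if n == 0 {
		log.Fatal("input was not compressible")
	}

	// The destination must be at least as large as the original input.
	dec := make([]byte, len(src))
	m, err := lz4.UncompressBlock(comp[:n], dec)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(dec[:m]))
}
```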
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+	const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+	return x * hasher >> (32 - winSizeLog)
+}
+
+type CompressorHC struct {
+	// hashTable: stores the last position found for a given hash
+	// chainTable: stores previous positions for a given hash
+	hashTable, chainTable [htSize]int
+	needsReset            bool
+}
+
+var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
+
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
+	c := compressorHCPool.Get().(*CompressorHC)
+	n, err := c.CompressBlock(src, dst, depth)
+	compressorHCPool.Put(c)
+	return n, err
+}
+
+func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
+	if c.needsReset {
+		// Zero out reused table to avoid non-deterministic output (issue #65).
+		c.hashTable = [htSize]int{}
+		c.chainTable = [htSize]int{}
+	}
+	c.needsReset = true // Only false on first call.
+
+	defer recoverBlock(&err)
+
+	// Return 0, nil only if the destination buffer size is < CompressBlockBound.
+	isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+	// adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+	// This significantly speeds up incompressible data and usually has very small impact on compression.
+	// bytes to skip =  1 + (bytes since last match >> adaptSkipLog)
+	const adaptSkipLog = 7
+
+	var si, di, anchor int
+	sn := len(src) - mfLimit
+	if sn <= 0 {
+		goto lastLiterals
+	}
+
+	if depth == 0 {
+		depth = winSize
+	}
+
+	for si < sn {
+		// Hash the next 4 bytes (sequence).
+		match := binary.LittleEndian.Uint32(src[si:])
+		h := blockHashHC(match)
+
+		// Follow the chain until out of window and give the longest match.
+		mLen := 0
+		offset := 0
+		for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+			// The first byte (mLen==0) or the byte just past the current match
+			// (mLen>=minMatch) must match for the match length to improve.
+			if src[next+mLen] != src[si+mLen] {
+				continue
+			}
+			ml := 0
+			// Compare the current position with a previous with the same hash.
+			for ml < sn-si {
+				x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+				if x == 0 {
+					ml += 8
+				} else {
+					// Stop at the first non-zero byte.
+					ml += bits.TrailingZeros64(x) >> 3
+					break
+				}
+			}
+			if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or not longer than the current match.
+				continue
+			}
+			// Found a longer match, keep its position and length.
+			mLen = ml
+			offset = si - next
+			// Try another previous position with the same hash.
+		}
+		c.chainTable[si&winMask] = c.hashTable[h]
+		c.hashTable[h] = si
+
+		// No match found.
+		if mLen == 0 {
+			si += 1 + (si-anchor)>>adaptSkipLog
+			continue
+		}
+
+		// Match found.
+		// Update hash/chain tables with overlapping bytes:
+		// si already hashed, add everything from si+1 up to the match length.
+		winStart := si + 1
+		if ws := si + mLen - winSize; ws > winStart {
+			winStart = ws
+		}
+		for si, ml := winStart, si+mLen; si < ml; {
+			match >>= 8
+			match |= uint32(src[si+3]) << 24
+			h := blockHashHC(match)
+			c.chainTable[si&winMask] = c.hashTable[h]
+			c.hashTable[h] = si
+			si++
+		}
+
+		lLen := si - anchor
+		si += mLen
+		mLen -= minMatch // Match length does not include minMatch.
+
+		if mLen < 0xF {
+			dst[di] = byte(mLen)
+		} else {
+			dst[di] = 0xF
+		}
+
+		// Encode literals length.
+		if lLen < 0xF {
+			dst[di] |= byte(lLen << 4)
+		} else {
+			dst[di] |= 0xF0
+			di++
+			l := lLen - 0xF
+			for ; l >= 0xFF; l -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(l)
+		}
+		di++
+
+		// Literals.
+		copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+		di += lLen
+		anchor = si
+
+		// Encode offset.
+		di += 2
+		dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+		// Encode match length part 2.
+		if mLen >= 0xF {
+			for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+				dst[di] = 0xFF
+				di++
+			}
+			dst[di] = byte(mLen)
+			di++
+		}
+	}
+
+	if isNotCompressible && anchor == 0 {
+		// Incompressible.
+		return 0, nil
+	}
+
+	// Last literals.
+lastLiterals:
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		di++
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			di++
+		}
+		dst[di] = byte(lLen)
+	}
+	di++
+
+	// Write the last literals.
+	if isNotCompressible && di >= anchor {
+		// Incompressible.
+		return 0, nil
+	}
+	di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+	return di, nil
+}
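The HC variant trades speed for ratio by walking the chain of previous positions for each hash, up to depth candidates (depth 0 means the full 64 KiB window, per the code above). A sketch using this internal package directly; note that internal packages are only importable from within the module, so external callers would go through the public v4 API instead:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pierrec/lz4/v4/internal/lz4block"
)

func main() {
	src := []byte("abcabcabcabcabcabcabcabcabcabc: repetitive input favours HC")
	dst := make([]byte, lz4block.CompressBlockBound(len(src)))

	// Depth 0 defaults to searching the whole window for the best match.
	n, err := lz4block.CompressBlockHC(src, dst, 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(n, "compressed bytes")
}
```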
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 0000000..138083d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,87 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+	Block64Kb uint32 = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+	Block8Mb = 2 * Block4Mb
+)
+
+var (
+	BlockPool64K  = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+	BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+	BlockPool1M   = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+	BlockPool4M   = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+	BlockPool8M   = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
+)
+
+func Index(b uint32) BlockSizeIndex {
+	switch b {
+	case Block64Kb:
+		return 4
+	case Block256Kb:
+		return 5
+	case Block1Mb:
+		return 6
+	case Block4Mb:
+		return 7
+	case Block8Mb: // only valid in legacy mode
+		return 3
+	}
+	return 0
+}
+
+func IsValid(b uint32) bool {
+	return Index(b) > 0
+}
+
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+	switch b {
+	case 4, 5, 6, 7:
+		return true
+	}
+	return false
+}
+
+func (b BlockSizeIndex) Get() []byte {
+	var buf interface{}
+	switch b {
+	case 4:
+		buf = BlockPool64K.Get()
+	case 5:
+		buf = BlockPool256K.Get()
+	case 6:
+		buf = BlockPool1M.Get()
+	case 7:
+		buf = BlockPool4M.Get()
+	case 3:
+		buf = BlockPool8M.Get()
+	}
+	return buf.([]byte)
+}
+
+func Put(buf []byte) {
+	// Safeguard: do not allow invalid buffers.
+	switch c := cap(buf); uint32(c) {
+	case Block64Kb:
+		BlockPool64K.Put(buf[:c])
+	case Block256Kb:
+		BlockPool256K.Put(buf[:c])
+	case Block1Mb:
+		BlockPool1M.Put(buf[:c])
+	case Block4Mb:
+		BlockPool4M.Put(buf[:c])
+	case Block8Mb:
+		BlockPool8M.Put(buf[:c])
+	}
+}
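A typical borrow/return cycle against these pools (again internal, shown for illustration; Index(Block4Mb) is 7 per the mapping above):

```go
package main

import (
	"fmt"

	"github.com/pierrec/lz4/v4/internal/lz4block"
)

func main() {
	idx := lz4block.Index(lz4block.Block4Mb) // 7
	buf := idx.Get()                         // borrowed from BlockPool4M
	defer lz4block.Put(buf)                  // returned to the pool, keyed by capacity

	fmt.Println(idx, len(buf)) // 7 4194304
}
```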
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
new file mode 100644
index 0000000..1d00133
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
@@ -0,0 +1,448 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX literal and match lengths
+// DX token, match offset
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// R14 &dict
+// R15 len(dict)
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOSPLIT, $48-80
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, R11
+	MOVQ dst_len+8(FP), R8
+	ADDQ DI, R8
+
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R9
+	CMPQ R9, $0
+	JE   err_corrupt
+	ADDQ SI, R9
+
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+
+	// shortcut ends
+	// short output end
+	MOVQ R8, R12
+	SUBQ $32, R12
+	// short input end
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	XORL CX, CX
+
+loop:
+	// token := uint32(src[si])
+	MOVBLZX (SI), DX
+	INCQ SI
+
+	// lit_len = token >> 4
+	// if lit_len > 0
+	// CX = lit_len
+	MOVL DX, CX
+	SHRL $4, CX
+
+	// if lit_len != 0xF
+	CMPL CX, $0xF
+	JEQ  lit_len_loop
+	CMPQ DI, R12
+	JAE  copy_literal
+	CMPQ SI, R13
+	JAE  copy_literal
+
+	// copy shortcut
+
+	// A two-stage shortcut for the most common case:
+	// 1) If the literal length is 0..14, and there is enough space,
+	// enter the shortcut and copy 16 bytes on behalf of the literals
+	// (in the fast mode, only 8 bytes can be safely copied this way).
+	// 2) Further if the match length is 4..18, copy 18 bytes in a similar
+	// manner; but we ensure that there's enough space in the output for
+	// those 18 bytes earlier, upon entering the shortcut (in other words,
+	// there is a combined check for both stages).
+
+	// copy literal
+	MOVOU (SI), X0
+	MOVOU X0, (DI)
+	ADDQ CX, DI
+	ADDQ CX, SI
+
+	MOVL DX, CX
+	ANDL $0xF, CX
+
+	// The second stage: prepare for match copying, decode full info.
+	// If it doesn't work out, the info won't be wasted.
+	// offset := uint16(data[:2])
+	MOVWLZX (SI), DX
+	TESTL DX, DX
+	JE err_corrupt
+	ADDQ $2, SI
+	JC err_short_buf
+
+	MOVQ DI, AX
+	SUBQ DX, AX
+	JC err_corrupt
+	CMPQ AX, DI
+	JA err_short_buf
+
+	// if we can't do the second stage then jump straight to read the
+	// match length, we already have the offset.
+	CMPL CX, $0xF
+	JEQ match_len_loop_pre
+	CMPL DX, $8
+	JLT match_len_loop_pre
+	CMPQ AX, R11
+	JB match_len_loop_pre
+
+	// memcpy(op + 0, match + 0, 8);
+	MOVQ (AX), BX
+	MOVQ BX, (DI)
+	// memcpy(op + 8, match + 8, 8);
+	MOVQ 8(AX), BX
+	MOVQ BX, 8(DI)
+	// memcpy(op +16, match +16, 2);
+	MOVW 16(AX), BX
+	MOVW BX, 16(DI)
+
+	LEAQ const_minMatch(DI)(CX*1), DI
+
+	// shortcut complete, load next token
+	JMP loopcheck
+
+	// Read the rest of the literal length:
+	// do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
+lit_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE lit_len_loop
+
+copy_literal:
+	// bounds check src and dst
+	MOVQ SI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R9
+	JA err_short_buf
+
+	MOVQ DI, BX
+	ADDQ CX, BX
+	JC err_short_buf
+	CMPQ BX, R8
+	JA err_short_buf
+
+	// Copy literals of <=48 bytes through the XMM registers.
+	CMPQ CX, $48
+	JGT memmove_lit
+
+	// if len(dst[di:]) < 48
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $48
+	JLT memmove_lit
+
+	// if len(src[si:]) < 48
+	MOVQ R9, BX
+	SUBQ SI, BX
+	CMPQ BX, $48
+	JLT memmove_lit
+
+	MOVOU (SI), X0
+	MOVOU 16(SI), X1
+	MOVOU 32(SI), X2
+	MOVOU X0, (DI)
+	MOVOU X1, 16(DI)
+	MOVOU X2, 32(DI)
+
+	ADDQ CX, SI
+	ADDQ CX, DI
+
+	JMP finish_lit_copy
+
+memmove_lit:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment SI, DI now so we don't need to save CX.
+	ADDQ CX, DI
+	ADDQ CX, SI
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVL DX, 40(SP)
+
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVL 40(SP), DX
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+finish_lit_copy:
+	// CX := mLen
+	// free up DX to use for offset
+	MOVL DX, CX
+	ANDL $0xF, CX
+
+	CMPQ SI, R9
+	JAE end
+
+	// offset
+	// si += 2
+	// DX := int(src[si-2]) | int(src[si-1])<<8
+	ADDQ $2, SI
+	JC err_short_buf
+	CMPQ SI, R9
+	JA err_short_buf
+	MOVWQZX -2(SI), DX
+
+	// 0 offset is invalid
+	TESTL DX, DX
+	JEQ   err_corrupt
+
+match_len_loop_pre:
+	// if mlen != 0xF
+	CMPB CX, $0xF
+	JNE copy_match
+
+	// do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
+match_len_loop:
+	CMPQ SI, R9
+	JAE err_short_buf
+
+	MOVBLZX (SI), BX
+	INCQ SI
+	ADDQ BX, CX
+
+	CMPB BX, $0xFF
+	JE match_len_loop
+
+copy_match:
+	ADDQ $const_minMatch, CX
+
+	// check we have match_len bytes left in dst
+	// di+match_len < len(dst)
+	MOVQ DI, AX
+	ADDQ CX, AX
+	JC err_short_buf
+	CMPQ AX, R8
+	JA err_short_buf
+
+	// DX = offset
+	// CX = match_len
+	// BX = &dst + (di - offset)
+	MOVQ DI, BX
+	SUBQ DX, BX
+
+	// check BX is within dst
+	// if BX < &dst
+	JC copy_match_from_dict
+	CMPQ BX, R11
+	JBE copy_match_from_dict
+
+	// if offset + match_len < di
+	LEAQ (BX)(CX*1), AX
+	CMPQ DI, AX
+	JA copy_interior_match
+
+	// AX := len(dst[:di])
+	// MOVQ DI, AX
+	// SUBQ R11, AX
+
+	// copy 16 bytes at a time
+	// if di-offset < 16 copy 16-(di-offset) bytes to di
+	// then do the remaining
+
+copy_match_loop:
+	// for match_len >= 0
+	// dst[di] = dst[i]
+	// di++
+	// i++
+	MOVB (BX), AX
+	MOVB AX, (DI)
+	INCQ DI
+	INCQ BX
+	DECQ CX
+	JNZ copy_match_loop
+
+	JMP loopcheck
+
+copy_interior_match:
+	CMPQ CX, $16
+	JGT memmove_match
+
+	// if len(dst[di:]) < 16
+	MOVQ R8, AX
+	SUBQ DI, AX
+	CMPQ AX, $16
+	JLT memmove_match
+
+	MOVOU (BX), X0
+	MOVOU X0, (DI)
+
+	ADDQ CX, DI
+	XORL CX, CX
+	JMP  loopcheck
+
+copy_match_from_dict:
+	// CX = match_len
+	// BX = &dst + (di - offset)
+
+	// AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
+	MOVQ R11, AX
+	SUBQ BX, AX
+
+	// BX = len(dict) - dict_bytes_available
+	MOVQ R15, BX
+	SUBQ AX, BX
+	JS err_short_dict
+
+	ADDQ R14, BX
+
+	// if match_len < dict_bytes_available, the match fits entirely within the external dictionary: just copy
+	CMPQ CX, AX
+	JLT memmove_match
+
+	// The match stretches over the dictionary and our block
+	// 1) copy what comes from the dictionary
+	// AX = dict_bytes_available = copy_size
+	// BX = &dict_end - copy_size
+	// CX = match_len
+
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ AX, 16(SP)
+	// store extra stuff we want to recover
+	// spill
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 16(SP), AX // copy_size
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX // match_len
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+
+	// di+=copy_size
+	ADDQ AX, DI
+
+	// 2) copy the rest from the current block
+	// CX = match_len - copy_size = rest_size
+	SUBQ AX, CX
+	MOVQ R11, BX
+
+	// check if we have a copy overlap
+	// AX = &dst + rest_size
+	MOVQ CX, AX
+	ADDQ BX, AX
+	// if &dst + rest_size > di, copy byte by byte
+	CMPQ AX, DI
+
+	JA copy_match_loop
+
+memmove_match:
+	// memmove(to, from, len)
+	MOVQ DI, 0(SP)
+	MOVQ BX, 8(SP)
+	MOVQ CX, 16(SP)
+
+	// Spill registers. Increment DI now so we don't need to save CX.
+	ADDQ CX, DI
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+
+	CALL runtime·memmove(SB)
+
+	// restore registers
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+
+	// recalc initial values
+	MOVQ dst_base+0(FP), R8
+	MOVQ R8, R11 // TODO: make these sensible numbers
+	ADDQ dst_len+8(FP), R8
+	MOVQ src_base+24(FP), R9
+	ADDQ src_len+32(FP), R9
+	MOVQ R8, R12
+	SUBQ $32, R12
+	MOVQ R9, R13
+	SUBQ $16, R13
+	MOVQ dict_base+48(FP), R14
+	MOVQ dict_len+56(FP), R15
+	XORL CX, CX
+
+loopcheck:
+	// for si < len(src)
+	CMPQ SI, R9
+	JB   loop
+
+end:
+	// Remaining length must be zero.
+	TESTQ CX, CX
+	JNE   err_corrupt
+
+	SUBQ R11, DI
+	MOVQ DI, ret+72(FP)
+	RET
+
+err_corrupt:
+	MOVQ $-1, ret+72(FP)
+	RET
+
+err_short_buf:
+	MOVQ $-2, ret+72(FP)
+	RET
+
+err_short_dict:
+	MOVQ $-3, ret+72(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
new file mode 100644
index 0000000..20b21fc
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
@@ -0,0 +1,231 @@
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst	R0
+#define dstorig	R1
+#define src	R2
+#define dstend	R3
+#define srcend	R4
+#define match	R5	// Match address.
+#define dictend	R6
+#define token	R7
+#define len	R8	// Literal and match lengths.
+#define offset	R7	// Match offset; overlaps with token.
+#define tmp1	R9
+#define tmp2	R11
+#define tmp3	R12
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
+	MOVW dst_base  +0(FP), dst
+	MOVW dst_len   +4(FP), dstend
+	MOVW src_base +12(FP), src
+	MOVW src_len  +16(FP), srcend
+
+	CMP $0, srcend
+	BEQ shortSrc
+
+	ADD dst, dstend
+	ADD src, srcend
+
+	MOVW dst, dstorig
+
+loop:
+	// Read token. Extract literal length.
+	MOVBU.P 1(src), token
+	MOVW    token >> 4, len
+	CMP     $15, len
+	BNE     readLitlenDone
+
+readLitlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADD.S   tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readLitlenLoop
+
+readLitlenDone:
+	CMP $0, len
+	BEQ copyLiteralDone
+
+	// Bounds check dst+len and src+len.
+	ADD.S    dst, len, tmp1
+	ADD.CC.S src, len, tmp2
+	BCS      shortSrc
+	CMP      dstend, tmp1
+	//BHI    shortDst // Uncomment for distinct error codes.
+	CMP.LS   srcend, tmp2
+	BHI      shortSrc
+
+	// Copy literal.
+	CMP $4, len
+	BLO copyLiteralFinish
+
+	// Copy 0-3 bytes until src is aligned.
+	TST        $1, src
+	MOVBU.NE.P 1(src), tmp1
+	MOVB.NE.P  tmp1, 1(dst)
+	SUB.NE     $1, len
+
+	TST        $2, src
+	MOVHU.NE.P 2(src), tmp2
+	MOVB.NE.P  tmp2, 1(dst)
+	MOVW.NE    tmp2 >> 8, tmp1
+	MOVB.NE.P  tmp1, 1(dst)
+	SUB.NE     $2, len
+
+	B copyLiteralLoopCond
+
+copyLiteralLoop:
+	// Aligned load, unaligned write.
+	MOVW.P 4(src), tmp1
+	MOVW   tmp1 >>  8, tmp2
+	MOVB   tmp2, 1(dst)
+	MOVW   tmp1 >> 16, tmp3
+	MOVB   tmp3, 2(dst)
+	MOVW   tmp1 >> 24, tmp2
+	MOVB   tmp2, 3(dst)
+	MOVB.P tmp1, 4(dst)
+copyLiteralLoopCond:
+	// Loop until len-4 < 0.
+	SUB.S  $4, len
+	BPL    copyLiteralLoop
+
+copyLiteralFinish:
+	// Copy remaining 0-3 bytes.
+	// At this point, len may be < 0, but len&3 is still accurate.
+	TST       $1, len
+	MOVB.NE.P 1(src), tmp3
+	MOVB.NE.P tmp3, 1(dst)
+	TST       $2, len
+	MOVB.NE.P 2(src), tmp1
+	MOVB.NE.P tmp1, 2(dst)
+	MOVB.NE   -1(src), tmp2
+	MOVB.NE   tmp2, -1(dst)
+
+copyLiteralDone:
+	// Initial part of match length.
+	// This frees up the token register for reuse as offset.
+	AND $15, token, len
+
+	CMP src, srcend
+	BEQ end
+
+	// Read offset.
+	ADD.S $2, src
+	BCS   shortSrc
+	CMP   srcend, src
+	BHI   shortSrc
+	MOVBU -2(src), offset
+	MOVBU -1(src), tmp1
+	ORR.S tmp1 << 8, offset
+	BEQ   corrupt
+
+	// Read rest of match length.
+	CMP $15, len
+	BNE readMatchlenDone
+
+readMatchlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADD.S   tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readMatchlenLoop
+
+readMatchlenDone:
+	// Bounds check dst+len+minMatch.
+	ADD.S    dst, len, tmp1
+	ADD.CC.S $const_minMatch, tmp1
+	BCS      shortDst
+	CMP      dstend, tmp1
+	BHI      shortDst
+
+	RSB dst, offset, match
+	CMP dstorig, match
+	BGE copyMatch4
+
+	// match < dstorig means the match starts in the dictionary,
+	// at len(dict) - offset + (dst - dstorig).
+	MOVW dict_base+24(FP), match
+	MOVW dict_len +28(FP), dictend
+
+	ADD $const_minMatch, len
+
+	RSB   dst, dstorig, tmp1
+	RSB   dictend, offset, tmp2
+	ADD.S tmp2, tmp1
+	BMI   shortDict
+	ADD   match, dictend
+	ADD   tmp1, match
+
+copyDict:
+	MOVBU.P 1(match), tmp1
+	MOVB.P  tmp1, 1(dst)
+	SUB.S   $1, len
+	CMP.NE  match, dictend
+	BNE     copyDict
+
+	// If the match extends beyond the dictionary, the rest is at dstorig.
+	CMP  $0, len
+	BEQ  copyMatchDone
+	MOVW dstorig, match
+	B    copyMatch
+
+	// Copy a regular match.
+	// Since len+minMatch is at least four, we can do a 4× unrolled
+	// byte copy loop. Using MOVW instead of four byte loads is faster,
+	// but to remain portable we'd have to align match first, which is
+	// too expensive. By alternating loads and stores, we also handle
+	// the case offset < 4.
+copyMatch4:
+	SUB.S   $4, len
+	MOVBU.P 4(match), tmp1
+	MOVB.P  tmp1, 4(dst)
+	MOVBU   -3(match), tmp2
+	MOVB    tmp2, -3(dst)
+	MOVBU   -2(match), tmp3
+	MOVB    tmp3, -2(dst)
+	MOVBU   -1(match), tmp1
+	MOVB    tmp1, -1(dst)
+	BPL     copyMatch4
+
+	// Restore len, which is now negative.
+	ADD.S $4, len
+	BEQ   copyMatchDone
+
+copyMatch:
+	// Finish with a byte-at-a-time copy.
+	SUB.S   $1, len
+	MOVBU.P 1(match), tmp2
+	MOVB.P  tmp2, 1(dst)
+	BNE     copyMatch
+
+copyMatchDone:
+	CMP src, srcend
+	BNE loop
+
+end:
+	CMP  $0, len
+	BNE  corrupt
+	SUB  dstorig, dst, tmp1
+	MOVW tmp1, ret+36(FP)
+	RET
+
+	// The error cases have distinct labels so we can put different
+	// return codes here when debugging, or if the error returns need to
+	// be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+	MOVW $-1, tmp1
+	MOVW tmp1, ret+36(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
new file mode 100644
index 0000000..d2fe11b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
@@ -0,0 +1,241 @@
+// +build gc
+// +build !noasm
+
+// This implementation assumes that strict alignment checking is turned off.
+// The Go compiler makes the same assumption.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst		R0
+#define dstorig		R1
+#define src		R2
+#define dstend		R3
+#define dstend16	R4	// dstend - 16
+#define srcend		R5
+#define srcend16	R6	// srcend - 16
+#define match		R7	// Match address.
+#define dict		R8
+#define dictlen		R9
+#define dictend		R10
+#define token		R11
+#define len		R12	// Literal and match lengths.
+#define lenRem		R13
+#define offset		R14	// Match offset.
+#define tmp1		R15
+#define tmp2		R16
+#define tmp3		R17
+#define tmp4		R19
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
+	LDP  dst_base+0(FP), (dst, dstend)
+	ADD  dst, dstend
+	MOVD dst, dstorig
+
+	LDP src_base+24(FP), (src, srcend)
+	CBZ srcend, shortSrc
+	ADD src, srcend
+
+	// dstend16 = max(dstend-16, 0) and similarly for srcend16.
+	SUBS $16, dstend, dstend16
+	CSEL LO, ZR, dstend16, dstend16
+	SUBS $16, srcend, srcend16
+	CSEL LO, ZR, srcend16, srcend16
+
+	LDP dict_base+48(FP), (dict, dictlen)
+	ADD dict, dictlen, dictend
+
+loop:
+	// Read token. Extract literal length.
+	MOVBU.P 1(src), token
+	LSR     $4, token, len
+	CMP     $15, len
+	BNE     readLitlenDone
+
+readLitlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADDS    tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readLitlenLoop
+
+readLitlenDone:
+	CBZ len, copyLiteralDone
+
+	// Bounds check dst+len and src+len.
+	ADDS dst, len, tmp1
+	BCS  shortSrc
+	ADDS src, len, tmp2
+	BCS  shortSrc
+	CMP  dstend, tmp1
+	BHI  shortDst
+	CMP  srcend, tmp2
+	BHI  shortSrc
+
+	// Copy literal.
+	SUBS $16, len
+	BLO  copyLiteralShort
+
+copyLiteralLoop:
+	LDP.P 16(src), (tmp1, tmp2)
+	STP.P (tmp1, tmp2), 16(dst)
+	SUBS  $16, len
+	BPL   copyLiteralLoop
+
+	// Copy (final part of) literal of length 0-15.
+	// If we have >=16 bytes left in src and dst, just copy 16 bytes.
+copyLiteralShort:
+	CMP  dstend16, dst
+	CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
+	BHS  copyLiteralShortEnd
+
+	AND $15, len
+
+	LDP (src), (tmp1, tmp2)
+	ADD len, src
+	STP (tmp1, tmp2), (dst)
+	ADD len, dst
+
+	B copyLiteralDone
+
+	// Safe but slow copy near the end of src, dst.
+copyLiteralShortEnd:
+	TBZ     $3, len, 3(PC)
+	MOVD.P  8(src), tmp1
+	MOVD.P  tmp1, 8(dst)
+	TBZ     $2, len, 3(PC)
+	MOVW.P  4(src), tmp2
+	MOVW.P  tmp2, 4(dst)
+	TBZ     $1, len, 3(PC)
+	MOVH.P  2(src), tmp3
+	MOVH.P  tmp3, 2(dst)
+	TBZ     $0, len, 3(PC)
+	MOVBU.P 1(src), tmp4
+	MOVB.P  tmp4, 1(dst)
+
+copyLiteralDone:
+	// Initial part of match length.
+	AND $15, token, len
+
+	CMP src, srcend
+	BEQ end
+
+	// Read offset.
+	ADDS  $2, src
+	BCS   shortSrc
+	CMP   srcend, src
+	BHI   shortSrc
+	MOVHU -2(src), offset
+	CBZ   offset, corrupt
+
+	// Read rest of match length.
+	CMP $15, len
+	BNE readMatchlenDone
+
+readMatchlenLoop:
+	CMP     src, srcend
+	BEQ     shortSrc
+	MOVBU.P 1(src), tmp1
+	ADDS    tmp1, len
+	BVS     shortDst
+	CMP     $255, tmp1
+	BEQ     readMatchlenLoop
+
+readMatchlenDone:
+	ADD $const_minMatch, len
+
+	// Bounds check dst+len.
+	ADDS dst, len, tmp2
+	BCS  shortDst
+	CMP  dstend, tmp2
+	BHI  shortDst
+
+	SUB offset, dst, match
+	CMP dstorig, match
+	BHS copyMatchTry8
+
+	// match < dstorig means the match starts in the dictionary,
+	// at len(dict) - offset + (dst - dstorig).
+	SUB  dstorig, dst, tmp1
+	SUB  offset, dictlen, tmp2
+	ADDS tmp2, tmp1
+	BMI  shortDict
+	ADD  dict, tmp1, match
+
+copyDict:
+	MOVBU.P 1(match), tmp3
+	MOVB.P  tmp3, 1(dst)
+	SUBS    $1, len
+	CCMP    NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
+	BNE     copyDict
+
+	CBZ len, copyMatchDone
+
+	// If the match extends beyond the dictionary, the rest is at dstorig.
+	// Recompute the offset for the next check.
+	MOVD dstorig, match
+	SUB  dstorig, dst, offset
+
+copyMatchTry8:
+	// Copy doublewords if both len and offset are at least eight.
+	// A 16-at-a-time loop doesn't provide a further speedup.
+	CMP  $8, len
+	CCMP HS, offset, $8, $0
+	BLO  copyMatchTry4
+
+	AND    $7, len, lenRem
+	SUB    $8, len
+copyMatchLoop8:
+	MOVD.P 8(match), tmp1
+	MOVD.P tmp1, 8(dst)
+	SUBS   $8, len
+	BPL    copyMatchLoop8
+
+	MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
+	ADD  lenRem, dst
+	MOVD $0, len
+	MOVD tmp2, -8(dst)
+	B    copyMatchDone
+
+copyMatchTry4:
+	// Copy words if both len and offset are at least four.
+	CMP  $4, len
+	CCMP HS, offset, $4, $0
+	BLO  copyMatchLoop1
+
+	MOVWU.P 4(match), tmp2
+	MOVWU.P tmp2, 4(dst)
+	SUBS    $4, len
+	BEQ     copyMatchDone
+
+copyMatchLoop1:
+	// Byte-at-a-time copy for small offsets <= 3.
+	MOVBU.P 1(match), tmp2
+	MOVB.P  tmp2, 1(dst)
+	SUBS    $1, len
+	BNE     copyMatchLoop1
+
+copyMatchDone:
+	CMP src, srcend
+	BNE loop
+
+end:
+	CBNZ len, corrupt
+	SUB  dstorig, dst, tmp1
+	MOVD tmp1, ret+72(FP)
+	RET
+
+	// The error cases have distinct labels so we can put different
+	// return codes here when debugging, or if the error returns need to
+	// be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+	MOVD $-1, tmp1
+	MOVD tmp1, ret+72(FP)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
new file mode 100644
index 0000000..8d9023d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
@@ -0,0 +1,10 @@
+//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
+// +build amd64 arm arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4block
+
+//go:noescape
+func decodeBlock(dst, src, dict []byte) int
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
new file mode 100644
index 0000000..9f568fb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
@@ -0,0 +1,139 @@
+//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm,!arm64 appengine !gc noasm
+
+package lz4block
+
+import (
+	"encoding/binary"
+)
+
+func decodeBlock(dst, src, dict []byte) (ret int) {
+	// Restrict capacities so we don't read or write out of bounds.
+	dst = dst[:len(dst):len(dst)]
+	src = src[:len(src):len(src)]
+
+	const hasError = -2
+
+	if len(src) == 0 {
+		return hasError
+	}
+
+	defer func() {
+		if recover() != nil {
+			ret = hasError
+		}
+	}()
+
+	var si, di uint
+	for si < uint(len(src)) {
+		// Literals and match lengths (token).
+		b := uint(src[si])
+		si++
+
+		// Literals.
+		if lLen := b >> 4; lLen > 0 {
+			switch {
+			case lLen < 0xF && si+16 < uint(len(src)):
+				// Shortcut 1
+				// if we have enough room in src and dst, and the literals length
+				// is small enough (0..14) then copy all 16 bytes, even if not all
+				// are part of the literals.
+				copy(dst[di:], src[si:si+16])
+				si += lLen
+				di += lLen
+				if mLen := b & 0xF; mLen < 0xF {
+					// Shortcut 2
+					// if the match length (4..18) fits within the literals, then copy
+					// all 18 bytes, even if not all are part of the literals.
+					mLen += 4
+					if offset := u16(src[si:]); mLen <= offset && offset < di {
+						i := di - offset
+						// The remaining buffer may not hold 18 bytes.
+						// See https://github.com/pierrec/lz4/issues/51.
+						if end := i + 18; end <= uint(len(dst)) {
+							copy(dst[di:], dst[i:end])
+							si += 2
+							di += mLen
+							continue
+						}
+					}
+				}
+			case lLen == 0xF:
+				for {
+					x := uint(src[si])
+					if lLen += x; int(lLen) < 0 {
+						return hasError
+					}
+					si++
+					if x != 0xFF {
+						break
+					}
+				}
+				fallthrough
+			default:
+				copy(dst[di:di+lLen], src[si:si+lLen])
+				si += lLen
+				di += lLen
+			}
+		}
+
+		mLen := b & 0xF
+		if si == uint(len(src)) && mLen == 0 {
+			break
+		} else if si >= uint(len(src)) {
+			return hasError
+		}
+
+		offset := u16(src[si:])
+		if offset == 0 {
+			return hasError
+		}
+		si += 2
+
+		// Match.
+		mLen += minMatch
+		if mLen == minMatch+0xF {
+			for {
+				x := uint(src[si])
+				if mLen += x; int(mLen) < 0 {
+					return hasError
+				}
+				si++
+				if x != 0xFF {
+					break
+				}
+			}
+		}
+
+		// Copy the match.
+		if di < offset {
+			// The match is beyond our block, meaning the first part
+			// is in the dictionary.
+			fromDict := dict[uint(len(dict))+di-offset:]
+			n := uint(copy(dst[di:di+mLen], fromDict))
+			di += n
+			if mLen -= n; mLen == 0 {
+				continue
+			}
+			// We copied n = offset-di bytes from the dictionary,
+			// then set di = di+n = offset, so the following code
+			// copies from dst[di-offset:] = dst[0:].
+		}
+
+		expanded := dst[di-offset:]
+		if mLen > offset {
+			// Efficiently copy the match dst[di-offset:di] into the dst slice.
+			bytesToCopy := offset * (mLen / offset)
+			for n := offset; n <= bytesToCopy+offset; n *= 2 {
+				copy(expanded[n:], expanded[:n])
+			}
+			di += bytesToCopy
+			mLen -= bytesToCopy
+		}
+		di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
+	}
+
+	return int(di)
+}
+
+func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }
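The mLen > offset branch above expands an overlapping match by repeatedly doubling the pattern already present in dst, so a long run costs O(log(mLen/offset)) copies instead of one byte at a time. A standalone sketch of the same arithmetic, with illustrative values:

```go
package main

import "fmt"

func main() {
	// A match with offset 2 and length 12 starting at di == 2:
	// the decoder must replicate the 2-byte pattern "ab" down dst.
	dst := make([]byte, 16)
	copy(dst, "ab")
	offset, mLen := uint(2), uint(12)

	expanded := dst[:] // dst[di-offset:] in the decoder, with di == offset here
	bytesToCopy := offset * (mLen / offset)
	for n := offset; n <= bytesToCopy+offset; n *= 2 {
		copy(expanded[n:], expanded[:n]) // each pass doubles the valid prefix
	}
	fmt.Println(string(dst[:offset+mLen])) // "ababababababab"
}
```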
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
new file mode 100644
index 0000000..710ea42
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
@@ -0,0 +1,19 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+	ErrInvalidSourceShortBuffer      Error = "lz4: invalid source or destination buffer too short"
+	ErrInvalidFrame                  Error = "lz4: bad magic number"
+	ErrInternalUnhandledState        Error = "lz4: unhandled state"
+	ErrInvalidHeaderChecksum         Error = "lz4: invalid header checksum"
+	ErrInvalidBlockChecksum          Error = "lz4: invalid block checksum"
+	ErrInvalidFrameChecksum          Error = "lz4: invalid frame checksum"
+	ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+	ErrOptionClosedOrError           Error = "lz4: cannot apply options on closed or in error object"
+	ErrOptionInvalidBlockSize        Error = "lz4: invalid block size"
+	ErrOptionNotApplicable           Error = "lz4: option not applicable"
+	ErrWriterNotClosed               Error = "lz4: writer not closed"
+)
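Declaring Error as a named string type makes these sentinels constants rather than package-level vars, so they cannot be reassigned at runtime, yet they still match through errors.Is once wrapped with %w (as the stream code does for checksum failures). A minimal self-contained sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// Error mirrors lz4errors.Error: a string type with an Error method
// can be declared const, unlike a value built with errors.New.
type Error string

func (e Error) Error() string { return string(e) }

const ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"

func main() {
	err := fmt.Errorf("%w: got %x; expected %x", ErrInvalidBlockChecksum, 0xdead, 0xbeef)
	fmt.Println(errors.Is(err, ErrInvalidBlockChecksum)) // true
}
```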
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
new file mode 100644
index 0000000..04aaca8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -0,0 +1,348 @@
+package lz4stream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+type Blocks struct {
+	Block  *FrameDataBlock
+	Blocks chan chan *FrameDataBlock
+	mu     sync.Mutex
+	err    error
+}
+
+func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
+	if num == 1 {
+		b.Blocks = nil
+		b.Block = NewFrameDataBlock(f)
+		return
+	}
+	b.Block = nil
+	if cap(b.Blocks) != num {
+		b.Blocks = make(chan chan *FrameDataBlock, num)
+	}
+	// goroutine managing concurrent block compression goroutines.
+	go func() {
+		// Process next block compression item.
+		for c := range b.Blocks {
+			// Read the next compressed block result.
+			// Waiting here ensures that the blocks are output in the order they were sent.
+			// The incoming channel is always closed as it indicates to the caller that
+			// the block has been processed.
+			block := <-c
+			if block == nil {
+				// Notify the block compression routine that we are done with its result.
+				// This is used when a sentinel block is sent to terminate the compression.
+				close(c)
+				return
+			}
+			// Do not attempt to write the block upon any previous failure.
+			if b.err == nil {
+				// Write the block.
+				if err := block.Write(f, dst); err != nil {
+					// Keep the first error.
+					b.err = err
+					// All pending compression goroutines need to shut down, so we need to keep going.
+				}
+			}
+			close(c)
+		}
+	}()
+}
+
+func (b *Blocks) close(f *Frame, num int) error {
+	if num == 1 {
+		if b.Block != nil {
+			b.Block.Close(f)
+		}
+		err := b.err
+		b.err = nil
+		return err
+	}
+	if b.Blocks == nil {
+		err := b.err
+		b.err = nil
+		return err
+	}
+	c := make(chan *FrameDataBlock)
+	b.Blocks <- c
+	c <- nil
+	<-c
+	err := b.err
+	b.err = nil
+	return err
+}
+
+// ErrorR returns any error set while uncompressing a stream.
+func (b *Blocks) ErrorR() error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	return b.err
+}
+
+// initR returns a channel that streams the uncompressed blocks if in concurrent
+// mode and no error. When the channel is closed, check for any error with b.ErrorR.
+//
+// If not in concurrent mode, the uncompressed block is b.Block and the returned error
+// needs to be checked.
+func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
+	size := f.Descriptor.Flags.BlockSizeIndex()
+	if num == 1 {
+		b.Blocks = nil
+		b.Block = NewFrameDataBlock(f)
+		return nil, nil
+	}
+	b.Block = nil
+	blocks := make(chan chan []byte, num)
+	// data receives the uncompressed blocks.
+	data := make(chan []byte)
+	// Read blocks from the source sequentially
+	// and uncompress them concurrently.
+
+	// In legacy mode, accrue the uncompressed sizes in cum.
+	var cum uint32
+	go func() {
+		var cumx uint32
+		var err error
+		for b.ErrorR() == nil {
+			block := NewFrameDataBlock(f)
+			cumx, err = block.Read(f, src, 0)
+			if err != nil {
+				block.Close(f)
+				break
+			}
+			// Recheck for an error as reading may be slow and uncompressing is expensive.
+			if b.ErrorR() != nil {
+				block.Close(f)
+				break
+			}
+			c := make(chan []byte)
+			blocks <- c
+			go func() {
+				defer block.Close(f)
+				data, err := block.Uncompress(f, size.Get(), nil, false)
+				if err != nil {
+					b.closeR(err)
+					// Close the block channel to indicate an error.
+					close(c)
+				} else {
+					c <- data
+				}
+			}()
+		}
+		// End the collection loop and the data channel.
+		c := make(chan []byte)
+		blocks <- c
+		c <- nil // signal the collection loop that we are done
+		<-c      // wait for the collection loop to complete
+		if f.isLegacy() && cum == cumx {
+			err = io.EOF
+		}
+		b.closeR(err)
+		close(data)
+	}()
+	// Collect the uncompressed blocks and make them available
+	// on the returned channel.
+	go func(leg bool) {
+		defer close(blocks)
+		skipBlocks := false
+		for c := range blocks {
+			buf, ok := <-c
+			if !ok {
+				// A closed channel indicates an error.
+				// All remaining channels should be discarded.
+				skipBlocks = true
+				continue
+			}
+			if buf == nil {
+				// Signal to end the loop.
+				close(c)
+				return
+			}
+			if skipBlocks {
+				// A previous error has occurred, skipping remaining channels.
+				continue
+			}
+			// Perform checksum now as the blocks are received in order.
+			if f.Descriptor.Flags.ContentChecksum() {
+				_, _ = f.checksum.Write(buf)
+			}
+			if leg {
+				cum += uint32(len(buf))
+			}
+			data <- buf
+			close(c)
+		}
+	}(f.isLegacy())
+	return data, nil
+}
+
+// closeR safely sets the error on b if not already set.
+func (b *Blocks) closeR(err error) {
+	b.mu.Lock()
+	if b.err == nil {
+		b.err = err
+	}
+	b.mu.Unlock()
+}
+
+func NewFrameDataBlock(f *Frame) *FrameDataBlock {
+	buf := f.Descriptor.Flags.BlockSizeIndex().Get()
+	return &FrameDataBlock{Data: buf, data: buf}
+}
+
+type FrameDataBlock struct {
+	Size     DataBlockSize
+	Data     []byte // compressed or uncompressed data (.data or .src)
+	Checksum uint32
+	data     []byte // buffer for compressed data
+	src      []byte // uncompressed data
+	err      error  // used in concurrent mode
+}
+
+func (b *FrameDataBlock) Close(f *Frame) {
+	b.Size = 0
+	b.Checksum = 0
+	b.err = nil
+	if b.data != nil {
+		// Block was not already closed.
+		lz4block.Put(b.data)
+		b.Data = nil
+		b.data = nil
+		b.src = nil
+	}
+}
+
+// Block compression errors are ignored since the buffer is sized appropriately.
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
+	data := b.data
+	if f.isLegacy() {
+		data = data[:cap(data)]
+	} else {
+		data = data[:len(src)] // trigger the incompressible flag in CompressBlock
+	}
+	var n int
+	switch level {
+	case lz4block.Fast:
+		n, _ = lz4block.CompressBlock(src, data)
+	default:
+		n, _ = lz4block.CompressBlockHC(src, data, level)
+	}
+	if n == 0 {
+		b.Size.UncompressedSet(true)
+		b.Data = src
+	} else {
+		b.Size.UncompressedSet(false)
+		b.Data = data[:n]
+	}
+	b.Size.sizeSet(len(b.Data))
+	b.src = src // keep track of the source for content checksum
+
+	if f.Descriptor.Flags.BlockChecksum() {
+		b.Checksum = xxh32.ChecksumZero(b.Data)
+	}
+	return b
+}
+
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
+	// Write is called in the same order as blocks are compressed,
+	// so content checksum must be done here.
+	if f.Descriptor.Flags.ContentChecksum() {
+		_, _ = f.checksum.Write(b.src)
+	}
+	buf := f.buf[:]
+	binary.LittleEndian.PutUint32(buf, uint32(b.Size))
+	if _, err := dst.Write(buf[:4]); err != nil {
+		return err
+	}
+
+	if _, err := dst.Write(b.Data); err != nil {
+		return err
+	}
+
+	if b.Checksum == 0 {
+		return nil
+	}
+	binary.LittleEndian.PutUint32(buf, b.Checksum)
+	_, err := dst.Write(buf[:4])
+	return err
+}
+
+// Read updates b with the next block data, size and checksum if available.
+func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
+	x, err := f.readUint32(src)
+	if err != nil {
+		return 0, err
+	}
+	if f.isLegacy() {
+		switch x {
+		case frameMagicLegacy:
+			// Concatenated legacy frame.
+			return b.Read(f, src, cum)
+		case cum:
+			// Only works in non-concurrent mode; for concurrent mode
+			// it is handled separately.
+			// Linux kernel format appends the total uncompressed size at the end.
+			return 0, io.EOF
+		}
+	} else if x == 0 {
+		// Marker for end of stream.
+		return 0, io.EOF
+	}
+	b.Size = DataBlockSize(x)
+
+	size := b.Size.size()
+	if size > cap(b.data) {
+		return x, lz4errors.ErrOptionInvalidBlockSize
+	}
+	b.data = b.data[:size]
+	if _, err := io.ReadFull(src, b.data); err != nil {
+		return x, err
+	}
+	if f.Descriptor.Flags.BlockChecksum() {
+		sum, err := f.readUint32(src)
+		if err != nil {
+			return 0, err
+		}
+		b.Checksum = sum
+	}
+	return x, nil
+}
+
+func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
+	if b.Size.Uncompressed() {
+		n := copy(dst, b.data)
+		dst = dst[:n]
+	} else {
+		n, err := lz4block.UncompressBlock(b.data, dst, dict)
+		if err != nil {
+			return nil, err
+		}
+		dst = dst[:n]
+	}
+	if f.Descriptor.Flags.BlockChecksum() {
+		if c := xxh32.ChecksumZero(b.data); c != b.Checksum {
+			err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
+			return nil, err
+		}
+	}
+	if sum && f.Descriptor.Flags.ContentChecksum() {
+		_, _ = f.checksum.Write(dst)
+	}
+	return dst, nil
+}
+
+func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
+	if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
+		return
+	}
+	x = binary.LittleEndian.Uint32(f.buf[:4])
+	return
+}
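The Blocks channel in initW is a channel of channels: workers compress concurrently, but the single writer goroutine drains each result channel in submission order, so blocks hit the output in the order they were queued. A generic sketch of that ordering pattern (not the library's API):

```go
package main

import "fmt"

func main() {
	jobs := make(chan chan int, 4)
	done := make(chan struct{})

	go func() { // single consumer: drains results in FIFO order
		for c := range jobs {
			fmt.Println(<-c) // blocks until this job's worker delivers
		}
		close(done)
	}()

	for i := 0; i < 4; i++ {
		c := make(chan int, 1)
		jobs <- c // enqueue the result slot *before* starting the work
		go func(i int, c chan int) { c <- i * i }(i, c)
	}
	close(jobs)
	<-done // prints 0 1 4 9 regardless of goroutine scheduling
}
```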
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
new file mode 100644
index 0000000..18192a9
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
@@ -0,0 +1,204 @@
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+//go:generate go run gen.go
+
+const (
+	frameMagic       uint32 = 0x184D2204
+	frameSkipMagic   uint32 = 0x184D2A50
+	frameMagicLegacy uint32 = 0x184C2102
+)
+
+func NewFrame() *Frame {
+	return &Frame{}
+}
+
+type Frame struct {
+	buf        [15]byte // frame descriptor needs at most 4(magic)+4+8+1=11 bytes
+	Magic      uint32
+	Descriptor FrameDescriptor
+	Blocks     Blocks
+	Checksum   uint32
+	checksum   xxh32.XXHZero
+}
+
+// Reset allows reusing the Frame.
+// The Descriptor configuration is not modified.
+func (f *Frame) Reset(num int) {
+	f.Magic = 0
+	f.Descriptor.Checksum = 0
+	f.Descriptor.ContentSize = 0
+	_ = f.Blocks.close(f, num)
+	f.Checksum = 0
+}
+
+func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
+	if legacy {
+		f.Magic = frameMagicLegacy
+		idx := lz4block.Index(lz4block.Block8Mb)
+		f.Descriptor.Flags.BlockSizeIndexSet(idx)
+	} else {
+		f.Magic = frameMagic
+		f.Descriptor.initW()
+	}
+	f.Blocks.initW(f, dst, num)
+	f.checksum.Reset()
+}
+
+func (f *Frame) CloseW(dst io.Writer, num int) error {
+	if err := f.Blocks.close(f, num); err != nil {
+		return err
+	}
+	if f.isLegacy() {
+		return nil
+	}
+	buf := f.buf[:0]
+	// End mark (data block size of uint32(0)).
+	buf = append(buf, 0, 0, 0, 0)
+	if f.Descriptor.Flags.ContentChecksum() {
+		buf = f.checksum.Sum(buf)
+	}
+	_, err := dst.Write(buf)
+	return err
+}
+
+func (f *Frame) isLegacy() bool {
+	return f.Magic == frameMagicLegacy
+}
+
+func (f *Frame) ParseHeaders(src io.Reader) error {
+	if f.Magic > 0 {
+		// Header already read.
+		return nil
+	}
+
+newFrame:
+	var err error
+	if f.Magic, err = f.readUint32(src); err != nil {
+		return err
+	}
+	switch m := f.Magic; {
+	case m == frameMagic || m == frameMagicLegacy:
+	// All 16 values of frameSkipMagic are valid.
+	case m>>8 == frameSkipMagic>>8:
+		skip, err := f.readUint32(src)
+		if err != nil {
+			return err
+		}
+		if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
+			return err
+		}
+		goto newFrame
+	default:
+		return lz4errors.ErrInvalidFrame
+	}
+	if err := f.Descriptor.initR(f, src); err != nil {
+		return err
+	}
+	f.checksum.Reset()
+	return nil
+}
+
+func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
+	return f.Blocks.initR(f, num, src)
+}
+
+func (f *Frame) CloseR(src io.Reader) (err error) {
+	if f.isLegacy() {
+		return nil
+	}
+	if !f.Descriptor.Flags.ContentChecksum() {
+		return nil
+	}
+	if f.Checksum, err = f.readUint32(src); err != nil {
+		return err
+	}
+	if c := f.checksum.Sum32(); c != f.Checksum {
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
+	}
+	return nil
+}
+
+type FrameDescriptor struct {
+	Flags       DescriptorFlags
+	ContentSize uint64
+	Checksum    uint8
+}
+
+func (fd *FrameDescriptor) initW() {
+	fd.Flags.VersionSet(1)
+	fd.Flags.BlockIndependenceSet(true)
+}
+
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
+	if fd.Checksum > 0 {
+		// Header already written.
+		return nil
+	}
+
+	buf := f.buf[:4]
+	// Write the magic number here even though it belongs to the Frame.
+	binary.LittleEndian.PutUint32(buf, f.Magic)
+	if !f.isLegacy() {
+		buf = buf[:4+2]
+		binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
+
+		if fd.Flags.Size() {
+			buf = buf[:4+2+8]
+			binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
+		}
+		fd.Checksum = descriptorChecksum(buf[4:])
+		buf = append(buf, fd.Checksum)
+	}
+
+	_, err := dst.Write(buf)
+	return err
+}
+
+func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
+	if f.isLegacy() {
+		idx := lz4block.Index(lz4block.Block8Mb)
+		f.Descriptor.Flags.BlockSizeIndexSet(idx)
+		return nil
+	}
+	// Read the flags and the checksum, hoping that there is no content size.
+	buf := f.buf[:3]
+	if _, err := io.ReadFull(src, buf); err != nil {
+		return err
+	}
+	descr := binary.LittleEndian.Uint16(buf)
+	fd.Flags = DescriptorFlags(descr)
+	if fd.Flags.Size() {
+		// Append the 8 missing bytes.
+		buf = buf[:3+8]
+		if _, err := io.ReadFull(src, buf[3:]); err != nil {
+			return err
+		}
+		fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
+	}
+	fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
+	buf = buf[:len(buf)-1]        // all descriptor fields except checksum
+	if c := descriptorChecksum(buf); fd.Checksum != c {
+		return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
+	}
+	// Validate the elements that can be.
+	if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+		return lz4errors.ErrOptionInvalidBlockSize
+	}
+	return nil
+}
+
+func descriptorChecksum(buf []byte) byte {
+	return byte(xxh32.ChecksumZero(buf) >> 8)
+}
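ParseHeaders distinguishes the frame kinds by the leading magic. For skippable frames the spec allows sixteen magics, 0x184D2A50 through 0x184D2A5F, and the code compares only the top 24 bits. A sketch of that classification, with the constants copied from above and an illustrative classify helper:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	frameMagic       uint32 = 0x184D2204
	frameSkipMagic   uint32 = 0x184D2A50
	frameMagicLegacy uint32 = 0x184C2102
)

func classify(hdr []byte) string {
	switch m := binary.LittleEndian.Uint32(hdr); {
	case m == frameMagic:
		return "lz4 frame"
	case m == frameMagicLegacy:
		return "legacy frame"
	case m>>8 == frameSkipMagic>>8: // top-24-bit comparison, as in ParseHeaders
		return "skippable frame"
	default:
		return "not lz4"
	}
}

func main() {
	fmt.Println(classify([]byte{0x04, 0x22, 0x4D, 0x18})) // lz4 frame
	fmt.Println(classify([]byte{0x5F, 0x2A, 0x4D, 0x18})) // skippable frame
}
```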
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
new file mode 100644
index 0000000..d33a6be
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
@@ -0,0 +1,103 @@
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+package lz4stream
+
+import "github.com/pierrec/lz4/v4/internal/lz4block"
+
+// DescriptorFlags is defined as follows:
+//   field              bits
+//   -----              ----
+//   _                  2
+//   ContentChecksum    1
+//   Size               1
+//   BlockChecksum      1
+//   BlockIndependence  1
+//   Version            2
+//   _                  4
+//   BlockSizeIndex     3
+//   _                  1
+type DescriptorFlags uint16
+
+// Getters.
+func (x DescriptorFlags) ContentChecksum() bool   { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool              { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool     { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16         { return uint16(x >> 6 & 0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
+	return lz4block.BlockSizeIndex(x >> 12 & 0x7)
+}
+
+// Setters.
+func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
+	const b = 1 << 2
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
+	const b = 1 << 3
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
+	const b = 1 << 4
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
+	const b = 1 << 5
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
+func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
+	*x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
+	return x
+}
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
+	*x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
+	return x
+}
+
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+// DataBlockSize is defined as follows:
+//   field         bits
+//   -----         ----
+//   size          31
+//   Uncompressed  1
+type DataBlockSize uint32
+
+// Getters.
+func (x DataBlockSize) size() int          { return int(x & 0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
+
+// Setters.
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+	*x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+	return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+	const b = 1 << 31
+	if v {
+		*x = *x&^b | b
+	} else {
+		*x &^= b
+	}
+	return x
+}
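The generated accessors pack every descriptor field into one uint16 at a fixed bit offset: the setter clears the field's bits and ors the new value in; the getter shifts and masks them back out. A self-contained round trip of one field (BlockChecksum, bit 4), using the equivalent |= form of the setter:

```go
package main

import "fmt"

type DescriptorFlags uint16

func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 }

func (x *DescriptorFlags) BlockChecksumSet(v bool) {
	const b = 1 << 4
	if v {
		*x |= b // same effect as the generated *x = *x&^b | b
	} else {
		*x &^= b
	}
}

func main() {
	var f DescriptorFlags
	f.BlockChecksumSet(true)
	fmt.Printf("%#04x %v\n", uint16(f), f.BlockChecksum()) // 0x0010 true
}
```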
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000..651d10c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32-bit version).
+// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
+package xxh32
+
+import (
+	"encoding/binary"
+)
+
+const (
+	prime1 uint32 = 2654435761
+	prime2 uint32 = 2246822519
+	prime3 uint32 = 3266489917
+	prime4 uint32 = 668265263
+	prime5 uint32 = 374761393
+
+	primeMask   = 0xFFFFFFFF
+	prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+	prime1minus = uint32((-int64(prime1)) & primeMask)                  // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+	v        [4]uint32
+	totalLen uint64
+	buf      [16]byte
+	bufused  int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+	h32 := xxh.Sum32()
+	return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+	xxh.v[0] = prime1plus2
+	xxh.v[1] = prime2
+	xxh.v[2] = 0
+	xxh.v[3] = prime1minus
+	xxh.totalLen = 0
+	xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+	return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+	return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+	if xxh.totalLen == 0 {
+		xxh.Reset()
+	}
+	n := len(input)
+	m := xxh.bufused
+
+	xxh.totalLen += uint64(n)
+
+	r := len(xxh.buf) - m
+	if n < r {
+		copy(xxh.buf[m:], input)
+		xxh.bufused += len(input)
+		return n, nil
+	}
+
+	var buf *[16]byte
+	if m != 0 {
+		// some data left from previous update
+		buf = &xxh.buf
+		c := copy(buf[m:], input)
+		n -= c
+		input = input[c:]
+	}
+	update(&xxh.v, buf, input)
+	xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
+
+	return n, nil
+}
+
+// Portable version of update. This updates v by processing all of buf
+// (if not nil) and all full 16-byte blocks of input.
+func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
+	// Causes compiler to work directly from registers instead of stack:
+	v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
+
+	if buf != nil {
+		v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+	}
+
+	for ; len(input) >= 16; input = input[16:] {
+		sub := input[:16] //BCE hint for compiler
+		v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+		v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+		v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+		v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+	}
+	v[0], v[1], v[2], v[3] = v1, v2, v3, v4
+}
+
+// Sum32 returns the 32-bit hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+	h32 := uint32(xxh.totalLen)
+	if h32 >= 16 {
+		h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
+	} else {
+		h32 += prime5
+	}
+
+	p := 0
+	n := xxh.bufused
+	buf := xxh.buf
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for ; p < n; p++ {
+		h32 += uint32(buf[p]) * prime5
+		h32 = rol11(h32) * prime1
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+// Portable version of ChecksumZero.
+func checksumZeroGo(input []byte) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += prime5
+	} else {
+		v1 := prime1plus2
+		v2 := prime2
+		v3 := uint32(0)
+		v4 := prime1minus
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] //BCE hint for compiler
+			v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+			v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+			v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+			v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+		h32 = rol17(h32) * prime4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime5
+		h32 = rol11(h32) * prime1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime2
+	h32 ^= h32 >> 13
+	h32 *= prime3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
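Since xxh32 is an internal package, a usage example only makes sense in-package. A test-style sketch of the invariant the streaming state maintains: feeding the input in arbitrary chunks through Write must agree with the one-shot checksum (the test name is illustrative):

```go
package xxh32

import "testing"

func TestStreamingMatchesOneShot(t *testing.T) {
	data := []byte("hello, lz4 world: some bytes to hash")

	var h XXHZero
	h.Reset()
	_, _ = h.Write(data[:7]) // two deliberately unequal chunks
	_, _ = h.Write(data[7:])

	if got, want := h.Sum32(), checksumZeroGo(data); got != want {
		t.Fatalf("streaming hash %08x != one-shot %08x", got, want)
	}
}
```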
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
new file mode 100644
index 0000000..0978b26
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
@@ -0,0 +1,11 @@
+//go:build !noasm
+// +build !noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+//
+//go:noescape
+func ChecksumZero(input []byte) uint32
+
+//go:noescape
+func update(v *[4]uint32, buf *[16]byte, input []byte)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
new file mode 100644
index 0000000..c18ffd5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
@@ -0,0 +1,251 @@
+//go:build !noasm
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define p	R0
+#define n	R1
+#define h	R2
+#define v1	R2	// Alias for h.
+#define v2	R3
+#define v3	R4
+#define v4	R5
+#define x1	R6
+#define x2	R7
+#define x3	R8
+#define x4	R9
+
+// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
+#define prime1r	R11
+#define prime2r	R12
+#define prime3r	R3	// The rest can alias v{2-4}.
+#define prime4r	R4
+#define prime5r	R5
+
+// Update round macros. These read from and increment p.
+
+#define round16aligned			\
+	MOVM.IA.W (p), [x1, x2, x3, x4]	\
+					\
+	MULA x1, prime2r, v1, v1	\
+	MULA x2, prime2r, v2, v2	\
+	MULA x3, prime2r, v3, v3	\
+	MULA x4, prime2r, v4, v4	\
+					\
+	MOVW v1 @> 19, v1		\
+	MOVW v2 @> 19, v2		\
+	MOVW v3 @> 19, v3		\
+	MOVW v4 @> 19, v4		\
+					\
+	MUL prime1r, v1			\
+	MUL prime1r, v2			\
+	MUL prime1r, v3			\
+	MUL prime1r, v4			\
+
+#define round16unaligned 		\
+	MOVBU.P  16(p), x1		\
+	MOVBU   -15(p), x2		\
+	ORR     x2 <<  8, x1		\
+	MOVBU   -14(p), x3		\
+	MOVBU   -13(p), x4		\
+	ORR     x4 <<  8, x3		\
+	ORR     x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v1, v1	\
+	MOVW v1 @> 19, v1		\
+	MUL prime1r, v1			\
+					\
+	MOVBU -12(p), x1		\
+	MOVBU -11(p), x2		\
+	ORR   x2 <<  8, x1		\
+	MOVBU -10(p), x3		\
+	MOVBU  -9(p), x4		\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v2, v2	\
+	MOVW v2 @> 19, v2		\
+	MUL prime1r, v2			\
+					\
+	MOVBU -8(p), x1			\
+	MOVBU -7(p), x2			\
+	ORR   x2 <<  8, x1		\
+	MOVBU -6(p), x3			\
+	MOVBU -5(p), x4			\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v3, v3	\
+	MOVW v3 @> 19, v3		\
+	MUL prime1r, v3			\
+					\
+	MOVBU -4(p), x1			\
+	MOVBU -3(p), x2			\
+	ORR   x2 <<  8, x1		\
+	MOVBU -2(p), x3			\
+	MOVBU -1(p), x4			\
+	ORR   x4 <<  8, x3		\
+	ORR   x3 << 16, x1		\
+					\
+	MULA x1, prime2r, v4, v4	\
+	MOVW v4 @> 19, v4		\
+	MUL prime1r, v4			\
+
+
+// func ChecksumZero([]byte) uint32
+TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
+	MOVW input_base+0(FP), p
+	MOVW input_len+4(FP),  n
+
+	MOVW $const_prime1, prime1r
+	MOVW $const_prime2, prime2r
+
+	// Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
+	// here, but that's a pseudo-op that generates a load through R11.
+	MOVW $const_prime5, prime5r
+	ADD  prime5r, n, h
+	CMP  $0, n
+	BEQ  end
+
+	// We let n go negative so we can do comparisons with SUB.S
+	// instead of separate CMP.
+	SUB.S $16, n
+	BMI   loop16done
+
+	ADD  prime1r, prime2r, v1
+	MOVW prime2r, v2
+	MOVW $0, v3
+	RSB  $0, prime1r, v4
+
+	TST $3, p
+	BNE loop16unaligned
+
+loop16aligned:
+	SUB.S $16, n
+	round16aligned
+	BPL loop16aligned
+	B   loop16finish
+
+loop16unaligned:
+	SUB.S $16, n
+	round16unaligned
+	BPL loop16unaligned
+
+loop16finish:
+	MOVW v1 @> 31, h
+	ADD  v2 @> 25, h
+	ADD  v3 @> 20, h
+	ADD  v4 @> 14, h
+
+	// h += len(input) with v2 as temporary.
+	MOVW input_len+4(FP), v2
+	ADD  v2, h
+
+loop16done:
+	ADD $16, n	// Restore number of bytes left.
+
+	SUB.S $4, n
+	MOVW  $const_prime3, prime3r
+	BMI   loop4done
+	MOVW  $const_prime4, prime4r
+
+	TST $3, p
+	BNE loop4unaligned
+
+loop4aligned:
+	SUB.S $4, n
+
+	MOVW.P 4(p), x1
+	MULA   prime3r, x1, h, h
+	MOVW   h @> 15, h
+	MUL    prime4r, h
+
+	BPL loop4aligned
+	B   loop4done
+
+loop4unaligned:
+	SUB.S $4, n
+
+	MOVBU.P  4(p), x1
+	MOVBU   -3(p), x2
+	ORR     x2 <<  8, x1
+	MOVBU   -2(p), x3
+	ORR     x3 << 16, x1
+	MOVBU   -1(p), x4
+	ORR     x4 << 24, x1
+
+	MULA prime3r, x1, h, h
+	MOVW h @> 15, h
+	MUL  prime4r, h
+
+	BPL loop4unaligned
+
+loop4done:
+	ADD.S $4, n	// Restore number of bytes left.
+	BEQ   end
+
+	MOVW $const_prime5, prime5r
+
+loop1:
+	SUB.S $1, n
+
+	MOVBU.P 1(p), x1
+	MULA    prime5r, x1, h, h
+	MOVW    h @> 21, h
+	MUL     prime1r, h
+
+	BNE loop1
+
+end:
+	MOVW $const_prime3, prime3r
+	EOR  h >> 15, h
+	MUL  prime2r, h
+	EOR  h >> 13, h
+	MUL  prime3r, h
+	EOR  h >> 16, h
+
+	MOVW h, ret+12(FP)
+	RET
+
+
+// func update(v *[4]uint32, buf *[16]byte, input []byte)
+TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
+	MOVW    v+0(FP), p
+	MOVM.IA (p), [v1, v2, v3, v4]
+
+	MOVW $const_prime1, prime1r
+	MOVW $const_prime2, prime2r
+
+	// Process buf, if not nil.
+	MOVW buf+4(FP), p
+	CMP  $0, p
+	BEQ  noBuffered
+
+	round16aligned
+
+noBuffered:
+	MOVW input_base +8(FP), p
+	MOVW input_len +12(FP), n
+
+	SUB.S $16, n
+	BMI   end
+
+	TST $3, p
+	BNE loop16unaligned
+
+loop16aligned:
+	SUB.S $16, n
+	round16aligned
+	BPL loop16aligned
+	B   end
+
+loop16unaligned:
+	SUB.S $16, n
+	round16unaligned
+	BPL loop16unaligned
+
+end:
+	MOVW    v+0(FP), p
+	MOVM.IA [v1, v2, v3, v4], (p)
+	RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
new file mode 100644
index 0000000..c96b59b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
@@ -0,0 +1,10 @@
+//go:build !arm || noasm
+// +build !arm noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }
+
+func update(v *[4]uint32, buf *[16]byte, input []byte) {
+	updateGo(v, buf, input)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go
new file mode 100644
index 0000000..a62022e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/lz4.go
@@ -0,0 +1,157 @@
+// Package lz4 implements reading and writing lz4 compressed data.
+//
+// The package supports both the LZ4 stream format,
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// and the LZ4 block format, defined at
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
+//
+// See https://github.com/lz4/lz4 for the reference C implementation.
+package lz4
+
+import (
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+func _() {
+	// Safety checks for duplicated elements.
+	var x [1]struct{}
+	_ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+	_ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+	_ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+	_ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+	_ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
+
+// CompressBlockBound returns the maximum size of a compressed block for an input of size n, when the data is not compressible.
+func CompressBlockBound(n int) int {
+	return lz4block.CompressBlockBound(n)
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst, nil)
+}
+
+// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
+// dictionary, and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
+	return lz4block.UncompressBlock(src, dst, dict)
+}
+
+// A Compressor compresses data into the LZ4 block format.
+// It uses a fast compression algorithm.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type Compressor struct{ c lz4block.Compressor }
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one
+// using the fast (default) LZ4 compression, and is equivalent to
+// Compressor.CompressBlock. The final argument is a legacy hash-table
+// parameter; it is ignored and should be set to nil.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 with no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+//
+// Deprecated: use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+	return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+	// Level is the maximum search depth for compression.
+	// Values <= 0 mean no maximum.
+	Level CompressionLevel
+	c     lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+	return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// This function is deprecated. Use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+	return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+	// ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+	// block is corrupted or the destination buffer is not large enough for the uncompressed data.
+	ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+	// ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+	ErrInvalidFrame = lz4errors.ErrInvalidFrame
+	// ErrInternalUnhandledState is an internal error.
+	ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+	// ErrInvalidHeaderChecksum is returned when reading a frame.
+	ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+	// ErrInvalidBlockChecksum is returned when reading a frame.
+	ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+	// ErrInvalidFrameChecksum is returned when reading a frame.
+	ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+	// ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+	ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+	// ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+	ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+	// ErrOptionInvalidBlockSize is returned when an invalid block size is provided.
+	ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+	// ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+	ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+	// ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+	ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
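Putting the block API together: compress with a Compressor into a buffer sized by CompressBlockBound, then decompress with UncompressBlock into a buffer large enough for the original data. A round-trip sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := bytes.Repeat([]byte("lz4 block "), 100)

	var c lz4.Compressor
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		panic(err)
	}

	out := make([]byte, len(src)) // must be sized for the uncompressed data
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Println(m == len(src) && bytes.Equal(out, src)) // true
}
```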
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 0000000..57a44e7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+	applier interface {
+		Apply(...Option) error
+		private()
+	}
+	// Option defines the parameters to setup an LZ4 Writer or Reader.
+	Option func(applier) error
+)
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+	return o(nil).Error()
+}
+
+// Default options.
+var (
+	DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+	DefaultChecksumOption  = ChecksumOption(true)
+	DefaultConcurrency     = ConcurrencyOption(1)
+	defaultOnBlockDone     = OnBlockDoneOption(nil)
+)
+
+const (
+	Block64Kb BlockSize = 1 << (16 + iota*2)
+	Block256Kb
+	Block1Mb
+	Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of compressed blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockSizeOption(%s)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		case *CompressingReader:
+			size := uint32(size)
+			if !lz4block.IsValid(size) {
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+			}
+			w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
+func BlockChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ChecksumOption enables/disables the frame content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ChecksumOption(%v)", flag)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0). It is useful
+// for letting consumers know the size of the whole uncompressed data stream upfront.
+func SizeOption(size uint64) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("SizeOption(%d)", size)
+			return lz4errors.Error(s)
+		case *Writer:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		case *CompressingReader:
+			w.frame.Descriptor.Flags.SizeSet(size > 0)
+			w.frame.Descriptor.ContentSize = size
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+	if n <= 0 {
+		n = runtime.GOMAXPROCS(0)
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.num = n
+			return nil
+		case *Reader:
+			rw.num = n
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// CompressionLevel defines the level of compression to use. The higher the level, the better (but slower) the compression.
+type CompressionLevel uint32
+
+const (
+	Fast   CompressionLevel = 0
+	Level1 CompressionLevel = 1 << (8 + iota)
+	Level2
+	Level3
+	Level4
+	Level5
+	Level6
+	Level7
+	Level8
+	Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+	return func(a applier) error {
+		switch w := a.(type) {
+		case nil:
+			s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+			return lz4errors.Error(s)
+		case *Writer:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		case *CompressingReader:
+			switch level {
+			case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+			default:
+				return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+			}
+			w.level = lz4block.CompressionLevel(level)
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption registers a handler triggered when a block has been processed. For a Writer,
+// that is when the block has been compressed; for a Reader, when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+	if handler == nil {
+		handler = onBlockDone
+	}
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.handler = handler
+			return nil
+		case *Reader:
+			rw.handler = handler
+			return nil
+		case *CompressingReader:
+			rw.handler = handler
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+	return func(a applier) error {
+		switch rw := a.(type) {
+		case nil:
+			s := fmt.Sprintf("LegacyOption(%v)", legacy)
+			return lz4errors.Error(s)
+		case *Writer:
+			rw.legacy = legacy
+			return nil
+		}
+		return lz4errors.ErrOptionNotApplicable
+	}
+}
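Options are plain functions applied through Apply, which the state machine only permits before the first Write. A sketch wiring a few of them up (Writer.Close, used here to flush the end mark, is defined in writer.go below):

```go
package main

import (
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	zw := lz4.NewWriter(os.Stdout)
	if err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.CompressionLevelOption(lz4.Level5),
		lz4.ConcurrencyOption(-1), // <= 0 means runtime.GOMAXPROCS(0)
	); err != nil {
		panic(err)
	}
	if _, err := io.WriteString(zw, "hello, lz4"); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
}
```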
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 0000000..2de8149
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Block64Kb-65536]
+	_ = x[Block256Kb-262144]
+	_ = x[Block1Mb-1048576]
+	_ = x[Block4Mb-4194304]
+}
+
+const (
+	_BlockSize_name_0 = "Block64Kb"
+	_BlockSize_name_1 = "Block256Kb"
+	_BlockSize_name_2 = "Block1Mb"
+	_BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+	switch {
+	case i == 65536:
+		return _BlockSize_name_0
+	case i == 262144:
+		return _BlockSize_name_1
+	case i == 1048576:
+		return _BlockSize_name_2
+	case i == 4194304:
+		return _BlockSize_name_3
+	default:
+		return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Fast-0]
+	_ = x[Level1-512]
+	_ = x[Level2-1024]
+	_ = x[Level3-2048]
+	_ = x[Level4-4096]
+	_ = x[Level5-8192]
+	_ = x[Level6-16384]
+	_ = x[Level7-32768]
+	_ = x[Level8-65536]
+	_ = x[Level9-131072]
+}
+
+const (
+	_CompressionLevel_name_0 = "Fast"
+	_CompressionLevel_name_1 = "Level1"
+	_CompressionLevel_name_2 = "Level2"
+	_CompressionLevel_name_3 = "Level3"
+	_CompressionLevel_name_4 = "Level4"
+	_CompressionLevel_name_5 = "Level5"
+	_CompressionLevel_name_6 = "Level6"
+	_CompressionLevel_name_7 = "Level7"
+	_CompressionLevel_name_8 = "Level8"
+	_CompressionLevel_name_9 = "Level9"
+)
+
+func (i CompressionLevel) String() string {
+	switch {
+	case i == 0:
+		return _CompressionLevel_name_0
+	case i == 512:
+		return _CompressionLevel_name_1
+	case i == 1024:
+		return _CompressionLevel_name_2
+	case i == 2048:
+		return _CompressionLevel_name_3
+	case i == 4096:
+		return _CompressionLevel_name_4
+	case i == 8192:
+		return _CompressionLevel_name_5
+	case i == 16384:
+		return _CompressionLevel_name_6
+	case i == 32768:
+		return _CompressionLevel_name_7
+	case i == 65536:
+		return _CompressionLevel_name_8
+	case i == 131072:
+		return _CompressionLevel_name_9
+	default:
+		return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go
new file mode 100644
index 0000000..275daad
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/reader.go
@@ -0,0 +1,275 @@
+package lz4
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var readerStates = []aState{
+	noState:     newState,
+	errorState:  newState,
+	newState:    readState,
+	readState:   closedState,
+	closedState: newState,
+}
+
+// NewReader returns a new LZ4 frame decoder.
+func NewReader(r io.Reader) *Reader {
+	return newReader(r, false)
+}
+
+func newReader(r io.Reader, legacy bool) *Reader {
+	zr := &Reader{frame: lz4stream.NewFrame()}
+	zr.state.init(readerStates)
+	_ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
+	zr.Reset(r)
+	return zr
+}
+
+// Reader allows reading an LZ4 stream.
+type Reader struct {
+	state   _State
+	src     io.Reader        // source reader
+	num     int              // concurrency level
+	frame   *lz4stream.Frame // frame being read
+	data    []byte           // block buffer allocated in non concurrent mode
+	reads   chan []byte      // pending data
+	idx     int              // index into pending data
+	handler func(int)
+	cum     uint32
+	dict    []byte
+}
+
+func (*Reader) private() {}
+
+func (r *Reader) Apply(options ...Option) (err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case newState:
+	case errorState:
+		return r.state.err
+	default:
+		return lz4errors.ErrOptionClosedOrError
+	}
+	for _, o := range options {
+		if err = o(r); err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Size returns the size of the underlying uncompressed data, if set in the stream.
+func (r *Reader) Size() int {
+	switch r.state.state {
+	case readState, closedState:
+		if r.frame.Descriptor.Flags.Size() {
+			return int(r.frame.Descriptor.ContentSize)
+		}
+	}
+	return 0
+}
+
+func (r *Reader) isNotConcurrent() bool {
+	return r.num == 1
+}
+
+func (r *Reader) init() error {
+	err := r.frame.ParseHeaders(r.src)
+	if err != nil {
+		return err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		// We can't decompress dependent blocks concurrently.
+		// Instead of returning an error to the user, silently drop concurrency.
+		r.num = 1
+	}
+	data, err := r.frame.InitR(r.src, r.num)
+	if err != nil {
+		return err
+	}
+	r.reads = data
+	r.idx = 0
+	size := r.frame.Descriptor.Flags.BlockSizeIndex()
+	r.data = size.Get()
+	r.cum = 0
+	return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+	defer r.state.check(&err)
+	switch r.state.state {
+	case readState:
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		// First initialization.
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	for len(buf) > 0 {
+		var bn int
+		if r.idx == 0 {
+			if r.isNotConcurrent() {
+				bn, err = r.read(buf)
+			} else {
+				lz4block.Put(r.data)
+				r.data = <-r.reads
+				if len(r.data) == 0 {
+					// No uncompressed data: something went wrong or we are done.
+					err = r.frame.Blocks.ErrorR()
+				}
+			}
+			switch err {
+			case nil:
+			case io.EOF:
+				if er := r.frame.CloseR(r.src); er != nil {
+					err = er
+				}
+				lz4block.Put(r.data)
+				r.data = nil
+				return
+			default:
+				return
+			}
+		}
+		if bn == 0 {
+			// Fill buf with buffered data.
+			bn = copy(buf, r.data[r.idx:])
+			r.idx += bn
+			if r.idx == len(r.data) {
+				// All data read, get ready for the next Read.
+				r.idx = 0
+			}
+		}
+		buf = buf[bn:]
+		n += bn
+		r.handler(bn)
+	}
+	return
+}
+
+// read uncompresses the next block as follows:
+//   - if buf has enough room, the block is uncompressed into it directly,
+//     and the length of the used space is returned
+//   - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+	block := r.frame.Blocks.Block
+	_, err := block.Read(r.frame, r.src, r.cum)
+	if err != nil {
+		return 0, err
+	}
+	var direct bool
+	dst := r.data[:cap(r.data)]
+	if len(buf) >= len(dst) {
+		// Uncompress directly into buf.
+		direct = true
+		dst = buf
+	}
+	dst, err = block.Uncompress(r.frame, dst, r.dict, true)
+	if err != nil {
+		return 0, err
+	}
+	if !r.frame.Descriptor.Flags.BlockIndependence() {
+		if len(r.dict)+len(dst) > 128*1024 {
+			preserveSize := 64*1024 - len(dst)
+			if preserveSize < 0 {
+				preserveSize = 0
+			}
+			r.dict = r.dict[len(r.dict)-preserveSize:]
+		}
+		r.dict = append(r.dict, dst...)
+	}
+	r.cum += uint32(len(dst))
+	if direct {
+		return len(dst), nil
+	}
+	r.data = dst
+	return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but instead reading from reader.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+	if r.data != nil {
+		lz4block.Put(r.data)
+		r.data = nil
+	}
+	r.frame.Reset(r.num)
+	r.state.reset()
+	r.src = reader
+	r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	switch r.state.state {
+	case closedState, errorState:
+		return 0, r.state.err
+	case newState:
+		if err = r.init(); r.state.next(err) {
+			return
+		}
+	default:
+		return 0, r.state.fail()
+	}
+	defer r.state.nextd(&err)
+
+	var data []byte
+	if r.isNotConcurrent() {
+		size := r.frame.Descriptor.Flags.BlockSizeIndex()
+		data = size.Get()
+		defer lz4block.Put(data)
+	}
+	for {
+		var bn int
+		var dst []byte
+		if r.isNotConcurrent() {
+			bn, err = r.read(data)
+			dst = data[:bn]
+		} else {
+			lz4block.Put(dst)
+			dst = <-r.reads
+			bn = len(dst)
+			if bn == 0 {
+				// No uncompressed data: something went wrong or we are done.
+				err = r.frame.Blocks.ErrorR()
+			}
+		}
+		switch err {
+		case nil:
+		case io.EOF:
+			err = r.frame.CloseR(r.src)
+			return
+		default:
+			return
+		}
+		r.handler(bn)
+		bn, err = w.Write(dst)
+		n += int64(bn)
+		if err != nil {
+			return
+		}
+	}
+}
+
+// ValidFrameHeader reports whether the given byte slice starts with a valid LZ4 frame header.
+func ValidFrameHeader(in []byte) (bool, error) {
+	f := lz4stream.NewFrame()
+	err := f.ParseHeaders(bytes.NewReader(in))
+	if err == nil {
+		return true, nil
+	}
+	if err == lz4errors.ErrInvalidFrame {
+		return false, nil
+	}
+	return false, err
+}
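A frame-level round trip through the Reader; io.Copy picks up the Reader's WriteTo, so the decompressed stream is forwarded without an extra intermediate buffer:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var compressed bytes.Buffer
	zw := lz4.NewWriter(&compressed)
	if _, err := io.WriteString(zw, "round-trip through an lz4 frame"); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	var out bytes.Buffer
	zr := lz4.NewReader(&compressed)
	if _, err := io.Copy(&out, zr); err != nil { // dispatches to zr.WriteTo
		panic(err)
	}
	fmt.Println(out.String())
}
```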
diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go
new file mode 100644
index 0000000..d94f04d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state.go
@@ -0,0 +1,75 @@
+package lz4
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
+
+const (
+	noState     aState = iota // uninitialized reader
+	errorState                // unrecoverable error encountered
+	newState                  // instantiated object
+	readState                 // reading data
+	writeState                // writing data
+	closedState               // all done
+)
+
+type (
+	aState uint8
+	_State struct {
+		states []aState
+		state  aState
+		err    error
+	}
+)
+
+func (s *_State) init(states []aState) {
+	s.states = states
+	s.state = states[0]
+}
+
+func (s *_State) reset() {
+	s.state = s.states[0]
+	s.err = nil
+}
+
+// next sets the state to the next one unless it is passed a non nil error.
+// It returns whether or not it is in error.
+func (s *_State) next(err error) bool {
+	if err != nil {
+		s.err = fmt.Errorf("%s: %w", s.state, err)
+		s.state = errorState
+		return true
+	}
+	s.state = s.states[s.state]
+	return false
+}
+
+// nextd is like next but for defers.
+func (s *_State) nextd(errp *error) bool {
+	return errp != nil && s.next(*errp)
+}
+
+// check records a non-nil error on s and, unless the error is io.EOF, moves s to the error state.
+func (s *_State) check(errp *error) {
+	if s.state == errorState || errp == nil {
+		return
+	}
+	if err := *errp; err != nil {
+		s.err = fmt.Errorf("%w[%s]", err, s.state)
+		if !errors.Is(err, io.EOF) {
+			s.state = errorState
+		}
+	}
+}
+
+func (s *_State) fail() error {
+	s.state = errorState
+	s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
+	return s.err
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go
new file mode 100644
index 0000000..75fb828
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[noState-0]
+	_ = x[errorState-1]
+	_ = x[newState-2]
+	_ = x[readState-3]
+	_ = x[writeState-4]
+	_ = x[closedState-5]
+}
+
+const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"
+
+var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}
+
+func (i aState) String() string {
+	if i >= aState(len(_aState_index)-1) {
+		return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _aState_name[_aState_index[i]:_aState_index[i+1]]
+}
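+
+// For example, readState (value 3) maps to
+// _aState_name[_aState_index[3]:_aState_index[4]], i.e. "readState".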
diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go
new file mode 100644
index 0000000..4358ade
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/writer.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+	"io"
+
+	"github.com/pierrec/lz4/v4/internal/lz4block"
+	"github.com/pierrec/lz4/v4/internal/lz4errors"
+	"github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var writerStates = []aState{
+	noState:     newState,
+	newState:    writeState,
+	writeState:  closedState,
+	closedState: newState,
+	errorState:  newState,
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+func NewWriter(w io.Writer) *Writer {
+	zw := &Writer{frame: lz4stream.NewFrame()}
+	zw.state.init(writerStates)
+	_ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
+	zw.Reset(w)
+	return zw
+}
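+
+// Caller-side round-trip sketch (error handling elided for brevity):
+//
+//	var buf bytes.Buffer
+//	zw := lz4.NewWriter(&buf)
+//	_, _ = zw.Write([]byte("hello lz4"))
+//	_ = zw.Close() // flushes pending data and writes the frame footer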
+
+// Writer allows writing an LZ4 stream.
+type Writer struct {
+	state   _State
+	src     io.Writer                 // destination writer
+	level   lz4block.CompressionLevel // how hard to try
+	num     int                       // concurrency level
+	frame   *lz4stream.Frame          // frame being built
+	data    []byte                    // pending data
+	idx     int                       // size of pending data
+	handler func(int)
+	legacy  bool
+}
+
+func (*Writer) private() {}
+
+func (w *Writer) Apply(options ...Option) (err error) {
+	defer w.state.check(&err)
+	switch w.state.state {
+	case newState:
+	case errorState:
+		return w.state.err
+	default:
+		return lz4errors.ErrOptionClosedOrError
+	}
+	w.Reset(w.src)
+	for _, o := range options {
+		if err = o(w); err != nil {
+			return
+		}
+	}
+	return
+}
+
+func (w *Writer) isNotConcurrent() bool {
+	return w.num == 1
+}
+
+// init sets up the Writer when in newState. It does not change the Writer state.
+func (w *Writer) init() error {
+	w.frame.InitW(w.src, w.num, w.legacy)
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	w.data = size.Get()
+	w.idx = 0
+	return w.frame.Descriptor.Write(w.frame, w.src)
+}
+
+func (w *Writer) Write(buf []byte) (n int, err error) {
+	defer w.state.check(&err)
+	switch w.state.state {
+	case writeState:
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+
+	zn := len(w.data)
+	for len(buf) > 0 {
+		if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
+			// Avoid a copy as there is enough data for a block.
+			if err = w.write(buf[:zn], false); err != nil {
+				return
+			}
+			n += zn
+			buf = buf[zn:]
+			continue
+		}
+		// Accumulate the data to be compressed.
+		m := copy(w.data[w.idx:], buf)
+		n += m
+		w.idx += m
+		buf = buf[m:]
+
+		if w.idx < len(w.data) {
+			// Buffer not filled.
+			return
+		}
+
+		// Buffer full.
+		if err = w.write(w.data, true); err != nil {
+			return
+		}
+		if !w.isNotConcurrent() {
+			size := w.frame.Descriptor.Flags.BlockSizeIndex()
+			w.data = size.Get()
+		}
+		w.idx = 0
+	}
+	return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+	if w.isNotConcurrent() {
+		block := w.frame.Blocks.Block
+		err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+		w.handler(len(block.Data))
+		return err
+	}
+	c := make(chan *lz4stream.FrameDataBlock)
+	w.frame.Blocks.Blocks <- c
+	go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+		b := lz4stream.NewFrameDataBlock(w.frame)
+		c <- b.Compress(w.frame, data, w.level)
+		<-c
+		w.handler(len(b.Data))
+		b.Close(w.frame)
+		if safe {
+			// Safe to put the buffer back: its last use was the FrameDataBlock.Write() that happened before c was closed.
+			lz4block.Put(data)
+		}
+	}(c, data, safe)
+
+	return nil
+}
+
+// Flush flushes any buffered data to the underlying writer immediately.
+func (w *Writer) Flush() (err error) {
+	switch w.state.state {
+	case writeState:
+	case errorState:
+		return w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return nil
+	}
+
+	if w.idx > 0 {
+		// Flush pending data, disable w.data freeing as it is done later on.
+		if err = w.write(w.data[:w.idx], false); err != nil {
+			return err
+		}
+		w.idx = 0
+	}
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying writer
+// without closing it.
+func (w *Writer) Close() error {
+	if err := w.Flush(); err != nil {
+		return err
+	}
+	err := w.frame.CloseW(w.src, w.num)
+	// It is now safe to free the buffer.
+	if w.data != nil {
+		lz4block.Put(w.data)
+		w.data = nil
+	}
+	return err
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but instead writing to writer.
+// Reset keeps the previous options unless overwritten by the supplied ones.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+	w.frame.Reset(w.num)
+	w.state.reset()
+	w.src = writer
+}
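+
+// Reuse sketch (f1 and f2 are hypothetical destinations):
+//
+//	zw := lz4.NewWriter(f1)
+//	// ... Write, then Close ...
+//	zw.Reset(f2) // same options, new destination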
+
+// ReadFrom efficiently reads from r and compresses the data into the Writer's destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+	switch w.state.state {
+	case closedState, errorState:
+		return 0, w.state.err
+	case newState:
+		if err = w.init(); w.state.next(err) {
+			return
+		}
+	default:
+		return 0, w.state.fail()
+	}
+	defer w.state.check(&err)
+
+	size := w.frame.Descriptor.Flags.BlockSizeIndex()
+	var done bool
+	var rn int
+	data := size.Get()
+	if w.isNotConcurrent() {
+		// Keep the same buffer for the whole process.
+		defer lz4block.Put(data)
+	}
+	for !done {
+		rn, err = io.ReadFull(r, data)
+		switch err {
+		case nil:
+		case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+			done = true
+		default:
+			return
+		}
+		n += int64(rn)
+		err = w.write(data[:rn], true)
+		if err != nil {
+			return
+		}
+		w.handler(rn)
+		if !done && !w.isNotConcurrent() {
+			// The buffer will be returned automatically by the goroutines (safe=true),
+			// so get a new one for the next round.
+			data = size.Get()
+		}
+	}
+	return
+}
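+
+// io.Copy detects ReadFrom, so stream compression needs no intermediate
+// buffer (caller-side sketch; src and dst are hypothetical):
+//
+//	zw := lz4.NewWriter(dst)
+//	if _, err := io.Copy(zw, src); err == nil {
+//		err = zw.Close()
+//	}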
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
deleted file mode 100644
index 6a60a9a..0000000
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lz4
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-	"runtime"
-
-	"github.com/pierrec/lz4/internal/xxh32"
-)
-
-// zResult contains the results of compressing a block.
-type zResult struct {
-	size     uint32 // Block header
-	data     []byte // Compressed data
-	checksum uint32 // Data checksum
-}
-
-// Writer implements the LZ4 frame encoder.
-type Writer struct {
-	Header
-	// Handler called when a block has been successfully written out.
-	// It provides the number of bytes written.
-	OnBlockDone func(size int)
-
-	buf       [19]byte      // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
-	dst       io.Writer     // Destination.
-	checksum  xxh32.XXHZero // Frame checksum.
-	data      []byte        // Data to be compressed + buffer for compressed data.
-	idx       int           // Index into data.
-	hashtable [winSize]int  // Hash table used in CompressBlock().
-
-	// For concurrency.
-	c   chan chan zResult // Channel for block compression goroutines and writer goroutine.
-	err error             // Any error encountered while writing to the underlying destination.
-}
-
-// NewWriter returns a new LZ4 frame encoder.
-// No access to the underlying io.Writer is performed.
-// The supplied Header is checked at the first Write.
-// It is ok to change it before the first Write but then not until a Reset() is performed.
-func NewWriter(dst io.Writer) *Writer {
-	z := new(Writer)
-	z.Reset(dst)
-	return z
-}
-
-// WithConcurrency sets the number of concurrent go routines used for compression.
-// A negative value sets the concurrency to GOMAXPROCS.
-func (z *Writer) WithConcurrency(n int) *Writer {
-	switch {
-	case n == 0 || n == 1:
-		z.c = nil
-		return z
-	case n < 0:
-		n = runtime.GOMAXPROCS(0)
-	}
-	z.c = make(chan chan zResult, n)
-	// Writer goroutine managing concurrent block compression goroutines.
-	go func() {
-		// Process next block compression item.
-		for c := range z.c {
-			// Read the next compressed block result.
-			// Waiting here ensures that the blocks are output in the order they were sent.
-			// The incoming channel is always closed as it indicates to the caller that
-			// the block has been processed.
-			res := <-c
-			n := len(res.data)
-			if n == 0 {
-				// Notify the block compression routine that we are done with its result.
-				// This is used when a sentinel block is sent to terminate the compression.
-				close(c)
-				return
-			}
-			// Write the block.
-			if err := z.writeUint32(res.size); err != nil && z.err == nil {
-				z.err = err
-			}
-			if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
-				z.err = err
-			}
-			if z.BlockChecksum {
-				if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
-					z.err = err
-				}
-			}
-			if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
-				// It is now safe to release the buffer as no longer in use by any goroutine.
-				putBuffer(cap(res.data), res.data)
-			}
-			if h := z.OnBlockDone; h != nil {
-				h(n)
-			}
-			close(c)
-		}
-	}()
-	return z
-}
-
-// newBuffers instantiates new buffers which size matches the one in Header.
-// The returned buffers are for decompression and compression respectively.
-func (z *Writer) newBuffers() {
-	bSize := z.Header.BlockMaxSize
-	buf := getBuffer(bSize)
-	z.data = buf[:bSize] // Uncompressed buffer is the first half.
-}
-
-// freeBuffers puts the writer's buffers back to the pool.
-func (z *Writer) freeBuffers() {
-	// Put the buffer back into the pool, if any.
-	putBuffer(z.Header.BlockMaxSize, z.data)
-	z.data = nil
-}
-
-// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
-func (z *Writer) writeHeader() error {
-	// Default to 4Mb if BlockMaxSize is not set.
-	if z.Header.BlockMaxSize == 0 {
-		z.Header.BlockMaxSize = blockSize4M
-	}
-	// The only option that needs to be validated.
-	bSize := z.Header.BlockMaxSize
-	if !isValidBlockSize(z.Header.BlockMaxSize) {
-		return fmt.Errorf("lz4: invalid block max size: %d", bSize)
-	}
-	// Allocate the compressed/uncompressed buffers.
-	// The compressed buffer cannot exceed the uncompressed one.
-	z.newBuffers()
-	z.idx = 0
-
-	// Size is optional.
-	buf := z.buf[:]
-
-	// Set the fixed size data: magic number, block max size and flags.
-	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
-	flg := byte(Version << 6)
-	flg |= 1 << 5 // No block dependency.
-	if z.Header.BlockChecksum {
-		flg |= 1 << 4
-	}
-	if z.Header.Size > 0 {
-		flg |= 1 << 3
-	}
-	if !z.Header.NoChecksum {
-		flg |= 1 << 2
-	}
-	buf[4] = flg
-	buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
-
-	// Current buffer size: magic(4) + flags(1) + block max size (1).
-	n := 6
-	// Optional items.
-	if z.Header.Size > 0 {
-		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
-		n += 8
-	}
-
-	// The header checksum includes the flags, block max size and optional Size.
-	buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
-	z.checksum.Reset()
-
-	// Header ready, write it out.
-	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
-		return err
-	}
-	z.Header.done = true
-	if debugFlag {
-		debug("wrote header %v", z.Header)
-	}
-
-	return nil
-}
-
-// Write compresses data from the supplied buffer into the underlying io.Writer.
-// Write does not return until the data has been written.
-func (z *Writer) Write(buf []byte) (int, error) {
-	if !z.Header.done {
-		if err := z.writeHeader(); err != nil {
-			return 0, err
-		}
-	}
-	if debugFlag {
-		debug("input buffer len=%d index=%d", len(buf), z.idx)
-	}
-
-	zn := len(z.data)
-	var n int
-	for len(buf) > 0 {
-		if z.idx == 0 && len(buf) >= zn {
-			// Avoid a copy as there is enough data for a block.
-			if err := z.compressBlock(buf[:zn]); err != nil {
-				return n, err
-			}
-			n += zn
-			buf = buf[zn:]
-			continue
-		}
-		// Accumulate the data to be compressed.
-		m := copy(z.data[z.idx:], buf)
-		n += m
-		z.idx += m
-		buf = buf[m:]
-		if debugFlag {
-			debug("%d bytes copied to buf, current index %d", n, z.idx)
-		}
-
-		if z.idx < len(z.data) {
-			// Buffer not filled.
-			if debugFlag {
-				debug("need more data for compression")
-			}
-			return n, nil
-		}
-
-		// Buffer full.
-		if err := z.compressBlock(z.data); err != nil {
-			return n, err
-		}
-		z.idx = 0
-	}
-
-	return n, nil
-}
-
-// compressBlock compresses a block.
-func (z *Writer) compressBlock(data []byte) error {
-	if !z.NoChecksum {
-		_, _ = z.checksum.Write(data)
-	}
-
-	if z.c != nil {
-		c := make(chan zResult)
-		z.c <- c // Send now to guarantee order
-		go writerCompressBlock(c, z.Header, data)
-		return nil
-	}
-
-	zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
-	// The compressed block size cannot exceed the input's.
-	var zn int
-
-	if level := z.Header.CompressionLevel; level != 0 {
-		zn, _ = CompressBlockHC(data, zdata, level)
-	} else {
-		zn, _ = CompressBlock(data, zdata, z.hashtable[:])
-	}
-
-	var bLen uint32
-	if debugFlag {
-		debug("block compression %d => %d", len(data), zn)
-	}
-	if zn > 0 && zn < len(data) {
-		// Compressible and compressed size smaller than uncompressed: ok!
-		bLen = uint32(zn)
-		zdata = zdata[:zn]
-	} else {
-		// Uncompressed block.
-		bLen = uint32(len(data)) | compressedBlockFlag
-		zdata = data
-	}
-	if debugFlag {
-		debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
-	}
-
-	// Write the block.
-	if err := z.writeUint32(bLen); err != nil {
-		return err
-	}
-	written, err := z.dst.Write(zdata)
-	if err != nil {
-		return err
-	}
-	if h := z.OnBlockDone; h != nil {
-		h(written)
-	}
-
-	if !z.BlockChecksum {
-		if debugFlag {
-			debug("current frame checksum %x", z.checksum.Sum32())
-		}
-		return nil
-	}
-	checksum := xxh32.ChecksumZero(zdata)
-	if debugFlag {
-		debug("block checksum %x", checksum)
-		defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
-	}
-	return z.writeUint32(checksum)
-}
-
-// Flush flushes any pending compressed data to the underlying writer.
-// Flush does not return until the data has been written.
-// If the underlying writer returns an error, Flush returns that error.
-func (z *Writer) Flush() error {
-	if debugFlag {
-		debug("flush with index %d", z.idx)
-	}
-	if z.idx == 0 {
-		return nil
-	}
-
-	data := z.data[:z.idx]
-	z.idx = 0
-	if z.c == nil {
-		return z.compressBlock(data)
-	}
-	if !z.NoChecksum {
-		_, _ = z.checksum.Write(data)
-	}
-	c := make(chan zResult)
-	z.c <- c
-	writerCompressBlock(c, z.Header, data)
-	return nil
-}
-
-func (z *Writer) close() error {
-	if z.c == nil {
-		return nil
-	}
-	// Send a sentinel block (no data to compress) to terminate the writer main goroutine.
-	c := make(chan zResult)
-	z.c <- c
-	c <- zResult{}
-	// Wait for the main goroutine to complete.
-	<-c
-	// At this point the main goroutine has shut down or is about to return.
-	z.c = nil
-	return z.err
-}
-
-// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
-func (z *Writer) Close() error {
-	if !z.Header.done {
-		if err := z.writeHeader(); err != nil {
-			return err
-		}
-	}
-	if err := z.Flush(); err != nil {
-		return err
-	}
-	if err := z.close(); err != nil {
-		return err
-	}
-	z.freeBuffers()
-
-	if debugFlag {
-		debug("writing last empty block")
-	}
-	if err := z.writeUint32(0); err != nil {
-		return err
-	}
-	if z.NoChecksum {
-		return nil
-	}
-	checksum := z.checksum.Sum32()
-	if debugFlag {
-		debug("stream checksum %x", checksum)
-	}
-	return z.writeUint32(checksum)
-}
-
-// Reset clears the state of the Writer z such that it is equivalent to its
-// initial state from NewWriter, but instead writing to w.
-// No access to the underlying io.Writer is performed.
-func (z *Writer) Reset(w io.Writer) {
-	n := cap(z.c)
-	_ = z.close()
-	z.freeBuffers()
-	z.Header.Reset()
-	z.dst = w
-	z.checksum.Reset()
-	z.idx = 0
-	z.err = nil
-	// reset hashtable to ensure deterministic output.
-	for i := range z.hashtable {
-		z.hashtable[i] = 0
-	}
-	z.WithConcurrency(n)
-}
-
-// writeUint32 writes a uint32 to the underlying writer.
-func (z *Writer) writeUint32(x uint32) error {
-	buf := z.buf[:4]
-	binary.LittleEndian.PutUint32(buf, x)
-	_, err := z.dst.Write(buf)
-	return err
-}
-
-// writerCompressBlock compresses data into a pooled buffer and writes its result
-// out to the input channel.
-func writerCompressBlock(c chan zResult, header Header, data []byte) {
-	zdata := getBuffer(header.BlockMaxSize)
-	// The compressed block size cannot exceed the input's.
-	var zn int
-	if level := header.CompressionLevel; level != 0 {
-		zn, _ = CompressBlockHC(data, zdata, level)
-	} else {
-		var hashTable [winSize]int
-		zn, _ = CompressBlock(data, zdata, hashTable[:])
-	}
-	var res zResult
-	if zn > 0 && zn < len(data) {
-		res.size = uint32(zn)
-		res.data = zdata[:zn]
-	} else {
-		res.size = uint32(len(data)) | compressedBlockFlag
-		res.data = data
-	}
-	if header.BlockChecksum {
-		res.checksum = xxh32.ChecksumZero(res.data)
-	}
-	c <- res
-}
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE
similarity index 100%
copy from vendor/go.opentelemetry.io/otel/LICENSE
copy to vendor/github.com/prometheus/client_golang/LICENSE
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
new file mode 100644
index 0000000..b9cc55a
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -0,0 +1,18 @@
+Prometheus instrumentation library for Go applications
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
+
+
+The following components are included in this product:
+
+perks - a fork of https://github.com/bmizerany/perks
+https://github.com/beorn7/perks
+Copyright 2013-2015 Blake Mizerany, Björn Rabenstein
+See https://github.com/beorn7/perks/blob/master/README.md for license details.
+
+Go support for Protocol Buffers - Google's data interchange format
+http://github.com/golang/protobuf/
+Copyright 2010 The Go Authors
+See source code for license details.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
new file mode 100644
index 0000000..3460f03
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore
@@ -0,0 +1 @@
+command-line-arguments.test
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
new file mode 100644
index 0000000..c67ff1b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -0,0 +1 @@
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 0000000..450189f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+	path, version, sum := "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
+	c := &selfCollector{MustNewConstMetric(
+		NewDesc(
+			"go_build_info",
+			"Build information about the main Go module.",
+			nil, Labels{"path": path, "version": version, "checksum": sum},
+		),
+		GaugeValue, 1)}
+	c.init(c.self)
+	return c
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
new file mode 100644
index 0000000..cf05079
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -0,0 +1,128 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Collector is the interface implemented by anything that can be used by
+// Prometheus to collect metrics. A Collector has to be registered for
+// collection. See Registerer.Register.
+//
+// The stock metrics provided by this package (Gauge, Counter, Summary,
+// Histogram, Untyped) are also Collectors (which only ever collect one metric,
+// namely itself). An implementer of Collector may, however, collect multiple
+// metrics in a coordinated fashion and/or create metrics on the fly. Examples
+// for collectors already implemented in this library are the metric vectors
+// (i.e. collection of multiple instances of the same Metric but with different
+// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.
+type Collector interface {
+	// Describe sends the super-set of all possible descriptors of metrics
+	// collected by this Collector to the provided channel and returns once
+	// the last descriptor has been sent. The sent descriptors fulfill the
+	// consistency and uniqueness requirements described in the Desc
+	// documentation.
+	//
+	// It is valid if one and the same Collector sends duplicate
+	// descriptors. Those duplicates are simply ignored. However, two
+	// different Collectors must not send duplicate descriptors.
+	//
+	// Sending no descriptor at all marks the Collector as “unchecked”,
+	// i.e. no checks will be performed at registration time, and the
+	// Collector may yield any Metric it sees fit in its Collect method.
+	//
+	// This method idempotently sends the same descriptors throughout the
+	// lifetime of the Collector. It may be called concurrently and
+	// therefore must be implemented in a concurrency safe way.
+	//
+	// If a Collector encounters an error while executing this method, it
+	// must send an invalid descriptor (created with NewInvalidDesc) to
+	// signal the error to the registry.
+	Describe(chan<- *Desc)
+	// Collect is called by the Prometheus registry when collecting
+	// metrics. The implementation sends each collected metric via the
+	// provided channel and returns once the last metric has been sent. The
+	// descriptor of each sent metric is one of those returned by Describe
+	// (unless the Collector is unchecked, see above). Returned metrics that
+	// share the same descriptor must differ in their variable label
+	// values.
+	//
+	// This method may be called concurrently and must therefore be
+	// implemented in a concurrency safe way. Blocking occurs at the expense
+	// of total performance of rendering all registered metrics. Ideally,
+	// Collector implementations support concurrent readers.
+	Collect(chan<- Metric)
+}
+
+// DescribeByCollect is a helper to implement the Describe method of a custom
+// Collector. It collects the metrics from the provided Collector and sends
+// their descriptors to the provided channel.
+//
+// If a Collector collects the same metrics throughout its lifetime, its
+// Describe method can simply be implemented as:
+//
+//	func (c customCollector) Describe(ch chan<- *Desc) {
+//		DescribeByCollect(c, ch)
+//	}
+//
+// However, this will not work if the metrics collected change dynamically over
+// the lifetime of the Collector in a way that their combined set of descriptors
+// changes as well. The shortcut implementation will then violate the contract
+// of the Describe method. If a Collector sometimes collects no metrics at all
+// (for example vectors like CounterVec, GaugeVec, etc., which only collect
+// metrics after a metric with a fully specified label set has been accessed),
+// it might even get registered as an unchecked Collector (cf. the Register
+// method of the Registerer interface). Hence, only use this shortcut
+// implementation of Describe if you are certain to fulfill the contract.
+//
+// The Collector example demonstrates a use of DescribeByCollect.
+func DescribeByCollect(c Collector, descs chan<- *Desc) {
+	metrics := make(chan Metric)
+	go func() {
+		c.Collect(metrics)
+		close(metrics)
+	}()
+	for m := range metrics {
+		descs <- m.Desc()
+	}
+}
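+
+// A minimal sketch of the shortcut described above (hypothetical type whose
+// set of collected metrics stays stable over its lifetime):
+//
+//	type cacheCollector struct{ hits prometheus.Counter }
+//
+//	func (c cacheCollector) Describe(ch chan<- *prometheus.Desc) {
+//		prometheus.DescribeByCollect(c, ch)
+//	}
+//
+//	func (c cacheCollector) Collect(ch chan<- prometheus.Metric) { ch <- c.hits }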
+
+// selfCollector implements Collector for a single Metric so that the Metric
+// collects itself. Add it as an anonymous field to a struct that implements
+// Metric, and call init with the Metric itself as an argument.
+type selfCollector struct {
+	self Metric
+}
+
+// init provides the selfCollector with a reference to the metric it is supposed
+// to collect. It is usually called within the factory function to create a
+// metric. See example.
+func (c *selfCollector) init(self Metric) {
+	c.self = self
+}
+
+// Describe implements Collector.
+func (c *selfCollector) Describe(ch chan<- *Desc) {
+	ch <- c.self.Desc()
+}
+
+// Collect implements Collector.
+func (c *selfCollector) Collect(ch chan<- Metric) {
+	ch <- c.self
+}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+	Metric
+	Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
new file mode 100644
index 0000000..9a71a15
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// CollectorFunc is a convenient way to implement a Prometheus Collector
+// without interface boilerplate.
+// This implementation is based on the DescribeByCollect method;
+// familiarize yourself with it before using.
+type CollectorFunc func(chan<- Metric)
+
+// Collect calls the defined CollectorFunc function with the provided Metric channel.
+func (f CollectorFunc) Collect(ch chan<- Metric) {
+	f(ch)
+}
+
+// Describe sends the descriptor information using DescribeByCollect.
+func (f CollectorFunc) Describe(ch chan<- *Desc) {
+	DescribeByCollect(f, ch)
+}
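+
+// Usage sketch (desc is a hypothetical, pre-built *prometheus.Desc):
+//
+//	c := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
+//		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42)
+//	})
+//	prometheus.MustRegister(c)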
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
new file mode 100644
index 0000000..4ce84e7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -0,0 +1,358 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// Counter is a Metric that represents a single numerical value that only ever
+// goes up. That implies that it cannot be used to count items whose number can
+// also go down, e.g. the number of currently running goroutines. Those
+// "counters" are represented by Gauges.
+//
+// A Counter is typically used to count requests served, tasks completed, errors
+// occurred, etc.
+//
+// To create Counter instances, use NewCounter.
+type Counter interface {
+	Metric
+	Collector
+
+	// Inc increments the counter by 1. Use Add to increment it by arbitrary
+	// non-negative values.
+	Inc()
+	// Add adds the given value to the counter. It panics if the value is <
+	// 0.
+	Add(float64)
+}
+
+// ExemplarAdder is implemented by Counters that offer the option of adding a
+// value to the Counter together with an exemplar. Its AddWithExemplar method
+// works like the Add method of the Counter interface but also replaces the
+// currently saved exemplar (if any) with a new one, created from the provided
+// value, the current time as timestamp, and the provided labels. Empty Labels
+// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
+// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
+// of the provided labels are invalid, or if the provided labels contain more
+// than 128 runes in total.
+type ExemplarAdder interface {
+	AddWithExemplar(value float64, exemplar Labels)
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts Opts
+
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+	CounterOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// NewCounter creates a new Counter based on the provided CounterOpts.
+//
+// The returned implementation also implements ExemplarAdder. It is safe to
+// perform the corresponding type assertion.
+//
+// The returned implementation tracks the counter value in two separate
+// variables, a float64 and a uint64. The latter is used to track calls of the
+// Inc method and calls of the Add method with a value that can be represented
+// as a uint64. This allows atomic increments of the counter with optimal
+// performance. (It is common to have an Inc call in very hot execution paths.)
+// Both internal tracking values are added up in the Write method. This has to
+// be taken into account when it comes to precision and overflow behavior.
+func NewCounter(opts CounterOpts) Counter {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
+	result.init(result) // Init self-collection.
+	result.createdTs = timestamppb.New(opts.now())
+	return result
+}
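+
+// Typical caller-side sketch (hypothetical metric name):
+//
+//	requests := prometheus.NewCounter(prometheus.CounterOpts{
+//		Name: "myapp_requests_total",
+//		Help: "Total number of handled requests.",
+//	})
+//	prometheus.MustRegister(requests)
+//	requests.Inc()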
+
+type counter struct {
+	// valBits contains the bits of the represented float64 value, while
+	// valInt stores values that are exact integers. Both have to go first
+	// in the struct to guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+	valInt  uint64
+
+	selfCollector
+	desc *Desc
+
+	createdTs  *timestamppb.Timestamp
+	labelPairs []*dto.LabelPair
+	exemplar   atomic.Value // Containing nil or a *dto.Exemplar.
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
+
+func (c *counter) Desc() *Desc {
+	return c.desc
+}
+
+func (c *counter) Add(v float64) {
+	if v < 0 {
+		panic(errors.New("counter cannot decrease in value"))
+	}
+
+	ival := uint64(v)
+	if float64(ival) == v {
+		atomic.AddUint64(&c.valInt, ival)
+		return
+	}
+
+	for {
+		oldBits := atomic.LoadUint64(&c.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (c *counter) AddWithExemplar(v float64, e Labels) {
+	c.Add(v)
+	c.updateExemplar(v, e)
+}
+
+func (c *counter) Inc() {
+	atomic.AddUint64(&c.valInt, 1)
+}
+
+func (c *counter) get() float64 {
+	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
+	ival := atomic.LoadUint64(&c.valInt)
+	return fval + float64(ival)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	// Read the Exemplar first and the value second. This is to avoid a race condition
+	// where users see an exemplar for a not-yet-existing observation.
+	var exemplar *dto.Exemplar
+	if e := c.exemplar.Load(); e != nil {
+		exemplar = e.(*dto.Exemplar)
+	}
+	val := c.get()
+	return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
+}
+
+func (c *counter) updateExemplar(v float64, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, c.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	c.exemplar.Store(e)
+}
+
+// CounterVec is a Collector that bundles a set of Counters that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. number of HTTP requests, partitioned by response code and
+// method). Create instances with NewCounterVec.
+type CounterVec struct {
+	*MetricVec
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
+// partitioned by the given label names.
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
+	return V2.NewCounterVec(CounterVecOpts{
+		CounterOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	return &CounterVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
+			}
+			result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
+			result.init(result) // Init self-collection.
+			result.createdTs = timestamppb.New(opts.now())
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Counter), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
+	c, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *CounterVec) With(labels Labels) Counter {
+	c, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return c
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the CounterVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &CounterVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
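+
+// Currying sketch: pre-setting one label of a two-label vector
+// (hypothetical metric name):
+//
+//	v := prometheus.NewCounterVec(
+//		prometheus.CounterOpts{Name: "tasks_total", Help: "Tasks processed."},
+//		[]string{"job", "status"},
+//	)
+//	cleanup := v.MustCurryWith(prometheus.Labels{"job": "cleanup"})
+//	cleanup.WithLabelValues("ok").Inc() // only "status" is still variable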
+
+// CounterFunc is a Counter whose value is determined at collect time by calling a
+// provided function.
+//
+// To create CounterFunc instances, use NewCounterFunc.
+type CounterFunc interface {
+	Metric
+	Collector
+}
+
+// NewCounterFunc creates a new CounterFunc based on the provided
+// CounterOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a CounterFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe. The function should also honor
+// the contract for a Counter (values only go up, not down), but compliance will
+// not be checked.
+//
+// Check out the ExampleGaugeFunc examples for the similar GaugeFunc.
+func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), CounterValue, function)
+}
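+
+// Sketch: mirroring an externally maintained, monotonically increasing
+// value (getSentBytes is a hypothetical, concurrency-safe function):
+//
+//	c := prometheus.NewCounterFunc(prometheus.CounterOpts{
+//		Name: "myapp_sent_bytes_total",
+//		Help: "Bytes sent so far.",
+//	}, func() float64 { return float64(getSentBytes()) })
+//	prometheus.MustRegister(c)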
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
new file mode 100644
index 0000000..2331b8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -0,0 +1,211 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+)
+
+// Desc is the descriptor used by every Prometheus Metric. It is essentially
+// the immutable meta-data of a Metric. The normal Metric implementations
+// included in this package manage their Desc under the hood. Users only have to
+// deal with Desc if they use advanced features like the ExpvarCollector or
+// custom Collectors and Metrics.
+//
+// Descriptors registered with the same registry have to fulfill certain
+// consistency and uniqueness criteria if they share the same fully-qualified
+// name: They must have the same help string and the same label names (aka label
+// dimensions) in each, constLabels and variableLabels, but they must differ in
+// the values of the constLabels.
+//
+// Descriptors that share the same fully-qualified names and the same label
+// values of their constLabels are considered equal.
+//
+// Use NewDesc to create new Desc instances.
+type Desc struct {
+	// fqName has been built from Namespace, Subsystem, and Name.
+	fqName string
+	// help provides some helpful information about this metric.
+	help string
+	// constLabelPairs contains precalculated DTO label pairs based on
+	// the constant labels.
+	constLabelPairs []*dto.LabelPair
+	// variableLabels contains names of labels and normalization function for
+	// which the metric maintains variable values.
+	variableLabels *compiledLabels
+	// id is a hash of the values of the ConstLabels and fqName. This
+	// must be unique among all registered descriptors and can therefore be
+	// used as an identifier of the descriptor.
+	id uint64
+	// dimHash is a hash of the label names (preset and variable) and the
+	// Help string. Each Desc with the same fqName must have the same
+	// dimHash.
+	dimHash uint64
+	// err is an error that occurred during construction. It is reported on
+	// registration time.
+	err error
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names. Their label values are variable
+// and therefore not part of the Desc. (They are managed within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+	return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
+	d := &Desc{
+		fqName:         fqName,
+		help:           help,
+		variableLabels: variableLabels.compile(),
+	}
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	if !model.NameValidationScheme.IsValidMetricName(fqName) {
+		d.err = fmt.Errorf("%q is not a valid metric name", fqName)
+		return d
+	}
+	// labelValues contains the label values of const labels (in order of
+	// their sorted label names) plus the fqName (at position 0).
+	labelValues := make([]string, 1, len(constLabels)+1)
+	labelValues[0] = fqName
+	labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
+	labelNameSet := map[string]struct{}{}
+	// First add only the const label names and sort them...
+	for labelName := range constLabels {
+		if !checkLabelName(labelName) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+			return d
+		}
+		labelNames = append(labelNames, labelName)
+		labelNameSet[labelName] = struct{}{}
+	}
+	sort.Strings(labelNames)
+	// ... so that we can now add const label values in the order of their names.
+	for _, labelName := range labelNames {
+		labelValues = append(labelValues, constLabels[labelName])
+	}
+	// Validate the const label values. They can't have a wrong cardinality, so
+	// use len(labelValues) as expectedNumberOfValues.
+	if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+		d.err = err
+		return d
+	}
+	// Now add the variable label names, but prefix them with something that
+	// cannot be in a regular label name. That prevents matching the label
+	// dimension with a different mix between preset and variable labels.
+	for _, label := range d.variableLabels.names {
+		if !checkLabelName(label) {
+			d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
+			return d
+		}
+		labelNames = append(labelNames, "$"+label)
+		labelNameSet[label] = struct{}{}
+	}
+	if len(labelNames) != len(labelNameSet) {
+		d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
+		return d
+	}
+
+	xxh := xxhash.New()
+	for _, val := range labelValues {
+		xxh.WriteString(val)
+		xxh.Write(separatorByteSlice)
+	}
+	d.id = xxh.Sum64()
+	// Sort labelNames so that order doesn't matter for the hash.
+	sort.Strings(labelNames)
+	// Now hash together (in this order) the help string and the sorted
+	// label names.
+	xxh.Reset()
+	xxh.WriteString(help)
+	xxh.Write(separatorByteSlice)
+	for _, labelName := range labelNames {
+		xxh.WriteString(labelName)
+		xxh.Write(separatorByteSlice)
+	}
+	d.dimHash = xxh.Sum64()
+
+	d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))
+	for n, v := range constLabels {
+		d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{
+			Name:  proto.String(n),
+			Value: proto.String(v),
+		})
+	}
+	sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
+	return d
+}
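+
+// Caller-side sketch of the v1 constructor above (hypothetical metric), with
+// one variable label and one constant label:
+//
+//	desc := prometheus.NewDesc(
+//		"myapp_queue_length",
+//		"Current length of the work queue.",
+//		[]string{"queue"},               // variable label names
+//		prometheus.Labels{"shard": "0"}, // constant labels
+//	)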
+
+// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the
+// provided error set. If a collector returning such a descriptor is registered,
+// registration will fail with the provided error. NewInvalidDesc can be used by
+// a Collector to signal inability to describe itself.
+func NewInvalidDesc(err error) *Desc {
+	return &Desc{
+		err: err,
+	}
+}
+
+func (d *Desc) String() string {
+	lpStrings := make([]string, 0, len(d.constLabelPairs))
+	for _, lp := range d.constLabelPairs {
+		lpStrings = append(
+			lpStrings,
+			fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
+		)
+	}
+	vlStrings := []string{}
+	if d.variableLabels != nil {
+		vlStrings = make([]string, 0, len(d.variableLabels.names))
+		for _, vl := range d.variableLabels.names {
+			if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+				vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+			} else {
+				vlStrings = append(vlStrings, vl)
+			}
+		}
+	}
+	return fmt.Sprintf(
+		"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
+		d.fqName,
+		d.help,
+		strings.Join(lpStrings, ","),
+		strings.Join(vlStrings, ","),
+	)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
new file mode 100644
index 0000000..962608f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -0,0 +1,210 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package prometheus is the core instrumentation package. It provides metrics
+// primitives to instrument code for monitoring. It also offers a registry for
+// metrics. Sub-packages allow exposing the registered metrics via HTTP
+// (package promhttp) or push them to a Pushgateway (package push). There is
+// also a sub-package promauto, which provides metrics constructors with
+// automatic registration.
+//
+// All exported functions and methods are safe to be used concurrently unless
+// specified otherwise.
+//
+// # A Basic Example
+//
+// As a starting point, a very basic usage example:
+//
+//	package main
+//
+//	import (
+//		"log"
+//		"net/http"
+//
+//		"github.com/prometheus/client_golang/prometheus"
+//		"github.com/prometheus/client_golang/prometheus/promhttp"
+//	)
+//
+//	type metrics struct {
+//		cpuTemp  prometheus.Gauge
+//		hdFailures *prometheus.CounterVec
+//	}
+//
+//	func NewMetrics(reg prometheus.Registerer) *metrics {
+//		m := &metrics{
+//			cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+//				Name: "cpu_temperature_celsius",
+//				Help: "Current temperature of the CPU.",
+//			}),
+//			hdFailures: prometheus.NewCounterVec(
+//				prometheus.CounterOpts{
+//					Name: "hd_errors_total",
+//					Help: "Number of hard-disk errors.",
+//				},
+//				[]string{"device"},
+//			),
+//		}
+//		reg.MustRegister(m.cpuTemp)
+//		reg.MustRegister(m.hdFailures)
+//		return m
+//	}
+//
+//	func main() {
+//		// Create a non-global registry.
+//		reg := prometheus.NewRegistry()
+//
+//		// Create new metrics and register them using the custom registry.
+//		m := NewMetrics(reg)
+//		// Set values for the newly created metrics.
+//		m.cpuTemp.Set(65.3)
+//		m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+//
+//		// Expose metrics and custom registry via an HTTP server
+//		// using the HandlerFor function. "/metrics" is the usual endpoint for that.
+//		http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+//		log.Fatal(http.ListenAndServe(":8080", nil))
+//	}
+//
+// This is a complete program that exports two metrics, a Gauge and a Counter,
+// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
+//
+// # Metrics
+//
+// The number of exported identifiers in this package might appear a bit
+// overwhelming. However, in addition to the basic plumbing shown in the example
+// above, you only need to understand the different metric types and their
+// vector versions for basic usage. Furthermore, if you are not concerned with
+// fine-grained control of when and how to register metrics with the registry,
+// have a look at the promauto package, which will effectively allow you to
+// ignore registration altogether in simple cases.
+//
+// Above, you have already touched the Counter and the Gauge. There are two more
+// advanced metric types: the Summary and Histogram. A more thorough description
+// of those four metric types can be found in the Prometheus docs:
+// https://prometheus.io/docs/concepts/metric_types/
+//
+// In addition to the fundamental metric types Gauge, Counter, Summary, and
+// Histogram, a very important part of the Prometheus data model is the
+// partitioning of samples along dimensions called labels, which results in
+// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec.
+//
+// While only the fundamental metric types implement the Metric interface, both
+// the metrics and their vector versions implement the Collector interface. A
+// Collector manages the collection of a number of Metrics, but for convenience,
+// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, and
+// Histogram are interfaces themselves while GaugeVec, CounterVec, SummaryVec,
+// and HistogramVec are not.
+//
+// To create instances of Metrics and their vector versions, you need a suitable
+// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
+//
+// # Custom Collectors and constant Metrics
+//
+// While you could create your own implementations of Metric, most likely you
+// will only ever implement the Collector interface on your own. At a first
+// glance, a custom Collector seems handy to bundle Metrics for common
+// registration (with the prime example of the different metric vectors above,
+// which bundle all the metrics of the same name but with different labels).
+//
+// There is a more involved use case, too: If you already have metrics
+// available, created outside of the Prometheus context, you don't need the
+// interface of the various Metric types. You essentially want to mirror the
+// existing numbers into Prometheus Metrics during collection. Implementing
+// the Collector interface yourself is perfect for that. You can create
+// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and
+// NewConstSummary (and their respective Must… versions). NewConstMetric is used
+// for all metric types with just a float64 as their value: Counter, Gauge, and
+// a special “type” called Untyped. Use the latter if you are not sure if the
+// mirrored metric is a Counter or a Gauge. Creation of the Metric instance
+// happens in the Collect method. The Describe method has to return separate
+// Desc instances, representative of the “throw-away” metrics to be created
+// later.  NewDesc comes in handy to create those Desc instances. Alternatively,
+// you could return no Desc at all, which will mark the Collector “unchecked”.
+// No checks are performed at registration time, but metric consistency will
+// still be ensured at scrape time, i.e. any inconsistencies will lead to scrape
+// errors. Thus, with unchecked Collectors, the responsibility to not collect
+// metrics that lead to inconsistencies in the total scrape result lies with the
+// implementer of the Collector. While this is not a desirable state, it is
+// sometimes necessary. The typical use case is a situation where the exact
+// metrics to be returned by a Collector cannot be predicted at registration
+// time, but the implementer has sufficient knowledge of the whole system to
+// guarantee metric consistency.
+//
+// The Collector example illustrates the use case. You can also look at the
+// source code of the processCollector (mirroring process metrics), the
+// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar
+// metrics) as examples that are used in this package itself.
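+//
+// As a minimal sketch (all names here are illustrative, not part of this
+// API), such a Collector could look like this:
+//
+//	type queueCollector struct {
+//		lengthDesc *prometheus.Desc
+//	}
+//
+//	func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
+//		ch <- c.lengthDesc
+//	}
+//
+//	func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
+//		// readQueueLength is a hypothetical function returning the
+//		// externally maintained value to mirror.
+//		ch <- prometheus.MustNewConstMetric(
+//			c.lengthDesc, prometheus.GaugeValue, readQueueLength(),
+//		)
+//	}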
+//
+// If you just need to call a function to get a single float value to collect as
+// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
+// shortcuts.
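+//
+// For example, a sketch (queue is an assumed, externally managed variable):
+//
+//	prometheus.MustRegister(prometheus.NewGaugeFunc(
+//		prometheus.GaugeOpts{
+//			Name: "queue_length",
+//			Help: "The current queue length.",
+//		},
+//		func() float64 { return float64(queue.Len()) },
+//	))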
+//
+// # Advanced Uses of the Registry
+//
+// While MustRegister is the by far most common way of registering a Collector,
+// sometimes you might want to handle the errors the registration might cause.
+// As suggested by the name, MustRegister panics if an error occurs. With the
+// Register function, the error is returned and can be handled.
+//
+// An error is returned if the registered Collector is incompatible or
+// inconsistent with already registered metrics. The registry aims for
+// consistency of the collected metrics according to the Prometheus data model.
+// Inconsistencies are ideally detected at registration time, not at collect
+// time. The former will usually be detected at start-up time of a program,
+// while the latter will only happen at scrape time, possibly not even on the
+// first scrape if the inconsistency only becomes relevant later. That is the
+// main reason why a Collector and a Metric have to describe themselves to the
+// registry.
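+//
+// A sketch of handling the registration error, using this package's
+// AlreadyRegisteredError (reqCounter is an assumed Counter):
+//
+//	if err := prometheus.Register(reqCounter); err != nil {
+//		are := &prometheus.AlreadyRegisteredError{}
+//		if errors.As(err, are) {
+//			// A Counter for that metric has been registered before.
+//			// Use the old Counter from now on.
+//			reqCounter = are.ExistingCollector.(prometheus.Counter)
+//		} else {
+//			// Something else went wrong.
+//			panic(err)
+//		}
+//	}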
+//
+// So far, everything we did operated on the so-called default registry, as it
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
+// can create a custom registry, or you can even implement the Registerer or
+// Gatherer interfaces yourself. The methods Register and Unregister work in the
+// same way on a custom registry as the global functions Register and Unregister
+// on the default registry.
+//
+// There are a number of uses for custom registries: You can use registries with
+// special properties, see NewPedanticRegistry. You can avoid global state, as
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways.  You can use
+// separate registries for testing purposes.
+//
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
+// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
+// NewProcessCollector). With a custom registry, you are in control and decide
+// yourself about the Collectors to register.
+//
+// # HTTP Exposition
+//
+// The Registry implements the Gatherer interface. The caller of the Gather
+// method can then expose the gathered metrics in some way. Usually, the metrics
+// are served via HTTP on the /metrics endpoint. That's happening in the example
+// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
+//
+// # Pushing to the Pushgateway
+//
+// Functions for pushing to the Pushgateway can be found in the push sub-package.
+//
+// # Graphite Bridge
+//
+// Functions and examples to push metrics from a Gatherer to Graphite can be
+// found in the graphite sub-package.
+//
+// # Other Means of Exposition
+//
+// More ways of exposing metrics can easily be added by following the approaches
+// of the existing implementations.
+package prometheus
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
new file mode 100644
index 0000000..de5a856
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -0,0 +1,86 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"encoding/json"
+	"expvar"
+)
+
+type expvarCollector struct {
+	exports map[string]*Desc
+}
+
+// NewExpvarCollector is the obsolete version of collectors.NewExpvarCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewExpvarCollector instead.
+func NewExpvarCollector(exports map[string]*Desc) Collector {
+	return &expvarCollector{
+		exports: exports,
+	}
+}
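+
+// Note on the expansion performed by Collect below: given a Desc with
+// variable labels {"code", "method"}, an expvar whose String() yields
+// {"200": {"GET": 212}} is (as a sketch of the behavior) turned into one
+// untyped metric with labels code="200", method="GET" and value 212.
+// Nested JSON objects supply the label values, the leaf number the value;
+// boolean leaves are mapped to 1 (true) and 0 (false).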
+
+// Describe implements Collector.
+func (e *expvarCollector) Describe(ch chan<- *Desc) {
+	for _, desc := range e.exports {
+		ch <- desc
+	}
+}
+
+// Collect implements Collector.
+func (e *expvarCollector) Collect(ch chan<- Metric) {
+	for name, desc := range e.exports {
+		var m Metric
+		expVar := expvar.Get(name)
+		if expVar == nil {
+			continue
+		}
+		var v interface{}
+		labels := make([]string, len(desc.variableLabels.names))
+		if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
+			ch <- NewInvalidMetric(desc, err)
+			continue
+		}
+		var processValue func(v interface{}, i int)
+		processValue = func(v interface{}, i int) {
+			if i >= len(labels) {
+				copiedLabels := append(make([]string, 0, len(labels)), labels...)
+				switch v := v.(type) {
+				case float64:
+					m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)
+				case bool:
+					if v {
+						m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)
+					} else {
+						m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+					}
+				default:
+					return
+				}
+				ch <- m
+				return
+			}
+			vm, ok := v.(map[string]interface{})
+			if !ok {
+				return
+			}
+			for lv, val := range vm {
+				labels[i] = lv
+				processValue(val, i+1)
+			}
+		}
+		processValue(v, 0)
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
new file mode 100644
index 0000000..3d383a7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
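+
+// As an illustration of how these helpers compose (separatorByte is the
+// label separator constant defined elsewhere in this package):
+//
+//	h := hashNew()
+//	h = hashAdd(h, "method")
+//	h = hashAddByte(h, separatorByte)
+//	h = hashAdd(h, "GET")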
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
new file mode 100644
index 0000000..dd2eac9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -0,0 +1,311 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"math"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// Gauge is a Metric that represents a single numerical value that can
+// arbitrarily go up and down.
+//
+// A Gauge is typically used for measured values like temperatures or current
+// memory usage, but also "counts" that can go up and down, like the number of
+// running goroutines.
+//
+// To create Gauge instances, use NewGauge.
+type Gauge interface {
+	Metric
+	Collector
+
+	// Set sets the Gauge to an arbitrary value.
+	Set(float64)
+	// Inc increments the Gauge by 1. Use Add to increment it by arbitrary
+	// values.
+	Inc()
+	// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary
+	// values.
+	Dec()
+	// Add adds the given value to the Gauge. (The value can be negative,
+	// resulting in a decrease of the Gauge.)
+	Add(float64)
+	// Sub subtracts the given value from the Gauge. (The value can be
+	// negative, resulting in an increase of the Gauge.)
+	Sub(float64)
+
+	// SetToCurrentTime sets the Gauge to the current Unix time in seconds.
+	SetToCurrentTime()
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts Opts
+
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+	GaugeOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// NewGauge creates a new Gauge based on the provided GaugeOpts.
+//
+// The returned implementation is optimized for a fast Set method. If you have a
+// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
+// the former. For example, the Inc method of the returned Gauge is slower than
+// the Inc method of a Counter returned by NewCounter. This matches the typical
+// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
+// the latter Inc-heavy.
+func NewGauge(opts GaugeOpts) Gauge {
+	desc := NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	)
+	result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
+	result.init(result) // Init self-collection.
+	return result
+}
+
+type gauge struct {
+	// valBits contains the bits of the represented float64 value. It has
+	// to go first in the struct to guarantee alignment for atomic
+	// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	valBits uint64
+
+	selfCollector
+
+	desc       *Desc
+	labelPairs []*dto.LabelPair
+}
+
+func (g *gauge) Desc() *Desc {
+	return g.desc
+}
+
+func (g *gauge) Set(val float64) {
+	atomic.StoreUint64(&g.valBits, math.Float64bits(val))
+}
+
+func (g *gauge) SetToCurrentTime() {
+	g.Set(float64(time.Now().UnixNano()) / 1e9)
+}
+
+func (g *gauge) Inc() {
+	g.Add(1)
+}
+
+func (g *gauge) Dec() {
+	g.Add(-1)
+}
+
+func (g *gauge) Add(val float64) {
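+	// Emulate an atomic float64 addition: load the current bits, compute
+	// the new value, and compare-and-swap; retry if a concurrent update
+	// got in between.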
+	for {
+		oldBits := atomic.LoadUint64(&g.valBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
+		if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
+			return
+		}
+	}
+}
+
+func (g *gauge) Sub(val float64) {
+	g.Add(val * -1)
+}
+
+func (g *gauge) Write(out *dto.Metric) error {
+	val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
+	return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
+}
+
+// GaugeVec is a Collector that bundles a set of Gauges that all share the same
+// Desc, but have different values for their variable labels. This is used if
+// you want to count the same thing partitioned by various dimensions
+// (e.g. number of operations queued, partitioned by user and operation
+// type). Create instances with NewGaugeVec.
+type GaugeVec struct {
+	*MetricVec
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
+// partitioned by the given label names.
+func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
+	return V2.NewGaugeVec(GaugeVecOpts{
+		GaugeOpts:      opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	return &GaugeVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			if len(lvs) != len(desc.variableLabels.names) {
+				panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
+			}
+			result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
+			result.init(result) // Init self-collection.
+			return result
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Gauge), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Add(42)
+func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
+	g, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+func (v *GaugeVec) With(labels Labels) Gauge {
+	g, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return g
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the GaugeVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &GaugeVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+// GaugeFunc is a Gauge whose value is determined at collect time by calling a
+// provided function.
+//
+// To create GaugeFunc instances, use NewGaugeFunc.
+type GaugeFunc interface {
+	Metric
+	Collector
+}
+
+// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The
+// value reported is determined by calling the given function from within the
+// Write method. Take into account that metric collection may happen
+// concurrently. Therefore, it must be safe to call the provided function
+// concurrently.
+//
+// NewGaugeFunc is a good way to create an “info” style metric with a constant
+// value of 1. Example:
+// https://github.com/prometheus/common/blob/8558a5b7db3c84fa38b4766966059a7bd5bfa2ee/version/info.go#L36-L56
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), GaugeValue, function)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 0000000..614fd61
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+	pid := os.Getpid()
+	return func() (int, error) {
+		return pid, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
new file mode 100644
index 0000000..eaf8059
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+	return func() (int, error) {
+		return 1, nil
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
new file mode 100644
index 0000000..520cbd7
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -0,0 +1,274 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"runtime"
+	"runtime/debug"
+	"time"
+)
+
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
+// while each eval closure still works on a runtime.MemStats, on Go 1.17+ that struct
+// is populated using runtime/metrics. These metric names are defaults we can't alter.
+func goRuntimeMemStats() memStatsMetrics {
+	return memStatsMetrics{
+		{
+			desc: NewDesc(
+				memstatNamespace("alloc_bytes"),
+				"Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("alloc_bytes_total"),
+				"Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("sys_bytes"),
+				"Number of bytes obtained from system. Equals to /memory/classes/total:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mallocs_total"),
+				// TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+				"Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("frees_total"),
+				"Total number of heap object frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+			valType: CounterValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_alloc_bytes"),
+				"Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_sys_bytes"),
+				"Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_idle_bytes"),
+				"Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_inuse_bytes"),
+				"Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_released_bytes"),
+				"Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("heap_objects"),
+				"Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("stack_inuse_bytes"),
+				"Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("stack_sys_bytes"),
+				"Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mspan_inuse_bytes"),
+				"Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mspan_sys_bytes"),
+				"Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mcache_inuse_bytes"),
+				"Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("mcache_sys_bytes"),
+				"Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("buck_hash_sys_bytes"),
+				"Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("gc_sys_bytes"),
+				"Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("other_sys_bytes"),
+				"Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+			valType: GaugeValue,
+		}, {
+			desc: NewDesc(
+				memstatNamespace("next_gc_bytes"),
+				"Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
+				nil, nil,
+			),
+			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+			valType: GaugeValue,
+		},
+	}
+}
+
+type baseGoCollector struct {
+	goroutinesDesc *Desc
+	threadsDesc    *Desc
+	gcDesc         *Desc
+	gcLastTimeDesc *Desc
+	goInfoDesc     *Desc
+}
+
+func newBaseGoCollector() baseGoCollector {
+	return baseGoCollector{
+		goroutinesDesc: NewDesc(
+			"go_goroutines",
+			"Number of goroutines that currently exist.",
+			nil, nil),
+		threadsDesc: NewDesc(
+			"go_threads",
+			"Number of OS threads created.",
+			nil, nil),
+		gcDesc: NewDesc(
+			"go_gc_duration_seconds",
+			"A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
+			nil, nil),
+		gcLastTimeDesc: NewDesc(
+			"go_memstats_last_gc_time_seconds",
+			"Number of seconds since 1970 of last garbage collection.",
+			nil, nil),
+		goInfoDesc: NewDesc(
+			"go_info",
+			"Information about the Go environment.",
+			nil, Labels{"version": runtime.Version()}),
+	}
+}
+
+// Describe returns all descriptions of the collector.
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
+	ch <- c.goroutinesDesc
+	ch <- c.threadsDesc
+	ch <- c.gcDesc
+	ch <- c.gcLastTimeDesc
+	ch <- c.goInfoDesc
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
+	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
+
+	n := getRuntimeNumThreads()
+	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
+
+	var stats debug.GCStats
+	stats.PauseQuantiles = make([]time.Duration, 5)
+	debug.ReadGCStats(&stats)
+
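+	// With 5 slots requested, ReadGCStats fills PauseQuantiles with the
+	// minimum, the 25th, 50th, and 75th percentiles, and the maximum of
+	// the pause times. Slot k (k >= 1) thus corresponds to quantile
+	// k/(len-1), and slot 0 to the 0.0 quantile.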
+	quantiles := make(map[float64]float64)
+	for idx, pq := range stats.PauseQuantiles[1:] {
+		quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
+	}
+	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
+	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+	ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
+	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+}
+
+func memstatNamespace(s string) string {
+	return "go_memstats_" + s
+}
+
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
+type memStatsMetrics []struct {
+	desc    *Desc
+	eval    func(*runtime.MemStats) float64
+	valType ValueType
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 0000000..897a6e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,122 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+	"runtime"
+	"sync"
+	"time"
+)
+
+type goCollector struct {
+	base baseGoCollector
+
+	// ms... are memstats related.
+	msLast          *runtime.MemStats // Previously collected memstats.
+	msLastTimestamp time.Time
+	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
+	msMetrics       memStatsMetrics
+	msRead          func(*runtime.MemStats) // For mocking in tests.
+	msMaxWait       time.Duration           // Wait time for fresh memstats.
+	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+	msMetrics := goRuntimeMemStats()
+	msMetrics = append(msMetrics, struct {
+		desc    *Desc
+		eval    func(*runtime.MemStats) float64
+		valType ValueType
+	}{
+		// This metric is omitted in Go1.17+, see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+		desc: NewDesc(
+			memstatNamespace("gc_cpu_fraction"),
+			"The fraction of this program's available CPU time used by the GC since the program started.",
+			nil, nil,
+		),
+		eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+		valType: GaugeValue,
+	})
+	return &goCollector{
+		base:      newBaseGoCollector(),
+		msLast:    &runtime.MemStats{},
+		msRead:    runtime.ReadMemStats,
+		msMaxWait: time.Second,
+		msMaxAge:  5 * time.Minute,
+		msMetrics: msMetrics,
+	}
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	var (
+		ms   = &runtime.MemStats{}
+		done = make(chan struct{})
+	)
+	// Start reading memstats first as it might take a while.
+	go func() {
+		c.msRead(ms)
+		c.msMtx.Lock()
+		c.msLast = ms
+		c.msLastTimestamp = time.Now()
+		c.msMtx.Unlock()
+		close(done)
+	}()
+
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	timer := time.NewTimer(c.msMaxWait)
+	select {
+	case <-done: // Our own ReadMemStats succeeded in time. Use it.
+		timer.Stop() // Important for high collection frequencies to not pile up timers.
+		c.msCollect(ch, ms)
+		return
+	case <-timer.C: // Time out, use last memstats if possible. Continue below.
+	}
+	c.msMtx.Lock()
+	if time.Since(c.msLastTimestamp) < c.msMaxAge {
+		// Last memstats are recent enough. Collect from them under the lock.
+		c.msCollect(ch, c.msLast)
+		c.msMtx.Unlock()
+		return
+	}
+	// If we are here, the last memstats are too old or don't exist. We have
+	// to wait until our own ReadMemStats finally completes. For that to
+	// happen, we have to release the lock.
+	c.msMtx.Unlock()
+	<-done
+	c.msCollect(ch, ms)
+}
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+	for _, i := range c.msMetrics {
+		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+	}
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
new file mode 100644
index 0000000..6b86847
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -0,0 +1,574 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"runtime/metrics"
+	"strings"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	// constants for strings referenced more than once.
+	goGCHeapTinyAllocsObjects               = "/gc/heap/tiny/allocs:objects"
+	goGCHeapAllocsObjects                   = "/gc/heap/allocs:objects"
+	goGCHeapFreesObjects                    = "/gc/heap/frees:objects"
+	goGCHeapFreesBytes                      = "/gc/heap/frees:bytes"
+	goGCHeapAllocsBytes                     = "/gc/heap/allocs:bytes"
+	goGCHeapObjects                         = "/gc/heap/objects:objects"
+	goGCHeapGoalBytes                       = "/gc/heap/goal:bytes"
+	goMemoryClassesTotalBytes               = "/memory/classes/total:bytes"
+	goMemoryClassesHeapObjectsBytes         = "/memory/classes/heap/objects:bytes"
+	goMemoryClassesHeapUnusedBytes          = "/memory/classes/heap/unused:bytes"
+	goMemoryClassesHeapReleasedBytes        = "/memory/classes/heap/released:bytes"
+	goMemoryClassesHeapFreeBytes            = "/memory/classes/heap/free:bytes"
+	goMemoryClassesHeapStacksBytes          = "/memory/classes/heap/stacks:bytes"
+	goMemoryClassesOSStacksBytes            = "/memory/classes/os-stacks:bytes"
+	goMemoryClassesMetadataMSpanInuseBytes  = "/memory/classes/metadata/mspan/inuse:bytes"
+	goMemoryClassesMetadataMSPanFreeBytes   = "/memory/classes/metadata/mspan/free:bytes"
+	goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+	goMemoryClassesMetadataMCacheFreeBytes  = "/memory/classes/metadata/mcache/free:bytes"
+	goMemoryClassesProfilingBucketsBytes    = "/memory/classes/profiling/buckets:bytes"
+	goMemoryClassesMetadataOtherBytes       = "/memory/classes/metadata/other:bytes"
+	goMemoryClassesOtherBytes               = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to
+// populate the goRuntimeMemStats-like metrics.
+var rmNamesForMemStatsMetrics = []string{
+	goGCHeapTinyAllocsObjects,
+	goGCHeapAllocsObjects,
+	goGCHeapFreesObjects,
+	goGCHeapAllocsBytes,
+	goGCHeapObjects,
+	goGCHeapGoalBytes,
+	goMemoryClassesTotalBytes,
+	goMemoryClassesHeapObjectsBytes,
+	goMemoryClassesHeapUnusedBytes,
+	goMemoryClassesHeapReleasedBytes,
+	goMemoryClassesHeapFreeBytes,
+	goMemoryClassesHeapStacksBytes,
+	goMemoryClassesOSStacksBytes,
+	goMemoryClassesMetadataMSpanInuseBytes,
+	goMemoryClassesMetadataMSPanFreeBytes,
+	goMemoryClassesMetadataMCacheInuseBytes,
+	goMemoryClassesMetadataMCacheFreeBytes,
+	goMemoryClassesProfilingBucketsBytes,
+	goMemoryClassesMetadataOtherBytes,
+	goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+	ret := make([]metrics.Description, 0, len(lookup))
+	for _, rm := range metrics.All() {
+		for _, m := range lookup {
+			if m == rm.Name {
+				ret = append(ret, rm)
+			}
+		}
+	}
+	return ret
+}
+
+type goCollector struct {
+	base baseGoCollector
+
+	// mu protects updates to all fields ensuring a consistent
+	// snapshot is always produced by Collect.
+	mu sync.Mutex
+
+	// Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+	sampleBuf []metrics.Sample
+	// sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+	sampleMap map[string]*metrics.Sample
+
+	// rmExposedMetrics represents all runtime/metrics package metrics
+	// that were configured to be exposed.
+	rmExposedMetrics     []collectorMetric
+	rmExactSumMapForHist map[string]string
+
+	// With Go 1.17, the runtime/metrics package was introduced.
+	// From that point on, metric names produced by the runtime/metrics
+	// package could be generated from runtime/metrics names. However,
+	// these differ from the old names for the same values.
+	//
+	// This field exists to export the same values under the old names
+	// as well.
+	msMetrics        memStatsMetrics
+	msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+	metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+	var descs []rmMetricDesc
+	for _, d := range metrics.All() {
+		var (
+			deny = true
+			desc rmMetricDesc
+		)
+
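+		// Rules are evaluated in order; the last rule whose matcher
+		// matches d.Name decides whether the metric is denied. Metrics
+		// matching no rule stay denied (deny defaults to true).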
+		for _, r := range rules {
+			if !r.Matcher.MatchString(d.Name) {
+				continue
+			}
+			deny = r.Deny
+		}
+		if deny {
+			continue
+		}
+
+		desc.Description = d
+		descs = append(descs, desc)
+	}
+	return descs
+}
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+	return internal.GoCollectorOptions{
+		RuntimeMetricSumForHist: map[string]string{
+			"/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+			"/gc/heap/frees-by-size:bytes":  goGCHeapFreesBytes,
+		},
+		RuntimeMetricRules: []internal.GoCollectorRule{
+			// Recommended metrics we want by default from runtime/metrics.
+			{Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+		},
+	}
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+	opt := defaultGoCollectorOptions()
+	for _, o := range opts {
+		o(&opt)
+	}
+
+	exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+	// Collect all histogram samples so that we can get their buckets.
+	// The API guarantees that the buckets are always fixed for the lifetime
+	// of the process.
+	var histograms []metrics.Sample
+	for _, d := range exposedDescriptions {
+		if d.Kind == metrics.KindFloat64Histogram {
+			histograms = append(histograms, metrics.Sample{Name: d.Name})
+		}
+	}
+
+	if len(histograms) > 0 {
+		metrics.Read(histograms)
+	}
+
+	bucketsMap := make(map[string][]float64)
+	for i := range histograms {
+		bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+	}
+
+	// Generate a collector for each exposed runtime/metrics metric.
+	metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+	// sampleBuf is used for reading from runtime/metrics.
+	// We size it for the largest case up front so that it never reallocates,
+	// which keeps the pointers stored in sampleMap stable.
+	sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+	sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+	for _, d := range exposedDescriptions {
+		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+		if !ok {
+			// Just ignore this metric; we can't do anything with it here.
+			// If a user decides to use the latest version of Go, we don't want
+			// to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+			continue
+		}
+		help := attachOriginalName(d.Description.Description, d.Name)
+
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+		var m collectorMetric
+		if d.Kind == metrics.KindFloat64Histogram {
+			_, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+			unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+			m = newBatchHistogram(
+				NewDesc(
+					BuildFQName(namespace, subsystem, name),
+					help,
+					nil,
+					nil,
+				),
+				internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+				hasSum,
+			)
+		} else if d.Cumulative {
+			m = NewCounter(CounterOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      help,
+			},
+			)
+		} else {
+			m = NewGauge(GaugeOpts{
+				Namespace: namespace,
+				Subsystem: subsystem,
+				Name:      name,
+				Help:      help,
+			})
+		}
+		metricSet = append(metricSet, m)
+	}
+
+	// Add exact sum metrics to sampleBuf if not added before.
+	for _, h := range histograms {
+		sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+		if !ok {
+			continue
+		}
+
+		if _, ok := sampleMap[sumMetric]; ok {
+			continue
+		}
+		sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+		sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+	}
+
+	var (
+		msMetrics      memStatsMetrics
+		msDescriptions []metrics.Description
+	)
+
+	if !opt.DisableMemStatsLikeMetrics {
+		msMetrics = goRuntimeMemStats()
+		msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+		// Check if metric was not exposed before and if not, add to sampleBuf.
+		for _, mdDesc := range msDescriptions {
+			if _, ok := sampleMap[mdDesc.Name]; ok {
+				continue
+			}
+			sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+			sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+		}
+	}
+
+	return &goCollector{
+		base:                 newBaseGoCollector(),
+		sampleBuf:            sampleBuf,
+		sampleMap:            sampleMap,
+		rmExposedMetrics:     metricSet,
+		rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+		msMetrics:            msMetrics,
+		msMetricsEnabled:     !opt.DisableMemStatsLikeMetrics,
+	}
+}
+
+func attachOriginalName(desc, origName string) string {
+	return fmt.Sprintf("%s Sourced from %s.", desc, origName)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+	c.base.Describe(ch)
+	for _, i := range c.msMetrics {
+		ch <- i.desc
+	}
+	for _, m := range c.rmExposedMetrics {
+		ch <- m.Desc()
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+	// Collect base non-memory metrics.
+	c.base.Collect(ch)
+
+	if len(c.sampleBuf) == 0 {
+		return
+	}
+
+	// Collect must be thread-safe, so prevent concurrent use of
+	// sampleBuf elements. Just read into sampleBuf but write all the data
+	// we get into our Metrics or MemStats.
+	//
+	// This lock also ensures that the Metrics we send out are all from
+	// the same updates, ensuring their mutual consistency insofar as
+	// is guaranteed by the runtime/metrics package.
+	//
+	// N.B. This locking is heavy-handed, but Collect is expected to be called
+	// relatively infrequently. Also the core operation here, metrics.Read,
+	// is fast (O(tens of microseconds)) so contention should certainly be
+	// low, though channel operations and any allocations may add to that.
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	// Populate runtime/metrics sample buffer.
+	metrics.Read(c.sampleBuf)
+
+	// Collect all the runtime/metrics the user chose to expose from sampleBuf (if any).
+	for i, metric := range c.rmExposedMetrics {
+		// We created samples for exposed metrics first in order, so indexes match.
+		sample := c.sampleBuf[i]
+
+		// N.B. switch on concrete type because it's significantly more efficient
+		// than checking for the Counter and Gauge interface implementations. In
+		// this case, we control all the types here.
+		switch m := metric.(type) {
+		case *counter:
+			// Guard against decreases. This should never happen, but a failure
+			// to do so will result in a panic, which is a harsh consequence for
+			// a metrics collection bug.
+			v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+			if v1 > v0 {
+				m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+			}
+			m.Collect(ch)
+		case *gauge:
+			m.Set(unwrapScalarRMValue(sample.Value))
+			m.Collect(ch)
+		case *batchHistogram:
+			m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+			m.Collect(ch)
+		default:
+			panic("unexpected metric type")
+		}
+	}
+
+	if c.msMetricsEnabled {
+		// ms is a dummy MemStats that we populate ourselves so that we can
+		// populate the old metrics from it if goMemStatsCollection is enabled.
+		var ms runtime.MemStats
+		memStatsFromRM(&ms, c.sampleMap)
+		for _, i := range c.msMetrics {
+			ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+		}
+	}
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+	switch v.Kind() {
+	case metrics.KindUint64:
+		return float64(v.Uint64())
+	case metrics.KindFloat64:
+		return v.Float64()
+	case metrics.KindBad:
+		// Unsupported metric.
+		//
+		// This should never happen because we always populate our metric
+		// set from the runtime/metrics package.
+		panic("unexpected bad kind metric")
+	default:
+		// Unsupported metric kind.
+		//
+		// This should never happen because we check for this during initialization
+		// and flag and filter metrics whose kinds we don't understand.
+		panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
+	}
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if
+// no exact sum is available.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+	sumName, ok := c.rmExactSumMapForHist[rmName]
+	if !ok {
+		return 0
+	}
+	s, ok := c.sampleMap[sumName]
+	if !ok {
+		return 0
+	}
+	return unwrapScalarRMValue(s.Value)
+}
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+	lookupOrZero := func(name string) uint64 {
+		if s, ok := rm[name]; ok {
+			return s.Value.Uint64()
+		}
+		return 0
+	}
+
+	// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+	// The reason is that MemStats couldn't be extended at the time,
+	// but there was a desire to have Mallocs at least be a little more representative,
+	// while having Mallocs - Frees still represent a live object count.
+	// Unfortunately, MemStats doesn't actually export a large allocation count,
+	// so it's impossible to pull this number out directly.
+	tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+	ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+	ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+	ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+	ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+	ms.Lookups = 0 // Already always zero.
+	ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
+	ms.Alloc = ms.HeapAlloc
+	ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+	ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+	ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
+	ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+	ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+	ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+	ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+	ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+	ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+	ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+	ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+	ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+	ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+	ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+	ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
+
+	// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+	// and often misleading due to the fact that it's an average over the lifetime
+	// of the process.
+	// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+	// for more details.
+	ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+	selfCollector
+
+	// Static fields updated only once.
+	desc   *Desc
+	hasSum bool
+
+	// Because this histogram operates in batches, it just uses a
+	// single mutex for everything. updates are always serialized
+	// but Write calls may operate concurrently with updates.
+	// Contention between these two sources should be rare.
+	mu      sync.Mutex
+	buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+	counts  []uint64
+	sum     float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+	// We need to remove -Inf values. runtime/metrics keeps them around,
+	// but a -Inf bucket is not allowed for Prometheus histograms.
+	if buckets[0] == math.Inf(-1) {
+		buckets = buckets[1:]
+	}
+	h := &batchHistogram{
+		desc:    desc,
+		buckets: buckets,
+		// Because buckets follows runtime/metrics conventions, there's
+		// 1 more value in the buckets list than there are buckets represented,
+		// because in runtime/metrics, the bucket values represent *boundaries*,
+		// and non-Inf boundaries are inclusive lower bounds for that bucket.
+		counts: make([]uint64, len(buckets)-1),
+		hasSum: hasSum,
+	}
+	h.init(h)
+	return h
+}
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+	counts, buckets := his.Counts, his.Buckets
+
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	// Clear buckets.
+	for i := range h.counts {
+		h.counts[i] = 0
+	}
+	// Copy and reduce buckets.
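+	// h.buckets is a subset of his.Buckets, so fold the counts of the
+	// finer-grained source buckets into h.counts[j] until the source
+	// boundary coincides with the next boundary in h.buckets.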
+	var j int
+	for i, count := range counts {
+		h.counts[j] += count
+		if buckets[i+1] == h.buckets[j+1] {
+			j++
+		}
+	}
+	if h.hasSum {
+		h.sum = sum
+	}
+}
+
+func (h *batchHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+
+	sum := float64(0)
+	if h.hasSum {
+		sum = h.sum
+	}
+	dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+	totalCount := uint64(0)
+	for i, count := range h.counts {
+		totalCount += count
+		if !h.hasSum {
+			if count != 0 {
+				// N.B. This computed sum is an underestimate.
+				sum += h.buckets[i] * float64(count)
+			}
+		}
+
+		// Skip the +Inf bucket, but only for the bucket list.
+		// It must still count for sum and totalCount.
+		if math.IsInf(h.buckets[i+1], 1) {
+			break
+		}
+		// Float64Histogram's upper bound is exclusive, so make it inclusive
+		// by obtaining the next float64 value down.
+		upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+		dtoBuckets = append(dtoBuckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(totalCount),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+	out.Histogram = &dto.Histogram{
+		Bucket:      dtoBuckets,
+		SampleCount: proto.Uint64(totalCount),
+		SampleSum:   proto.Float64(sum),
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
new file mode 100644
index 0000000..c453b75
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -0,0 +1,2056 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+const (
+	nativeHistogramSchemaMaximum = 8
+	nativeHistogramSchemaMinimum = -4
+)
+
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+	// Schema "0":
+	{0.5},
+	// Schema 1:
+	{0.5, 0.7071067811865475},
+	// Schema 2:
+	{0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+	// Schema 3:
+	{
+		0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+		0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+	},
+	// Schema 4:
+	{
+		0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+		0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+		0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+		0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+	},
+	// Schema 5:
+	{
+		0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+		0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+		0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+		0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+		0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+		0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+		0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+		0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+	},
+	// Schema 6:
+	{
+		0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+		0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+		0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+		0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+		0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+		0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+		0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+		0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+		0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+		0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+		0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+		0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+		0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+		0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+		0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+		0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+	},
+	// Schema 7:
+	{
+		0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+		0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+		0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+		0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+		0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+		0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+		0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+		0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+		0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+		0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+		0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+		0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+		0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+		0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+		0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+		0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+		0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+		0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+		0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+		0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+		0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+		0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+		0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+		0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+		0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+		0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+		0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+		0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+		0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+		0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+		0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+		0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+	},
+	// Schema 8:
+	{
+		0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+		0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+		0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+		0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+		0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+		0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+		0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+		0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+		0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+		0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+		0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+		0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+		0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+		0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+		0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+		0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+		0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+		0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+		0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+		0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+		0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+		0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+		0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+		0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+		0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+		0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+		0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+		0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+		0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+		0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+		0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+		0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+		0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+		0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+		0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+		0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+		0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+		0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+		0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+		0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+		0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+		0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+		0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+		0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+		0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+		0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+		0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+		0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+		0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+		0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+		0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+		0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+		0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+		0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+		0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+		0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+		0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+		0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+		0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+		0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+		0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+		0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+		0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+		0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+	},
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to decide whether we are fine with
+// that, or whether we would prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// 	// Populate nativeHistogramBounds.
+// 	numBuckets := 1
+// 	for i := range nativeHistogramBounds {
+// 		bounds := []float64{0.5}
+// 		factor := math.Exp2(math.Exp2(float64(-i)))
+// 		for j := 0; j < numBuckets-1; j++ {
+// 			var bound float64
+// 			if (j+1)%2 == 0 {
+// 				// Use previously calculated value for increased precision.
+// 				bound = nativeHistogramBounds[i-1][j/2+1]
+// 			} else {
+// 				bound = bounds[j] * factor
+// 			}
+// 			bounds = append(bounds, bound)
+// 		}
+// 		numBuckets *= 2
+// 		nativeHistogramBounds[i] = bounds
+// 	}
+// }
+
+// A Histogram counts individual observations from an event or sample stream in
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
+//
+// On the Prometheus server, quantiles can be calculated from a Histogram using
+// the histogram_quantile PromQL function.
+//
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
+//
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
+//
+// To create Histogram instances, use NewHistogram.
+type Histogram interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the histogram. Observations are
+	// usually positive or zero. Negative observations are accepted but
+	// prevent current versions of Prometheus from properly detecting
+	// counter resets in the sum of observations. (The experimental Native
+	// Histograms handle negative observations properly.) See
+	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+	// for details.
+	Observe(float64)
+}
+
+// bucketLabel is used for the label that defines the upper bound of a
+// bucket of a histogram ("le" -> "less or equal").
+const bucketLabel = "le"
+
+// DefBuckets are the default Histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in histograms", bucketLabel,
+)
+
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+	if count < 1 {
+		panic("LinearBuckets needs a positive count")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start += width
+	}
+	return buckets
+}
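+
+// For example (illustrative only), LinearBuckets(10, 5, 4) returns
+//
+//	[]float64{10, 15, 20, 25}
+//
+// which can be used as the Buckets field of HistogramOpts.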
+
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBuckets needs a positive count")
+	}
+	if start <= 0 {
+		panic("ExponentialBuckets needs a positive start value")
+	}
+	if factor <= 1 {
+		panic("ExponentialBuckets needs a factor greater than 1")
+	}
+	buckets := make([]float64, count)
+	for i := range buckets {
+		buckets[i] = start
+		start *= factor
+	}
+	return buckets
+}
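+
+// For example (illustrative only), ExponentialBuckets(1, 2, 5) returns
+//
+//	[]float64{1, 2, 4, 8, 16}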
+
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'minBucket' and the highest bucket is 'maxBucket'. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'minBucket' is 0 or
+// negative.
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBucketsRange count needs a positive count")
+	}
+	if minBucket <= 0 {
+		panic("ExponentialBucketsRange min needs to be greater than 0")
+	}
+
+	// Formula for exponential buckets:
+	// maxBucket = minBucket * growthFactor^(count-1)
+
+	// We know minBucket, maxBucket, and count. Solve for growthFactor.
+	growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
+
+	// Now that we know growthFactor, solve for each bucket.
+	buckets := make([]float64, count)
+	for i := 1; i <= count; i++ {
+		buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
+	}
+	return buckets
+}
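+
+// For example (illustrative only), ExponentialBucketsRange(1, 100, 3) solves
+// growthFactor = (100/1)^(1/(3-1)) = 10 and returns
+//
+//	[]float64{1, 10, 100}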
+
+// HistogramOpts bundles the options for creating a Histogram metric. It is
+// mandatory to set Name to a non-empty string. All other fields are optional
+// and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type HistogramOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Histogram (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Histogram must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Histogram.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Buckets defines the buckets into which observations are counted. Each
+	// element in the slice is the upper inclusive bound of a bucket. The
+	// values must be sorted in strictly increasing order. There is no need
+	// to add a highest bucket with +Inf bound, it will be added
+	// implicitly. If Buckets is left as nil or set to a slice of length
+	// zero, it is replaced by default buckets. The default buckets are
+	// DefBuckets if no buckets for a native histogram (see below) are used,
+	// otherwise the default is no buckets. (In other words, if you want to
+	// use both regular buckets and buckets for a native histogram, you have
+	// to define the regular buckets here explicitly.)
+	Buckets []float64
+
+	// If NativeHistogramBucketFactor is greater than one, so-called sparse
+	// buckets are used (in addition to the regular buckets, if defined
+	// above). A Histogram with sparse buckets will be ingested as a Native
+	// Histogram by a Prometheus server with that feature enabled (requires
+	// Prometheus v2.40+). Sparse buckets are exponential buckets covering
+	// the whole float64 range (with the exception of the “zero” bucket, see
+	// NativeHistogramZeroThreshold below). From any one bucket to the next,
+	// the width of the bucket grows by a constant
+	// factor. NativeHistogramBucketFactor provides an upper bound for this
+	// factor (exception see below). The smaller
+	// NativeHistogramBucketFactor, the more buckets will be used and thus
+	// the more costly the histogram will become. A generally good trade-off
+	// between cost and accuracy is a value of 1.1 (each bucket is at most
+	// 10% wider than the previous one), which will result in each power of
+	// two divided into 8 buckets (e.g. there will be 8 buckets between 1
+	// and 2, same as between 2 and 4, and 4 and 8, etc.).
+	//
+	// Details about the actually used factor: The factor is calculated as
+	// 2^(2^-n), where n is an integer number between (and including) -4 and
+	// 8. n is chosen so that the resulting factor is the largest that is
+	// still smaller or equal to NativeHistogramBucketFactor. Note that the
+	// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+	// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+	// 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+	// it is larger than the provided NativeHistogramBucketFactor.
+	//
+	// NOTE: Native Histograms are still an experimental feature. Their
+	// behavior might still change without a major version
+	// bump. Subsequently, all NativeHistogram... options here might still
+	// change their behavior or name (or might completely disappear) without
+	// a major version bump.
+	NativeHistogramBucketFactor float64
+	// All observations with an absolute value of less or equal
+	// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+	// For best results, this should be close to a bucket boundary. This is
+	// usually the case if picking a power of two. If
+	// NativeHistogramZeroThreshold is left at zero,
+	// DefNativeHistogramZeroThreshold is used as the threshold. To
+	// configure a zero bucket with an actual threshold of zero (i.e. only
+	// observations of precisely zero will go into the zero bucket), set
+	// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+	// constant (or any negative float value).
+	NativeHistogramZeroThreshold float64
+
+	// The next three fields define a strategy to limit the number of
+	// populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+	// at zero, the number of buckets is not limited. (Note that this might
+	// lead to unbounded memory consumption if the values observed by the
+	// Histogram are sufficiently wide-spread. In particular, this could be
+	// used as a DoS attack vector. Where the observed values depend on
+	// external inputs, it is highly recommended to set a
+	// NativeHistogramMaxBucketNumber.) Once the set
+	// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+	// enacted:
+	//  - First, if the last reset (or the creation) of the histogram is at
+	//    least NativeHistogramMinResetDuration ago, then the whole
+	//    histogram is reset to its initial state (including regular
+	//    buckets).
+	//  - If less time has passed, or if NativeHistogramMinResetDuration is
+	//    zero, no reset is performed. Instead, the zero threshold is
+	//    increased sufficiently to reduce the number of buckets to or below
+	//    NativeHistogramMaxBucketNumber, but not to more than
+	//    NativeHistogramMaxZeroThreshold. Thus, if
+	//    NativeHistogramMaxZeroThreshold is already at or below the current
+	//    zero threshold, nothing happens at this step.
+	//  - After that, if the number of buckets still exceeds
+	//    NativeHistogramMaxBucketNumber, the resolution of the histogram is
+	//    reduced by doubling the width of the sparse buckets (up to a
+	//    growth factor between one bucket to the next of 2^(2^4) = 65536,
+	//    see above).
+	//  - Any increased zero threshold or reduced resolution is reset back
+	//    to their original values once NativeHistogramMinResetDuration has
+	//    passed (since the last reset or the creation of the histogram).
+	NativeHistogramMaxBucketNumber  uint32
+	NativeHistogramMinResetDuration time.Duration
+	NativeHistogramMaxZeroThreshold float64
+
+	// NativeHistogramMaxExemplars limits the number of exemplars
+	// that are kept in memory for each native histogram. If you leave it at
+	// zero, a default value of 10 is used. If no exemplars should be kept specifically
+	// for native histograms, set it to a negative value. (Scrapers can
+	// still use the exemplars exposed for classic buckets, which are managed
+	// independently.)
+	NativeHistogramMaxExemplars int
+	// NativeHistogramExemplarTTL is only checked once
+	// NativeHistogramMaxExemplars is exceeded. In that case, the
+	// oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+	// Otherwise, the older exemplar in the pair of exemplars that are closest
+	// together (on an exponential scale) is removed.
+	// If NativeHistogramExemplarTTL is left at its zero value, a default value of
+	// 5m is used. To always delete the oldest exemplar, set it to a negative value.
+	NativeHistogramExemplarTTL time.Duration
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
+}
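+
+// As an illustrative sketch (names and values are made up), a histogram
+// exposing both classic buckets and experimental sparse buckets could be
+// configured like this:
+//
+//	opts := prometheus.HistogramOpts{
+//		Name:                            "http_request_duration_seconds",
+//		Help:                            "HTTP request latencies.",
+//		Buckets:                         prometheus.DefBuckets, // classic buckets
+//		NativeHistogramBucketFactor:     1.1,                   // actual factor 2^(2^-3) ≈ 1.0905
+//		NativeHistogramMaxBucketNumber:  100,
+//		NativeHistogramMinResetDuration: time.Hour,
+//	}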
+
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields.
+// VariableLabels is optional and can safely be left at its default value.
+type HistogramVecOpts struct {
+	HistogramOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
+// panics if the buckets in HistogramOpts are not in strictly increasing order.
+//
+// The returned implementation also implements ExemplarObserver. It is safe to
+// perform the corresponding type assertion. Exemplars are tracked separately
+// for each bucket.
+func NewHistogram(opts HistogramOpts) Histogram {
+	return newHistogram(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
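+
+// A minimal usage sketch (illustrative only):
+//
+//	reqDur := prometheus.NewHistogram(prometheus.HistogramOpts{
+//		Name:    "request_duration_seconds",
+//		Help:    "Request latencies.",
+//		Buckets: prometheus.LinearBuckets(0.05, 0.05, 10),
+//	})
+//	prometheus.MustRegister(reqDur)
+//	reqDur.Observe(0.42)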
+
+func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
+	if len(desc.variableLabels.names) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
+	}
+
+	for _, n := range desc.variableLabels.names {
+		if n == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == bucketLabel {
+			panic(errBucketLabelNotAllowed)
+		}
+	}
+
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	if opts.afterFunc == nil {
+		opts.afterFunc = time.AfterFunc
+	}
+
+	h := &histogram{
+		desc:                            desc,
+		upperBounds:                     opts.Buckets,
+		labelPairs:                      MakeLabelPairs(desc, labelValues),
+		nativeHistogramMaxBuckets:       opts.NativeHistogramMaxBucketNumber,
+		nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+		nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+		lastResetTime:                   opts.now(),
+		now:                             opts.now,
+		afterFunc:                       opts.afterFunc,
+	}
+	if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+		h.upperBounds = DefBuckets
+	}
+	if opts.NativeHistogramBucketFactor <= 1 {
+		h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+	} else {
+		switch {
+		case opts.NativeHistogramZeroThreshold > 0:
+			h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+		case opts.NativeHistogramZeroThreshold == 0:
+			h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+		} // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+		h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+		h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
+	}
+	for i, upperBound := range h.upperBounds {
+		if i < len(h.upperBounds)-1 {
+			if upperBound >= h.upperBounds[i+1] {
+				panic(fmt.Errorf(
+					"histogram buckets must be in increasing order: %f >= %f",
+					upperBound, h.upperBounds[i+1],
+				))
+			}
+		} else {
+			if math.IsInf(upperBound, +1) {
+				// The +Inf bucket is implicit. Remove it here.
+				h.upperBounds = h.upperBounds[:i]
+			}
+		}
+	}
+	// Finally we know the final length of h.upperBounds and can make buckets
+	// for both counts as well as exemplars:
+	h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+	atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
+	h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
+
+	h.init(h) // Init self-collection.
+	return h
+}
+
+type histogramCounts struct {
+	// Order in this struct matters for the alignment required by atomic
+	// operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations.
+	sumBits uint64
+	count   uint64
+
+	// nativeHistogramZeroBucket counts all (positive and negative)
+	// observations in the zero bucket (with an absolute value less than or
+	// equal to the current threshold, see next field).
+	nativeHistogramZeroBucket uint64
+	// nativeHistogramZeroThresholdBits is the bit pattern of the current
+	// threshold for the zero bucket. It's initially equal to
+	// nativeHistogramZeroThreshold but may change according to the bucket
+	// count limitation strategy.
+	nativeHistogramZeroThresholdBits uint64
+	// nativeHistogramSchema may change over time according to the bucket
+	// count limitation strategy and therefore has to be saved here.
+	nativeHistogramSchema int32
+	// Number of (positive and negative) sparse buckets.
+	nativeHistogramBucketsNumber uint32
+
+	// Regular buckets.
+	buckets []uint64
+
+	// The sparse buckets for native histograms are implemented with a
+	// sync.Map for now. A dedicated data structure will likely be more
+	// efficient. There are separate maps for negative and positive
+	// observations. The map's value is an *int64, counting observations in
+	// that bucket. (Note that we don't use uint64 as an int64 won't
+	// overflow in practice, and working with signed numbers from the
+	// beginning simplifies the handling of deltas.) The map's key is the
+	// index of the bucket according to the used
+	// nativeHistogramSchema. Index 0 is for an upper bound of 1.
+	nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be
+// updated, too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+	if bucket < len(hc.buckets) {
+		atomic.AddUint64(&hc.buckets[bucket], 1)
+	}
+	atomicAddFloat(&hc.sumBits, v)
+	if doSparse && !math.IsNaN(v) {
+		var (
+			key                  int
+			schema               = atomic.LoadInt32(&hc.nativeHistogramSchema)
+			zeroThreshold        = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+			bucketCreated, isInf bool
+		)
+		if math.IsInf(v, 0) {
+			// Pretend v is MaxFloat64 but later increment key by one.
+			if math.IsInf(v, +1) {
+				v = math.MaxFloat64
+			} else {
+				v = -math.MaxFloat64
+			}
+			isInf = true
+		}
+		frac, exp := math.Frexp(math.Abs(v))
+		if schema > 0 {
+			bounds := nativeHistogramBounds[schema]
+			key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+		} else {
+			key = exp
+			if frac == 0.5 {
+				key--
+			}
+			offset := (1 << -schema) - 1
+			key = (key + offset) >> -schema
+		}
+		if isInf {
+			key++
+		}
+		switch {
+		case v > zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+		case v < -zeroThreshold:
+			bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+		default:
+			atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+		}
+		if bucketCreated {
+			atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hc.count, 1)
+}
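+
+// A worked example of the sparse-bucket key computation above (illustrative
+// only): observing v = 3.0 gives frac, exp := math.Frexp(3.0), i.e.
+// frac = 0.75 and exp = 2. With schema 0, the else branch yields
+// key = exp = 2, the bucket (2^1, 2^2] = (2, 4]. With schema 1 (bounds
+// {0.5, 0.7071...}), key = SearchFloat64s(bounds, 0.75) + (exp-1)*2 = 2 + 2
+// = 4, the narrower bucket (2^1.5, 2^2] ≈ (2.83, 4].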
+
+type histogram struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finish by incrementing the count field in the respective
+	// histogramCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the histogram) swap the hot and cold counts under the
+	// mtx lock. A cooldown is awaited (while locked) by comparing the
+	// number of observations with the initiation count. Once they match,
+	// the last observation on the now cool one has completed. All cold
+	// fields must be merged into the new hot before releasing mtx.
+	//
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc *Desc
+
+	// Only used in the Write method and for sparse bucket management.
+	mtx sync.Mutex
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the histogramCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*histogramCounts
+
+	upperBounds                     []float64
+	labelPairs                      []*dto.LabelPair
+	exemplars                       []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+	nativeHistogramSchema           int32          // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+	nativeHistogramZeroThreshold    float64        // The initial zero threshold.
+	nativeHistogramMaxZeroThreshold float64
+	nativeHistogramMaxBuckets       uint32
+	nativeHistogramMinResetDuration time.Duration
+	// lastResetTime is protected by mtx. It is also used as the created
+	// timestamp.
+	lastResetTime time.Time
+	// resetScheduled is protected by mtx. It is true if a reset is
+	// scheduled for a later time (when nativeHistogramMinResetDuration has
+	// passed).
+	resetScheduled  bool
+	nativeExemplars nativeExemplars
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+
+	// afterFunc is for testing purposes, by default it's time.AfterFunc.
+	afterFunc func(time.Duration, func()) *time.Timer
+}
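+
+// To illustrate the countAndHotIdx encoding described above (not part of the
+// upstream code):
+//
+//	n := atomic.AddUint64(&h.countAndHotIdx, 1) // one more observation
+//	hotIdx := n >> 63                           // most significant bit selects counts[0] or counts[1]
+//	count := n & ((1 << 63) - 1)                // lower 63 bits: number of Observe calls
+//	coldIdx := (^n) >> 63                       // complement selects the other slot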
+
+func (h *histogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *histogram) Observe(v float64) {
+	h.observe(v, h.findBucket(v))
+}
+
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
+func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
+	i := h.findBucket(v)
+	h.observe(v, i)
+	h.updateExemplar(v, i, e)
+}
+
+func (h *histogram) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := h.counts[n>>63]
+	coldCounts := h.counts[(^n)>>63]
+
+	waitForCooldown(count, coldCounts)
+
+	his := &dto.Histogram{
+		Bucket:           make([]*dto.Bucket, len(h.upperBounds)),
+		SampleCount:      proto.Uint64(count),
+		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		CreatedTimestamp: timestamppb.New(h.lastResetTime),
+	}
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	var cumCount uint64
+	for i, upperBound := range h.upperBounds {
+		cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
+		his.Bucket[i] = &dto.Bucket{
+			CumulativeCount: proto.Uint64(cumCount),
+			UpperBound:      proto.Float64(upperBound),
+		}
+		if e := h.exemplars[i].Load(); e != nil {
+			his.Bucket[i].Exemplar = e.(*dto.Exemplar)
+		}
+	}
+	// If there is an exemplar for the +Inf bucket, we have to add that bucket explicitly.
+	if e := h.exemplars[len(h.upperBounds)].Load(); e != nil {
+		b := &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(math.Inf(1)),
+			Exemplar:        e.(*dto.Exemplar),
+		}
+		his.Bucket = append(his.Bucket, b)
+	}
+	if h.nativeHistogramSchema > math.MinInt32 {
+		his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+		his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+		zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
+
+		defer func() {
+			coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+			coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+		}()
+
+		his.ZeroCount = proto.Uint64(zeroBucket)
+		his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+		his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+		// Add a no-op span to a histogram without observations and with
+		// a zero threshold of zero. Otherwise, a native histogram would
+		// look like a classic histogram to scrapers.
+		if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+			his.PositiveSpan = []*dto.BucketSpan{{
+				Offset: proto.Int32(0),
+				Length: proto.Uint32(0),
+			}}
+		}
+
+		if h.nativeExemplars.isEnabled() {
+			h.nativeExemplars.Lock()
+			his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+			h.nativeExemplars.Unlock()
+		}
+
+	}
+	addAndResetCounts(hotCounts, coldCounts)
+	return nil
+}
+
+// findBucket returns the index of the bucket for the provided value, or
+// len(h.upperBounds) for the +Inf bucket.
+func (h *histogram) findBucket(v float64) int {
+	n := len(h.upperBounds)
+	if n == 0 {
+		return 0
+	}
+
+	// Early exit: if v is less than or equal to the first upper bound, return 0
+	if v <= h.upperBounds[0] {
+		return 0
+	}
+
+	// Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+	if v > h.upperBounds[n-1] {
+		return n
+	}
+
+	// For small arrays, use simple linear search.
+	// The "magic number" 35 is the result of tests on a couple of different
+	// (AWS and bare metal) servers; see more details here:
+	// https://github.com/prometheus/client_golang/pull/1662
+	if n < 35 {
+		for i, bound := range h.upperBounds {
+			if v <= bound {
+				return i
+			}
+		}
+		// If v is greater than all upper bounds, return len(h.upperBounds)
+		return n
+	}
+
+	// For larger arrays, use stdlib's binary search
+	return sort.SearchFloat64s(h.upperBounds, v)
+}
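+
+// For example (illustrative only), with h.upperBounds = DefBuckets,
+// findBucket(0.3) returns 6 (the bucket with upper bound 0.5), and
+// findBucket(42) returns len(h.upperBounds) = 11, i.e. the implicit
+// +Inf bucket.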
+
+// observe is the implementation for Observe without the findBucket part.
+func (h *histogram) observe(v float64, bucket int) {
+	// Do not add to sparse buckets for NaN observations.
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	// We increment h.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1)
+	hotCounts := h.counts[n>>63]
+	hotCounts.observe(v, bucket, doSparse)
+	if doSparse {
+		h.limitBuckets(hotCounts, v, bucket)
+	}
+}
+
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+	if h.nativeHistogramMaxBuckets == 0 {
+		return // No limit configured.
+	}
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded yet.
+	}
+
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	// The hot counts might have been swapped just before we acquired the
+	// lock. Re-fetch the hot counts first...
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hotCounts := h.counts[hotIdx]
+	coldCounts := h.counts[coldIdx]
+	// ...and then check again if we really have to reduce the bucket count.
+	if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+		return // Bucket limit not exceeded after all.
+	}
+	// Try the various strategies in order.
+	if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+		return
+	}
+	// One of the other strategies will happen. To undo what they will do as
+	// soon as enough time has passed to satisfy
+	// h.nativeHistogramMinResetDuration, schedule a reset at the right time
+	// if we haven't done so already.
+	if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+		h.resetScheduled = true
+		h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+	}
+
+	if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+		return
+	}
+	h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has been passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+	hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
+	// We are using the possibly mocked h.now() rather than
+	// time.Since(h.lastResetTime) to enable testing.
+	if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+		h.resetScheduled || // Do not interfere if a reset is already scheduled.
+		h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+		return false
+	}
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Repeat the latest observation to not lose it completely.
+	cold.observe(value, bucket, true)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	return true
+}
+
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+	h.mtx.Lock()
+	defer h.mtx.Unlock()
+
+	n := atomic.LoadUint64(&h.countAndHotIdx)
+	hotIdx := n >> 63
+	coldIdx := (^n) >> 63
+	hot := h.counts[hotIdx]
+	cold := h.counts[coldIdx]
+	// Completely reset coldCounts.
+	h.resetCounts(cold)
+	// Make coldCounts the new hot counts while resetting countAndHotIdx.
+	n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+	count := n & ((1 << 63) - 1)
+	waitForCooldown(count, hot)
+	// Finally, reset the formerly hot counts, too.
+	h.resetCounts(hot)
+	h.lastResetTime = h.now()
+	h.resetScheduled = false
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+	currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+	if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+		return false
+	}
+	// Find the key of the bucket closest to zero.
+	smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+	smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+	if smallestNegativeKey < smallestKey {
+		smallestKey = smallestNegativeKey
+	}
+	if smallestKey == math.MaxInt32 {
+		return false
+	}
+	newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+	if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+		return false // New threshold would exceed the max threshold.
+	}
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// Remove applicable buckets.
+	if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+		atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+	}
+	// Make cold counts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the new zero threshold in the cold counts, too...
+	atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+	// ...and then merge the newly deleted buckets into the wider zero
+	// bucket.
+	mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			if key == smallestKey {
+				// Merge into hot zero bucket...
+				atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+				// ...and delete from cold counts.
+				coldBuckets.Delete(key)
+				atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+			} else {
+				// Add to corresponding hot bucket...
+				if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+					atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+				}
+				// ...and reset cold bucket.
+				atomic.StoreInt64(bucket, 0)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+	return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+	coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+	if coldSchema == -4 {
+		return // Already at lowest resolution.
+	}
+	coldSchema--
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// Play it simple and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+	// Make coldCounts the new hot counts.
+	n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+	count := n & ((1 << 63) - 1)
+	// Swap the pointer names to represent the new roles and make
+	// the rest less confusing.
+	hot, cold = cold, hot
+	waitForCooldown(count, cold)
+	// Add all the now cold counts to the new hot counts...
+	addAndResetCounts(hot, cold)
+	// ...adjust the schema in the cold counts, too...
+	atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+	// ...and then merge the cold buckets into the wider hot buckets.
+	merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+		return func(k, v interface{}) bool {
+			key := k.(int)
+			bucket := v.(*int64)
+			// Adjust key to match the bucket to merge into.
+			if key > 0 {
+				key++
+			}
+			key /= 2
+			// Add to corresponding hot bucket.
+			if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+				atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+			}
+			return true
+		}
+	}
+
+	cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+	cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+	// Play it simple again and just delete all cold buckets.
+	atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+	deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+	deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
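+
+// A worked example of the key adjustment above (illustrative only): after the
+// schema is decremented, each new bucket covers two old ones. Old keys 5 and
+// 6 both map to (5+1)/2 = (6+1)/2 = 3, and old keys -5 and -4 both map to
+// -5/2 = -4/2 = -2 (Go integer division truncates toward zero), so adjacent
+// bucket pairs are merged.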
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+	atomic.StoreUint64(&counts.sumBits, 0)
+	atomic.StoreUint64(&counts.count, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+	atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+	atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+	atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+	for i := range h.upperBounds {
+		atomic.StoreUint64(&counts.buckets[i], 0)
+	}
+	deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+	deleteSyncMap(&counts.nativeHistogramBucketsPositive)
+}
+
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With nil labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar is also cached into nativeExemplars,
+// which has a limit; one exemplar is removed when the limit is reached.
+func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
+	if l == nil {
+		return
+	}
+	e, err := newExemplar(v, h.now(), l)
+	if err != nil {
+		panic(err)
+	}
+	h.exemplars[bucket].Store(e)
+	doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+	if doSparse {
+		h.nativeExemplars.addExemplar(e)
+	}
+}
+
+// HistogramVec is a Collector that bundles a set of Histograms that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewHistogramVec.
+type HistogramVec struct {
+	*MetricVec
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
+// partitioned by the given label names.
+func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
+	return V2.NewHistogramVec(HistogramVecOpts{
+		HistogramOpts:  opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
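+
+// A minimal usage sketch (illustrative only):
+//
+//	reqDur := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//		Name:    "http_request_duration_seconds",
+//		Help:    "HTTP request latencies, partitioned by code and method.",
+//		Buckets: prometheus.DefBuckets,
+//	}, []string{"code", "method"})
+//	reqDur.WithLabelValues("200", "GET").Observe(0.021)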
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	return &HistogramVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			return newHistogram(desc, opts.HistogramOpts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
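+
+// An illustrative error-checked access (editor's sketch, reusing the
+// hypothetical reqDur vector from the example above):
+//
+//	obs, err := reqDur.GetMetricWithLabelValues("404", "GET")
+//	if err != nil {
+//		return fmt.Errorf("wrong label values: %w", err)
+//	}
+//	obs.Observe(0.42)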
+
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
+	h, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// With works as GetMetricWith but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *HistogramVec) With(labels Labels) Observer {
+	h, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return h
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the HistogramVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &HistogramVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
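+
+// An illustrative currying sketch (editor's example, reusing the hypothetical
+// reqDur vector):
+//
+//	getOnly := reqDur.MustCurryWith(prometheus.Labels{"method": "GET"})
+//	getOnly.WithLabelValues("200").Observe(0.07) // only "code" remains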
+
+type constHistogram struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	buckets    map[float64]uint64
+	labelPairs []*dto.LabelPair
+	createdTs  *timestamppb.Timestamp
+}
+
+func (h *constHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constHistogram) Write(out *dto.Metric) error {
+	his := &dto.Histogram{
+		CreatedTimestamp: h.createdTs,
+	}
+
+	buckets := make([]*dto.Bucket, 0, len(h.buckets))
+
+	his.SampleCount = proto.Uint64(h.count)
+	his.SampleSum = proto.Float64(h.sum)
+	for upperBound, count := range h.buckets {
+		buckets = append(buckets, &dto.Bucket{
+			CumulativeCount: proto.Uint64(count),
+			UpperBound:      proto.Float64(upperBound),
+		})
+	}
+
+	if len(buckets) > 0 {
+		sort.Sort(buckSort(buckets))
+	}
+	his.Bucket = buckets
+
+	out.Histogram = his
+	out.Label = h.labelPairs
+
+	return nil
+}
+
+// NewConstHistogram returns a metric representing a Prometheus histogram with
+// fixed values for the count, sum, and bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
+//
+// NewConstHistogram returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstHistogram is a version of NewConstHistogram that panics where
+// NewConstHistogram would have returned an error.
+func MustNewConstHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
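+
+// An illustrative use inside a custom collector (editor's sketch; c.desc,
+// the counts, and the bucket bounds are made-up values):
+//
+//	func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
+//		ch <- prometheus.MustNewConstHistogram(
+//			c.desc,
+//			4711,   // count
+//			403.42, // sum
+//			map[float64]uint64{25: 121, 50: 2403, 100: 4500},
+//			"200", "GET", // label values
+//		)
+//	}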
+
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constHistogram{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		buckets:    buckets,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	buckets map[float64]uint64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type buckSort []*dto.Bucket
+
+func (s buckSort) Len() int {
+	return len(s)
+}
+
+func (s buckSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s buckSort) Less(i, j int) bool {
+	return s[i].GetUpperBound() < s[j].GetUpperBound()
+}
+
+// pickSchema returns the smallest number n between -4 and 8 such that
+// 2^(2^-n) is less than or equal to the provided bucketFactor, i.e. the
+// coarsest schema whose per-bucket growth factor still stays within it.
+//
+// Special cases:
+//   - bucketFactor <= 1: panics.
+//   - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+	if bucketFactor <= 1 {
+		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+	}
+	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+	switch {
+	case floor <= -8:
+		return nativeHistogramSchemaMaximum
+	case floor >= 4:
+		return nativeHistogramSchemaMinimum
+	default:
+		return -int32(floor)
+	}
+}
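+
+// A worked example (editor's note): for bucketFactor = 1.1,
+// log2(log2(1.1)) ≈ -2.86 and floor = -3, so pickSchema returns 3. Indeed
+// 2^(2^-3) ≈ 1.0905 <= 1.1, while the next coarser schema 2 would grow by
+// 2^(2^-2) ≈ 1.189 per bucket, exceeding the requested factor.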
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+	var ii []int
+	buckets.Range(func(k, v interface{}) bool {
+		ii = append(ii, k.(int))
+		return true
+	})
+	sort.Ints(ii)
+
+	if len(ii) == 0 {
+		return nil, nil
+	}
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		v, _ := buckets.Load(i)
+		count := atomic.LoadInt64(v.(*int64))
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
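+
+// A worked encoding example (editor's note): sparse buckets at keys 1, 2,
+// and 6 with counts 5, 3, and 2 yield
+//
+//	spans:  [{Offset: 1, Length: 2}, {Offset: 3, Length: 1}]
+//	deltas: [5, -2, -1]
+//
+// i.e. the counts are delta-encoded (5, 5-2=3, 3-1=2), and the gap of three
+// empty buckets (keys 3 through 5) starts a new span instead of emitting
+// zero deltas.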
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+	if existingBucket, ok := buckets.Load(key); ok {
+		// Fast path without allocation.
+		atomic.AddInt64(existingBucket.(*int64), increment)
+		return false
+	}
+	// Bucket doesn't exist yet. Slow path allocating new counter.
+	newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+	if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+		// The bucket was created concurrently in another goroutine.
+		// Have to increment after all.
+		atomic.AddInt64(actualBucket.(*int64), increment)
+		return false
+	}
+	return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+	return func(k, v interface{}) bool {
+		bucket := v.(*int64)
+		if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+			atomic.AddUint32(bucketNumber, 1)
+		}
+		atomic.StoreInt64(bucket, 0)
+		return true
+	}
+}
+
+func deleteSyncMap(m *sync.Map) {
+	m.Range(func(k, v interface{}) bool {
+		m.Delete(k)
+		return true
+	})
+}
+
+func findSmallestKey(m *sync.Map) int {
+	result := math.MaxInt32
+	m.Range(func(k, v interface{}) bool {
+		key := k.(int)
+		if key < result {
+			result = key
+		}
+		return true
+	})
+	return result
+}
+
+func getLe(key int, schema int32) float64 {
+	// Here a bit of context about the behavior for the last bucket counting
+	// regular numbers (called simply "last bucket" below) and the bucket
+	// counting observations of ±Inf (called "inf bucket" below, with a key
+	// one higher than that of the "last bucket"):
+	//
+	// If we apply the usual formula to the last bucket, its upper bound
+	// would be calculated as +Inf. The reason is that the max possible
+	// regular float64 number (math.MaxFloat64) doesn't coincide with one of
+	// the calculated bucket boundaries. So the calculated boundary has to
+	// be larger than math.MaxFloat64, and the only float64 larger than
+	// math.MaxFloat64 is +Inf. However, we want to count actual
+	// observations of ±Inf in the inf bucket. Therefore, we have to treat
+	// the upper bound of the last bucket specially and set it to
+	// math.MaxFloat64. (The upper bound of the inf bucket, with its key
+	// being one higher than that of the last bucket, naturally comes out as
+	// +Inf by the usual formula. So that's fine.)
+	//
+	// math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+	// 1024. If there were a float64 number following math.MaxFloat64, it
+	// would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+	// of 0.5 and an exp of 1025. However, since frac must be smaller than
+	// 1, and exp must be smaller than 1025, either representation overflows
+	// a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+	// largest possible float64. Q.E.D.) However, the formula for
+	// calculating the upper bound from the idx and schema of the last
+	// bucket results in precisely that. It is either frac=1.0 & exp=1024
+	// (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+	// by the way, a power of two where the exponent itself is a power of
+	// two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+	// schemas.) So these are the special cases we have to catch below.
+	if schema < 0 {
+		exp := key << -schema
+		if exp == 1024 {
+			// This is the last bucket before the overflow bucket
+			// (for ±Inf observations). Return math.MaxFloat64 as
+			// explained above.
+			return math.MaxFloat64
+		}
+		return math.Ldexp(1, exp)
+	}
+
+	fracIdx := key & ((1 << schema) - 1)
+	frac := nativeHistogramBounds[schema][fracIdx]
+	exp := (key >> schema) + 1
+	if frac == 0.5 && exp == 1025 {
+		// This is the last bucket before the overflow bucket (for ±Inf
+		// observations). Return math.MaxFloat64 as explained above.
+		return math.MaxFloat64
+	}
+	return math.Ldexp(frac, exp)
+}
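+
+// A worked example (editor's note): with schema -1 the buckets grow by a
+// factor of 2^(2^1) = 4 per bucket, so getLe(1, -1) = Ldexp(1, 1<<1) = 4,
+// while getLe(512, -1) hits the exp == 1024 special case and returns
+// math.MaxFloat64 instead of +Inf.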
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+	for count != atomic.LoadUint64(&counts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+}
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+	for {
+		loadedBits := atomic.LoadUint64(bits)
+		newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+		if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+			break
+		}
+	}
+}
+
+// atomicDecUint32 atomically decrements the uint32 p points to.  See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+	atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+	atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+	atomic.StoreUint64(&cold.count, 0)
+	coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+	atomicAddFloat(&hot.sumBits, coldSum)
+	atomic.StoreUint64(&cold.sumBits, 0)
+	for i := range hot.buckets {
+		atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+		atomic.StoreUint64(&cold.buckets[i], 0)
+	}
+	atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+	atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
+
+type nativeExemplars struct {
+	sync.Mutex
+
+	// Time-to-live for exemplars. It is set to -1 if exemplars are
+	// disabled, i.e. if NativeHistogramMaxExemplars is below 0. The ttl is
+	// used on insertion to remove an exemplar that is older than ttl, if
+	// present.
+	ttl time.Duration
+
+	exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+	return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+	if ttl == 0 {
+		ttl = 5 * time.Minute
+	}
+
+	if maxCount == 0 {
+		maxCount = 10
+	}
+
+	if maxCount < 0 {
+		maxCount = 0
+		ttl = -1
+	}
+
+	return nativeExemplars{
+		ttl:       ttl,
+		exemplars: make([]*dto.Exemplar, 0, maxCount),
+	}
+}
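+
+// Illustrative defaults (editor's note): makeNativeExemplars(0, 0) yields
+// the defaults of a 5m TTL and capacity 10, while a negative maxCount, e.g.
+// makeNativeExemplars(0, -1), disables exemplars entirely (ttl becomes -1).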
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+	if !n.isEnabled() {
+		return
+	}
+
+	n.Lock()
+	defer n.Unlock()
+
+	// As long as the number of exemplars is still below cap(n.exemplars),
+	// insert the new exemplar directly, keeping the slice sorted by value.
+	if len(n.exemplars) < cap(n.exemplars) {
+		var nIdx int
+		for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+			if *e.Value < *n.exemplars[nIdx].Value {
+				break
+			}
+		}
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+		return
+	}
+
+	if len(n.exemplars) == 1 {
+		// With only a single exemplar in place, simply replace it with
+		// the new one.
+		n.exemplars[0] = e
+		return
+	}
+	// From this point on, the number of exemplars is greater than 1.
+
+	// When the number of exemplars exceeds the limit, remove one exemplar.
+	var (
+		ot    = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+		otIdx = -1          // Index of the exemplar with the oldest timestamp.
+
+		md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+		// The insertion point of the new exemplar in the exemplars slice after insertion.
+		// This is calculated purely based on the order of the exemplars by value.
+		// nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end.
+		nIdx = -1
+
+		// rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+		// The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+		// It is calculated in 3 steps:
+		//   1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+		//      That is the following will be true (on log scale):
+		//      either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+		//      the closest values to each other from all pairs.
+		//      For example, suppose the values are distributed like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                   ^--rIdx as this is older.
+		//      Or like this:
+		//        |-----------x-------------x----------------x----x-----|
+		//                                                        ^--rIdx as this is older.
+		//   2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+		//   3. We check if by inserting the new exemplar we would create a closer pair at
+		//      (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+		//      keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+		rIdx = -1
+		cLog float64 // Logarithm of the current exemplar.
+		pLog float64 // Logarithm of the previous exemplar.
+	)
+
+	for i, exemplar := range n.exemplars {
+		// Find the exemplar with the oldest timestamp.
+		if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+			ot = exemplar.Timestamp.AsTime()
+			otIdx = i
+		}
+
+		// Find the index at which to insert the new exemplar.
+		if nIdx == -1 && *e.Value <= *exemplar.Value {
+			nIdx = i
+		}
+
+		// Find the two closest exemplars and pick the one with the older timestamp.
+		pLog = cLog
+		cLog = math.Log(exemplar.GetValue())
+		if i == 0 {
+			continue
+		}
+		diff := math.Abs(cLog - pLog)
+		if md == -1 || diff < md {
+			// The closest exemplar pair is at index: i-1, i.
+			// Choose the exemplar with the older timestamp for replacement.
+			md = diff
+			if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+				rIdx = i
+			} else {
+				rIdx = i - 1
+			}
+		}
+
+	}
+
+	// If all existing exemplars are smaller than the new exemplar,
+	// then the new exemplar is inserted at the end.
+	if nIdx == -1 {
+		nIdx = len(n.exemplars)
+	}
+	// Here, we have the following relationships:
+	// n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+	// e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+	if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+		// If the oldest exemplar has expired, then replace it with the new exemplar.
+		rIdx = otIdx
+	} else {
+		// In the previous for loop, when calculating the closest pair of exemplars,
+		// we did not take into account the newly inserted exemplar.
+		// So we need to calculate with the newly inserted exemplar again.
+		elog := math.Log(e.GetValue())
+		if nIdx > 0 {
+			diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+			if diff < md {
+				// The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+				//                                            v--rIdx
+				// |-----------x-n-----------x----------------x----x-----|
+				//     nIdx-1--^ ^--new exemplar value
+				// Do not make the spread worse, replace nIdx-1 and not rIdx.
+				md = diff
+				rIdx = nIdx - 1
+			}
+		}
+		if nIdx < len(n.exemplars) {
+			diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+			if diff < md {
+				// The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+				//                                            v--rIdx
+				// |-----------x-----------n-x----------------x----x-----|
+				//     new exemplar value--^ ^--nIdx
+				// Do not make the spread worse, replace nIdx and not rIdx.
+				rIdx = nIdx
+			}
+		}
+	}
+
+	// Adjust the slice according to rIdx and nIdx.
+	switch {
+	case rIdx == nIdx:
+		n.exemplars[nIdx] = e
+	case rIdx < nIdx:
+		n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+	case rIdx > nIdx:
+		n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+	}
+}
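+
+// A worked splice example (editor's note): with exemplars [a b c d] sorted
+// by value, rIdx = 1 (b is evicted) and nIdx = 3 (e sorts just before d),
+// the rIdx < nIdx case evaluates to
+//
+//	append([a], append([c], append([e], [d]...)...)...) == [a c e d]
+//
+// keeping the slice sorted by value at constant length.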
+
+type constNativeHistogram struct {
+	desc *Desc
+	dto.Histogram
+	labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+	var bucketPopulationSum int64
+	for _, v := range positiveBuckets {
+		bucketPopulationSum += v
+	}
+	for _, v := range negativeBuckets {
+		bucketPopulationSum += v
+	}
+	bucketPopulationSum += int64(zeroBucket)
+
+	// If the sum of observations is NaN, the number of observations must be
+	// greater than or equal to the sum of all bucket counts. Otherwise, the
+	// number of observations must be equal to the sum of all bucket counts.
+
+	if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+		!math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+		return errors.New("the sum of all bucket populations exceeds the count of observations")
+	}
+	return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative) observations in the zero
+// bucket (with an absolute value less than or equal to the current threshold).
+// positiveBuckets and negativeBuckets are separate maps for positive and
+// negative observations. The map's value is an int64, counting observations
+// in that bucket. The map's key is the index of the bucket according to the
+// used Schema. Index 0 is for an upper bound of 1 in positive buckets and for
+// a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+//   - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
+//   - the schema passed is not between -4 and 8
+//   - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
+func NewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	schema int32,
+	zeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
+		return nil, errors.New("invalid native histogram schema")
+	}
+	if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
+		return nil, err
+	}
+
+	NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
+	PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
+	ret := &constNativeHistogram{
+		desc: desc,
+		Histogram: dto.Histogram{
+			CreatedTimestamp: timestamppb.New(createdTimestamp),
+			Schema:           &schema,
+			ZeroThreshold:    &zeroThreshold,
+			SampleCount:      &count,
+			SampleSum:        &sum,
+
+			NegativeSpan:  NegativeSpan,
+			NegativeDelta: NegativeDelta,
+
+			PositiveSpan:  PositiveSpan,
+			PositiveDelta: PositiveDelta,
+
+			ZeroCount: proto.Uint64(zeroBucket),
+		},
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}
+	if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
+		ret.PositiveSpan = []*dto.BucketSpan{{
+			Offset: proto.Int32(0),
+			Length: proto.Uint32(0),
+		}}
+	}
+	return ret, nil
+}
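+
+// An illustrative call (editor's sketch; all values are made up, and the
+// zero threshold shown is the conventional default of 2^-128):
+//
+//	m, err := prometheus.NewConstNativeHistogram(
+//		desc,
+//		10,                        // count
+//		45.2,                      // sum
+//		map[int]int64{0: 4, 1: 3}, // positive buckets
+//		map[int]int64{},           // negative buckets
+//		3,                         // zero bucket count
+//		2,                         // schema
+//		2.938735877055719e-39,     // zero threshold (2^-128)
+//		time.Now(),
+//		"200", "GET",
+//	)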
+
+// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
+// NewConstNativeHistogram would have returned an error.
+func MustNewConstNativeHistogram(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	positiveBuckets, negativeBuckets map[int]int64,
+	zeroBucket uint64,
+	nativeHistogramSchema int32,
+	nativeHistogramZeroThreshold float64,
+	createdTimestamp time.Time,
+	labelValues ...string,
+) Metric {
+	nativehistogram, err := NewConstNativeHistogram(desc,
+		count,
+		sum,
+		positiveBuckets,
+		negativeBuckets,
+		zeroBucket,
+		nativeHistogramSchema,
+		nativeHistogramZeroThreshold,
+		createdTimestamp,
+		labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return nativehistogram
+}
+
+func (h *constNativeHistogram) Desc() *Desc {
+	return h.desc
+}
+
+func (h *constNativeHistogram) Write(out *dto.Metric) error {
+	out.Histogram = &h.Histogram
+	out.Label = h.labelPairs
+	return nil
+}
+
+func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
+	if len(buckets) == 0 {
+		return nil, nil
+	}
+	var ii []int
+	for k := range buckets {
+		ii = append(ii, k)
+	}
+	sort.Ints(ii)
+
+	var (
+		spans     []*dto.BucketSpan
+		deltas    []int64
+		prevCount int64
+		nextI     int
+	)
+
+	appendDelta := func(count int64) {
+		*spans[len(spans)-1].Length++
+		deltas = append(deltas, count-prevCount)
+		prevCount = count
+	}
+
+	for n, i := range ii {
+		count := buckets[i]
+		// Multiple spans with only small gaps in between are probably
+		// encoded more efficiently as one larger span with a few empty
+		// buckets. Needs some research to find the sweet spot. For now,
+		// we assume that gaps of one or two buckets should not create
+		// a new span.
+		iDelta := int32(i - nextI)
+		if n == 0 || iDelta > 2 {
+			// We have to create a new span, either because we are
+			// at the very beginning, or because we have found a gap
+			// of more than two buckets.
+			spans = append(spans, &dto.BucketSpan{
+				Offset: proto.Int32(iDelta),
+				Length: proto.Uint32(0),
+			})
+		} else {
+			// We have found a small gap (or no gap at all).
+			// Insert empty buckets as needed.
+			for j := int32(0); j < iDelta; j++ {
+				appendDelta(0)
+			}
+		}
+		appendDelta(count)
+		nextI = i + 1
+	}
+	return spans, deltas
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 0000000..1ed5abe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copy/paste to avoid a dependency. Hence this file
+// carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+	"math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+	if a == b {
+		return true
+	}
+	absA := math.Abs(a)
+	absB := math.Abs(b)
+	diff := math.Abs(a - b)
+	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+		return diff < epsilon*minNormalFloat64
+	}
+	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+			return false
+		}
+	}
+	return true
+}
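+
+// Illustrative comparisons (editor's sketch):
+//
+//	AlmostEqualFloat64(0.1+0.2, 0.3, 1e-12)                          // true
+//	AlmostEqualFloat64s([]float64{0.1 + 0.2}, []float64{0.3}, 1e-12) // true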
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000..7bac0da
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,655 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+)
+
+func minInt(a, b int) int {
+	if a < b {
+		return a
+	}
+	return b
+}
+
+func maxInt(a, b int) int {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func calculateRatio(matches, length int) float64 {
+	if length > 0 {
+		return 2.0 * float64(matches) / float64(length)
+	}
+	return 1.0
+}
+
+type Match struct {
+	A    int
+	B    int
+	Size int
+}
+
+type OpCode struct {
+	Tag byte
+	I1  int
+	I2  int
+	J1  int
+	J2  int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching".  The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk).  The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence.  This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence.  That's what
+// catches peoples' eyes.  The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff.  This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files).  That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing:  Basic R-O is cubic time worst case and quadratic time expected
+// case.  SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+	a              []string
+	b              []string
+	b2j            map[string][]int
+	IsJunk         func(string) bool
+	autoJunk       bool
+	bJunk          map[string]struct{}
+	matchingBlocks []Match
+	fullBCount     map[string]int
+	bPopular       map[string]struct{}
+	opCodes        []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+	m := SequenceMatcher{autoJunk: true}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+	isJunk func(string) bool,
+) *SequenceMatcher {
+	m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+	m.SetSeqs(a, b)
+	return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+	m.SetSeq1(a)
+	m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+	if &a == &m.a {
+		return
+	}
+	m.a = a
+	m.matchingBlocks = nil
+	m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+	if &b == &m.b {
+		return
+	}
+	m.b = b
+	m.matchingBlocks = nil
+	m.opCodes = nil
+	m.fullBCount = nil
+	m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+	// Populate line -> index mapping
+	b2j := map[string][]int{}
+	for i, s := range m.b {
+		indices := b2j[s]
+		indices = append(indices, i)
+		b2j[s] = indices
+	}
+
+	// Purge junk elements
+	m.bJunk = map[string]struct{}{}
+	if m.IsJunk != nil {
+		junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+	}
+
+	// Purge remaining popular elements
+	popular := map[string]struct{}{}
+	n := len(m.b)
+	if m.autoJunk && n >= 200 {
+		ntest := n/100 + 1
+		for s, indices := range b2j {
+			if len(indices) > ntest {
+				popular[s] = struct{}{}
+			}
+		}
+		for s := range popular {
+			delete(b2j, s)
+		}
+	}
+	m.bPopular = popular
+	m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+	_, ok := m.bJunk[s]
+	return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+//	alo <= i <= i+k <= ahi
+//	blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+//	k >= k'
+//	i <= i'
+//	and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block.  Then that block is extended as
+// far as possible by matching (only) junk elements on both sides.  So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+	// CAUTION:  stripping common prefix or suffix would be incorrect.
+	// E.g.,
+	//    ab
+	//    acab
+	// Longest matching block is "ab", but if common prefix is
+	// stripped, it's "a" (tied with "b").  UNIX(tm) diff does so
+	// strip, so ends up claiming that ab is changed to acab by
+	// inserting "ca" in the middle.  That's minimal but unintuitive:
+	// "it's obvious" that someone inserted "ac" at the front.
+	// Windiff ends up at the same place as diff, but by pairing up
+	// the unique 'b's and then matching the first two 'a's.
+	besti, bestj, bestsize := alo, blo, 0
+
+	// find longest junk-free match
+	// during an iteration of the loop, j2len[j] = length of longest
+	// junk-free match ending with a[i-1] and b[j]
+	j2len := map[int]int{}
+	for i := alo; i != ahi; i++ {
+		// look at all instances of a[i] in b; note that because
+		// b2j has no junk keys, the loop is skipped if a[i] is junk
+		newj2len := map[int]int{}
+		for _, j := range m.b2j[m.a[i]] {
+			// a[i] matches b[j]
+			if j < blo {
+				continue
+			}
+			if j >= bhi {
+				break
+			}
+			k := j2len[j-1] + 1
+			newj2len[j] = k
+			if k > bestsize {
+				besti, bestj, bestsize = i-k+1, j-k+1, k
+			}
+		}
+		j2len = newj2len
+	}
+
+	// Extend the best by non-junk elements on each end.  In particular,
+	// "popular" non-junk elements aren't in b2j, which greatly speeds
+	// the inner loop above, but also means "the best" match so far
+	// doesn't contain any junk *or* popular non-junk elements.
+	for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		!m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+	}
+
+	// Now that we have a wholly interesting match (albeit possibly
+	// empty!), we may as well suck up the matching junk on each
+	// side of it too.  Can't think of a good reason not to, and it
+	// saves post-processing the (possibly considerable) expense of
+	// figuring out what to do with it.  In the case of an empty
+	// interesting match, this is clearly the right thing to do,
+	// because no other kind of match is possible in the regions.
+	for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+		m.a[besti-1] == m.b[bestj-1] {
+		besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+	}
+	for besti+bestsize < ahi && bestj+bestsize < bhi &&
+		m.isBJunk(m.b[bestj+bestsize]) &&
+		m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+	}
+
+	return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+	if m.matchingBlocks != nil {
+		return m.matchingBlocks
+	}
+
+	var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+	matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+		match := m.findLongestMatch(alo, ahi, blo, bhi)
+		i, j, k := match.A, match.B, match.Size
+		if match.Size > 0 {
+			if alo < i && blo < j {
+				matched = matchBlocks(alo, i, blo, j, matched)
+			}
+			matched = append(matched, match)
+			if i+k < ahi && j+k < bhi {
+				matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+			}
+		}
+		return matched
+	}
+	matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+	// It's possible that we have adjacent equal blocks in the
+	// matching_blocks list now.
+	nonAdjacent := []Match{}
+	i1, j1, k1 := 0, 0, 0
+	for _, b := range matched {
+		// Is this block adjacent to i1, j1, k1?
+		i2, j2, k2 := b.A, b.B, b.Size
+		if i1+k1 == i2 && j1+k1 == j2 {
+			// Yes, so collapse them -- this just increases the length of
+			// the first block by the length of the second, and the first
+			// block so lengthened remains the block to compare against.
+			k1 += k2
+		} else {
+			// Not adjacent.  Remember the first block (k1==0 means it's
+			// the dummy we started with), and make the second block the
+			// new block to compare against.
+			if k1 > 0 {
+				nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+			}
+			i1, j1, k1 = i2, j2, k2
+		}
+	}
+	if k1 > 0 {
+		nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+	}
+
+	nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+	m.matchingBlocks = nonAdjacent
+	return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal):    a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+	if m.opCodes != nil {
+		return m.opCodes
+	}
+	i, j := 0, 0
+	matching := m.GetMatchingBlocks()
+	opCodes := make([]OpCode, 0, len(matching))
+	for _, m := range matching {
+		//  invariant:  we've pumped out correct diffs to change
+		//  a[:i] into b[:j], and the next matching block is
+		//  a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+		//  out a diff to change a[i:ai] into b[j:bj], pump out
+		//  the matching block, and move (i,j) beyond the match
+		ai, bj, size := m.A, m.B, m.Size
+		tag := byte(0)
+		if i < ai && j < bj {
+			tag = 'r'
+		} else if i < ai {
+			tag = 'd'
+		} else if j < bj {
+			tag = 'i'
+		}
+		if tag > 0 {
+			opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+		}
+		i, j = ai+size, bj+size
+		// the list of matching blocks is terminated by a
+		// sentinel with size 0
+		if size > 0 {
+			opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+		}
+	}
+	m.opCodes = opCodes
+	return m.opCodes
+}
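+
+// A worked example (editor's note): for a = [a b c d] and b = [a c d e],
+// the matching blocks are (0,0,1) and (2,1,2), so GetOpCodes yields
+//
+//	{'e', 0, 1, 0, 1} // "a" kept
+//	{'d', 1, 2, 1, 1} // "b" deleted
+//	{'e', 2, 4, 1, 3} // "c d" kept
+//	{'i', 4, 4, 3, 4} // "e" inserted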
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+	if n < 0 {
+		n = 3
+	}
+	codes := m.GetOpCodes()
+	if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+	}
+	// Fixup leading and trailing groups if they show no changes.
+	if codes[0].Tag == 'e' {
+		c := codes[0]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
+	}
+	if codes[len(codes)-1].Tag == 'e' {
+		c := codes[len(codes)-1]
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
+	}
+	nn := n + n
+	groups := [][]OpCode{}
+	group := []OpCode{}
+	for _, c := range codes {
+		i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+		// End the current group and start a new one whenever
+		// there is a large range with no changes.
+		if c.Tag == 'e' && i2-i1 > nn {
+			group = append(group, OpCode{
+				c.Tag, i1, minInt(i2, i1+n),
+				j1, minInt(j2, j1+n),
+			})
+			groups = append(groups, group)
+			group = []OpCode{}
+			i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
+		}
+		group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+	}
+	if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
+		groups = append(groups, group)
+	}
+	return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+	matches := 0
+	for _, m := range m.GetMatchingBlocks() {
+		matches += m.Size
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+	// viewing a and b as multisets, set matches to the cardinality
+	// of their intersection; this counts the number of matches
+	// without regard to order, so is clearly an upper bound
+	if m.fullBCount == nil {
+		m.fullBCount = map[string]int{}
+		for _, s := range m.b {
+			m.fullBCount[s]++
+		}
+	}
+
+	// avail[x] is the number of times x appears in 'b' less the
+	// number of times we've seen it in 'a' so far ... kinda
+	avail := map[string]int{}
+	matches := 0
+	for _, s := range m.a {
+		n, ok := avail[s]
+		if !ok {
+			n = m.fullBCount[s]
+		}
+		avail[s] = n - 1
+		if n > 0 {
+			matches++
+		}
+	}
+	return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+	la, lb := len(m.a), len(m.b)
+	return calculateRatio(minInt(la, lb), la+lb)
+}
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+	// Per the diff spec at http://www.unix.org/single_unix_specification/
+	beginning := start + 1 // lines start numbering with one
+	length := stop - start
+	if length == 1 {
+		return strconv.Itoa(beginning)
+	}
+	if length == 0 {
+		beginning-- // empty ranges begin at line just before the range
+	}
+	return fmt.Sprintf("%d,%d", beginning, length)
+}
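+
+// Worked examples (editor's note): formatRangeUnified(0, 3) == "1,3",
+// formatRangeUnified(0, 1) == "1" (single-line shorthand), and
+// formatRangeUnified(3, 3) == "3,0" (an empty range begins just before it).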
+
+// Unified diff parameters
+type UnifiedDiff struct {
+	A        []string // First sequence lines
+	FromFile string   // First file name
+	FromDate string   // First file time
+	B        []string // Second sequence lines
+	ToFile   string   // Second file name
+	ToDate   string   // Second file time
+	Eol      string   // Headers end of line, defaults to LF
+	Context  int      // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context.  The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline.  This is helpful so that inputs
+// created from file.readlines() result in diffs that are suitable for
+// file.writelines() since both the inputs and outputs have trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times.  Any or all of these may be specified using strings for
+// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+	buf := bufio.NewWriter(writer)
+	defer buf.Flush()
+	wf := func(format string, args ...interface{}) error {
+		_, err := fmt.Fprintf(buf, format, args...)
+		return err
+	}
+	ws := func(s string) error {
+		_, err := buf.WriteString(s)
+		return err
+	}
+
+	if len(diff.Eol) == 0 {
+		diff.Eol = "\n"
+	}
+
+	started := false
+	m := NewMatcher(diff.A, diff.B)
+	for _, g := range m.GetGroupedOpCodes(diff.Context) {
+		if !started {
+			started = true
+			fromDate := ""
+			if len(diff.FromDate) > 0 {
+				fromDate = "\t" + diff.FromDate
+			}
+			toDate := ""
+			if len(diff.ToDate) > 0 {
+				toDate = "\t" + diff.ToDate
+			}
+			if diff.FromFile != "" || diff.ToFile != "" {
+				err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+				err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		first, last := g[0], g[len(g)-1]
+		range1 := formatRangeUnified(first.I1, last.I2)
+		range2 := formatRangeUnified(first.J1, last.J2)
+		if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+			return err
+		}
+		for _, c := range g {
+			i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+			if c.Tag == 'e' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws(" " + line); err != nil {
+						return err
+					}
+				}
+				continue
+			}
+			if c.Tag == 'r' || c.Tag == 'd' {
+				for _, line := range diff.A[i1:i2] {
+					if err := ws("-" + line); err != nil {
+						return err
+					}
+				}
+			}
+			if c.Tag == 'r' || c.Tag == 'i' {
+				for _, line := range diff.B[j1:j2] {
+					if err := ws("+" + line); err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+	w := &bytes.Buffer{}
+	err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
+
+// Split a string on "\n" while preserving the newlines. The output can be
+// used as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+	lines := strings.SplitAfter(s, "\n")
+	lines[len(lines)-1] += "\n"
+	return lines
+}
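+
+// A typical use of this package (editor's sketch):
+//
+//	diff := UnifiedDiff{
+//		A:        SplitLines("foo\nbar\n"),
+//		B:        SplitLines("foo\nbaz\n"),
+//		FromFile: "want",
+//		ToFile:   "got",
+//		Context:  1,
+//	}
+//	text, err := GetUnifiedDiffString(diff)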
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 0000000..a4fa6ea
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+	Matcher *regexp.Regexp
+	Deny    bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the
+// `collectors` package. Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the
+// `collector.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+	DisableMemStatsLikeMetrics bool
+	RuntimeMetricSumForHist    map[string]string
+	RuntimeMetricRules         []GoCollectorRule
+}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 0000000..d273b66
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,143 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+	"math"
+	"path"
+	"runtime/metrics"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is if the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+	namespace := "go"
+
+	comp := strings.SplitN(d.Name, ":", 2)
+	key := comp[0]
+	unit := comp[1]
+
+	// The last path element in the key is the name,
+	// the rest is the subsystem.
+	subsystem := path.Dir(key[1:] /* remove leading / */)
+	name := path.Base(key)
+
+	// subsystem is translated by replacing all / and - with _.
+	subsystem = strings.ReplaceAll(subsystem, "/", "_")
+	subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+	// unit is translated assuming that the unit contains no
+	// non-ASCII characters.
+	unit = strings.ReplaceAll(unit, "-", "_")
+	unit = strings.ReplaceAll(unit, "*", "_")
+	unit = strings.ReplaceAll(unit, "/", "_per_")
+
+	// name has - replaced with _ and is concatenated with the unit and
+	// other data.
+	name = strings.ReplaceAll(name, "-", "_")
+	name += "_" + unit
+	if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+		name += "_total"
+	}
+
+	// Our current conversion moves to legacy naming, so use legacy validation.
+	valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
+	switch d.Kind {
+	case metrics.KindUint64:
+	case metrics.KindFloat64:
+	case metrics.KindFloat64Histogram:
+	default:
+		valid = false
+	}
+	return namespace, subsystem, name, valid
+}
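+
+// A worked example (editor's note): the runtime/metrics name
+// "/gc/heap/allocs:bytes" (a cumulative uint64) splits into key
+// "/gc/heap/allocs" and unit "bytes", giving namespace "go", subsystem
+// "gc_heap", and name "allocs_bytes_total", i.e. the familiar
+// go_gc_heap_allocs_bytes_total metric.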
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+	switch unit {
+	case "bytes":
+		// Re-bucket as powers of 2.
+		return reBucketExp(buckets, 2)
+	case "seconds":
+		// Re-bucket as powers of 10 and then merge all buckets greater
+		// than 1 second into the +Inf bucket.
+		b := reBucketExp(buckets, 10)
+		for i := range b {
+			if b[i] <= 1 {
+				continue
+			}
+			b[i] = math.Inf(1)
+			b = b[:i+1]
+			break
+		}
+		return b
+	}
+	return buckets
+}
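+
+// Illustrative sketch (not part of the upstream file), tracing the
+// "seconds" branch above with power-of-10 re-bucketing:
+//
+//	in := []float64{0.001, 0.005, 0.01, 0.1, 1, 10, 100}
+//	out := RuntimeMetricsBucketsForUnit(in, "seconds")
+//	// out == []float64{0.001, 0.01, 0.1, 1, math.Inf(1)}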
+
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func reBucketExp(buckets []float64, base float64) []float64 {
+	bucket := buckets[0]
+	var newBuckets []float64
+	// We may see a -Inf here, in which case, add it and skip it
+	// since we risk producing NaNs otherwise.
+	//
+	// We need to preserve -Inf values to maintain runtime/metrics
+	// conventions. We'll strip it out later.
+	if bucket == math.Inf(-1) {
+		newBuckets = append(newBuckets, bucket)
+		buckets = buckets[1:]
+		bucket = buckets[0]
+	}
+	// From now on, bucket should always have a non-Inf value because
+	// Infs are only ever at the ends of the bucket lists, so
+	// arithmetic operations on it are non-NaN.
+	for i := 1; i < len(buckets); i++ {
+		if bucket >= 0 && buckets[i] < bucket*base {
+			// The next bucket we want to include is at least bucket*base.
+			continue
+		} else if bucket < 0 && buckets[i] < bucket/base {
+			// In this case the bucket we're targeting is negative, and since
+			// we're ascending through buckets here, we need to divide to get
+			// closer to zero exponentially.
+			continue
+		}
+		// The +Inf bucket will always be the last one, and we'll always
+		// end up including it here: bucket is finite at this point, so a
+		// +Inf buckets[i] can never satisfy the skip conditions above.
+		newBuckets = append(newBuckets, bucket)
+		bucket = buckets[i]
+	}
+	return append(newBuckets, bucket)
+}
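+
+// Illustrative sketch (not part of the upstream file): with base 2, linear
+// bounds collapse to powers of two, e.g.
+// reBucketExp([]float64{1, 2, 3, 4, 5, 6, 7, 8}, 2) returns [1 2 4 8].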
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
new file mode 100644
index 0000000..6515c11
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -0,0 +1,101 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+	"sort"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
+
+func (s LabelPairSorter) Len() int {
+	return len(s)
+}
+
+func (s LabelPairSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s LabelPairSorter) Less(i, j int) bool {
+	return s[i].GetName() < s[j].GetName()
+}
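+
+// Illustrative sketch (not part of the upstream file), assuming pairs is a
+// []*dto.LabelPair gathered elsewhere:
+//
+//	sort.Sort(LabelPairSorter(pairs))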
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+	return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
+	if len(s[i].Label) != len(s[j].Label) {
+		// This should not happen. The metrics are inconsistent. However,
+		// we have to deal with it anyway, as people might use custom
+		// collectors or metric family injection to create inconsistent
+		// metrics. So simply compare the number of labels in this case;
+		// that still yields reproducible sorting.
+		return len(s[i].Label) < len(s[j].Label)
+	}
+	for n, lp := range s[i].Label {
+		vi := lp.GetValue()
+		vj := s[j].Label[n].GetValue()
+		if vi != vj {
+			return vi < vj
+		}
+	}
+
+	// We should never arrive here. Multiple metrics with the same
+	// label set in the same scrape will lead to undefined ingestion
+	// behavior. However, as above, we have to provide stable sorting
+	// here, even for inconsistent metrics. So sort equal metrics
+	// by their timestamp, with missing timestamps (implying "now")
+	// coming last.
+	if s[i].TimestampMs == nil {
+		return false
+	}
+	if s[j].TimestampMs == nil {
+		return true
+	}
+	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
+}
+
+// NormalizeMetricFamilies returns a MetricFamily slice with empty
+// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
+// the slice, with the contained Metrics sorted within each MetricFamily.
+func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
+	for _, mf := range metricFamiliesByName {
+		sort.Sort(MetricSorter(mf.Metric))
+	}
+	names := make([]string, 0, len(metricFamiliesByName))
+	for name, mf := range metricFamiliesByName {
+		if len(mf.Metric) > 0 {
+			names = append(names, name)
+		}
+	}
+	sort.Strings(names)
+	result := make([]*dto.MetricFamily, 0, len(names))
+	for _, name := range names {
+		result = append(result, metricFamiliesByName[name])
+	}
+	return result
+}
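+
+// Illustrative sketch (not part of the upstream file), assuming byName was
+// filled by a gathering step elsewhere:
+//
+//	families := NormalizeMetricFamilies(byName)
+//	// families is sorted by name, metrics within each family are sorted,
+//	// and empty families have been dropped.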
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 0000000..5fe8d3b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,189 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+//
+//	myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and its constraint function
+// used to normalize label values. This type is commonly used when
+// constructing metric vector Collectors.
+type ConstrainedLabel struct {
+	Name       string
+	Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows creating labels that can
+// optionally be constrained.
+//
+//	prometheus.V2().NewCounterVec(CounterVecOpts{
+//	  CounterOpts: {...}, // Usual CounterOpts fields
+//	  VariableLabels: ConstrainedLabels{
+//	    {Name: "A"},
+//	    {Name: "B", Constraint: func(v string) string { ... }},
+//	  },
+//	})
+type ConstrainableLabels interface {
+	compile() *compiledLabels
+	labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint
+// function mappings used to normalize label values. This type is commonly
+// used when constructing metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+	compiled := &compiledLabels{
+		names:            make([]string, len(cls)),
+		labelConstraints: map[string]LabelConstraint{},
+	}
+
+	for i, label := range cls {
+		compiled.names[i] = label.Name
+		if label.Constraint != nil {
+			compiled.labelConstraints[label.Name] = label.Constraint
+		}
+	}
+
+	return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+	names := make([]string, len(cls))
+	for i, label := range cls {
+		names[i] = label.Name
+	}
+	return names
+}
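+
+// Illustrative sketch (not part of the upstream file): a constraint that
+// lower-cases the "method" label value so "GET" and "get" share a series:
+//
+//	cls := ConstrainedLabels{
+//		{Name: "code"},
+//		{Name: "method", Constraint: strings.ToLower},
+//	}
+//	v := cls.compile().constrain("method", "GET") // v == "get"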
+
+// UnconstrainedLabels represents a collection of labels without any
+// constraint on their values. Thus, it is simply a collection of label names.
+//
+//	UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+//	ConstrainedLabels {
+//	  { Name: "A" },
+//	  { Name: "B" },
+//	}
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+	return &compiledLabels{
+		names: uls,
+	}
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+	return uls
+}
+
+type compiledLabels struct {
+	names            []string
+	labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+	return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+	return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+	if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+		return fn(value)
+	}
+	return value
+}
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
+	return fmt.Errorf(
+		"%w: %q has %d variable labels named %q but %d values %q were provided",
+		errInconsistentCardinality, fqName,
+		len(labels), labels,
+		len(labelValues), labelValues,
+	)
+}
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+	if len(labels) != expectedNumberOfValues {
+		return fmt.Errorf(
+			"%w: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(labels), labels,
+		)
+	}
+
+	for name, val := range labels {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+		}
+	}
+
+	return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+	if len(vals) != expectedNumberOfValues {
+		// The call below makes vals escape; copy them to avoid that.
+		vals := append([]string(nil), vals...)
+		return fmt.Errorf(
+			"%w: expected %d label values but got %d in %#v",
+			errInconsistentCardinality, expectedNumberOfValues,
+			len(vals), vals,
+		)
+	}
+
+	for _, val := range vals {
+		if !utf8.ValidString(val) {
+			return fmt.Errorf("label value %q is not valid UTF-8", val)
+		}
+	}
+
+	return nil
+}
+
+func checkLabelName(l string) bool {
+	//nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+	return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
new file mode 100644
index 0000000..76e59f1
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -0,0 +1,276 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"math"
+	"sort"
+	"strings"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/model"
+	"google.golang.org/protobuf/proto"
+)
+
+var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
+
+// A Metric models a single sample value with its metadata being exported to
+// Prometheus. Implementations of Metric in this package are Gauge, Counter,
+// Histogram, Summary, and Untyped.
+type Metric interface {
+	// Desc returns the descriptor for the Metric. This method idempotently
+	// returns the same descriptor throughout the lifetime of the
+	// Metric. The returned descriptor is immutable by contract. A Metric
+	// unable to describe itself must return an invalid descriptor (created
+	// with NewInvalidDesc).
+	Desc() *Desc
+	// Write encodes the Metric into a "Metric" Protocol Buffer data
+	// transmission object.
+	//
+	// Metric implementations must observe concurrency safety as reads of
+	// this metric may occur at any time, and any blocking occurs at the
+	// expense of total performance of rendering all registered
+	// metrics. Ideally, Metric implementations should support concurrent
+	// readers.
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still make sure of sorting if they depend on it.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+	ConstLabels Labels
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+
+	sb := strings.Builder{}
+	sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+	if namespace != "" {
+		sb.WriteString(namespace)
+		sb.WriteString("_")
+	}
+
+	if subsystem != "" {
+		sb.WriteString(subsystem)
+		sb.WriteString("_")
+	}
+
+	sb.WriteString(name)
+
+	return sb.String()
+}
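+
+// Illustrative sketch (not part of the upstream file): empty components are
+// skipped rather than producing doubled underscores, and an empty name
+// short-circuits to "":
+//
+//	BuildFQName("go", "gc", "duration_seconds") // "go_gc_duration_seconds"
+//	BuildFQName("", "", "requests_total")       // "requests_total"
+//	BuildFQName("go", "gc", "")                 // ""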
+
+type invalidMetric struct {
+	desc *Desc
+	err  error
+}
+
+// NewInvalidMetric returns a metric whose Write method always returns the
+// provided error. It is useful if a Collector finds itself unable to collect
+// a metric and wishes to report an error to the registry.
+func NewInvalidMetric(desc *Desc, err error) Metric {
+	return &invalidMetric{desc, err}
+}
+
+func (m *invalidMetric) Desc() *Desc { return m.desc }
+
+func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
+
+type timestampedMetric struct {
+	Metric
+	t time.Time
+}
+
+func (m timestampedMetric) Write(pb *dto.Metric) error {
+	e := m.Metric.Write(pb)
+	pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
+	return e
+}
+
+// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
+// way that it has an explicit timestamp set to the provided Time. This is only
+// useful in rare cases as the timestamp of a Prometheus metric should usually
+// be set by the Prometheus server during scraping. Exceptions include mirroring
+// metrics with given timestamps from other metric sources.
+//
+// NewMetricWithTimestamp works best with MustNewConstMetric,
+// MustNewConstHistogram, and MustNewConstSummary, see example.
+//
+// Currently, the exposition formats used by Prometheus are limited to
+// millisecond resolution. Thus, the provided time will be truncated to
+// millisecond precision.
+func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
+	return timestampedMetric{Metric: m, t: t}
+}
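+
+// Illustrative sketch of the mirroring use case (not part of the upstream
+// file; desc is assumed to be a *Desc created with NewDesc elsewhere):
+//
+//	m := MustNewConstMetric(desc, GaugeValue, 42)
+//	mt := NewMetricWithTimestamp(time.Unix(1700000000, 0), m)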
+
+type withExemplarsMetric struct {
+	Metric
+
+	exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+	if err := m.Metric.Write(pb); err != nil {
+		return err
+	}
+
+	switch {
+	case pb.Counter != nil:
+		pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+	case pb.Histogram != nil:
+		h := pb.Histogram
+		for _, e := range m.exemplars {
+			if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+				len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+				e.GetTimestamp() != nil {
+				h.Exemplars = append(h.Exemplars, e)
+				if len(h.Bucket) == 0 {
+					// Don't proceed to classic buckets if there are none.
+					continue
+				}
+			}
+			// h.Bucket are sorted by UpperBound.
+			i := sort.Search(len(h.Bucket), func(i int) bool {
+				return h.Bucket[i].GetUpperBound() >= e.GetValue()
+			})
+			if i < len(h.Bucket) {
+				h.Bucket[i].Exemplar = e
+			} else {
+				// The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+				b := &dto.Bucket{
+					CumulativeCount: proto.Uint64(h.GetSampleCount()),
+					UpperBound:      proto.Float64(math.Inf(1)),
+					Exemplar:        e,
+				}
+				h.Bucket = append(h.Bucket, b)
+			}
+		}
+	default:
+		// TODO(bwplotka): Implement Gauge?
+		return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+	}
+
+	return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+	Value  float64
+	Labels Labels
+	// Optional.
+	// The default value (time.Time{}) indicates an unset timestamp, which is
+	// interpreted as time.Now() at the moment the metric is created.
+	Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, it means the last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+	if len(exemplars) == 0 {
+		return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+	}
+
+	var (
+		now = time.Now()
+		exs = make([]*dto.Exemplar, len(exemplars))
+		err error
+	)
+	for i, e := range exemplars {
+		ts := e.Timestamp
+		if ts.IsZero() {
+			ts = now
+		}
+		exs[i], err = newExemplar(e.Value, ts, e.Labels)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+	ret, err := NewMetricWithExemplars(m, exemplars...)
+	if err != nil {
+		panic(err)
+	}
+	return ret
+}
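+
+// Illustrative sketch (not part of the upstream file; desc and traceID are
+// assumed to exist), pairing a const counter with a trace exemplar:
+//
+//	c := MustNewConstMetric(desc, CounterValue, 1)
+//	ce := MustNewMetricWithExemplars(c, Exemplar{
+//		Value:  1,
+//		Labels: Labels{"trace_id": traceID},
+//	})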
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 0000000..7c12b21
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+	n, _ := runtime.ThreadCreateProfile(nil)
+	return float64(n)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 0000000..7348df0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+	return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 0000000..03773b2
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,64 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+	Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+	f(value)
+}
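+
+// Illustrative sketch of the Gauge use case named above (not part of the
+// upstream file; gauge is assumed to be a Gauge created elsewhere):
+//
+//	timer := NewTimer(ObserverFunc(gauge.Set))
+//	defer timer.ObserveDuration()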
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+	GetMetricWith(Labels) (Observer, error)
+	GetMetricWithLabelValues(lvs ...string) (Observer, error)
+	With(Labels) Observer
+	WithLabelValues(...string) Observer
+	CurryWith(Labels) (ObserverVec, error)
+	MustCurryWith(Labels) ObserverVec
+
+	Collector
+}
+
+// ExemplarObserver is implemented by Observers that offer the option of
+// observing a value together with an exemplar. Its ObserveWithExemplar method
+// works like the Observe method of an Observer but also replaces the currently
+// saved exemplar (if any) with a new one, created from the provided value, the
+// current time as timestamp, and the provided Labels. Empty Labels will lead to
+// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
+// left in place. ObserveWithExemplar panics if any of the provided labels are
+// invalid or if the provided labels contain more than 128 runes in total.
+type ExemplarObserver interface {
+	ObserveWithExemplar(value float64, exemplar Labels)
+}
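+
+// Illustrative sketch (not part of the upstream file; obs and traceID are
+// assumed to exist):
+//
+//	if eo, ok := obs.(ExemplarObserver); ok {
+//		eo.ObserveWithExemplar(0.42, Labels{"trace_id": traceID})
+//	}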
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
new file mode 100644
index 0000000..e7bce8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+type processCollector struct {
+	collectFn         func(chan<- Metric)
+	describeFn        func(chan<- *Desc)
+	pidFn             func() (int, error)
+	reportErrors      bool
+	cpuTotal          *Desc
+	openFDs, maxFDs   *Desc
+	vsize, maxVsize   *Desc
+	rss               *Desc
+	startTime         *Desc
+	inBytes, outBytes *Desc
+}
+
+// ProcessCollectorOpts defines the behavior of a process metrics collector
+// created with NewProcessCollector.
+type ProcessCollectorOpts struct {
+	// PidFn returns the PID of the process the collector collects metrics
+	// for. It is called upon each collection. By default, the PID of the
+	// current process is used, as determined on construction time by
+	// calling os.Getpid().
+	PidFn func() (int, error)
+	// If non-empty, each of the collected metrics is prefixed by the
+	// provided string and an underscore ("_").
+	Namespace string
+	// If true, any error encountered during collection is reported as an
+	// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
+	// and the collected metrics will be incomplete. (Possibly, no metrics
+	// will be collected at all.) While that's usually not desired, it is
+	// appropriate for the common "mix-in" of process metrics, where process
+	// metrics are nice to have, but failing to collect them should not
+	// disrupt the collection of the remaining metrics.
+	ReportErrors bool
+}
+
+// NewProcessCollector is the obsolete version of collectors.NewProcessCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewProcessCollector instead.
+func NewProcessCollector(opts ProcessCollectorOpts) Collector {
+	ns := ""
+	if len(opts.Namespace) > 0 {
+		ns = opts.Namespace + "_"
+	}
+
+	c := &processCollector{
+		reportErrors: opts.ReportErrors,
+		cpuTotal: NewDesc(
+			ns+"process_cpu_seconds_total",
+			"Total user and system CPU time spent in seconds.",
+			nil, nil,
+		),
+		openFDs: NewDesc(
+			ns+"process_open_fds",
+			"Number of open file descriptors.",
+			nil, nil,
+		),
+		maxFDs: NewDesc(
+			ns+"process_max_fds",
+			"Maximum number of open file descriptors.",
+			nil, nil,
+		),
+		vsize: NewDesc(
+			ns+"process_virtual_memory_bytes",
+			"Virtual memory size in bytes.",
+			nil, nil,
+		),
+		maxVsize: NewDesc(
+			ns+"process_virtual_memory_max_bytes",
+			"Maximum amount of virtual memory available in bytes.",
+			nil, nil,
+		),
+		rss: NewDesc(
+			ns+"process_resident_memory_bytes",
+			"Resident memory size in bytes.",
+			nil, nil,
+		),
+		startTime: NewDesc(
+			ns+"process_start_time_seconds",
+			"Start time of the process since unix epoch in seconds.",
+			nil, nil,
+		),
+		inBytes: NewDesc(
+			ns+"process_network_receive_bytes_total",
+			"Number of bytes received by the process over the network.",
+			nil, nil,
+		),
+		outBytes: NewDesc(
+			ns+"process_network_transmit_bytes_total",
+			"Number of bytes sent by the process over the network.",
+			nil, nil,
+		),
+	}
+
+	if opts.PidFn == nil {
+		c.pidFn = getPIDFn()
+	} else {
+		c.pidFn = opts.PidFn
+	}
+
+	// Set up process metric collection if supported by the runtime.
+	if canCollectProcess() {
+		c.collectFn = c.processCollect
+		c.describeFn = c.describe
+	} else {
+		c.collectFn = c.errorCollectFn
+		c.describeFn = c.errorDescribeFn
+	}
+
+	return c
+}
+
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+	c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+	if c.reportErrors {
+		ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+	}
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *processCollector) Collect(ch chan<- Metric) {
+	c.collectFn(ch)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+	c.describeFn(ch)
+}
+
+func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
+	if !c.reportErrors {
+		return
+	}
+	if desc == nil {
+		desc = NewInvalidDesc(err)
+	}
+	ch <- NewInvalidMetric(desc, err)
+}
+
+// NewPidFileFn returns a function that retrieves a pid from the specified file.
+// It is meant to be used for the PidFn field in ProcessCollectorOpts.
+func NewPidFileFn(pidFilePath string) func() (int, error) {
+	return func() (int, error) {
+		content, err := os.ReadFile(pidFilePath)
+		if err != nil {
+			return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
+		}
+		pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
+		if err != nil {
+			return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
+		}
+
+		return pid, nil
+	}
+}
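+
+// Illustrative sketch (not part of the upstream file; the pid file path is
+// hypothetical), wiring NewPidFileFn into a collector for another process:
+//
+//	c := NewProcessCollector(ProcessCollectorOpts{
+//		PidFn: NewPidFileFn("/var/run/app.pid"),
+//	})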
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
new file mode 100644
index 0000000..b32c95f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -0,0 +1,130 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// errNotImplemented is returned by stub functions that replace cgo functions
+// when cgo isn't available.
+var errNotImplemented = errors.New("not implemented")
+
+type memoryInfo struct {
+	vsize uint64 // Virtual memory size in bytes
+	rss   uint64 // Resident memory size in bytes
+}
+
+func canCollectProcess() bool {
+	return true
+}
+
+func getSoftLimit(which int) (uint64, error) {
+	rlimit := syscall.Rlimit{}
+
+	if err := syscall.Getrlimit(which, &rlimit); err != nil {
+		return 0, err
+	}
+
+	return rlimit.Cur, nil
+}
+
+func getOpenFileCount() (float64, error) {
+	// Alternatively, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
+	// return a list of open fds, but that requires a way to call C APIs.  The
+	// benefits, however, include fewer system calls and not failing when at the
+	// open file soft limit.
+
+	dir, err := os.Open("/dev/fd")
+	if err != nil {
+		return 0.0, err
+	}
+	defer dir.Close()
+
+	// Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is
+	// that info not used, but KQUEUE descriptors fail stat(2), which causes
+	// the whole method to fail.
+	names, err := dir.Readdirnames(0)
+	if err != nil {
+		return 0.0, err
+	}
+
+	// Subtract 1 to ignore the open /dev/fd descriptor above.
+	return float64(len(names) - 1), nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
+		if len(procs) == 1 {
+			startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, c.startTime, err)
+	}
+
+	// The proc structure returned by kern.proc.pid above has an Rusage member,
+	// but it is not filled in, so it needs to be fetched by getrusage(2).  For
+	// that call, the UTime, STime, and Maxrss members are filled out, but not
+	// Ixrss, Idrss, or Isrss for the memory usage.  Memory stats will require
+	// access to the C API to call task_info(TASK_BASIC_INFO).
+	rusage := unix.Rusage{}
+
+	if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+		cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+	} else {
+		c.reportError(ch, c.cpuTotal, err)
+	}
+
+	if memInfo, err := getMemory(); err == nil {
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+	} else if !errors.Is(err, errNotImplemented) {
+		// Don't report an error when support is not compiled in.
+		c.reportError(ch, c.rss, err)
+		c.reportError(ch, c.vsize, err)
+	}
+
+	if fds, err := getOpenFileCount(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+	} else {
+		c.reportError(ch, c.maxFDs, err)
+	}
+
+	if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+	} else {
+		c.reportError(ch, c.maxVsize, err)
+	}
+
+	// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+	//  be able to get the per-process network send/receive counts.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
new file mode 100644
index 0000000..d00a243
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
@@ -0,0 +1,84 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead.  But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT      0x90000000U
+#define SHARED_DATA_REGION_SIZE         0x10000000
+#define SHARED_TEXT_REGION_SIZE         0x10000000
+
+
+int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
+{
+    // This is lightly adapted from how ps(1) obtains its memory info.
+    // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
+
+    kern_return_t               error;
+    task_t                      task = MACH_PORT_NULL;
+    mach_task_basic_info_data_t info;
+    mach_msg_type_number_t      info_count = MACH_TASK_BASIC_INFO_COUNT;
+
+    error = task_info(
+                mach_task_self(),
+                MACH_TASK_BASIC_INFO,
+                (task_info_t) &info,
+                &info_count );
+
+    if( error != KERN_SUCCESS )
+    {
+        return error;
+    }
+
+    *rss   = info.resident_size;
+    *vsize = info.virtual_size;
+
+    {
+        vm_region_basic_info_data_64_t    b_info;
+        mach_vm_address_t                 address = GLOBAL_SHARED_TEXT_SEGMENT;
+        mach_vm_size_t                    size;
+        mach_port_t                       object_name;
+
+        /*
+         * try to determine if this task has the split libraries
+         * mapped in... if so, adjust its virtual size down by
+         * the 2 segments that are used for split libraries
+         */
+        info_count = VM_REGION_BASIC_INFO_COUNT_64;
+
+        error = mach_vm_region(
+                    mach_task_self(),
+                    &address,
+                    &size,
+                    VM_REGION_BASIC_INFO_64,
+                    (vm_region_info_t) &b_info,
+                    &info_count,
+                    &object_name);
+
+        if (error == KERN_SUCCESS) {
+            if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
+                *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
+                    *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
+            }
+        }
+    }
+
+    return 0;
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
new file mode 100644
index 0000000..9ac53f9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+package prometheus
+
+/*
+int get_memory_info(unsigned long long *rss, unsigned long long *vs);
+*/
+import "C"
+import "fmt"
+
+func getMemory() (*memoryInfo, error) {
+	var rss, vsize C.ulonglong
+
+	if err := C.get_memory_info(&rss, &vsize); err != 0 {
+		return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
+	}
+
+	return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+	ch <- c.rss
+	ch <- c.vsize
+
+	/* these metrics could be collected but are not implemented yet:
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
new file mode 100644
index 0000000..3788651
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && !cgo
+
+package prometheus
+
+func getMemory() (*memoryInfo, error) {
+	return nil, errNotImplemented
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.maxVsize
+	ch <- c.startTime
+
+	/* these metrics could be collected but are not implemented yet:
+	ch <- c.rss
+	ch <- c.vsize
+	ch <- c.inBytes
+	ch <- c.outBytes
+	*/
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
new file mode 100644
index 0000000..7732b7f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1 || js || ios
+// +build wasip1 js ios
+
+package prometheus
+
+func canCollectProcess() bool {
+	return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	c.errorCollectFn(ch)
+}
+
+// describe returns all descriptions of the collector for wasip1, js, and ios.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	c.errorDescribeFn(ch)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
new file mode 100644
index 0000000..8074f70
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -0,0 +1,96 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !js && !wasip1 && !darwin
+// +build !windows,!js,!wasip1,!darwin
+
+package prometheus
+
+import (
+	"github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+	_, err := procfs.NewDefaultFS()
+	return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	pid, err := c.pidFn()
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	p, err := procfs.NewProc(pid)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+
+	if stat, err := p.Stat(); err == nil {
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+		if startTime, err := stat.StartTime(); err == nil {
+			ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+		} else {
+			c.reportError(ch, c.startTime, err)
+		}
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if fds, err := p.FileDescriptorsLen(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if limits, err := p.Limits(); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+	} else {
+		c.reportError(ch, nil, err)
+	}
+
+	if netstat, err := p.Netstat(); err == nil {
+		var inOctets, outOctets float64
+		if netstat.InOctets != nil {
+			inOctets = *netstat.InOctets
+		}
+		if netstat.OutOctets != nil {
+			outOctets = *netstat.OutOctets
+		}
+		ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+		ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+	} else {
+		c.reportError(ch, nil, err)
+	}
+}
+
+// describe returns all descriptions of the collector for platforms other than windows, js, wasip1, and darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.maxVsize
+	ch <- c.rss
+	ch <- c.startTime
+	ch <- c.inBytes
+	ch <- c.outBytes
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
new file mode 100644
index 0000000..fa47428
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -0,0 +1,125 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+func canCollectProcess() bool {
+	return true
+}
+
+var (
+	modpsapi    = syscall.NewLazyDLL("psapi.dll")
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procGetProcessMemoryInfo  = modpsapi.NewProc("GetProcessMemoryInfo")
+	procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount")
+)
+
+type processMemoryCounters struct {
+	// System interface description
+	// https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-process_memory_counters_ex
+
+	// Refer to the Golang internal implementation
+	// https://golang.org/src/internal/syscall/windows/psapi_windows.go
+	_                          uint32
+	PageFaultCount             uint32
+	PeakWorkingSetSize         uintptr
+	WorkingSetSize             uintptr
+	QuotaPeakPagedPoolUsage    uintptr
+	QuotaPagedPoolUsage        uintptr
+	QuotaPeakNonPagedPoolUsage uintptr
+	QuotaNonPagedPoolUsage     uintptr
+	PagefileUsage              uintptr
+	PeakPagefileUsage          uintptr
+	PrivateUsage               uintptr
+}
+
+func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) {
+	mem := processMemoryCounters{}
+	r1, _, err := procGetProcessMemoryInfo.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&mem)),
+		uintptr(unsafe.Sizeof(mem)),
+	)
+	if r1 != 1 {
+		return mem, err
+	}
+	return mem, nil
+}
+
+func getProcessHandleCount(handle windows.Handle) (uint32, error) {
+	var count uint32
+	r1, _, err := procGetProcessHandleCount.Call(
+		uintptr(handle),
+		uintptr(unsafe.Pointer(&count)),
+	)
+	if r1 != 1 {
+		return 0, err
+	}
+	return count, nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+	h := windows.CurrentProcess()
+
+	var startTime, exitTime, kernelTime, userTime windows.Filetime
+	err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9))
+	ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime))
+
+	mem, err := getProcessMemoryInfo(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage))
+	ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize))
+
+	handles, err := getProcessHandleCount(h)
+	if err != nil {
+		c.reportError(ch, nil, err)
+		return
+	}
+	ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles))
+	ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
+}
+
+// describe returns all descriptions of the collector for windows.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+	ch <- c.cpuTotal
+	ch <- c.openFDs
+	ch <- c.maxFDs
+	ch <- c.vsize
+	ch <- c.rss
+	ch <- c.startTime
+}
+
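+// fileTimeToSeconds converts a Windows FILETIME, which counts time in
+// 100-nanosecond intervals, to seconds (hence the division by 1e7).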
+func fileTimeToSeconds(ft windows.Filetime) float64 {
+	return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
new file mode 100644
index 0000000..c6fd2f5
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -0,0 +1,1076 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode/utf8"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	"github.com/cespare/xxhash/v2"
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	// Capacity for the channel to collect metrics and descriptors.
+	capMetricChan = 1000
+	capDescChan   = 10
+)
+
+// DefaultRegisterer and DefaultGatherer are the implementations of the
+// Registerer and Gatherer interface that a number of convenience functions in
+// this package act on. Initially, both variables point to the same Registry, which
+// has a process collector (currently on Linux only, see NewProcessCollector)
+// and a Go collector (see NewGoCollector, in particular the note about
+// stop-the-world implication with Go versions older than 1.9) already
+// registered. This approach to keep default instances as global state mirrors
+// the approach of other packages in the Go standard library. Note that there
+// are caveats. Change the variables with caution and only if you understand the
+// consequences. Users who want to avoid global state altogether should not use
+// the convenience functions and act on custom instances instead.
+var (
+	defaultRegistry              = NewRegistry()
+	DefaultRegisterer Registerer = defaultRegistry
+	DefaultGatherer   Gatherer   = defaultRegistry
+)
+
+func init() {
+	MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
+	MustRegister(NewGoCollector())
+}
+
+// NewRegistry creates a new vanilla Registry without any Collectors
+// pre-registered.
+func NewRegistry() *Registry {
+	return &Registry{
+		collectorsByID:  map[uint64]Collector{},
+		descIDs:         map[uint64]struct{}{},
+		dimHashesByName: map[string]uint64{},
+	}
+}
+
+// NewPedanticRegistry returns a registry that checks during collection if each
+// collected Metric is consistent with its reported Desc, and if the Desc has
+// actually been registered with the registry. Unchecked Collectors (those whose
+// Describe method does not yield any descriptors) are excluded from the check.
+//
+// Usually, a Registry will be happy as long as the union of all collected
+// Metrics is consistent and valid even if some metrics are not consistent with
+// their own Desc or a Desc provided by their registered Collector. Well-behaved
+// Collectors and Metrics will only provide consistent Descs. This Registry is
+// useful to test the implementation of Collectors and Metrics.
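+//
+// A minimal usage sketch (myCollector is a placeholder for a Collector under
+// test):
+//
+//	reg := prometheus.NewPedanticRegistry()
+//	reg.MustRegister(myCollector)
+//	if _, err := reg.Gather(); err != nil {
+//		// The collector reported metrics inconsistent with its Descs.
+//	}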
+func NewPedanticRegistry() *Registry {
+	r := NewRegistry()
+	r.pedanticChecksEnabled = true
+	return r
+}
+
+// Registerer is the interface for the part of a registry in charge of
+// registering and unregistering. Users of custom registries should use
+// Registerer as type for registration purposes (rather than the Registry type
+// directly). In that way, they are free to use custom Registerer implementations
+// (e.g. for testing purposes).
+type Registerer interface {
+	// Register registers a new Collector to be included in metrics
+	// collection. It returns an error if the descriptors provided by the
+	// Collector are invalid or if they — in combination with descriptors of
+	// already registered Collectors — do not fulfill the consistency and
+	// uniqueness criteria described in the documentation of metric.Desc.
+	//
+	// If the provided Collector is equal to a Collector already registered
+	// (which includes the case of re-registering the same Collector), the
+	// returned error is an instance of AlreadyRegisteredError, which
+	// contains the previously registered Collector.
+	//
+	// A Collector whose Describe method does not yield any Desc is treated
+	// as unchecked. Registration will always succeed. No check for
+	// re-registering (see previous paragraph) is performed. Thus, the
+	// caller is responsible for not double-registering the same unchecked
+	// Collector, and for providing a Collector that will not cause
+	// inconsistent metrics on collection. (This would lead to scrape
+	// errors.)
+	Register(Collector) error
+	// MustRegister works like Register but registers any number of
+	// Collectors and panics upon the first registration that causes an
+	// error.
+	MustRegister(...Collector)
+	// Unregister unregisters the Collector that equals the Collector passed
+	// in as an argument.  (Two Collectors are considered equal if their
+	// Describe method yields the same set of descriptors.) The function
+	// returns whether a Collector was unregistered. Note that an unchecked
+	// Collector cannot be unregistered (as its Describe method does not
+	// yield any descriptor).
+	//
+	// Note that even after unregistering, it will not be possible to
+	// register a new Collector that is inconsistent with the unregistered
+	// Collector, e.g. a Collector collecting metrics with the same name but
+	// a different help string. The rationale here is that the same registry
+	// instance must only collect consistent metrics throughout its
+	// lifetime.
+	Unregister(Collector) bool
+}
+
+// Gatherer is the interface for the part of a registry in charge of gathering
+// the collected metrics into a number of MetricFamilies. The Gatherer interface
+// comes with the same general implication as described for the Registerer
+// interface.
+type Gatherer interface {
+	// Gather calls the Collect method of the registered Collectors and then
+	// gathers the collected metrics into a lexicographically sorted slice
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
+	Gather() ([]*dto.MetricFamily, error)
+}
+
+// Register registers the provided Collector with the DefaultRegisterer.
+//
+// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
+// details.
+func Register(c Collector) error {
+	return DefaultRegisterer.Register(c)
+}
+
+// MustRegister registers the provided Collectors with the DefaultRegisterer and
+// panics if any error occurs.
+//
+// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
+// there for more details.
+func MustRegister(cs ...Collector) {
+	DefaultRegisterer.MustRegister(cs...)
+}
+
+// Unregister removes the registration of the provided Collector from the
+// DefaultRegisterer.
+//
+// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
+// more details.
+func Unregister(c Collector) bool {
+	return DefaultRegisterer.Unregister(c)
+}
+
+// GathererFunc turns a function into a Gatherer.
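+//
+// For example (a sketch; mfs is a placeholder slice prepared elsewhere):
+//
+//	g := prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {
+//		return mfs, nil
+//	})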
+type GathererFunc func() ([]*dto.MetricFamily, error)
+
+// Gather implements Gatherer.
+func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
+	return gf()
+}
+
+// AlreadyRegisteredError is returned by the Register method if the Collector to
+// be registered has already been registered before, or a different Collector
+// that collects the same metrics has been registered before. Registration fails
+// in that case, but you can detect from the kind of error what has
+// happened. The error contains fields for the existing Collector and the
+// (rejected) new Collector that equals the existing one. This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
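+//
+// A sketch of that switch-over pattern (reqCounter is a placeholder name):
+//
+//	if err := prometheus.Register(reqCounter); err != nil {
+//		are := prometheus.AlreadyRegisteredError{}
+//		if errors.As(err, &are) {
+//			// Use the Collector registered before.
+//			reqCounter = are.ExistingCollector.(prometheus.Counter)
+//		} else {
+//			panic(err)
+//		}
+//	}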
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+// Error formats the contained errors as a bullet point list, preceded by the
+// total number of errors. Note that this results in a multi-line string.
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
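+//
+// A typical pattern (sketch; doA and doB are placeholder funcs returning
+// error):
+//
+//	var errs prometheus.MultiError
+//	errs.Append(doA())
+//	errs.Append(doB())
+//	return errs.MaybeUnwrap()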
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // All desc IDs XOR'd together.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, XOR it to
+		// the collectorID.  (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID ^= desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+		if dimHash, exists := r.dimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
+			}
+			continue
+		}
+
+		// ...then check the new descriptors already seen.
+		if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+			if dimHash != desc.dimHash {
+				return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+			}
+			continue
+		}
+		newDimHashesByName[desc.fqName] = desc.dimHash
+	}
+	// A Collector yielding no Desc at all is considered unchecked.
+	if len(newDescIDs) == 0 {
+		r.uncheckedCollectors = append(r.uncheckedCollectors, c)
+		return nil
+	}
+	if existing, exists := r.collectorsByID[collectorID]; exists {
+		switch e := existing.(type) {
+		case *wrappingCollector:
+			return AlreadyRegisteredError{
+				ExistingCollector: e.unwrapRecursively(),
+				NewCollector:      c,
+			}
+		default:
+			return AlreadyRegisteredError{
+				ExistingCollector: e,
+				NewCollector:      c,
+			}
+		}
+	}
+	// If the collectorID is new, but at least one of the descs existed
+	// before, we are in trouble.
+	if duplicateDescErr != nil {
+		return duplicateDescErr
+	}
+
+	// Only after all tests have passed, actually register.
+	r.collectorsByID[collectorID] = c
+	for hash := range newDescIDs {
+		r.descIDs[hash] = struct{}{}
+	}
+	for name, dimHash := range newDimHashesByName {
+		r.dimHashesByName[name] = dimHash
+	}
+	return nil
+}
+
+// Unregister implements Registerer.
+func (r *Registry) Unregister(c Collector) bool {
+	var (
+		descChan    = make(chan *Desc, capDescChan)
+		descIDs     = map[uint64]struct{}{}
+		collectorID uint64 // All desc IDs XOR'd together.
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	for desc := range descChan {
+		if _, exists := descIDs[desc.id]; !exists {
+			collectorID ^= desc.id
+			descIDs[desc.id] = struct{}{}
+		}
+	}
+
+	r.mtx.RLock()
+	if _, exists := r.collectorsByID[collectorID]; !exists {
+		r.mtx.RUnlock()
+		return false
+	}
+	r.mtx.RUnlock()
+
+	r.mtx.Lock()
+	defer r.mtx.Unlock()
+
+	delete(r.collectorsByID, collectorID)
+	for id := range descIDs {
+		delete(r.descIDs, id)
+	}
+	// dimHashesByName is left untouched as those must be consistent
+	// throughout the lifetime of a program.
+	return true
+}
+
+// MustRegister implements Registerer.
+func (r *Registry) MustRegister(cs ...Collector) {
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+// Gather implements Gatherer.
+func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+	r.mtx.RLock()
+
+	if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+		// Fast path.
+		r.mtx.RUnlock()
+		return nil, nil
+	}
+
+	var (
+		checkedMetricChan   = make(chan Metric, capMetricChan)
+		uncheckedMetricChan = make(chan Metric, capMetricChan)
+		metricHashes        = map[uint64]struct{}{}
+		wg                  sync.WaitGroup
+		errs                MultiError          // The collected errors to return in the end.
+		registeredDescIDs   map[uint64]struct{} // Only used for pedantic checks
+	)
+
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+	for _, collector := range r.collectorsByID {
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
+	}
+	// In case pedantic checks are enabled, we have to copy the map before
+	// giving up the RLock.
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+	defer func() {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
+		}
+	}()
+
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
+			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on or
+				// we have already started as many goroutines as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	// Only report the checked Collectors; unchecked collectors don't report any
+	// Desc.
+	for _, c := range r.collectorsByID {
+		c.Describe(ch)
+	}
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+
+	for _, c := range r.collectorsByID {
+		c.Collect(ch)
+	}
+	for _, c := range r.uncheckedCollectors {
+		c.Collect(ch)
+	}
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
+// Note that the node exporter expects the filename to be suffixed with ".prom".
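+//
+// A minimal sketch (the target path is illustrative and depends on how the
+// textfile collector is configured):
+//
+//	if err := prometheus.WriteToTextfile(
+//		"/var/lib/node_exporter/textfile/app.prom",
+//		prometheus.DefaultGatherer,
+//	); err != nil {
+//		log.Fatal(err)
+//	}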
+func WriteToTextfile(filename string, g Gatherer) error {
+	tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
+	if err != nil {
+		return err
+	}
+	defer os.Remove(tmp.Name())
+
+	mfs, err := g.Gather()
+	if err != nil {
+		return err
+	}
+	for _, mf := range mfs {
+		if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil {
+			return err
+		}
+	}
+	if err := tmp.Close(); err != nil {
+		return err
+	}
+
+	if err := os.Chmod(tmp.Name(), 0o644); err != nil {
+		return err
+	}
+	return os.Rename(tmp.Name(), filename)
+}
+
+// processMetric is an internal helper method only used by the Gather method.
+func processMetric(
+	metric Metric,
+	metricFamiliesByName map[string]*dto.MetricFamily,
+	metricHashes map[uint64]struct{},
+	registeredDescIDs map[uint64]struct{},
+) error {
+	desc := metric.Desc()
+	// Wrapped metrics collected by an unchecked Collector can have an
+	// invalid Desc.
+	if desc.err != nil {
+		return desc.err
+	}
+	dtoMetric := &dto.Metric{}
+	if err := metric.Write(dtoMetric); err != nil {
+		return fmt.Errorf("error collecting metric %v: %w", desc, err)
+	}
+	metricFamily, ok := metricFamiliesByName[desc.fqName]
+	if ok { // Existing name.
+		if metricFamily.GetHelp() != desc.help {
+			return fmt.Errorf(
+				"collected metric %s %s has help %q but should have %q",
+				desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
+			)
+		}
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch metricFamily.GetType() {
+		case dto.MetricType_COUNTER:
+			if dtoMetric.Counter == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Counter",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_GAUGE:
+			if dtoMetric.Gauge == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Gauge",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_SUMMARY:
+			if dtoMetric.Summary == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Summary",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_UNTYPED:
+			if dtoMetric.Untyped == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be Untyped",
+					desc.fqName, dtoMetric,
+				)
+			}
+		case dto.MetricType_HISTOGRAM:
+			if dtoMetric.Histogram == nil {
+				return fmt.Errorf(
+					"collected metric %s %s should be a Histogram",
+					desc.fqName, dtoMetric,
+				)
+			}
+		default:
+			panic("encountered MetricFamily with invalid type")
+		}
+	} else { // New name.
+		metricFamily = &dto.MetricFamily{}
+		metricFamily.Name = proto.String(desc.fqName)
+		metricFamily.Help = proto.String(desc.help)
+		// TODO(beorn7): Simplify switch once Desc has type.
+		switch {
+		case dtoMetric.Gauge != nil:
+			metricFamily.Type = dto.MetricType_GAUGE.Enum()
+		case dtoMetric.Counter != nil:
+			metricFamily.Type = dto.MetricType_COUNTER.Enum()
+		case dtoMetric.Summary != nil:
+			metricFamily.Type = dto.MetricType_SUMMARY.Enum()
+		case dtoMetric.Untyped != nil:
+			metricFamily.Type = dto.MetricType_UNTYPED.Enum()
+		case dtoMetric.Histogram != nil:
+			metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
+		default:
+			return fmt.Errorf("empty metric collected: %s", dtoMetric)
+		}
+		if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
+			return err
+		}
+		metricFamiliesByName[desc.fqName] = metricFamily
+	}
+	if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
+		return err
+	}
+	if registeredDescIDs != nil {
+		// Is the desc registered at all?
+		if _, exist := registeredDescIDs[desc.id]; !exist {
+			return fmt.Errorf(
+				"collected metric %s %s with unregistered descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+		if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
+			return err
+		}
+	}
+	metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
+	return nil
+}
+
+// Gatherers is a slice of Gatherer instances that implements the Gatherer
+// interface itself. Its Gather method calls Gather on all Gatherers in the
+// slice in order and returns the merged results. Errors returned from the
+// Gather calls are all returned in a flattened MultiError. Duplicate and
+// inconsistent Metrics are skipped (first occurrence in slice order wins) and
+// reported in the returned error.
+//
+// Gatherers can be used to merge the Gather results from multiple
+// Registries. It also provides a way to directly inject existing MetricFamily
+// protobufs into the gathering by creating a custom Gatherer with a Gather
+// method that simply returns the existing MetricFamily protobufs. Note that no
+// registration is involved (in contrast to Collector registration), so
+// obviously registration-time checks cannot happen. Any inconsistencies between
+// the gathered MetricFamilies are reported as errors by the Gather method, and
+// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
+// (e.g. syntactically invalid metric or label names) will go undetected.
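+//
+// For example (a sketch; reg1 and reg2 are placeholder Registries):
+//
+//	g := prometheus.Gatherers{reg1, reg2}
+//	mfs, err := g.Gather()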
+type Gatherers []Gatherer
+
+// Gather implements Gatherer.
+func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
+	var (
+		metricFamiliesByName = map[string]*dto.MetricFamily{}
+		metricHashes         = map[uint64]struct{}{}
+		errs                 MultiError // The collected errors to return in the end.
+	)
+
+	for i, g := range gs {
+		mfs, err := g.Gather()
+		if err != nil {
+			multiErr := MultiError{}
+			if errors.As(err, &multiErr) {
+				for _, err := range multiErr {
+					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
+				}
+			} else {
+				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
+			}
+		}
+		for _, mf := range mfs {
+			existingMF, exists := metricFamiliesByName[mf.GetName()]
+			if exists {
+				if existingMF.GetHelp() != mf.GetHelp() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has help %q but should have %q",
+						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
+					))
+					continue
+				}
+				if existingMF.GetType() != mf.GetType() {
+					errs = append(errs, fmt.Errorf(
+						"gathered metric family %s has type %s but should have %s",
+						mf.GetName(), mf.GetType(), existingMF.GetType(),
+					))
+					continue
+				}
+			} else {
+				existingMF = &dto.MetricFamily{}
+				existingMF.Name = mf.Name
+				existingMF.Help = mf.Help
+				existingMF.Type = mf.Type
+				if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				metricFamiliesByName[mf.GetName()] = existingMF
+			}
+			for _, m := range mf.Metric {
+				if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
+					errs = append(errs, err)
+					continue
+				}
+				existingMF.Metric = append(existingMF.Metric, m)
+			}
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// checkSuffixCollisions checks for collisions with the “magic” suffixes the
+// Prometheus text format and the internal metric representation of the
+// Prometheus server add while flattening Summaries and Histograms.
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
+	var (
+		newName              = mf.GetName()
+		newType              = mf.GetType()
+		newNameWithoutSuffix = ""
+	)
+	switch {
+	case strings.HasSuffix(newName, "_count"):
+		newNameWithoutSuffix = newName[:len(newName)-6]
+	case strings.HasSuffix(newName, "_sum"):
+		newNameWithoutSuffix = newName[:len(newName)-4]
+	case strings.HasSuffix(newName, "_bucket"):
+		newNameWithoutSuffix = newName[:len(newName)-7]
+	}
+	if newNameWithoutSuffix != "" {
+		if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
+			switch existingMF.GetType() {
+			case dto.MetricType_SUMMARY:
+				if !strings.HasSuffix(newName, "_bucket") {
+					return fmt.Errorf(
+						"collected metric named %q collides with previously collected summary named %q",
+						newName, newNameWithoutSuffix,
+					)
+				}
+			case dto.MetricType_HISTOGRAM:
+				return fmt.Errorf(
+					"collected metric named %q collides with previously collected histogram named %q",
+					newName, newNameWithoutSuffix,
+				)
+			}
+		}
+	}
+	if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_count"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_count",
+			)
+		}
+		if _, ok := mfs[newName+"_sum"]; ok {
+			return fmt.Errorf(
+				"collected histogram or summary named %q collides with previously collected metric named %q",
+				newName, newName+"_sum",
+			)
+		}
+	}
+	if newType == dto.MetricType_HISTOGRAM {
+		if _, ok := mfs[newName+"_bucket"]; ok {
+			return fmt.Errorf(
+				"collected histogram named %q collides with previously collected metric named %q",
+				newName, newName+"_bucket",
+			)
+		}
+	}
+	return nil
+}
+
+// checkMetricConsistency checks if the provided Metric is consistent with the
+// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
+// name. If the resulting hash is already in the provided metricHashes, an error
+// is returned. If not, it is added to metricHashes.
+func checkMetricConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	metricHashes map[uint64]struct{},
+) error {
+	name := metricFamily.GetName()
+
+	// Type consistency with metric family.
+	if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
+		metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
+		metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||
+		metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
+		metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
+		return fmt.Errorf(
+			"collected metric %q { %s} is not a %s",
+			name, dtoMetric, metricFamily.GetType(),
+		)
+	}
+
+	previousLabelName := ""
+	for _, labelPair := range dtoMetric.GetLabel() {
+		labelName := labelPair.GetName()
+		if labelName == previousLabelName {
+			return fmt.Errorf(
+				"collected metric %q { %s} has two or more labels with the same name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if !checkLabelName(labelName) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label with an invalid name: %s",
+				name, dtoMetric, labelName,
+			)
+		}
+		if dtoMetric.Summary != nil && labelName == quantileLabel {
+			return fmt.Errorf(
+				"collected metric %q { %s} must not have an explicit %q label",
+				name, dtoMetric, quantileLabel,
+			)
+		}
+		if !utf8.ValidString(labelPair.GetValue()) {
+			return fmt.Errorf(
+				"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
+				name, dtoMetric, labelName, labelPair.GetValue())
+		}
+		previousLabelName = labelName
+	}
+
+	// Is the metric unique (i.e. no other metric with the same name and the same labels)?
+	h := xxhash.New()
+	h.WriteString(name)
+	h.Write(separatorByteSlice)
+	// Make sure label pairs are sorted. We depend on it for the consistency
+	// check.
+	if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
+		// We cannot sort dtoMetric.Label in place as it is immutable by contract.
+		copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
+		copy(copiedLabels, dtoMetric.Label)
+		sort.Sort(internal.LabelPairSorter(copiedLabels))
+		dtoMetric.Label = copiedLabels
+	}
+	for _, lp := range dtoMetric.Label {
+		h.WriteString(lp.GetName())
+		h.Write(separatorByteSlice)
+		h.WriteString(lp.GetValue())
+		h.Write(separatorByteSlice)
+	}
+	if dtoMetric.TimestampMs != nil {
+		h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+		h.Write(separatorByteSlice)
+	}
+	hSum := h.Sum64()
+	if _, exists := metricHashes[hSum]; exists {
+		return fmt.Errorf(
+			"collected metric %q { %s} was collected before with the same name and label values",
+			name, dtoMetric,
+		)
+	}
+	metricHashes[hSum] = struct{}{}
+	return nil
+}
+
+func checkDescConsistency(
+	metricFamily *dto.MetricFamily,
+	dtoMetric *dto.Metric,
+	desc *Desc,
+) error {
+	// Desc help consistency with metric family help.
+	if metricFamily.GetHelp() != desc.help {
+		return fmt.Errorf(
+			"collected metric %s %s has help %q but should have %q",
+			metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,
+		)
+	}
+
+	// Is the desc consistent with the content of the metric?
+	lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
+	copy(lpsFromDesc, desc.constLabelPairs)
+	for _, l := range desc.variableLabels.names {
+		lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
+			Name: proto.String(l),
+		})
+	}
+	if len(lpsFromDesc) != len(dtoMetric.Label) {
+		return fmt.Errorf(
+			"labels in collected metric %s %s are inconsistent with descriptor %s",
+			metricFamily.GetName(), dtoMetric, desc,
+		)
+	}
+	sort.Sort(internal.LabelPairSorter(lpsFromDesc))
+	for i, lpFromDesc := range lpsFromDesc {
+		lpFromMetric := dtoMetric.Label[i]
+		if lpFromDesc.GetName() != lpFromMetric.GetName() ||
+			lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {
+			return fmt.Errorf(
+				"labels in collected metric %s %s are inconsistent with descriptor %s",
+				metricFamily.GetName(), dtoMetric, desc,
+			)
+		}
+	}
+	return nil
+}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure that the gatherers have mutually
+// exclusive metric families; no deduplication will happen.
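+//
+// A sketch of combining the default Gatherer with another transactional one
+// (otherTG is a placeholder):
+//
+//	tg := prometheus.NewMultiTRegistry(
+//		prometheus.ToTransactionalGatherer(prometheus.DefaultGatherer),
+//		otherTG,
+//	)
+//	mfs, done, err := tg.Gather()
+//	defer done() // done must be called even if err is non-nil.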
+type MultiTRegistry struct {
+	tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+	return &MultiTRegistry{
+		tGatherers: tGatherers,
+	}
+}
+
+// Gather implements TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+	errs := MultiError{}
+
+	dFns := make([]func(), 0, len(r.tGatherers))
+	// TODO(bwplotka): Implement concurrency for those?
+	for _, g := range r.tGatherers {
+		// TODO(bwplotka): Check for duplicates?
+		m, d, err := g.Gather()
+		errs.Append(err)
+
+		mfs = append(mfs, m...)
+		dFns = append(dFns, d)
+	}
+
+	// TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+	sort.Slice(mfs, func(i, j int) bool {
+		return *mfs[i].Name < *mfs[j].Name
+	})
+	return mfs, func() {
+		for _, d := range dFns {
+			d()
+		}
+	}, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a transactional gatherer that can be
+// notified, via the returned done function, that the memory used by the
+// gathered metric families is no longer needed by the caller. This allows
+// implementations that cache results.
+type TransactionalGatherer interface {
+	// Gather returns metrics in a lexicographically sorted slice
+	// of uniquely named MetricFamily protobufs. Gather ensures that the
+	// returned slice is valid and self-consistent so that it can be used
+	// for valid exposition. As an exception to the strict consistency
+	// requirements described for metric.Desc, Gather will tolerate
+	// different sets of label names for metrics of the same metric family.
+	//
+	// Even if an error occurs, Gather attempts to gather as many metrics as
+	// possible. Hence, if a non-nil error is returned, the returned
+	// MetricFamily slice could be nil (in case of a fatal error that
+	// prevented any meaningful metric collection) or contain a number of
+	// MetricFamily protobufs, some of which might be incomplete, and some
+	// might be missing altogether. The returned error (which might be a
+	// MultiError) explains the details. Note that this is mostly useful for
+	// debugging purposes. If the gathered protobufs are to be used for
+	// exposition in actual monitoring, it is almost always better to not
+	// expose an incomplete result and instead disregard the returned
+	// MetricFamily protobufs in case the returned error is non-nil.
+	//
+	// Important: done is expected to be called (even if an error occurs!)
+	// once the caller no longer needs the returned slice of dto.MetricFamily.
+	Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer
+// with a no-op done function.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+	return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+	g Gatherer
+}
+
+// Gather implements TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+	mfs, err := g.g.Gather()
+	return mfs, func() {}, err
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
new file mode 100644
index 0000000..ac5203c
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -0,0 +1,830 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"math"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/beorn7/perks/quantile"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// quantileLabel is used for the label that defines the quantile in a
+// summary.
+const quantileLabel = "quantile"
+
+// A Summary captures individual observations from an event or sample stream and
+// summarizes them in a manner similar to traditional summary statistics: 1. sum
+// of observations, 2. observation count, 3. rank estimations.
+//
+// A typical use-case is the observation of request latencies. By default, a
+// Summary provides the median, the 90th and the 99th percentile of the latency
+// as rank estimations. However, the default behavior will change in the
+// upcoming v1.0.0 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
+//
+// Note that the rank estimations cannot be aggregated in a meaningful way with
+// the Prometheus query language (i.e. you cannot average or add them). If you
+// need aggregatable quantiles (e.g. you want the 99th percentile latency of all
+// queries served across all instances of a service), consider the Histogram
+// metric type. See the Prometheus documentation for more details.
+//
+// To create Summary instances, use NewSummary.
+type Summary interface {
+	Metric
+	Collector
+
+	// Observe adds a single observation to the summary. Observations are
+	// usually positive or zero. Negative observations are accepted but
+	// prevent current versions of Prometheus from properly detecting
+	// counter resets in the sum of observations. See
+	// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
+	// for details.
+	Observe(float64)
+}
+
+var errQuantileLabelNotAllowed = fmt.Errorf(
+	"%q is not allowed as label name in summaries", quantileLabel,
+)
+
+// Default values for SummaryOpts.
+const (
+	// DefMaxAge is the default duration for which observations stay
+	// relevant.
+	DefMaxAge time.Duration = 10 * time.Minute
+	// DefAgeBuckets is the default number of buckets used to calculate the
+	// age of observations.
+	DefAgeBuckets = 5
+	// DefBufCap is the standard buffer size for collecting Summary observations.
+	DefBufCap = 500
+)
+
+// SummaryOpts bundles the options for creating a Summary metric. It is
+// mandatory to set Name to a non-empty string. While all other fields are
+// optional and can safely be left at their zero value, it is recommended to set
+// a help string and to explicitly set the Objectives field to the desired value
+// as the default value will change in the upcoming v1.0.0 of the library.
+type SummaryOpts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Summary (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the Summary must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this Summary.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// Due to the way a Summary is represented in the Prometheus text format
+	// and how it is handled by the Prometheus server internally, “quantile”
+	// is an illegal label name. Construction of a Summary or SummaryVec
+	// will panic if this label name is used in ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+	ConstLabels Labels
+
+	// Objectives defines the quantile rank estimates with their respective
+	// absolute error. If Objectives[q] = e, then the value reported for q
+	// will be the φ-quantile value for some φ between q-e and q+e.  The
+	// default value is an empty map, resulting in a summary without
+	// quantiles.
+	Objectives map[float64]float64
+
+	// MaxAge defines the duration for which an observation stays relevant
+	// for the summary. Only applies to pre-calculated quantiles, does not
+	// apply to _sum and _count. Must be positive. The default value is
+	// DefMaxAge.
+	MaxAge time.Duration
+
+	// AgeBuckets is the number of buckets used to exclude observations that
+	// are older than MaxAge from the summary. A higher number has a
+	// resource penalty, so only increase it if the higher resolution is
+	// really required. For very high observation rates, you might want to
+	// reduce the number of age buckets. With only one age bucket, you will
+	// effectively see a complete reset of the summary each time MaxAge has
+	// passed. The default value is DefAgeBuckets.
+	AgeBuckets uint32
+
+	// BufCap defines the default sample stream buffer size.  The default
+	// value of DefBufCap should suffice for most uses. If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/bmizerany/perks/quantile").
+	BufCap uint32
+
+	// now is for testing purposes, by default it's time.Now.
+	now func() time.Time
+}
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+	SummaryOpts
+
+	// VariableLabels are used to partition the metric vector by the given set
+	// of labels. Each label value will be constrained with the optional Constraint
+	// function, if provided.
+	VariableLabels ConstrainableLabels
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. On scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// on observation time, less effort on scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
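+//
+// A minimal sketch with explicitly set Objectives (the metric name and
+// objective values are illustrative):
+//
+//	s := prometheus.NewSummary(prometheus.SummaryOpts{
+//		Name:       "request_duration_seconds",
+//		Help:       "Request latency distribution.",
+//		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
+//	})
+//	s.Observe(0.27)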
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels.names) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
+	}
+
+	for _, n := range desc.variableLabels.names {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if opts.Objectives == nil {
+		opts.Objectives = map[float64]float64{}
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	if opts.now == nil {
+		opts.now = time.Now
+	}
+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: MakeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{{}, {}},
+		}
+		s.init(s) // Init self-collection.
+		s.createdTs = timestamppb.New(opts.now())
+		return s
+	}
+
+	s := &summary{
+		desc: desc,
+		now:  opts.now,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: MakeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = opts.now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	s.createdTs = timestamppb.New(opts.now())
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+
+	desc *Desc
+
+	now func() time.Time
+
+	objectives       map[float64]float64
+	sortedObjectives []float64
+
+	labelPairs []*dto.LabelPair
+
+	sum float64
+	cnt uint64
+
+	hotBuf, coldBuf []float64
+
+	streams                          []*quantile.Stream
+	streamDuration                   time.Duration
+	headStream                       *quantile.Stream
+	headStreamIdx                    int
+	headStreamExpTime, hotBufExpTime time.Time
+
+	createdTs *timestamppb.Timestamp
+}
+
+func (s *summary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+	s.bufMtx.Lock()
+	defer s.bufMtx.Unlock()
+
+	now := s.now()
+	if now.After(s.hotBufExpTime) {
+		s.asyncFlush(now)
+	}
+	s.hotBuf = append(s.hotBuf, v)
+	if len(s.hotBuf) == cap(s.hotBuf) {
+		s.asyncFlush(now)
+	}
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{
+		CreatedTimestamp: s.createdTs,
+	}
+	qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+	s.bufMtx.Lock()
+	s.mtx.Lock()
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(s.now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction.  But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// rotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type summaryCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. Observe starts by incrementing this counter,
+	// and finishes by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot and cold counts under the
+	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
+	// number of observations with the initiation count. Once they match, the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the summaryCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+
+	createdTs *timestamppb.Timestamp
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment s.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which counts is hot. The complement
+	// is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount:      proto.Uint64(count),
+		SampleSum:        proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+		CreatedTimestamp: s.createdTs,
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
+	atomic.AddUint64(&hotCounts.count, count)
+	atomic.StoreUint64(&coldCounts.count, 0)
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum())
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			atomic.StoreUint64(&coldCounts.sumBits, 0)
+			break
+		}
+	}
+	return nil
+}
+
+type quantSort []*dto.Quantile
+
+func (s quantSort) Len() int {
+	return len(s)
+}
+
+func (s quantSort) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s quantSort) Less(i, j int) bool {
+	return s[i].GetQuantile() < s[j].GetQuantile()
+}
+
+// SummaryVec is a Collector that bundles a set of Summaries that all share the
+// same Desc, but have different values for their variable labels. This is used
+// if you want to count the same thing partitioned by various dimensions
+// (e.g. HTTP request latencies, partitioned by status code and method). Create
+// instances with NewSummaryVec.
+type SummaryVec struct {
+	*MetricVec
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
+// partitioned by the given label names.
+//
+// Due to the way a Summary is represented in the Prometheus text format and how
+// it is handled by the Prometheus server internally, “quantile” is an illegal
+// label name. NewSummaryVec will panic if this label name is used.
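+//
+// For example (metric and label names are illustrative):
+//
+//	vec := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+//		Name: "http_request_duration_seconds",
+//		Help: "HTTP request latencies.",
+//	}, []string{"code", "method"})
+//	vec.WithLabelValues("200", "GET").Observe(0.42)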
+func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
+	return V2.NewSummaryVec(SummaryVecOpts{
+		SummaryOpts:    opts,
+		VariableLabels: UnconstrainedLabels(labelNames),
+	})
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+	for _, ln := range opts.VariableLabels.labelNames() {
+		if ln == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	desc := V2.NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		opts.VariableLabels,
+		opts.ConstLabels,
+	)
+	return &SummaryVec{
+		MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
+			return newSummary(desc, opts.SummaryOpts, lvs...)
+		}),
+	}
+}
+
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case,
+// the Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWithLabelValues(lvs...)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.MetricVec.GetMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWithLabels would have
+// returned an error. Not returning an error allows shortcuts like
+//
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.MetricVec.CurryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
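+
+// Illustrative usage sketch, not part of the upstream file: creating a
+// SummaryVec, currying it, and observing. requestLatency is hypothetical;
+// NewSummaryVec here is the regular v1 constructor taking label names.
+//
+//	requestLatency := prometheus.NewSummaryVec(prometheus.SummaryOpts{
+//		Name: "request_latency_seconds",
+//		Help: "Latency of handled requests.",
+//	}, []string{"method", "path"})
+//	getOnly := requestLatency.MustCurryWith(prometheus.Labels{"method": "GET"})
+//	getOnly.WithLabelValues("/home").Observe(0.42)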
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+	createdTs  *timestamppb.Timestamp
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{
+		CreatedTimestamp: s.createdTs,
+	}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
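+
+// A hedged sketch of the intended use inside a custom Collector's Collect
+// method (myCollector and its summaryDesc field are hypothetical):
+//
+//	func (c *myCollector) Collect(ch chan<- prometheus.Metric) {
+//		ch <- prometheus.MustNewConstSummary(
+//			c.summaryDesc,
+//			4711, 403.34,
+//			map[float64]float64{0.5: 0.23, 0.99: 0.56},
+//			"example_label_value",
+//		)
+//	}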
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: MakeLabelPairs(desc, labelValues),
+		createdTs:  timestamppb.New(ct),
+	}, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	ct time.Time,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 0000000..52344fe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,81 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "time"
+
+// Timer is a helper type to time functions. Use NewTimer to create new
+// instances.
+type Timer struct {
+	begin    time.Time
+	observer Observer
+}
+
+// NewTimer creates a new Timer. The provided Observer is used to observe a
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on is also supported. A Timer is usually used to time a
+// function call in the following way:
+//
+//	func TimeMe() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDuration()
+//	    // Do actual work.
+//	}
+//
+// or
+//
+//	func TimeMeWithExemplar() {
+//	    timer := NewTimer(myHistogram)
+//	    defer timer.ObserveDurationWithExemplar(exemplar)
+//	    // Do actual work.
+//	}
+func NewTimer(o Observer) *Timer {
+	return &Timer{
+		begin:    time.Now(),
+		observer: o,
+	}
+}
+
+// ObserveDuration records the duration passed since the Timer was created with
+// NewTimer. It calls the Observe method of the Observer provided during
+// construction with the duration in seconds as an argument. The observed
+// duration is also returned. ObserveDuration is usually called with a defer
+// statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go 1.9 or later.
+func (t *Timer) ObserveDuration() time.Duration {
+	d := time.Since(t.begin)
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it will also
+// observe the exemplar with the duration, unless the exemplar is nil or the
+// provided Observer cannot be cast to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+	d := time.Since(t.begin)
+	eo, ok := t.observer.(ExemplarObserver)
+	if ok && exemplar != nil {
+		eo.ObserveWithExemplar(d.Seconds(), exemplar)
+		return d
+	}
+	if t.observer != nil {
+		t.observer.Observe(d.Seconds())
+	}
+	return d
+}
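+
+// A minimal usage sketch, not part of the upstream file; requestDuration is
+// assumed to be a Histogram or Summary, and traceID a request-scoped string:
+//
+//	timer := prometheus.NewTimer(requestDuration)
+//	defer timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": traceID})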
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
new file mode 100644
index 0000000..0f9ce63
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// UntypedOpts is an alias for Opts. See there for doc comments.
+type UntypedOpts Opts
+
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
+//
+// To create UntypedFunc instances, use NewUntypedFunc.
+type UntypedFunc interface {
+	Metric
+	Collector
+}
+
+// NewUntypedFunc creates a new UntypedFunc based on the provided
+// UntypedOpts. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where an UntypedFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {
+	return newValueFunc(NewDesc(
+		BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		opts.Help,
+		nil,
+		opts.ConstLabels,
+	), UntypedValue, function)
+}
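+
+// A hedged sketch of mirroring an external value as an untyped metric;
+// readExternalValue is a hypothetical, concurrency-safe accessor:
+//
+//	prometheus.MustRegister(prometheus.NewUntypedFunc(
+//		prometheus.UntypedOpts{
+//			Name: "external_value",
+//			Help: "Value mirrored from an external system.",
+//		},
+//		func() float64 { return readExternalValue() },
+//	))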
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
new file mode 100644
index 0000000..cc23011
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -0,0 +1,274 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"errors"
+	"fmt"
+	"sort"
+	"time"
+	"unicode/utf8"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/timestamppb"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum. Use UntypedValue to mark a metric
+// with an unknown type.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+// Pointers to the possible dto.MetricType values, for convenience when
+// assembling dto.MetricFamily messages.
+var (
+	CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+	GaugeMetricTypePtr   = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+	UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+// ToDTO returns the *dto.MetricType corresponding to the ValueType.
+func (v ValueType) ToDTO() *dto.MetricType {
+	switch v {
+	case CounterValue:
+		return CounterMetricTypePtr
+	case GaugeValue:
+		return GaugeMetricTypePtr
+	default:
+		return UntypedMetricTypePtr
+	}
+}
+
+// valueFunc is a generic metric for simple values retrieved on collect time
+// from a function. It implements Metric and Collector. Its effective type is
+// determined by ValueType. This is a low-level building block used by the
+// library to back the implementations of CounterFunc, GaugeFunc, and
+// UntypedFunc.
+type valueFunc struct {
+	selfCollector
+
+	desc       *Desc
+	valType    ValueType
+	function   func() float64
+	labelPairs []*dto.LabelPair
+}
+
+// newValueFunc returns a newly allocated valueFunc with the given Desc and
+// ValueType. The value reported is determined by calling the given function
+// from within the Write method. Take into account that metric collection may
+// happen concurrently. If that results in concurrent calls to Write, like in
+// the case where a valueFunc is directly registered with Prometheus, the
+// provided function must be concurrency-safe.
+func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {
+	result := &valueFunc{
+		desc:       desc,
+		valType:    valueType,
+		function:   function,
+		labelPairs: MakeLabelPairs(desc, nil),
+	}
+	result.init(result)
+	return result
+}
+
+func (v *valueFunc) Desc() *Desc {
+	return v.desc
+}
+
+func (v *valueFunc) Write(out *dto.Metric) error {
+	return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
+}
+
+// NewConstMetric returns a metric with one fixed value that cannot be
+// changed. Users of this package will not have much use for it in regular
+// operations. However, when implementing custom Collectors, it is useful as a
+// throw-away metric that is generated on the fly to send it to Prometheus in
+// the Collect method. NewConstMetric returns an error if the length of
+// labelValues is not consistent with the variable labels in Desc or if Desc is
+// invalid.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+
+	metric := &dto.Metric{}
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
+		return nil, err
+	}
+
+	return &constMetric{
+		desc:   desc,
+		metric: metric,
+	}, nil
+}
+
+// MustNewConstMetric is a version of NewConstMetric that panics where
+// NewConstMetric would have returned an error.
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+	m, err := NewConstMetric(desc, valueType, value, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
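+
+// An illustrative sketch of the throw-away use in a custom Collector's
+// Collect method (c.openConnsDesc and pool are hypothetical):
+//
+//	ch <- prometheus.MustNewConstMetric(
+//		c.openConnsDesc, prometheus.GaugeValue, float64(pool.OpenConnections()),
+//	)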
+
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+		return nil, err
+	}
+	if valueType != CounterValue {
+		return nil, errors.New("created timestamps are only supported for counters")
+	}
+
+	metric := &dto.Metric{}
+	if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+		return nil, err
+	}
+
+	return &constMetric{
+		desc:   desc,
+		metric: metric,
+	}, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+	m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
+type constMetric struct {
+	desc   *Desc
+	metric *dto.Metric
+}
+
+func (m *constMetric) Desc() *Desc {
+	return m.desc
+}
+
+func (m *constMetric) Write(out *dto.Metric) error {
+	out.Label = m.metric.Label
+	out.Counter = m.metric.Counter
+	out.Gauge = m.metric.Gauge
+	out.Untyped = m.metric.Untyped
+	return nil
+}
+
+func populateMetric(
+	t ValueType,
+	v float64,
+	labelPairs []*dto.LabelPair,
+	e *dto.Exemplar,
+	m *dto.Metric,
+	ct *timestamppb.Timestamp,
+) error {
+	m.Label = labelPairs
+	switch t {
+	case CounterValue:
+		m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
+	case GaugeValue:
+		m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
+	case UntypedValue:
+		m.Untyped = &dto.Untyped{Value: proto.Float64(v)}
+	default:
+		return fmt.Errorf("encountered unknown type %v", t)
+	}
+	return nil
+}
+
+// MakeLabelPairs is a helper function to create protobuf LabelPairs from the
+// variable and constant labels in the provided Desc. The values for the
+// variable labels are defined by the labelValues slice, which must be in the
+// same order as the corresponding variable labels in the Desc.
+//
+// This function is only needed for custom Metric implementations. See MetricVec
+// example.
+func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
+	totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
+	if totalLen == 0 {
+		// Super fast path.
+		return nil
+	}
+	if len(desc.variableLabels.names) == 0 {
+		// Moderately fast path.
+		return desc.constLabelPairs
+	}
+	labelPairs := make([]*dto.LabelPair, 0, totalLen)
+	for i, l := range desc.variableLabels.names {
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(l),
+			Value: proto.String(labelValues[i]),
+		})
+	}
+	labelPairs = append(labelPairs, desc.constLabelPairs...)
+	sort.Sort(internal.LabelPairSorter(labelPairs))
+	return labelPairs
+}
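+
+// A hedged sketch for custom Metric implementations; desc is assumed to have
+// the variable labels "method" and "code", in that order:
+//
+//	pairs := prometheus.MakeLabelPairs(desc, []string{"GET", "200"})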
+
+// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
+const ExemplarMaxRunes = 128
+
+// newExemplar creates a new dto.Exemplar from the provided values. An error is
+// returned if any of the label names or values are invalid or if the total
+// number of runes in the label names and values exceeds ExemplarMaxRunes.
+func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
+	e := &dto.Exemplar{}
+	e.Value = proto.Float64(value)
+	tsProto := timestamppb.New(ts)
+	if err := tsProto.CheckValid(); err != nil {
+		return nil, err
+	}
+	e.Timestamp = tsProto
+	labelPairs := make([]*dto.LabelPair, 0, len(l))
+	var runes int
+	for name, value := range l {
+		if !checkLabelName(name) {
+			return nil, fmt.Errorf("exemplar label name %q is invalid", name)
+		}
+		runes += utf8.RuneCountInString(name)
+		if !utf8.ValidString(value) {
+			return nil, fmt.Errorf("exemplar label value %q is not valid UTF-8", value)
+		}
+		runes += utf8.RuneCountInString(value)
+		labelPairs = append(labelPairs, &dto.LabelPair{
+			Name:  proto.String(name),
+			Value: proto.String(value),
+		})
+	}
+	if runes > ExemplarMaxRunes {
+		return nil, fmt.Errorf("exemplar labels have %d runes, exceeding the limit of %d", runes, ExemplarMaxRunes)
+	}
+	e.Label = labelPairs
+	return e, nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
new file mode 100644
index 0000000..487b466
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -0,0 +1,709 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/common/model"
+)
+
+// MetricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. MetricVec is not used directly but as a building block
+// for implementations of vectors of a given metric type, like GaugeVec,
+// CounterVec, SummaryVec, and HistogramVec. It is exported so that it can be
+// used for custom Metric implementations.
+//
+// To create a FooVec for custom Metric Foo, embed a pointer to MetricVec in
+// FooVec and initialize it with NewMetricVec. Implement wrappers for
+// GetMetricWithLabelValues and GetMetricWith that return (Foo, error) rather
+// than (Metric, error). Similarly, create a wrapper for CurryWith that returns
+// (*FooVec, error) rather than (*MetricVec, error). It is recommended to also
+// add the convenience methods WithLabelValues, With, and MustCurryWith, which
+// panic instead of returning errors. See also the MetricVec example.
+type MetricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// NewMetricVec returns an initialized MetricVec.
+func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
+	return &MetricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+
+	return m.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *MetricVec) Delete(labels Labels) bool {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	return m.deleteByLabels(labels, m.curry)
+}
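+
+// An illustrative sketch (httpReqs is a hypothetical CounterVec with labels
+// "code" and "method"): delete every metric whose "code" label is "404",
+// regardless of the other label values.
+//
+//	deleted := httpReqs.DeletePartialMatch(prometheus.Labels{"code": "404"})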
+
+// Without explicit forwarding of Describe, Collect, Reset, those methods won't
+// show up in GoDoc.
+
+// Describe implements Collector.
+func (m *MetricVec) Describe(ch chan<- *Desc) { m.metricMap.Describe(ch) }
+
+// Collect implements Collector.
+func (m *MetricVec) Collect(ch chan<- Metric) { m.metricMap.Collect(ch) }
+
+// Reset deletes all metrics in this vector.
+func (m *MetricVec) Reset() { m.metricMap.Reset() }
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the MetricVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+//
+// Note that CurryWith is usually not called directly but through a wrapper
+// around MetricVec, implementing a vector for a specific Metric
+// implementation, for example GaugeVec.
+func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
+	var (
+		newCurry []curriedLabelValue
+		oldCurry = m.curry
+		iCurry   int
+	)
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
+		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+			if ok {
+				return nil, fmt.Errorf("label name %q is already curried", labelName)
+			}
+			newCurry = append(newCurry, oldCurry[iCurry])
+			iCurry++
+		} else {
+			if !ok {
+				continue // Label stays uncurried.
+			}
+			newCurry = append(newCurry, curriedLabelValue{
+				i,
+				m.desc.variableLabels.constrain(labelName, val),
+			})
+		}
+	}
+	if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
+		return nil, fmt.Errorf("%d unknown label(s) found during currying", l)
+	}
+
+	return &MetricVec{
+		metricMap:   m.metricMap,
+		curry:       newCurry,
+		hashAdd:     m.hashAdd,
+		hashAddByte: m.hashAddByte,
+	}, nil
+}
+
+// GetMetricWithLabelValues returns the Metric for the given slice of label
+// values (same order as the variable labels in Desc). If that combination of
+// label values is accessed for the first time, a new Metric is created (by
+// calling the newMetric function provided during construction of the
+// MetricVec).
+//
+// It is possible to call this method without using the returned Metric, which
+// merely creates the new Metric and leaves it in its initial state.
+//
+// Keeping the Metric for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Metric from the MetricVec. In that case, the
+// Metric will still exist, but it will not be exported anymore, even if a
+// Metric with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of variable labels in Desc (minus any curried labels).
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+//
+// Note that GetMetricWithLabelValues is usually not called directly but through
+// a wrapper around MetricVec, implementing a vector for a specific Metric
+// implementation, for example GaugeVec.
+func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+	lvs = constrainLabelValues(m.desc, lvs, m.curry)
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+}
+
+// GetMetricWith returns the Metric for the given Labels map (the label names
+// must match those of the variable labels in Desc). If that label map is
+// accessed for the first time, a new Metric is created. Implications of
+// creating a Metric without using it and keeping the Metric for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the variable labels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+//
+// Note that GetMetricWith is usually not called directly but through a wrapper
+// around MetricVec, implementing a vector for a specific Metric implementation,
+// for example GaugeVec.
+func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+	labels, closer := constrainLabels(m.desc, labels)
+	defer closer()
+
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return nil, err
+	}
+
+	return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+}
+
+func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
+	if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h             = hashNew()
+		curry         = m.curry
+		iVals, iCurry int
+	)
+	for i := 0; i < len(m.desc.variableLabels.names); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			h = m.hashAdd(h, vals[iVals])
+			iVals++
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
+	if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
+		return 0, err
+	}
+
+	var (
+		h      = hashNew()
+		curry  = m.curry
+		iCurry int
+	)
+	for i, labelName := range m.desc.variableLabels.names {
+		val, ok := labels[labelName]
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if ok {
+				return 0, fmt.Errorf("label name %q is already curried", labelName)
+			}
+			h = m.hashAdd(h, curry[iCurry].value)
+			iCurry++
+		} else {
+			if !ok {
+				return 0, fmt.Errorf("label name %q missing in label map", labelName)
+			}
+			h = m.hashAdd(h, val)
+		}
+		h = m.hashAddByte(h, model.SeparatorByte)
+	}
+	return h, nil
+}
+
+// metricWithLabelValues provides the metric and its label values for
+// disambiguation on hash collision.
+type metricWithLabelValues struct {
+	values []string
+	metric Metric
+}
+
+// curriedLabelValue sets the curried value for a label at the given index.
+type curriedLabelValue struct {
+	index int
+	value string
+}
+
+// metricMap is a helper for MetricVec and is shared between differently
+// curried MetricVecs.
+type metricMap struct {
+	mtx       sync.RWMutex // Protects metrics.
+	metrics   map[uint64][]metricWithLabelValues
+	desc      *Desc
+	newMetric func(labelValues ...string) Metric
+}
+
+// Describe implements Collector. It will send exactly one Desc to the provided
+// channel.
+func (m *metricMap) Describe(ch chan<- *Desc) {
+	ch <- m.desc
+}
+
+// Collect implements Collector.
+func (m *metricMap) Collect(ch chan<- Metric) {
+	m.mtx.RLock()
+	defer m.mtx.RUnlock()
+
+	for _, metrics := range m.metrics {
+		for _, metric := range metrics {
+			ch <- metric.metric
+		}
+	}
+}
+
+// Reset deletes all metrics in this vector.
+func (m *metricMap) Reset() {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	for h := range m.metrics {
+		delete(m.metrics, h)
+	}
+}
+
+// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
+// there are multiple matches in the bucket, use lvs to select a metric and
+// remove only that metric.
+func (m *metricMap) deleteByHashWithLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+
+	i := findMetricWithLabelValues(metrics, lvs, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		old := metrics
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+		old[len(old)-1] = metricWithLabelValues{}
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByHashWithLabels removes the metric from the hash bucket h. If there
+// are multiple matches in the bucket, use lvs to select a metric and remove
+// only that metric.
+func (m *metricMap) deleteByHashWithLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) bool {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	metrics, ok := m.metrics[h]
+	if !ok {
+		return false
+	}
+	i := findMetricWithLabels(m.desc, metrics, labels, curry)
+	if i >= len(metrics) {
+		return false
+	}
+
+	if len(metrics) > 1 {
+		old := metrics
+		m.metrics[h] = append(metrics[:i], metrics[i+1:]...)
+		old[len(old)-1] = metricWithLabelValues{}
+	} else {
+		delete(m.metrics, h)
+	}
+	return true
+}
+
+// deleteByLabels deletes a metric if the given labels are present in the metric.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+
+	var numDeleted int
+
+	for h, metrics := range m.metrics {
+		i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+		if i >= len(metrics) {
+			// Didn't find matching labels in this metric slice.
+			continue
+		}
+		delete(m.metrics, h)
+		numDeleted++
+	}
+
+	return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the first matching metric
+// or len(metrics) if not found.
+func findMetricWithPartialLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchPartialLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and
+// returns the index (or len(items) if not found) along with a boolean
+// indicating whether the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+	for i, l := range items {
+		if l == target {
+			return i, true
+		}
+	}
+	return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue determines if a value was previously curried,
+// and returns whether it matches either the "base" value or the curried value accordingly.
+// It also indicates whether the match is against a curried or uncurried value.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+	for _, curriedValue := range curry {
+		if curriedValue.index == index {
+			// This label was curried. See if the curried value matches our target.
+			return curriedValue.value == targetValue, true
+		}
+	}
+	// This label was not curried. See if the current value matches our target label.
+	return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	for l, v := range labels {
+		// Check if the target label exists in our metrics and get the index.
+		varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
+		if validLabel {
+			// Check the value of that label against the target value.
+			// We don't consider curried values in partial matches.
+			matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+			if matches && !curried {
+				continue
+			}
+		}
+		return false
+	}
+	return true
+}
+
+// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabelValues(
+	hash uint64, lvs []string, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry)
+	if !ok {
+		inlinedLVs := inlineLabelValues(lvs, curry)
+		metric = m.newMetric(inlinedLVs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric})
+	}
+	return metric
+}
+
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
+// or creates it and returns the new one.
+//
+// This function holds the mutex.
+func (m *metricMap) getOrCreateMetricWithLabels(
+	hash uint64, labels Labels, curry []curriedLabelValue,
+) Metric {
+	m.mtx.RLock()
+	metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry)
+	m.mtx.RUnlock()
+	if ok {
+		return metric
+	}
+
+	m.mtx.Lock()
+	defer m.mtx.Unlock()
+	metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry)
+	if !ok {
+		lvs := extractLabelValues(m.desc, labels, curry)
+		metric = m.newMetric(lvs...)
+		m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric})
+	}
+	return metric
+}
+
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricMap) getMetricWithHashAndLabelValues(
+	h uint64, lvs []string, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
+// the hash space. Must be called while holding read mutex.
+func (m *metricMap) getMetricWithHashAndLabels(
+	h uint64, labels Labels, curry []curriedLabelValue,
+) (Metric, bool) {
+	metrics, ok := m.metrics[h]
+	if ok {
+		if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) {
+			return metrics[i].metric, true
+		}
+	}
+	return nil, false
+}
+
+// findMetricWithLabelValues returns the index of the matching metric or
+// len(metrics) if not found.
+func findMetricWithLabelValues(
+	metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchLabelValues(metric.values, lvs, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+// findMetricWithLabels returns the index of the matching metric or len(metrics)
+// if not found.
+func findMetricWithLabels(
+	desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+	for i, metric := range metrics {
+		if matchLabels(desc, metric.values, labels, curry) {
+			return i
+		}
+	}
+	return len(metrics)
+}
+
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
+	if len(values) != len(lvs)+len(curry) {
+		return false
+	}
+	var iLVs, iCurry int
+	for i, v := range values {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if v != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if v != lvs[iLVs] {
+			return false
+		}
+		iLVs++
+	}
+	return true
+}
+
+func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+	if len(values) != len(labels)+len(curry) {
+		return false
+	}
+	iCurry := 0
+	for i, k := range desc.variableLabels.names {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			if values[i] != curry[iCurry].value {
+				return false
+			}
+			iCurry++
+			continue
+		}
+		if values[i] != labels[k] {
+			return false
+		}
+	}
+	return true
+}
+
+func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(labels)+len(curry))
+	iCurry := 0
+	for i, k := range desc.variableLabels.names {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = labels[k]
+	}
+	return labelValues
+}
+
+func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
+	labelValues := make([]string, len(lvs)+len(curry))
+	var iCurry, iLVs int
+	for i := range labelValues {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			labelValues[i] = curry[iCurry].value
+			iCurry++
+			continue
+		}
+		labelValues[i] = lvs[iLVs]
+		iLVs++
+	}
+	return labelValues
+}
+
+var labelsPool = &sync.Pool{
+	New: func() interface{} {
+		return make(Labels)
+	},
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+	if len(desc.variableLabels.labelConstraints) == 0 {
+		// Fast path when there are no constraints.
+		return labels, func() {}
+	}
+
+	constrainedLabels := labelsPool.Get().(Labels)
+	for l, v := range labels {
+		constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+	}
+
+	return constrainedLabels, func() {
+		for k := range constrainedLabels {
+			delete(constrainedLabels, k)
+		}
+		labelsPool.Put(constrainedLabels)
+	}
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+	if len(desc.variableLabels.labelConstraints) == 0 {
+		// Fast path when there are no constraints.
+		return lvs
+	}
+
+	constrainedValues := make([]string, len(lvs))
+	var iCurry, iLVs int
+	for i := 0; i < len(lvs)+len(curry); i++ {
+		if iCurry < len(curry) && curry[iCurry].index == i {
+			iCurry++
+			continue
+		}
+
+		if i < len(desc.variableLabels.names) {
+			constrainedValues[iLVs] = desc.variableLabels.constrain(
+				desc.variableLabels.names[i],
+				lvs[iLVs],
+			)
+		} else {
+			constrainedValues[iLVs] = lvs[iLVs]
+		}
+		iLVs++
+	}
+	return constrainedValues
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 0000000..42bc3a8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access the experimental API that
+// might be present in v2 of client_golang someday. It offers the extended
+// functionality of v1 with a slightly changed API. It is acceptable to mix
+// pieces from v1 (e.g. prometheus.NewGauge) and from v2 (e.g.
+// prometheus.V2.NewDesc) in the same codebase.
+var V2 = v2{}
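+
+// A minimal sketch of using the v2 API alongside v1, assuming this package
+// version exposes UnconstrainedLabels for plain variable-label names:
+//
+//	desc := prometheus.V2.NewDesc(
+//		"http_requests_total",
+//		"Total number of HTTP requests.",
+//		prometheus.UnconstrainedLabels{"method", "code"},
+//		nil,
+//	)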
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
new file mode 100644
index 0000000..2ed1285
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -0,0 +1,248 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/prometheus/client_golang/prometheus/internal"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+)
+
+// WrapRegistererWith returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels. Wrapping a nil value is valid, resulting
+// in a no-op Registerer.
+//
+// WrapRegistererWith provides a way to add fixed labels to a subset of
+// Collectors. It should not be used to add fixed labels to all metrics
+// exposed. See also
+// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+//
+// The Collector example demonstrates a use of WrapRegistererWith.
+func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		labels:            labels,
+	}
+}
+
+// WrapRegistererWithPrefix returns a Registerer wrapping the provided
+// Registerer. Collectors registered with the returned Registerer will be
+// registered with the wrapped Registerer in a modified way. The modified
+// Collector adds the provided prefix to the name of all Metrics it collects.
+// Wrapping a nil value is valid, resulting in a no-op Registerer.
+//
+// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
+// a sub-system. To make this work, register metrics of the sub-system with the
+// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
+// to use the same prefix for all metrics exposed. In particular, do not prefix
+// metric names that are standardized across applications, as that would break
+// horizontal monitoring, for example the metrics provided by the Go collector
+// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
+// fact, those metrics are already prefixed with "go_" or "process_",
+// respectively.)
+//
+// Conflicts between Collectors registered through the original Registerer with
+// Collectors registered through the wrapping Registerer will still be
+// detected. Any AlreadyRegisteredError returned by the Register method of
+// either Registerer will contain the ExistingCollector in the form it was
+// provided to the respective registry.
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
+	return &wrappingRegisterer{
+		wrappedRegisterer: reg,
+		prefix:            prefix,
+	}
+}
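+
+// A hedged sketch of prefixing all metrics of a sub-system (requestCount is a
+// hypothetical, already-created Collector):
+//
+//	subReg := prometheus.WrapRegistererWithPrefix("worker_", prometheus.DefaultRegisterer)
+//	subReg.MustRegister(requestCount) // exposed with a "worker_" name prefix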
+
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful when working with multiple instances of a
+// third-party library that does not expose enough flexibility on the
+// lifecycle of its registered metrics. For example, suppose you have a
+// foo.New(reg Registerer) constructor that registers metrics but never
+// unregisters them, and you want to create multiple instances of foo.Foo
+// with different labels. The way to achieve that is to create a new Registry,
+// pass it to foo.New, then use WrapCollectorWith to wrap that Registry with
+// the desired labels, and register the result as a Collector in your main
+// Registry. Unregistering the wrapped Collector then effectively unregisters
+// the metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		labels:           labels,
+	}
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+	return &wrappingCollector{
+		wrappedCollector: c,
+		prefix:           prefix,
+	}
+}
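+
+// A sketch of the lifecycle use case described above, assuming the inner
+// Registry can be registered as a Collector as the WrapCollectorWith
+// documentation outlines (foo.New and mainReg are hypothetical):
+//
+//	fooReg := prometheus.NewRegistry()
+//	f := foo.New(fooReg)
+//	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "a"}, fooReg)
+//	mainReg.MustRegister(wrapped)
+//	// ...later, drop all of foo's metrics at once:
+//	mainReg.Unregister(wrapped)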
+
+type wrappingRegisterer struct {
+	wrappedRegisterer Registerer
+	prefix            string
+	labels            Labels
+}
+
+func (r *wrappingRegisterer) Register(c Collector) error {
+	if r.wrappedRegisterer == nil {
+		return nil
+	}
+	return r.wrappedRegisterer.Register(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
+	if r.wrappedRegisterer == nil {
+		return
+	}
+	for _, c := range cs {
+		if err := r.Register(c); err != nil {
+			panic(err)
+		}
+	}
+}
+
+func (r *wrappingRegisterer) Unregister(c Collector) bool {
+	if r.wrappedRegisterer == nil {
+		return false
+	}
+	return r.wrappedRegisterer.Unregister(&wrappingCollector{
+		wrappedCollector: c,
+		prefix:           r.prefix,
+		labels:           r.labels,
+	})
+}
+
+type wrappingCollector struct {
+	wrappedCollector Collector
+	prefix           string
+	labels           Labels
+}
+
+func (c *wrappingCollector) Collect(ch chan<- Metric) {
+	wrappedCh := make(chan Metric)
+	go func() {
+		c.wrappedCollector.Collect(wrappedCh)
+		close(wrappedCh)
+	}()
+	for m := range wrappedCh {
+		ch <- &wrappingMetric{
+			wrappedMetric: m,
+			prefix:        c.prefix,
+			labels:        c.labels,
+		}
+	}
+}
+
+func (c *wrappingCollector) Describe(ch chan<- *Desc) {
+	wrappedCh := make(chan *Desc)
+	go func() {
+		c.wrappedCollector.Describe(wrappedCh)
+		close(wrappedCh)
+	}()
+	for desc := range wrappedCh {
+		ch <- wrapDesc(desc, c.prefix, c.labels)
+	}
+}
+
+func (c *wrappingCollector) unwrapRecursively() Collector {
+	switch wc := c.wrappedCollector.(type) {
+	case *wrappingCollector:
+		return wc.unwrapRecursively()
+	default:
+		return wc
+	}
+}
+
+type wrappingMetric struct {
+	wrappedMetric Metric
+	prefix        string
+	labels        Labels
+}
+
+func (m *wrappingMetric) Desc() *Desc {
+	return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
+}
+
+func (m *wrappingMetric) Write(out *dto.Metric) error {
+	if err := m.wrappedMetric.Write(out); err != nil {
+		return err
+	}
+	if len(m.labels) == 0 {
+		// No wrapping labels.
+		return nil
+	}
+	for ln, lv := range m.labels {
+		out.Label = append(out.Label, &dto.LabelPair{
+			Name:  proto.String(ln),
+			Value: proto.String(lv),
+		})
+	}
+	sort.Sort(internal.LabelPairSorter(out.Label))
+	return nil
+}
+
+func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
+	constLabels := Labels{}
+	for _, lp := range desc.constLabelPairs {
+		constLabels[*lp.Name] = *lp.Value
+	}
+	for ln, lv := range labels {
+		if _, alreadyUsed := constLabels[ln]; alreadyUsed {
+			return &Desc{
+				fqName:          desc.fqName,
+				help:            desc.help,
+				variableLabels:  desc.variableLabels,
+				constLabelPairs: desc.constLabelPairs,
+				err:             fmt.Errorf("attempted wrapping with already existing label name %q", ln),
+			}
+		}
+		constLabels[ln] = lv
+	}
+	// NewDesc will do remaining validations.
+	newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+	// Propagate errors if there were any. This will override any error
+	// created by NewDesc above, i.e. earlier errors get precedence.
+	if desc.err != nil {
+		newDesc.err = desc.err
+	}
+	return newDesc
+}
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE
similarity index 100%
rename from vendor/go.opentelemetry.io/otel/LICENSE
rename to vendor/github.com/prometheus/client_model/LICENSE
diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE
new file mode 100644
index 0000000..20110e4
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/NOTICE
@@ -0,0 +1,5 @@
+Data model artifacts for Prometheus.
+Copyright 2012-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
new file mode 100644
index 0000000..2f15490
--- /dev/null
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -0,0 +1,1399 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.30.0
+// 	protoc        v3.20.3
+// source: io/prometheus/client/metrics.proto
+
+package io_prometheus_client
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type MetricType int32
+
+const (
+	// COUNTER must use the Metric field "counter".
+	MetricType_COUNTER MetricType = 0
+	// GAUGE must use the Metric field "gauge".
+	MetricType_GAUGE MetricType = 1
+	// SUMMARY must use the Metric field "summary".
+	MetricType_SUMMARY MetricType = 2
+	// UNTYPED must use the Metric field "untyped".
+	MetricType_UNTYPED MetricType = 3
+	// HISTOGRAM must use the Metric field "histogram".
+	MetricType_HISTOGRAM MetricType = 4
+	// GAUGE_HISTOGRAM must use the Metric field "histogram".
+	MetricType_GAUGE_HISTOGRAM MetricType = 5
+)
+
+// Enum value maps for MetricType.
+var (
+	MetricType_name = map[int32]string{
+		0: "COUNTER",
+		1: "GAUGE",
+		2: "SUMMARY",
+		3: "UNTYPED",
+		4: "HISTOGRAM",
+		5: "GAUGE_HISTOGRAM",
+	}
+	MetricType_value = map[string]int32{
+		"COUNTER":         0,
+		"GAUGE":           1,
+		"SUMMARY":         2,
+		"UNTYPED":         3,
+		"HISTOGRAM":       4,
+		"GAUGE_HISTOGRAM": 5,
+	}
+)
+
+func (x MetricType) Enum() *MetricType {
+	p := new(MetricType)
+	*p = x
+	return p
+}
+
+func (x MetricType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+	return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+	return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = MetricType(num)
+	return nil
+}
+
+// Deprecated: Use MetricType.Descriptor instead.
+func (MetricType) EnumDescriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+type LabelPair struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name  *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *LabelPair) Reset() {
+	*x = LabelPair{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *LabelPair) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
+func (*LabelPair) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *LabelPair) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
+	}
+	return ""
+}
+
+func (x *LabelPair) GetValue() string {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return ""
+}
+
+type Gauge struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Gauge) Reset() {
+	*x = Gauge{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Gauge) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
+func (*Gauge) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *Gauge) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+type Counter struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value            *float64               `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+	Exemplar         *Exemplar              `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Counter) Reset() {
+	*x = Counter{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Counter) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
+func (*Counter) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *Counter) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+func (x *Counter) GetExemplar() *Exemplar {
+	if x != nil {
+		return x.Exemplar
+	}
+	return nil
+}
+
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+type Quantile struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+	Value    *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Quantile) Reset() {
+	*x = Quantile{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Quantile) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
+func (*Quantile) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Quantile) GetQuantile() float64 {
+	if x != nil && x.Quantile != nil {
+		return *x.Quantile
+	}
+	return 0
+}
+
+func (x *Quantile) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+type Summary struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SampleCount      *uint64                `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleSum        *float64               `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	Quantile         []*Quantile            `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+}
+
+func (x *Summary) Reset() {
+	*x = Summary{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Summary) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
+func (*Summary) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Summary) GetSampleCount() uint64 {
+	if x != nil && x.SampleCount != nil {
+		return *x.SampleCount
+	}
+	return 0
+}
+
+func (x *Summary) GetSampleSum() float64 {
+	if x != nil && x.SampleSum != nil {
+		return *x.SampleSum
+	}
+	return 0
+}
+
+func (x *Summary) GetQuantile() []*Quantile {
+	if x != nil {
+		return x.Quantile
+	}
+	return nil
+}
+
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+type Untyped struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+}
+
+func (x *Untyped) Reset() {
+	*x = Untyped{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Untyped) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
+func (*Untyped) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *Untyped) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+type Histogram struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	SampleCount      *uint64  `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+	SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
+	SampleSum        *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+	// Buckets for the conventional histogram.
+	Bucket           []*Bucket              `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+	CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+	// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+	// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+	// then each power of two is divided into 2^n logarithmic buckets.
+	// Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+	// In the future, more bucket schemas may be added using numbers < -4 or > 8.
+	Schema         *int32   `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+	ZeroThreshold  *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"`      // Breadth of the zero bucket.
+	ZeroCount      *uint64  `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"`                   // Count in zero bucket.
+	ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides zero_count if > 0.
+	// Negative buckets for the native histogram.
+	NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+	// Use either "negative_delta" or "negative_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	NegativeDelta []int64   `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"`  // Absolute count of each bucket.
+	// Positive buckets for the native histogram.
+	// Use a no-op span (offset 0, length 0) for a native histogram without any
+	// observations yet and with a zero_threshold of 0. Otherwise, it would be
+	// indistinguishable from a classic histogram.
+	PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+	// Use either "positive_delta" or "positive_count", the former for
+	// regular histograms with integer counts, the latter for float
+	// histograms.
+	PositiveDelta []int64   `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+	PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"`  // Absolute count of each bucket.
+	// Only used for native histograms. These exemplars MUST have a timestamp.
+	Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
+}
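+
+// The schema arithmetic described above can be made concrete with a small
+// sketch (a hypothetical helper using the standard math package, not part of
+// the generated code): for schema n, consecutive bucket boundaries grow by a
+// factor of 2^(2^-n), e.g. with n = 3 each power of two is split into
+// 2^3 = 8 buckets and the factor is 2^(1/8) ~= 1.0905.
+//
+//	func growthFactor(schema int32) float64 {
+//		// Ratio between consecutive native-histogram bucket boundaries.
+//		return math.Pow(2, math.Pow(2, -float64(schema)))
+//	}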
+
+func (x *Histogram) Reset() {
+	*x = Histogram{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Histogram) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
+func (*Histogram) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *Histogram) GetSampleCount() uint64 {
+	if x != nil && x.SampleCount != nil {
+		return *x.SampleCount
+	}
+	return 0
+}
+
+func (x *Histogram) GetSampleCountFloat() float64 {
+	if x != nil && x.SampleCountFloat != nil {
+		return *x.SampleCountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetSampleSum() float64 {
+	if x != nil && x.SampleSum != nil {
+		return *x.SampleSum
+	}
+	return 0
+}
+
+func (x *Histogram) GetBucket() []*Bucket {
+	if x != nil {
+		return x.Bucket
+	}
+	return nil
+}
+
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.CreatedTimestamp
+	}
+	return nil
+}
+
+func (x *Histogram) GetSchema() int32 {
+	if x != nil && x.Schema != nil {
+		return *x.Schema
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroThreshold() float64 {
+	if x != nil && x.ZeroThreshold != nil {
+		return *x.ZeroThreshold
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCount() uint64 {
+	if x != nil && x.ZeroCount != nil {
+		return *x.ZeroCount
+	}
+	return 0
+}
+
+func (x *Histogram) GetZeroCountFloat() float64 {
+	if x != nil && x.ZeroCountFloat != nil {
+		return *x.ZeroCountFloat
+	}
+	return 0
+}
+
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+	if x != nil {
+		return x.NegativeSpan
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+	if x != nil {
+		return x.NegativeDelta
+	}
+	return nil
+}
+
+func (x *Histogram) GetNegativeCount() []float64 {
+	if x != nil {
+		return x.NegativeCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+	if x != nil {
+		return x.PositiveSpan
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveDelta() []int64 {
+	if x != nil {
+		return x.PositiveDelta
+	}
+	return nil
+}
+
+func (x *Histogram) GetPositiveCount() []float64 {
+	if x != nil {
+		return x.PositiveCount
+	}
+	return nil
+}
+
+func (x *Histogram) GetExemplars() []*Exemplar {
+	if x != nil {
+		return x.Exemplars
+	}
+	return nil
+}
+
+// A Bucket of a conventional histogram. Each bucket is treated as an
+// individual counter-like time series by Prometheus.
+type Bucket struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	CumulativeCount      *uint64   `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`                   // Cumulative in increasing order.
+	CumulativeCountFloat *float64  `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+	UpperBound           *float64  `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`                                 // Inclusive.
+	Exemplar             *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
+}
+
+func (x *Bucket) Reset() {
+	*x = Bucket{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Bucket) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
+func (*Bucket) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *Bucket) GetCumulativeCount() uint64 {
+	if x != nil && x.CumulativeCount != nil {
+		return *x.CumulativeCount
+	}
+	return 0
+}
+
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+	if x != nil && x.CumulativeCountFloat != nil {
+		return *x.CumulativeCountFloat
+	}
+	return 0
+}
+
+func (x *Bucket) GetUpperBound() float64 {
+	if x != nil && x.UpperBound != nil {
+		return *x.UpperBound
+	}
+	return 0
+}
+
+func (x *Bucket) GetExemplar() *Exemplar {
+	if x != nil {
+		return x.Exemplar
+	}
+	return nil
+}
+
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Offset *int32  `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+	Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"`   // Length of consecutive buckets.
+}
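+
+// An illustrative reading of this encoding (values invented for the example;
+// the proto.Int32/proto.Uint32 helpers come from
+// google.golang.org/protobuf/proto):
+//
+//	span := &BucketSpan{Offset: proto.Int32(2), Length: proto.Uint32(3)}
+//	deltas := []int64{5, -2, 1}
+//
+// together describe buckets at indices 2, 3 and 4 holding absolute counts
+// 5, 3 and 4, since each delta is relative to the previous bucket (or to
+// zero for the first one).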
+
+func (x *BucketSpan) Reset() {
+	*x = BucketSpan{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *BucketSpan) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+	if x != nil && x.Offset != nil {
+		return *x.Offset
+	}
+	return 0
+}
+
+func (x *BucketSpan) GetLength() uint32 {
+	if x != nil && x.Length != nil {
+		return *x.Length
+	}
+	return 0
+}
+
+type Exemplar struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Label     []*LabelPair           `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Value     *float64               `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
+}
+
+func (x *Exemplar) Reset() {
+	*x = Exemplar{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Exemplar) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
+func (*Exemplar) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *Exemplar) GetLabel() []*LabelPair {
+	if x != nil {
+		return x.Label
+	}
+	return nil
+}
+
+func (x *Exemplar) GetValue() float64 {
+	if x != nil && x.Value != nil {
+		return *x.Value
+	}
+	return 0
+}
+
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
+	}
+	return nil
+}
+
+type Metric struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Label       []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+	Gauge       *Gauge       `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+	Counter     *Counter     `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+	Summary     *Summary     `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+	Untyped     *Untyped     `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+	Histogram   *Histogram   `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+	TimestampMs *int64       `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
+}
+
+func (x *Metric) Reset() {
+	*x = Metric{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *Metric) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
+func (*Metric) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
+}
+
+func (x *Metric) GetLabel() []*LabelPair {
+	if x != nil {
+		return x.Label
+	}
+	return nil
+}
+
+func (x *Metric) GetGauge() *Gauge {
+	if x != nil {
+		return x.Gauge
+	}
+	return nil
+}
+
+func (x *Metric) GetCounter() *Counter {
+	if x != nil {
+		return x.Counter
+	}
+	return nil
+}
+
+func (x *Metric) GetSummary() *Summary {
+	if x != nil {
+		return x.Summary
+	}
+	return nil
+}
+
+func (x *Metric) GetUntyped() *Untyped {
+	if x != nil {
+		return x.Untyped
+	}
+	return nil
+}
+
+func (x *Metric) GetHistogram() *Histogram {
+	if x != nil {
+		return x.Histogram
+	}
+	return nil
+}
+
+func (x *Metric) GetTimestampMs() int64 {
+	if x != nil && x.TimestampMs != nil {
+		return *x.TimestampMs
+	}
+	return 0
+}
+
+type MetricFamily struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Name   *string     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Help   *string     `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+	Type   *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+	Metric []*Metric   `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+	Unit   *string     `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
+}
+
+func (x *MetricFamily) Reset() {
+	*x = MetricFamily{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *MetricFamily) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+	mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
+func (*MetricFamily) Descriptor() ([]byte, []int) {
+	return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
+}
+
+func (x *MetricFamily) GetName() string {
+	if x != nil && x.Name != nil {
+		return *x.Name
+	}
+	return ""
+}
+
+func (x *MetricFamily) GetHelp() string {
+	if x != nil && x.Help != nil {
+		return *x.Help
+	}
+	return ""
+}
+
+func (x *MetricFamily) GetType() MetricType {
+	if x != nil && x.Type != nil {
+		return *x.Type
+	}
+	return MetricType_COUNTER
+}
+
+func (x *MetricFamily) GetMetric() []*Metric {
+	if x != nil {
+		return x.Metric
+	}
+	return nil
+}
+
+func (x *MetricFamily) GetUnit() string {
+	if x != nil && x.Unit != nil {
+		return *x.Unit
+	}
+	return ""
+}
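+
+// A minimal construction sketch, as seen from an importing package via the
+// conventional dto alias (the alias and the proto.String/proto.Float64
+// helpers from google.golang.org/protobuf/proto are assumptions, not part of
+// this file). Per the MetricType comments above, a COUNTER family populates
+// the Metric field "counter":
+//
+//	mf := &dto.MetricFamily{
+//		Name: proto.String("http_requests_total"),
+//		Type: dto.MetricType_COUNTER.Enum(),
+//		Metric: []*dto.Metric{{
+//			Counter: &dto.Counter{Value: proto.Float64(42)},
+//		}},
+//	}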
+
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
+
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+	0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+	0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+	0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+	0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+	0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+	0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+	0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+	0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+	0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+	0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+	0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+	0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+	0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+	0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+	0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+	0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+	0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+	0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+	0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+	0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+	0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+	0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+	0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+	0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+	0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+	0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+	0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+	0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+	0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+	0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+	0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+	0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+	0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+	0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+	0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+	0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+	0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+	0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+	0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+	0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+	0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+	0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+	0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+	0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+	0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+	0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+	0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+	0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+	0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+	0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+	0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+	0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+	0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+	0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+	0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+	0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+	0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+	0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+	0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+	0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+	0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+	0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+	0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+	0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+	0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+	0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+	0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+	0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+	0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+	0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+	0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+	0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+	0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+	0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+	0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+	0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+	0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+	0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+	0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+	0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+	0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+	0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+	0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+	0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+	0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+	0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+	0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+	0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+	0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+	0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+	0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+	0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+	0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+	0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+	0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+	0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+	0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+	0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+	0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+	0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+	0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+	0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+	0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+	0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+	0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+	0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+	0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+	0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+	0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+	0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+	0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+	0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+	0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+	file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+	file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+	file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+		file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+	})
+	return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+	(MetricType)(0),               // 0: io.prometheus.client.MetricType
+	(*LabelPair)(nil),             // 1: io.prometheus.client.LabelPair
+	(*Gauge)(nil),                 // 2: io.prometheus.client.Gauge
+	(*Counter)(nil),               // 3: io.prometheus.client.Counter
+	(*Quantile)(nil),              // 4: io.prometheus.client.Quantile
+	(*Summary)(nil),               // 5: io.prometheus.client.Summary
+	(*Untyped)(nil),               // 6: io.prometheus.client.Untyped
+	(*Histogram)(nil),             // 7: io.prometheus.client.Histogram
+	(*Bucket)(nil),                // 8: io.prometheus.client.Bucket
+	(*BucketSpan)(nil),            // 9: io.prometheus.client.BucketSpan
+	(*Exemplar)(nil),              // 10: io.prometheus.client.Exemplar
+	(*Metric)(nil),                // 11: io.prometheus.client.Metric
+	(*MetricFamily)(nil),          // 12: io.prometheus.client.MetricFamily
+	(*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+	10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+	13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+	4,  // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+	13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+	8,  // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+	13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+	9,  // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+	9,  // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+	10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+	10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+	1,  // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+	13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+	1,  // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+	2,  // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+	3,  // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+	5,  // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+	6,  // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+	7,  // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+	0,  // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+	11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+	20, // [20:20] is the sub-list for method output_type
+	20, // [20:20] is the sub-list for method input_type
+	20, // [20:20] is the sub-list for extension type_name
+	20, // [20:20] is the sub-list for extension extendee
+	0,  // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+	if File_io_prometheus_client_metrics_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*LabelPair); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Gauge); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Counter); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Quantile); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Summary); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Untyped); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Histogram); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Bucket); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*BucketSpan); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Exemplar); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*Metric); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*MetricFamily); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   12,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_io_prometheus_client_metrics_proto_goTypes,
+		DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+		EnumInfos:         file_io_prometheus_client_metrics_proto_enumTypes,
+		MessageInfos:      file_io_prometheus_client_metrics_proto_msgTypes,
+	}.Build()
+	File_io_prometheus_client_metrics_proto = out.File
+	file_io_prometheus_client_metrics_proto_rawDesc = nil
+	file_io_prometheus_client_metrics_proto_goTypes = nil
+	file_io_prometheus_client_metrics_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/github.com/prometheus/common/LICENSE
similarity index 100%
copy from vendor/go.opentelemetry.io/otel/LICENSE
copy to vendor/github.com/prometheus/common/LICENSE
diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 0000000..636a2c1
--- /dev/null
+++ b/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 0000000..7b76237
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,456 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"math"
+	"mime"
+	"net/http"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
+
+	"github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+	Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+	// Timestamp is added to each value from the stream that has no explicit timestamp set.
+	Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+	ct := h.Get(hdrContentType)
+
+	mediatype, params, err := mime.ParseMediaType(ct)
+	if err != nil {
+		return FmtUnknown
+	}
+
+	const textType = "text/plain"
+
+	switch mediatype {
+	case ProtoType:
+		if p, ok := params["proto"]; ok && p != ProtoProtocol {
+			return FmtUnknown
+		}
+		if e, ok := params["encoding"]; ok && e != "delimited" {
+			return FmtUnknown
+		}
+		return FmtProtoDelim
+
+	case textType:
+		if v, ok := params["version"]; ok && v != TextVersion {
+			return FmtUnknown
+		}
+		return FmtText
+	}
+
+	return FmtUnknown
+}
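+
+// A typical call site, sketched under the assumption of an *http.Response
+// named resp:
+//
+//	format := ResponseFormat(resp.Header)
+//	if format == FmtUnknown {
+//		// Fall back or reject; no supported media type was detected.
+//	}
+//	dec := NewDecoder(resp.Body, format)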
+
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
+func NewDecoder(r io.Reader, format Format) Decoder {
+	scheme := model.LegacyValidation
+	if format.ToEscapingScheme() == model.NoEscaping {
+		scheme = model.UTF8Validation
+	}
+	switch format.FormatType() {
+	case TypeProtoDelim:
+		return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+	case TypeProtoText, TypeProtoCompact:
+		return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
+	}
+	return &textDecoder{r: r, s: scheme}
+}
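+
+// A minimal decode loop, as an illustrative sketch (r is an assumed
+// io.Reader carrying text-format metrics; the text decoder returns io.EOF
+// once every family has been handed out):
+//
+//	dec := NewDecoder(r, FmtText)
+//	for {
+//		var mf dto.MetricFamily
+//		if err := dec.Decode(&mf); err != nil {
+//			if err == io.EOF {
+//				break
+//			}
+//			return err
+//		}
+//		fmt.Println(mf.GetName())
+//	}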
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+	r protodelim.Reader
+	s model.ValidationScheme
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+	opts := protodelim.UnmarshalOptions{
+		MaxSize: -1,
+	}
+	if err := opts.UnmarshalFrom(d.r, v); err != nil {
+		return err
+	}
+	if !d.s.IsValidMetricName(v.GetName()) {
+		return fmt.Errorf("invalid metric name %q", v.GetName())
+	}
+	for _, m := range v.GetMetric() {
+		if m == nil {
+			continue
+		}
+		for _, l := range m.GetLabel() {
+			if l == nil {
+				continue
+			}
+			if !model.LabelValue(l.GetValue()).IsValid() {
+				return fmt.Errorf("invalid label value %q", l.GetValue())
+			}
+			if !d.s.IsValidLabelName(l.GetName()) {
+				return fmt.Errorf("invalid label name %q", l.GetName())
+			}
+		}
+	}
+	return nil
+}
+
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+	err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+	return d.err
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+	r    io.Reader
+	fams map[string]*dto.MetricFamily
+	s    model.ValidationScheme
+	err  error
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+	if d.err == nil {
+		// Read all metrics in one shot.
+		p := NewTextParser(d.s)
+		d.fams, d.err = p.TextToMetricFamilies(d.r)
+		// If we don't get an error, store io.EOF for the end.
+		if d.err == nil {
+			d.err = io.EOF
+		}
+	}
+	// Pick off one MetricFamily per Decode until there's nothing left.
+	for key, fam := range d.fams {
+		v.Name = fam.Name
+		v.Help = fam.Help
+		v.Type = fam.Type
+		v.Metric = fam.Metric
+		delete(d.fams, key)
+		return nil
+	}
+	return d.err
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+	Dec  Decoder
+	Opts *DecodeOptions
+
+	f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+	err := sd.Dec.Decode(&sd.f)
+	if err != nil {
+		return err
+	}
+	*s, err = extractSamples(&sd.f, sd.Opts)
+	return err
+}
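+
+// Wiring the two together, as a hedged sketch (r and the choice of
+// model.Now() as the default timestamp are assumptions):
+//
+//	sd := &SampleDecoder{
+//		Dec:  NewDecoder(r, FmtText),
+//		Opts: &DecodeOptions{Timestamp: model.Now()},
+//	}
+//	var samples model.Vector
+//	if err := sd.Decode(&samples); err != nil {
+//		// For the text decoder, io.EOF signals the input is fully consumed.
+//	}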
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
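+//
+// For example (a sketch; fam1 and fam2 are assumed *dto.MetricFamily values):
+//
+//	opts := &DecodeOptions{Timestamp: model.Now()}
+//	samples, lastErr := ExtractSamples(opts, fam1, fam2)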
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+	var (
+		all     model.Vector
+		lastErr error
+	)
+	for _, f := range fams {
+		some, err := extractSamples(f, o)
+		if err != nil {
+			lastErr = err
+			continue
+		}
+		all = append(all, some...)
+	}
+	return all, lastErr
+}
+
+func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
+	switch f.GetType() {
+	case dto.MetricType_COUNTER:
+		return extractCounter(o, f), nil
+	case dto.MetricType_GAUGE:
+		return extractGauge(o, f), nil
+	case dto.MetricType_SUMMARY:
+		return extractSummary(o, f), nil
+	case dto.MetricType_UNTYPED:
+		return extractUntyped(o, f), nil
+	case dto.MetricType_HISTOGRAM:
+		return extractHistogram(o, f), nil
+	}
+	return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
+}
+
+func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Counter == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Counter.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Gauge == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Gauge.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Untyped == nil {
+			continue
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+		smpl := &model.Sample{
+			Metric: model.Metric(lset),
+			Value:  model.SampleValue(m.Untyped.GetValue()),
+		}
+
+		if m.TimestampMs != nil {
+			smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		} else {
+			smpl.Timestamp = o.Timestamp
+		}
+
+		samples = append(samples, smpl)
+	}
+
+	return samples
+}
+
+func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Summary == nil {
+			continue
+		}
+
+		timestamp := o.Timestamp
+		if m.TimestampMs != nil {
+			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		}
+
+		for _, q := range m.Summary.Quantile {
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			// BUG(matt): Update other names to "quantile".
+			lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName())
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     model.SampleValue(q.GetValue()),
+				Timestamp: timestamp,
+			})
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Summary.GetSampleSum()),
+			Timestamp: timestamp,
+		})
+
+		lset = make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Summary.GetSampleCount()),
+			Timestamp: timestamp,
+		})
+	}
+
+	return samples
+}
+
+func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
+	samples := make(model.Vector, 0, len(f.Metric))
+
+	for _, m := range f.Metric {
+		if m.Histogram == nil {
+			continue
+		}
+
+		timestamp := o.Timestamp
+		if m.TimestampMs != nil {
+			timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)
+		}
+
+		infSeen := false
+
+		for _, q := range m.Histogram.Bucket {
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			if math.IsInf(q.GetUpperBound(), +1) {
+				infSeen = true
+			}
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     model.SampleValue(q.GetCumulativeCount()),
+				Timestamp: timestamp,
+			})
+		}
+
+		lset := make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum")
+
+		samples = append(samples, &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Histogram.GetSampleSum()),
+			Timestamp: timestamp,
+		})
+
+		lset = make(model.LabelSet, len(m.Label)+1)
+		for _, p := range m.Label {
+			lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+		}
+		lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count")
+
+		count := &model.Sample{
+			Metric:    model.Metric(lset),
+			Value:     model.SampleValue(m.Histogram.GetSampleCount()),
+			Timestamp: timestamp,
+		}
+		samples = append(samples, count)
+
+		if !infSeen {
+			// Append an infinity bucket sample.
+			lset := make(model.LabelSet, len(m.Label)+2)
+			for _, p := range m.Label {
+				lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())
+			}
+			lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf")
+			lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket")
+
+			samples = append(samples, &model.Sample{
+				Metric:    model.Metric(lset),
+				Value:     count.Value,
+				Timestamp: timestamp,
+			})
+		}
+	}
+
+	return samples
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
new file mode 100644
index 0000000..73c24df
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -0,0 +1,196 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/munnerz/goautoneg"
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/encoding/protodelim"
+	"google.golang.org/protobuf/encoding/prototext"
+
+	"github.com/prometheus/common/model"
+)
+
+// Encoder types encode metric families into an underlying wire protocol.
+type Encoder interface {
+	Encode(*dto.MetricFamily) error
+}
+
+// Closer is implemented by Encoders that need to be closed to finalize
+// encoding. (For example, OpenMetrics needs a final `# EOF` line.)
+//
+// Note that all Encoder implementations returned from this package implement
+// Closer, too, even if the Close call is a no-op. This happens in preparation
+// for adding a Close method to the Encoder interface directly in a (mildly
+// breaking) release in the future.
+type Closer interface {
+	Close() error
+}
+
+type encoderCloser struct {
+	encode func(*dto.MetricFamily) error
+	close  func() error
+}
+
+func (ec encoderCloser) Encode(v *dto.MetricFamily) error {
+	return ec.encode(v)
+}
+
+func (ec encoderCloser) Close() error {
+	return ec.close()
+}
+
+// Negotiate returns the Content-Type based on the given Accept header. If no
+// appropriate accepted type is found, FmtText is returned (which is the
+// Prometheus text format). This function will never negotiate FmtOpenMetrics,
+// as the support is still experimental. To include the option to negotiate
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
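+//
+// A typical server-side use (a sketch; w and req come from an assumed
+// http.Handler):
+//
+//	format := Negotiate(req.Header)
+//	w.Header().Set("Content-Type", string(format))
+//	enc := NewEncoder(w, format)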
+func Negotiate(h http.Header) Format {
+	escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+			switch Format(escapeParam) {
+			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+				escapingScheme = Format("; escaping=" + escapeParam)
+			default:
+				// If the escaping parameter is unknown, ignore it.
+			}
+		}
+		ver := ac.Params["version"]
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim + escapingScheme
+			case "text":
+				return FmtProtoText + escapingScheme
+			case "compact-text":
+				return FmtProtoCompact + escapingScheme
+			}
+		}
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText + escapingScheme
+		}
+	}
+	return FmtText + escapingScheme
+}
+
+// NegotiateIncludingOpenMetrics works like Negotiate but includes
+// FmtOpenMetrics as an option for the result. Note that this function is
+// temporary: it will disappear once FmtOpenMetrics is fully supported, at
+// which point it may be negotiated by the normal Negotiate function.
+func NegotiateIncludingOpenMetrics(h http.Header) Format {
+	escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
+	for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+		if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+			switch Format(escapeParam) {
+			case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+				escapingScheme = Format("; escaping=" + escapeParam)
+			default:
+				// If the escaping parameter is unknown, ignore it.
+			}
+		}
+		ver := ac.Params["version"]
+		if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
+			switch ac.Params["encoding"] {
+			case "delimited":
+				return FmtProtoDelim + escapingScheme
+			case "text":
+				return FmtProtoText + escapingScheme
+			case "compact-text":
+				return FmtProtoCompact + escapingScheme
+			}
+		}
+		if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
+			return FmtText + escapingScheme
+		}
+		if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+			switch ver {
+			case OpenMetricsVersion_1_0_0:
+				return FmtOpenMetrics_1_0_0 + escapingScheme
+			default:
+				return FmtOpenMetrics_0_0_1 + escapingScheme
+			}
+		}
+	}
+	return FmtText + escapingScheme
+}
+
+// NewEncoder returns a new encoder based on content type negotiation. All
+// Encoder implementations returned by NewEncoder also implement Closer, and
+// callers should always call the Close method. It is currently only required
+// for FmtOpenMetrics, but a future (breaking) release will add the Close method
+// to the Encoder interface directly. The current version of the Encoder
+// interface is kept for backwards compatibility.
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
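+//
+// A complete encode loop might look like this (a sketch; w, format, and
+// families are assumed):
+//
+//	enc := NewEncoder(w, format)
+//	for _, mf := range families {
+//		if err := enc.Encode(mf); err != nil {
+//			return err
+//		}
+//	}
+//	if c, ok := enc.(Closer); ok {
+//		return c.Close() // finalizes e.g. the OpenMetrics `# EOF` line
+//	}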
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+	escapingScheme := format.ToEscapingScheme()
+
+	switch format.FormatType() {
+	case TypeProtoDelim:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme))
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeProtoCompact:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeProtoText:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeTextPlain:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
+				return err
+			},
+			close: func() error { return nil },
+		}
+	case TypeOpenMetrics:
+		return encoderCloser{
+			encode: func(v *dto.MetricFamily) error {
+				_, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
+				return err
+			},
+			close: func() error {
+				_, err := FinalizeOpenMetrics(w)
+				return err
+			},
+		}
+	}
+	panic(fmt.Errorf("expfmt.NewEncoder: unknown format %q", format))
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
new file mode 100644
index 0000000..c34c7de
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -0,0 +1,211 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package expfmt contains tools for reading and writing Prometheus metrics.
+package expfmt
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/prometheus/common/model"
+)
+
+// Format specifies the HTTP content type of the different wire protocols.
+type Format string
+
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
+const (
+	TextVersion   = "0.0.4"
+	ProtoType     = `application/vnd.google.protobuf`
+	ProtoProtocol = `io.prometheus.client.MetricFamily`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	ProtoFmt        = ProtoType + "; proto=" + ProtoProtocol + ";"
+	OpenMetricsType = `application/openmetrics-text`
+	//nolint:revive // Allow for underscores.
+	OpenMetricsVersion_0_0_1 = "0.0.1"
+	//nolint:revive // Allow for underscores.
+	OpenMetricsVersion_1_0_0 = "1.0.0"
+
+	// The Content-Type values for the different wire protocols. Do not compare
+	// directly against these constants; instead, use the comparison functions.
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+	FmtUnknown Format = `<unknown>`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+	FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+	FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+	FmtProtoText Format = ProtoFmt + ` encoding=text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+	FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	//nolint:revive // Allow for underscores.
+	FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+	// Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+	//nolint:revive // Allow for underscores.
+	FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
+)
+
+const (
+	hdrContentType = "Content-Type"
+	hdrAccept      = "Accept"
+)
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, basic string
+// comparisons become infeasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+	TypeUnknown FormatType = iota
+	TypeProtoCompact
+	TypeProtoDelim
+	TypeProtoText
+	TypeTextPlain
+	TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. It is mostly used
+// in tests; most Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
+func NewFormat(t FormatType) Format {
+	switch t {
+	case TypeProtoCompact:
+		return FmtProtoCompact
+	case TypeProtoDelim:
+		return FmtProtoDelim
+	case TypeProtoText:
+		return FmtProtoText
+	case TypeTextPlain:
+		return FmtText
+	case TypeOpenMetrics:
+		return FmtOpenMetrics_1_0_0
+	default:
+		return FmtUnknown
+	}
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+	if version == OpenMetricsVersion_0_0_1 {
+		return FmtOpenMetrics_0_0_1, nil
+	}
+	if version == OpenMetricsVersion_1_0_0 {
+		return FmtOpenMetrics_1_0_0, nil
+	}
+	return FmtUnknown, errors.New("unknown open metrics version string")
+}
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
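+//
+// For example, FmtText.WithEscapingScheme(model.NoEscaping) yields
+// `text/plain; version=0.0.4; charset=utf-8; escaping=allow-utf-8`.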
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+	var terms []string
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			trimmed := strings.TrimSpace(p)
+			if len(trimmed) > 0 {
+				terms = append(terms, trimmed)
+			}
+			continue
+		}
+		key := strings.TrimSpace(toks[0])
+		if key != model.EscapingKey {
+			terms = append(terms, strings.TrimSpace(p))
+		}
+	}
+	terms = append(terms, model.EscapingKey+"="+s.String())
+	return Format(strings.Join(terms, "; "))
+}
+
+// FormatType deduces an overall FormatType for the given format.
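+// For example, FmtOpenMetrics_1_0_0.FormatType() returns TypeOpenMetrics,
+// while an unrecognized Content-Type maps to TypeUnknown.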
+func (f Format) FormatType() FormatType {
+	toks := strings.Split(string(f), ";")
+	params := make(map[string]string)
+	for i, t := range toks {
+		if i == 0 {
+			continue
+		}
+		args := strings.Split(t, "=")
+		if len(args) != 2 {
+			continue
+		}
+		params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+	}
+
+	switch strings.TrimSpace(toks[0]) {
+	case ProtoType:
+		if params["proto"] != ProtoProtocol {
+			return TypeUnknown
+		}
+		switch params["encoding"] {
+		case "delimited":
+			return TypeProtoDelim
+		case "text":
+			return TypeProtoText
+		case "compact-text":
+			return TypeProtoCompact
+		default:
+			return TypeUnknown
+		}
+	case OpenMetricsType:
+		if params["charset"] != "utf-8" {
+			return TypeUnknown
+		}
+		return TypeOpenMetrics
+	case "text/plain":
+		v, ok := params["version"]
+		if !ok {
+			return TypeTextPlain
+		}
+		if v == TextVersion {
+			return TypeTextPlain
+		}
+		return TypeUnknown
+	default:
+		return TypeUnknown
+	}
+}
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If
+// a valid "escaping" term exists, that will be used. Otherwise, the global
+// default will be returned.
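+//
+// For example, Format("text/plain; escaping=underscores").ToEscapingScheme()
+// returns model.UnderscoreEscaping.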
+func (f Format) ToEscapingScheme() model.EscapingScheme {
+	for _, p := range strings.Split(string(f), ";") {
+		toks := strings.Split(p, "=")
+		if len(toks) != 2 {
+			continue
+		}
+		key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+		if key == model.EscapingKey {
+			scheme, err := model.ToEscapingScheme(value)
+			if err != nil {
+				return model.NameEscapingScheme
+			}
+			return scheme
+		}
+	}
+	return model.NameEscapingScheme
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
new file mode 100644
index 0000000..0290f6a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -0,0 +1,40 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Build only when actually fuzzing
+//go:build gofuzz
+// +build gofuzz
+
+package expfmt
+
+import (
+	"bytes"
+
+	"github.com/prometheus/common/model"
+)
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+//	go-fuzz-build github.com/prometheus/common/expfmt
+//	go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+	parser := NewTextParser(model.UTF8Validation)
+	_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+	if err != nil {
+		return 0
+	}
+
+	return 1
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
new file mode 100644
index 0000000..8dbf6d0
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -0,0 +1,695 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/types/known/timestamppb"
+
+	"github.com/prometheus/common/model"
+)
+
+type encoderOption struct {
+	withCreatedLines bool
+	withUnit         bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature flag
+// `--enable-feature=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+	return func(t *encoderOption) {
+		t.withCreatedLines = true
+	}
+}
+
+// WithUnit is an EncoderOption that enables writing a declared unit to the
+// output and appending it to the metric name as a suffix, if it is not there
+// already. Without opting in this way, the unit is neither added to the metric
+// name nor passed on to the output, even if it is declared in the
+// *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+	return func(t *encoderOption) {
+		t.withUnit = true
+	}
+}
+
+// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
+// OpenMetrics text format and writes the resulting lines to 'out'. It returns
+// the number of bytes written and any error encountered. The output will have
+// the same order as the input, no further sorting is performed. Furthermore,
+// this function assumes the input is already sanitized and does not perform any
+// sanity checks. If the input contains duplicate metrics or invalid metric or
+// label names, the conversion will result in invalid text format output.
+//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized
+// and no error will be returned in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized
+// and no error will be returned in this case.
+//
+// This function fulfills the type 'expfmt.encoder'.
+//
+// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
+// on individual metric families, it is the responsibility of the caller to
+// append this line to 'out' once all metric families have been written.
+// Conveniently, this can be done by calling FinalizeOpenMetrics.
+//
+// The output should be fully OpenMetrics compliant. However, there are a few
+// missing features and peculiarities to avoid complications when switching from
+// Prometheus to OpenMetrics or vice versa:
+//
+//   - Counters are expected to have the `_total` suffix in their metric name. In
+//     the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+//     lines. A counter with a missing `_total` suffix is not an error. However,
+//     its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+//     output.
+//
+//   - According to the OpenMetrics specification, the `# UNIT` line is
+//     optional, but if populated, the unit has to be present in the metric
+//     name as its suffix (see
+//     https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
+//     However, to accommodate scenarios where such a change to the metric name
+//     is not desirable, users must explicitly opt in via WithUnit (described
+//     above) to have the unit included in the output and appended to the
+//     metric name as a suffix; without opting in, neither happens.
+//
+//   - No support for the following (optional) features: info type,
+//     stateset type, gaugehistogram type.
+//
+//   - The size of exemplar labels is not checked (i.e. it's possible to create
+//     exemplars that are larger than allowed by the OpenMetrics specification).
+//
+//   - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+//     with a `NaN` value.)
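+//
+// A minimal caller sketch (w and fams are assumed):
+//
+//	var written int
+//	for _, mf := range fams {
+//		n, err := MetricFamilyToOpenMetrics(w, mf)
+//		written += n
+//		if err != nil {
+//			return written, err
+//		}
+//	}
+//	n, err := FinalizeOpenMetrics(w)
+//	return written + n, err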
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+	toOM := encoderOption{}
+	for _, option := range options {
+		option(&toOM)
+	}
+
+	name := in.GetName()
+	if name == "" {
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bufio.Writer from the sync.Pool.
+	w, ok := out.(enhancedWriter)
+	if !ok {
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
+		w = b
+		defer func() {
+			bErr := b.Flush()
+			if err == nil {
+				err = bErr
+			}
+			bufPool.Put(b)
+		}()
+	}
+
+	var (
+		n             int
+		metricType    = in.GetType()
+		compliantName = name
+	)
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+		compliantName = name[:len(name)-6]
+	}
+	if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+		compliantName = compliantName + "_" + *in.Unit
+	}
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err = w.WriteString("# HELP ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, compliantName)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Help, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+	n, err = w.WriteString("# TYPE ")
+	written += n
+	if err != nil {
+		return
+	}
+	n, err = writeName(w, compliantName)
+	written += n
+	if err != nil {
+		return
+	}
+	switch metricType {
+	case dto.MetricType_COUNTER:
+		if strings.HasSuffix(name, "_total") {
+			n, err = w.WriteString(" counter\n")
+		} else {
+			n, err = w.WriteString(" unknown\n")
+		}
+	case dto.MetricType_GAUGE:
+		n, err = w.WriteString(" gauge\n")
+	case dto.MetricType_SUMMARY:
+		n, err = w.WriteString(" summary\n")
+	case dto.MetricType_UNTYPED:
+		n, err = w.WriteString(" unknown\n")
+	case dto.MetricType_HISTOGRAM:
+		n, err = w.WriteString(" histogram\n")
+	default:
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
+	}
+	written += n
+	if err != nil {
+		return
+	}
+	if toOM.withUnit && in.Unit != nil {
+		n, err = w.WriteString("# UNIT ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, compliantName)
+		written += n
+		if err != nil {
+			return
+		}
+
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Unit, true)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+
+	var createdTsBytesWritten int
+
+	// Finally the samples, one line for each.
+	if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+		compliantName += "_total"
+	}
+	for _, metric := range in.Metric {
+		switch metricType {
+		case dto.MetricType_COUNTER:
+			if metric.Counter == nil {
+				return written, fmt.Errorf(
+					"expected counter in metric %s %s", compliantName, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "", metric, "", 0,
+				metric.Counter.GetValue(), 0, false,
+				metric.Counter.Exemplar,
+			)
+			if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
+		case dto.MetricType_GAUGE:
+			if metric.Gauge == nil {
+				return written, fmt.Errorf(
+					"expected gauge in metric %s %s", compliantName, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "", metric, "", 0,
+				metric.Gauge.GetValue(), 0, false,
+				nil,
+			)
+		case dto.MetricType_UNTYPED:
+			if metric.Untyped == nil {
+				return written, fmt.Errorf(
+					"expected untyped in metric %s %s", compliantName, metric,
+				)
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "", metric, "", 0,
+				metric.Untyped.GetValue(), 0, false,
+				nil,
+			)
+		case dto.MetricType_SUMMARY:
+			if metric.Summary == nil {
+				return written, fmt.Errorf(
+					"expected summary in metric %s %s", compliantName, metric,
+				)
+			}
+			for _, q := range metric.Summary.Quantile {
+				n, err = writeOpenMetricsSample(
+					w, compliantName, "", metric,
+					model.QuantileLabel, q.GetQuantile(),
+					q.GetValue(), 0, false,
+					nil,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_sum", metric, "", 0,
+				metric.Summary.GetSampleSum(), 0, false,
+				nil,
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_count", metric, "", 0,
+				0, metric.Summary.GetSampleCount(), true,
+				nil,
+			)
+			if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
+		case dto.MetricType_HISTOGRAM:
+			if metric.Histogram == nil {
+				return written, fmt.Errorf(
+					"expected histogram in metric %s %s", compliantName, metric,
+				)
+			}
+			infSeen := false
+			for _, b := range metric.Histogram.Bucket {
+				n, err = writeOpenMetricsSample(
+					w, compliantName, "_bucket", metric,
+					model.BucketLabel, b.GetUpperBound(),
+					0, b.GetCumulativeCount(), true,
+					b.Exemplar,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+				if math.IsInf(b.GetUpperBound(), +1) {
+					infSeen = true
+				}
+			}
+			if !infSeen {
+				n, err = writeOpenMetricsSample(
+					w, compliantName, "_bucket", metric,
+					model.BucketLabel, math.Inf(+1),
+					0, metric.Histogram.GetSampleCount(), true,
+					nil,
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_sum", metric, "", 0,
+				metric.Histogram.GetSampleSum(), 0, false,
+				nil,
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeOpenMetricsSample(
+				w, compliantName, "_count", metric, "", 0,
+				0, metric.Histogram.GetSampleCount(), true,
+				nil,
+			)
+			if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+				createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+				n += createdTsBytesWritten
+			}
+		default:
+			return written, fmt.Errorf(
+				"unexpected type in metric %s %s", compliantName, metric,
+			)
+		}
+		written += n
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// FinalizeOpenMetrics writes the final `# EOF\n` line required by OpenMetrics.
+func FinalizeOpenMetrics(w io.Writer) (written int, err error) {
+	return w.Write([]byte("# EOF\n"))
+}
+
+// writeOpenMetricsSample writes a single sample in OpenMetrics text format to
+// w, given the metric name, the metric proto message itself, optionally an
+// additional label name with a float64 value (use empty string as label name if
+// not required), the value (as float64 or uint64, determined by useIntValue),
+// and optionally an exemplar (use nil if not required). The
+// function returns the number of bytes written and any error encountered.
+func writeOpenMetricsSample(
+	w enhancedWriter,
+	name, suffix string,
+	metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	floatValue float64, intValue uint64, useIntValue bool,
+	exemplar *dto.Exemplar,
+) (int, error) {
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	if useIntValue {
+		n, err = writeUint(w, intValue)
+	} else {
+		n, err = writeOpenMetricsFloat(w, floatValue)
+	}
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if metric.TimestampMs != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		// TODO(beorn7): Format this directly without converting to a float first.
+		n, err = writeOpenMetricsFloat(w, float64(*metric.TimestampMs)/1000)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	if exemplar != nil && len(exemplar.Label) > 0 {
+		n, err = writeExemplar(w, exemplar)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeOpenMetricsNameAndLabelPairs works like writeNameAndLabelPairs but
+// quotes names that fail the legacy validity check and formats the additional
+// label value in OpenMetrics style.
+func writeOpenMetricsNameAndLabelPairs(
+	w enhancedWriter,
+	name string,
+	in []*dto.LabelPair,
+	additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+	var (
+		written            int
+		separator          byte = '{'
+		metricInsideBraces      = false
+	)
+
+	if name != "" {
+		// If the name does not pass the legacy validity check, we must put the
+		// metric name inside the braces, quoted.
+		if !model.LegacyValidation.IsValidMetricName(name) {
+			metricInsideBraces = true
+			err := w.WriteByte(separator)
+			written++
+			if err != nil {
+				return written, err
+			}
+			separator = ','
+		}
+
+		n, err := writeName(w, name)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+
+	if len(in) == 0 && additionalLabelName == "" {
+		if metricInsideBraces {
+			err := w.WriteByte('}')
+			written++
+			if err != nil {
+				return written, err
+			}
+		}
+		return written, nil
+	}
+
+	for _, lp := range in {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := writeName(w, lp.GetName())
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeEscapedString(w, lp.GetValue(), true)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+		separator = ','
+	}
+	if additionalLabelName != "" {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(additionalLabelName)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeOpenMetricsFloat(w, additionalLabelValue)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+	}
+	err := w.WriteByte('}')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following OpenMetrics text format to w, given the metric name, the metric proto
+// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
+// an additional label name with a float64 value (use empty string as label name if
+// not required) and the timestamp that represents the created timestamp.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+	name, suffixToTrim string, metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+	written := 0
+	n, err := writeOpenMetricsNameAndLabelPairs(
+		w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+
+	// TODO(beorn7): Format this directly from components of ts to
+	// avoid overflow/underflow and precision issues of the float
+	// conversion.
+	n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+	written += n
+	if err != nil {
+		return written, err
+	}
+
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
+// function returns the number of bytes written and any error encountered.
+func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
+	written := 0
+	n, err := w.WriteString(" # ")
+	written += n
+	if err != nil {
+		return written, err
+	}
+	n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	n, err = writeOpenMetricsFloat(w, e.GetValue())
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if e.Timestamp != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		err = e.Timestamp.CheckValid()
+		if err != nil {
+			return written, err
+		}
+		ts := e.Timestamp.AsTime()
+		// TODO(beorn7): Format this directly from components of ts to
+		// avoid overflow/underflow and precision issues of the float
+		// conversion.
+		n, err = writeOpenMetricsFloat(w, float64(ts.UnixNano())/1e9)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	return written, nil
+}
+
+// writeOpenMetricsFloat works like writeFloat but appends ".0" if the resulting
+// number would otherwise contain neither a "." nor an "e".
+func writeOpenMetricsFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return w.WriteString("1.0")
+	case f == 0:
+		return w.WriteString("0.0")
+	case f == -1:
+		return w.WriteString("-1.0")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		if !bytes.ContainsAny(*bp, "e.") {
+			*bp = append(*bp, '.', '0')
+		}
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
+
+// writeUint is like writeInt just for uint64.
+func writeUint(w enhancedWriter, u uint64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendUint((*bp)[:0], u, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 0000000..c4e9c1b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,520 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"sync"
+
+	dto "github.com/prometheus/client_model/go"
+
+	"github.com/prometheus/common/model"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
+// implements it.
+type enhancedWriter interface {
+	io.Writer
+	WriteRune(r rune) (n int, err error)
+	WriteString(s string) (n int, err error)
+	WriteByte(c byte) error
+}
+
+const (
+	initialNumBufSize = 24
+)
+
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return bufio.NewWriter(io.Discard)
+		},
+	}
+	numBufPool = sync.Pool{
+		New: func() interface{} {
+			b := make([]byte, 0, initialNumBufSize)
+			return &b
+		},
+	}
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized
+// and no error will be returned in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized
+// and no error will be returned in this case.
+//
+// This method fulfills the type 'prometheus.encoder'.
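+//
+// Typical use (a sketch; families is assumed):
+//
+//	for _, mf := range families {
+//		if _, err := MetricFamilyToText(os.Stdout, mf); err != nil {
+//			log.Fatal(err)
+//		}
+//	}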
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+	// Fail-fast checks.
+	if len(in.Metric) == 0 {
+		return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+	}
+	name := in.GetName()
+	if name == "" {
+		return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+	}
+
+	// Try the interface upgrade. If it doesn't work, we'll use a
+	// bufio.Writer from the sync.Pool.
+	w, ok := out.(enhancedWriter)
+	if !ok {
+		b := bufPool.Get().(*bufio.Writer)
+		b.Reset(out)
+		w = b
+		defer func() {
+			bErr := b.Flush()
+			if err == nil {
+				err = bErr
+			}
+			bufPool.Put(b)
+		}()
+	}
+
+	var n int
+
+	// Comments, first HELP, then TYPE.
+	if in.Help != nil {
+		n, err = w.WriteString("# HELP ")
+		written += n
+		if err != nil {
+			return
+		}
+		n, err = writeName(w, name)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return
+		}
+		n, err = writeEscapedString(w, *in.Help, false)
+		written += n
+		if err != nil {
+			return
+		}
+		err = w.WriteByte('\n')
+		written++
+		if err != nil {
+			return
+		}
+	}
+	n, err = w.WriteString("# TYPE ")
+	written += n
+	if err != nil {
+		return
+	}
+	n, err = writeName(w, name)
+	written += n
+	if err != nil {
+		return
+	}
+	metricType := in.GetType()
+	switch metricType {
+	case dto.MetricType_COUNTER:
+		n, err = w.WriteString(" counter\n")
+	case dto.MetricType_GAUGE:
+		n, err = w.WriteString(" gauge\n")
+	case dto.MetricType_SUMMARY:
+		n, err = w.WriteString(" summary\n")
+	case dto.MetricType_UNTYPED:
+		n, err = w.WriteString(" untyped\n")
+	case dto.MetricType_HISTOGRAM:
+		n, err = w.WriteString(" histogram\n")
+	default:
+		return written, fmt.Errorf("unknown metric type %s", metricType.String())
+	}
+	written += n
+	if err != nil {
+		return
+	}
+
+	// Finally the samples, one line for each.
+	for _, metric := range in.Metric {
+		switch metricType {
+		case dto.MetricType_COUNTER:
+			if metric.Counter == nil {
+				return written, fmt.Errorf(
+					"expected counter in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Counter.GetValue(),
+			)
+		case dto.MetricType_GAUGE:
+			if metric.Gauge == nil {
+				return written, fmt.Errorf(
+					"expected gauge in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Gauge.GetValue(),
+			)
+		case dto.MetricType_UNTYPED:
+			if metric.Untyped == nil {
+				return written, fmt.Errorf(
+					"expected untyped in metric %s %s", name, metric,
+				)
+			}
+			n, err = writeSample(
+				w, name, "", metric, "", 0,
+				metric.Untyped.GetValue(),
+			)
+		case dto.MetricType_SUMMARY:
+			if metric.Summary == nil {
+				return written, fmt.Errorf(
+					"expected summary in metric %s %s", name, metric,
+				)
+			}
+			for _, q := range metric.Summary.Quantile {
+				n, err = writeSample(
+					w, name, "", metric,
+					model.QuantileLabel, q.GetQuantile(),
+					q.GetValue(),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Summary.GetSampleSum(),
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeSample(
+				w, name, "_count", metric, "", 0,
+				float64(metric.Summary.GetSampleCount()),
+			)
+		case dto.MetricType_HISTOGRAM:
+			if metric.Histogram == nil {
+				return written, fmt.Errorf(
+					"expected histogram in metric %s %s", name, metric,
+				)
+			}
+			infSeen := false
+			for _, b := range metric.Histogram.Bucket {
+				n, err = writeSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, b.GetUpperBound(),
+					float64(b.GetCumulativeCount()),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+				if math.IsInf(b.GetUpperBound(), +1) {
+					infSeen = true
+				}
+			}
+			if !infSeen {
+				n, err = writeSample(
+					w, name, "_bucket", metric,
+					model.BucketLabel, math.Inf(+1),
+					float64(metric.Histogram.GetSampleCount()),
+				)
+				written += n
+				if err != nil {
+					return
+				}
+			}
+			n, err = writeSample(
+				w, name, "_sum", metric, "", 0,
+				metric.Histogram.GetSampleSum(),
+			)
+			written += n
+			if err != nil {
+				return
+			}
+			n, err = writeSample(
+				w, name, "_count", metric, "", 0,
+				float64(metric.Histogram.GetSampleCount()),
+			)
+		default:
+			return written, fmt.Errorf(
+				"unexpected type in metric %s %s", name, metric,
+			)
+		}
+		written += n
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// writeSample writes a single sample in text format to w, given the metric
+// name, the metric proto message itself, optionally an additional label name
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
+func writeSample(
+	w enhancedWriter,
+	name, suffix string,
+	metric *dto.Metric,
+	additionalLabelName string, additionalLabelValue float64,
+	value float64,
+) (int, error) {
+	written := 0
+	n, err := writeNameAndLabelPairs(
+		w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
+	)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte(' ')
+	written++
+	if err != nil {
+		return written, err
+	}
+	n, err = writeFloat(w, value)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	if metric.TimestampMs != nil {
+		err = w.WriteByte(' ')
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err = writeInt(w, *metric.TimestampMs)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+	err = w.WriteByte('\n')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
+	w enhancedWriter,
+	name string,
+	in []*dto.LabelPair,
+	additionalLabelName string, additionalLabelValue float64,
+) (int, error) {
+	var (
+		written            int
+		separator          byte = '{'
+		metricInsideBraces      = false
+	)
+
+	if name != "" {
+		// If the name does not pass the legacy validity check, we must put the
+		// metric name inside the braces.
+		if !model.LegacyValidation.IsValidMetricName(name) {
+			metricInsideBraces = true
+			err := w.WriteByte(separator)
+			written++
+			if err != nil {
+				return written, err
+			}
+			separator = ','
+		}
+		n, err := writeName(w, name)
+		written += n
+		if err != nil {
+			return written, err
+		}
+	}
+
+	if len(in) == 0 && additionalLabelName == "" {
+		if metricInsideBraces {
+			err := w.WriteByte('}')
+			written++
+			if err != nil {
+				return written, err
+			}
+		}
+		return written, nil
+	}
+
+	for _, lp := range in {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := writeName(w, lp.GetName())
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeEscapedString(w, lp.GetValue(), true)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+		separator = ','
+	}
+	if additionalLabelName != "" {
+		err := w.WriteByte(separator)
+		written++
+		if err != nil {
+			return written, err
+		}
+		n, err := w.WriteString(additionalLabelName)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = w.WriteString(`="`)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		n, err = writeFloat(w, additionalLabelValue)
+		written += n
+		if err != nil {
+			return written, err
+		}
+		err = w.WriteByte('"')
+		written++
+		if err != nil {
+			return written, err
+		}
+	}
+	err := w.WriteByte('}')
+	written++
+	if err != nil {
+		return written, err
+	}
+	return written, nil
+}
+
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
+// includeDoubleQuote is true - '"' by '\"'.
+var (
+	escaper       = strings.NewReplacer("\\", `\\`, "\n", `\n`)
+	quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
+)
+
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+	if includeDoubleQuote {
+		return quotedEscaper.WriteString(w, v)
+	}
+	return escaper.WriteString(w, v)
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+	switch {
+	case f == 1:
+		return 1, w.WriteByte('1')
+	case f == 0:
+		return 1, w.WriteByte('0')
+	case f == -1:
+		return w.WriteString("-1")
+	case math.IsNaN(f):
+		return w.WriteString("NaN")
+	case math.IsInf(f, +1):
+		return w.WriteString("+Inf")
+	case math.IsInf(f, -1):
+		return w.WriteString("-Inf")
+	default:
+		bp := numBufPool.Get().(*[]byte)
+		*bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+		written, err := w.Write(*bp)
+		numBufPool.Put(bp)
+		return written, err
+	}
+}
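+
+// Illustrative outputs: writeFloat(w, 0.5) writes "0.5" via the AppendFloat
+// path, writeFloat(w, math.Inf(1)) writes "+Inf", and writeFloat(w, 1) takes a
+// hardcoded fast path and writes "1".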
+
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+	bp := numBufPool.Get().(*[]byte)
+	*bp = strconv.AppendInt((*bp)[:0], i, 10)
+	written, err := w.Write(*bp)
+	numBufPool.Put(bp)
+	return written, err
+}
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+	if model.LegacyValidation.IsValidMetricName(name) {
+		return w.WriteString(name)
+	}
+	var written int
+	var err error
+	err = w.WriteByte('"')
+	written++
+	if err != nil {
+		return written, err
+	}
+	var n int
+	n, err = writeEscapedString(w, name, true)
+	written += n
+	if err != nil {
+		return written, err
+	}
+	err = w.WriteByte('"')
+	written++
+	return written, err
+}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
new file mode 100644
index 0000000..8f2edde
--- /dev/null
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -0,0 +1,933 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	dto "github.com/prometheus/client_model/go"
+	"google.golang.org/protobuf/proto"
+
+	"github.com/prometheus/common/model"
+)
+
+// A stateFn is a function that represents a state in a state machine. By
+// executing it, the state is progressed to the next state. The stateFn returns
+// another stateFn, which represents the new state. The end state is represented
+// by nil.
+type stateFn func() stateFn
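+
+// The parser drives such a machine with a loop of the following shape (a
+// sketch of the pattern, shown verbatim in TextToMetricFamilies below):
+//
+//	for state := p.startOfLine; state != nil; state = state() {
+//	}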
+
+// ParseError signals errors while parsing the simple and flat text-based
+// exchange format.
+type ParseError struct {
+	Line int
+	Msg  string
+}
+
+// Error implements the error interface.
+func (e ParseError) Error() string {
+	return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg)
+}
+
+// TextParser is used to parse the simple and flat text-based exchange format.
+// Create one with NewTextParser; the zero value's validation scheme is the
+// invalid UnsetValidation.
+type TextParser struct {
+	metricFamiliesByName map[string]*dto.MetricFamily
+	buf                  *bufio.Reader // Where the parsed input is read through.
+	err                  error         // Most recent error.
+	lineCount            int           // Tracks the line count for error messages.
+	currentByte          byte          // The most recent byte read.
+	currentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes.
+	currentMF            *dto.MetricFamily
+	currentMetric        *dto.Metric
+	currentLabelPair     *dto.LabelPair
+	currentLabelPairs    []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
+
+	// The remaining member variables are only used for summaries/histograms.
+	currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
+	// Summary specific.
+	summaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentQuantile float64
+	// Histogram specific.
+	histograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.
+	currentBucket float64
+	// These tell us if the currently processed line ends in '_count' or
+	// '_sum' respectively and belongs to a summary/histogram, representing the
+	// sample count and sum of that summary/histogram.
+	currentIsSummaryCount, currentIsSummarySum     bool
+	currentIsHistogramCount, currentIsHistogramSum bool
+	// These indicate whether the metric name of the line currently being
+	// parsed is inside braces and whether that metric name has already been
+	// found, respectively.
+	// scheme sets the desired ValidationScheme for names. Defaults to the invalid
+	// UnsetValidation.
+	scheme model.ValidationScheme
+}
+
+// NewTextParser returns a new TextParser with the provided nameValidationScheme.
+func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser {
+	return TextParser{scheme: nameValidationScheme}
+}
+
+// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
+// format and creates MetricFamily proto messages. It returns the MetricFamily
+// proto messages in a map where the metric names are the keys, along with any
+// error encountered.
+//
+// If the input contains duplicate metrics (i.e. lines with the same metric name
+// and exactly the same label set), the resulting MetricFamily will contain
+// duplicate Metric proto messages. Similar is true for duplicate label
+// names. Checks for duplicates have to be performed separately, if required.
+// Also note that the metrics within each MetricFamily are not sorted, nor are
+// the label pairs within each Metric. Sorting is not required for the most
+// frequent use of this method, which is sample ingestion in the Prometheus
+// server. However, for presentation purposes, you might want to sort the
+// metrics, and in some cases, you must sort the labels, e.g. for consumption by
+// the metric family injection hook of the Prometheus registry.
+//
+// Summaries and histograms are rather special beasts. You would probably not
+// use them in the simple text format anyway. This method can deal with
+// summaries and histograms if they are presented in exactly the way the
+// text.Create function creates them.
+//
+// This method must not be called concurrently. If you want to parse different
+// input concurrently, instantiate a separate TextParser for each goroutine.
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {
+	p.reset(in)
+	for nextState := p.startOfLine; nextState != nil; nextState = nextState() {
+		// Magic happens here...
+	}
+	// Get rid of empty metric families.
+	for k, mf := range p.metricFamiliesByName {
+		if len(mf.GetMetric()) == 0 {
+			delete(p.metricFamiliesByName, k)
+		}
+	}
+	// If p.err is io.EOF now, we have run into a premature end of the input
+	// stream. Turn this error into something nicer and more
+	// meaningful. (io.EOF is often used as a signal for the legitimate end
+	// of an input stream.)
+	if p.err != nil && errors.Is(p.err, io.EOF) {
+		p.parseError("unexpected end of input stream")
+	}
+	return p.metricFamiliesByName, p.err
+}
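+
+// A minimal usage sketch from a consumer's point of view, assuming the usual
+// imports (strings, fmt) alongside this package and model:
+//
+//	parser := expfmt.NewTextParser(model.UTF8Validation)
+//	families, err := parser.TextToMetricFamilies(strings.NewReader(
+//		"# TYPE http_requests_total counter\n" +
+//			"http_requests_total{code=\"200\"} 1027\n"))
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(families["http_requests_total"].GetType()) // COUNTER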
+
+func (p *TextParser) reset(in io.Reader) {
+	p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+	p.currentLabelPairs = nil
+	if p.buf == nil {
+		p.buf = bufio.NewReader(in)
+	} else {
+		p.buf.Reset(in)
+	}
+	p.err = nil
+	p.lineCount = 0
+	if p.summaries == nil || len(p.summaries) > 0 {
+		p.summaries = map[uint64]*dto.Metric{}
+	}
+	if p.histograms == nil || len(p.histograms) > 0 {
+		p.histograms = map[uint64]*dto.Metric{}
+	}
+	p.currentQuantile = math.NaN()
+	p.currentBucket = math.NaN()
+	p.currentMF = nil
+}
+
+// startOfLine represents the state where the next byte read from p.buf is the
+// start of a line (or whitespace leading up to it).
+func (p *TextParser) startOfLine() stateFn {
+	p.lineCount++
+	p.currentMetricIsInsideBraces = false
+	p.currentMetricInsideBracesIsPresent = false
+	if p.skipBlankTab(); p.err != nil {
+		// This is the only place that we expect to see io.EOF,
+		// which is not an error but the signal that we are done.
+		// Any other error that happens to align with the start of
+		// a line is still an error.
+		if errors.Is(p.err, io.EOF) {
+			p.err = nil
+		}
+		return nil
+	}
+	switch p.currentByte {
+	case '#':
+		return p.startComment
+	case '\n':
+		return p.startOfLine // Empty line, start the next one.
+	case '{':
+		p.currentMetricIsInsideBraces = true
+		return p.readingLabels
+	}
+	return p.readingMetricName
+}
+
+// startComment represents the state where the next byte read from p.buf is the
+// start of a comment (or whitespace leading up to it).
+func (p *TextParser) startComment() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	// If we have hit the end of line already, there is nothing left
+	// to do. This is not considered a syntax error.
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	keyword := p.currentToken.String()
+	if keyword != "HELP" && keyword != "TYPE" {
+		// Generic comment, ignore by fast forwarding to end of line.
+		for p.currentByte != '\n' {
+			if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+				return nil // Unexpected end of input.
+			}
+		}
+		return p.startOfLine
+	}
+	// There is something. Next has to be a metric name.
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenAsMetricName(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		// At the end of the line already.
+		// Again, this is not considered a syntax error.
+		return p.startOfLine
+	}
+	if !isBlankOrTab(p.currentByte) {
+		p.parseError("invalid metric name in comment")
+		return nil
+	}
+	p.setOrCreateCurrentMF()
+	if p.err != nil {
+		return nil
+	}
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '\n' {
+		// At the end of the line already.
+		// Again, this is not considered a syntax error.
+		return p.startOfLine
+	}
+	switch keyword {
+	case "HELP":
+		return p.readingHelp
+	case "TYPE":
+		return p.readingType
+	}
+	panic(fmt.Sprintf("code error: unexpected keyword %q", keyword))
+}
+
+// readingMetricName represents the state where the last byte read (now in
+// p.currentByte) is the first byte of a metric name.
+func (p *TextParser) readingMetricName() stateFn {
+	if p.readTokenAsMetricName(); p.err != nil {
+		return nil
+	}
+	if p.currentToken.Len() == 0 {
+		p.parseError("invalid metric name")
+		return nil
+	}
+	p.setOrCreateCurrentMF()
+	if p.err != nil {
+		return nil
+	}
+	// Now is the time to fix the type if it hasn't happened yet.
+	if p.currentMF.Type == nil {
+		p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+	}
+	p.currentMetric = &dto.Metric{}
+	// Do not append the newly created currentMetric to
+	// currentMF.Metric right now. First wait to see whether this is a summary
+	// whose metric already exists, which we can only know after
+	// having read all the labels.
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	return p.readingLabels
+}
+
+// readingLabels represents the state where the last byte read (now in
+// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the
+// first byte of the value (otherwise).
+func (p *TextParser) readingLabels() stateFn {
+	// Summaries/histograms are special. We have to reset the
+	// currentLabels map, currentQuantile and currentBucket before starting to
+	// read labels.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		p.currentLabels = map[string]string{}
+		p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()
+		p.currentQuantile = math.NaN()
+		p.currentBucket = math.NaN()
+	}
+	if p.currentByte != '{' {
+		return p.readingValue
+	}
+	return p.startLabelName
+}
+
+// startLabelName represents the state where the next byte read from p.buf is
+// the start of a label name (or whitespace leading up to it).
+func (p *TextParser) startLabelName() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte == '}' {
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
+		if p.skipBlankTab(); p.err != nil {
+			return nil // Unexpected end of input.
+		}
+		return p.readingValue
+	}
+	if p.readTokenAsLabelName(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() == 0 {
+		p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
+		return nil
+	}
+	if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '=' {
+		if p.currentMetricIsInsideBraces {
+			if p.currentMetricInsideBracesIsPresent {
+				p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+				return nil
+			}
+			switch p.currentByte {
+			case ',':
+				p.setOrCreateCurrentMF()
+				if p.err != nil {
+					return nil
+				}
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetricInsideBracesIsPresent = true
+				return p.startLabelName
+			case '}':
+				p.setOrCreateCurrentMF()
+				if p.err != nil {
+					p.currentLabelPairs = nil
+					return nil
+				}
+				if p.currentMF.Type == nil {
+					p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+				}
+				p.currentMetric = &dto.Metric{}
+				p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+				p.currentLabelPairs = nil
+				if p.skipBlankTab(); p.err != nil {
+					return nil // Unexpected end of input.
+				}
+				return p.readingValue
+			default:
+				p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+				return nil
+			}
+		}
+		p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+	if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+		p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) {
+		p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName()))
+		p.currentLabelPairs = nil
+		return nil
+	}
+	// Special summary/histogram treatment. Don't add 'quantile' and 'le'
+	// labels to 'real' labels.
+	if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+		(p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
+		p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
+	}
+	// Check for duplicate label names.
+	labels := make(map[string]struct{})
+	for _, l := range p.currentLabelPairs {
+		lName := l.GetName()
+		if _, exists := labels[lName]; exists {
+			p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+			p.currentLabelPairs = nil
+			return nil
+		}
+		labels[lName] = struct{}{}
+	}
+	return p.startLabelValue
+}
+
+// startLabelValue represents the state where the next byte read from p.buf is
+// the start of a (quoted) label value (or whitespace leading up to it).
+func (p *TextParser) startLabelValue() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentByte != '"' {
+		p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte))
+		return nil
+	}
+	if p.readTokenAsLabelValue(); p.err != nil {
+		return nil
+	}
+	if !model.LabelValue(p.currentToken.String()).IsValid() {
+		p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentLabelPair.Value = proto.String(p.currentToken.String())
+	// Special treatment of summaries:
+	// - Quantile labels are special, will result in dto.Quantile later.
+	// - Other labels have to be added to currentLabels for signature calculation.
+	if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+		if p.currentLabelPair.GetName() == model.QuantileLabel {
+			if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
+				// Create a more helpful error message.
+				p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+				p.currentLabelPairs = nil
+				return nil
+			}
+		} else {
+			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+		}
+	}
+	// Similar special treatment of histograms.
+	if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+		if p.currentLabelPair.GetName() == model.BucketLabel {
+			if p.currentBucket, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
+				// Create a more helpful error message.
+				p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue()))
+				return nil
+			}
+		} else {
+			p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()
+		}
+	}
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	switch p.currentByte {
+	case ',':
+		return p.startLabelName
+
+	case '}':
+		if p.currentMF == nil {
+			p.parseError("invalid metric name")
+			return nil
+		}
+		p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+		p.currentLabelPairs = nil
+		if p.skipBlankTab(); p.err != nil {
+			return nil // Unexpected end of input.
+		}
+		return p.readingValue
+	default:
+		p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+		p.currentLabelPairs = nil
+		return nil
+	}
+}
+
+// readingValue represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the sample value (i.e. a float).
+func (p *TextParser) readingValue() stateFn {
+	// When we are here, we have read all the labels, so for the
+	// special case of a summary/histogram, we can finally find out
+	// if the metric already exists.
+	switch p.currentMF.GetType() {
+	case dto.MetricType_SUMMARY:
+		signature := model.LabelsToSignature(p.currentLabels)
+		if summary := p.summaries[signature]; summary != nil {
+			p.currentMetric = summary
+		} else {
+			p.summaries[signature] = p.currentMetric
+			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+		}
+	case dto.MetricType_HISTOGRAM:
+		signature := model.LabelsToSignature(p.currentLabels)
+		if histogram := p.histograms[signature]; histogram != nil {
+			p.currentMetric = histogram
+		} else {
+			p.histograms[signature] = p.currentMetric
+			p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+		}
+	default:
+		p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	value, err := parseFloat(p.currentToken.String())
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String()))
+		return nil
+	}
+	switch p.currentMF.GetType() {
+	case dto.MetricType_COUNTER:
+		p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}
+	case dto.MetricType_GAUGE:
+		p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}
+	case dto.MetricType_UNTYPED:
+		p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}
+	case dto.MetricType_SUMMARY:
+		// *sigh*
+		if p.currentMetric.Summary == nil {
+			p.currentMetric.Summary = &dto.Summary{}
+		}
+		switch {
+		case p.currentIsSummaryCount:
+			p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsSummarySum:
+			p.currentMetric.Summary.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentQuantile):
+			p.currentMetric.Summary.Quantile = append(
+				p.currentMetric.Summary.Quantile,
+				&dto.Quantile{
+					Quantile: proto.Float64(p.currentQuantile),
+					Value:    proto.Float64(value),
+				},
+			)
+		}
+	case dto.MetricType_HISTOGRAM:
+		// *sigh*
+		if p.currentMetric.Histogram == nil {
+			p.currentMetric.Histogram = &dto.Histogram{}
+		}
+		switch {
+		case p.currentIsHistogramCount:
+			p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))
+		case p.currentIsHistogramSum:
+			p.currentMetric.Histogram.SampleSum = proto.Float64(value)
+		case !math.IsNaN(p.currentBucket):
+			p.currentMetric.Histogram.Bucket = append(
+				p.currentMetric.Histogram.Bucket,
+				&dto.Bucket{
+					UpperBound:      proto.Float64(p.currentBucket),
+					CumulativeCount: proto.Uint64(uint64(value)),
+				},
+			)
+		}
+	default:
+		p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName())
+	}
+	if p.currentByte == '\n' {
+		return p.startOfLine
+	}
+	return p.startTimestamp
+}
+
+// startTimestamp represents the state where the next byte read from p.buf is
+// the start of the timestamp (or whitespace leading up to it).
+func (p *TextParser) startTimestamp() stateFn {
+	if p.skipBlankTab(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.readTokenUntilWhitespace(); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+	if err != nil {
+		// Create a more helpful error message.
+		p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMetric.TimestampMs = proto.Int64(timestamp)
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	if p.currentToken.Len() > 0 {
+		p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+		return nil
+	}
+	return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+	if p.currentMF.Help != nil {
+		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the docstring.
+	if p.readTokenUntilNewline(true); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	p.currentMF.Help = proto.String(p.currentToken.String())
+	return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+	if p.currentMF.Type != nil {
+		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+		return nil
+	}
+	// Rest of line is the type.
+	if p.readTokenUntilNewline(false); p.err != nil {
+		return nil // Unexpected end of input.
+	}
+	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+	if !ok {
+		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+		return nil
+	}
+	p.currentMF.Type = dto.MetricType(metricType).Enum()
+	return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+	p.err = ParseError{
+		Line: p.lineCount,
+		Msg:  msg,
+	}
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+			return
+		}
+	}
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+	if isBlankOrTab(p.currentByte) {
+		p.skipBlankTab()
+	}
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The
+// first byte considered is the byte already read (now in p.currentByte).  The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+	p.currentToken.Reset()
+	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+		p.currentToken.WriteByte(p.currentByte)
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first
+// byte considered is the byte already read (now in p.currentByte).  The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
+// recognized: '\\' translates into '\', and '\n' into a line-feed character.
+// All other escape sequences are invalid and cause an error.
+func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
+	p.currentToken.Reset()
+	escaped := false
+	for p.err == nil {
+		if recognizeEscapeSequence && escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '\n':
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+	}
+}
+
+// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a metric name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() {
+	p.currentToken.Reset()
+	// A UTF-8 metric name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
+	if !isValidMetricNameStart(p.currentByte) {
+		return
+	}
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+		if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
+			return
+		}
+	}
+}
+
+// readTokenAsLabelName copies a label name from p.buf into p.currentToken.
+// The first byte considered is the byte already read (now in p.currentByte).
+// The first byte not part of a label name is still copied into p.currentByte,
+// but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelName() {
+	p.currentToken.Reset()
+	// A UTF-8 label name must be quoted and may have escaped characters.
+	quoted := false
+	escaped := false
+	if !isValidLabelNameStart(p.currentByte) {
+		return
+	}
+	for p.err == nil {
+		if escaped {
+			switch p.currentByte {
+			case '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			case '"':
+				p.currentToken.WriteByte('"')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				return
+			}
+			escaped = false
+		} else {
+			switch p.currentByte {
+			case '"':
+				quoted = !quoted
+				if !quoted {
+					p.currentByte, p.err = p.buf.ReadByte()
+					return
+				}
+			case '\n':
+				p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+				return
+			case '\\':
+				escaped = true
+			default:
+				p.currentToken.WriteByte(p.currentByte)
+			}
+		}
+		p.currentByte, p.err = p.buf.ReadByte()
+		if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
+			return
+		}
+	}
+}
+
+// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.
+// In contrast to the other 'readTokenAs...' functions, which start with the
+// last read byte in p.currentByte, this method ignores p.currentByte and starts
+// with reading a new byte from p.buf. The first byte not part of a label value
+// is still copied into p.currentByte, but not into p.currentToken.
+func (p *TextParser) readTokenAsLabelValue() {
+	p.currentToken.Reset()
+	escaped := false
+	for {
+		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
+			return
+		}
+		if escaped {
+			switch p.currentByte {
+			case '"', '\\':
+				p.currentToken.WriteByte(p.currentByte)
+			case 'n':
+				p.currentToken.WriteByte('\n')
+			default:
+				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+				p.currentLabelPairs = nil
+				return
+			}
+			escaped = false
+			continue
+		}
+		switch p.currentByte {
+		case '"':
+			return
+		case '\n':
+			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
+			return
+		case '\\':
+			escaped = true
+		default:
+			p.currentToken.WriteByte(p.currentByte)
+		}
+	}
+}
+
+func (p *TextParser) setOrCreateCurrentMF() {
+	p.currentIsSummaryCount = false
+	p.currentIsSummarySum = false
+	p.currentIsHistogramCount = false
+	p.currentIsHistogramSum = false
+	name := p.currentToken.String()
+	if !p.scheme.IsValidMetricName(name) {
+		p.parseError(fmt.Sprintf("invalid metric name %q", name))
+		return
+	}
+	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
+		return
+	}
+	// Check whether this is the _sum or _count of a summary/histogram.
+	summaryName := summaryMetricName(name)
+	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
+		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+			if isCount(name) {
+				p.currentIsSummaryCount = true
+			}
+			if isSum(name) {
+				p.currentIsSummarySum = true
+			}
+			return
+		}
+	}
+	histogramName := histogramMetricName(name)
+	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
+		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+			if isCount(name) {
+				p.currentIsHistogramCount = true
+			}
+			if isSum(name) {
+				p.currentIsHistogramSum = true
+			}
+			return
+		}
+	}
+	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
+	p.metricFamiliesByName[name] = p.currentMF
+}
+
+func isValidLabelNameStart(b byte) bool {
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
+}
+
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
+}
+
+func isValidMetricNameStart(b byte) bool {
+	return isValidLabelNameStart(b) || b == ':'
+}
+
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+	return isValidLabelNameContinuation(b, quoted) || b == ':'
+}
+
+func isBlankOrTab(b byte) bool {
+	return b == ' ' || b == '\t'
+}
+
+func isCount(name string) bool {
+	return len(name) > 6 && name[len(name)-6:] == "_count"
+}
+
+func isSum(name string) bool {
+	return len(name) > 4 && name[len(name)-4:] == "_sum"
+}
+
+func isBucket(name string) bool {
+	return len(name) > 7 && name[len(name)-7:] == "_bucket"
+}
+
+func summaryMetricName(name string) string {
+	switch {
+	case isCount(name):
+		return name[:len(name)-6]
+	case isSum(name):
+		return name[:len(name)-4]
+	default:
+		return name
+	}
+}
+
+func histogramMetricName(name string) string {
+	switch {
+	case isCount(name):
+		return name[:len(name)-6]
+	case isSum(name):
+		return name[:len(name)-4]
+	case isBucket(name):
+		return name[:len(name)-7]
+	default:
+		return name
+	}
+}
+
+func parseFloat(s string) (float64, error) {
+	if strings.ContainsAny(s, "pP_") {
+		return 0, errors.New("unsupported character in float")
+	}
+	return strconv.ParseFloat(s, 64)
+}
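+
+// For example, parseFloat("3.14") succeeds, while parseFloat("0x1p-3") and
+// parseFloat("1_000") are rejected: strconv.ParseFloat would accept both, but
+// the text exposition format allows neither hex floats nor digit separators.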
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
new file mode 100644
index 0000000..460f554
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -0,0 +1,162 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for the purposes of aggregation, matching, and
+	// disposition dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended at or before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+	if a.ResolvedAt(ts) {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks the alert data for consistency and returns an error if it
+// is inconsistent.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return errors.New("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return errors.New("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %w", err)
+	}
+	if len(a.Labels) == 0 {
+		return errors.New("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %w", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
+
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+	for _, a := range as {
+		if !a.ResolvedAt(ts) {
+			return true
+		}
+	}
+	return false
+}
+
+// Status returns AlertFiring iff at least one of the alerts is firing.
+func (as Alerts) Status() AlertStatus {
+	if as.HasFiring() {
+		return AlertFiring
+	}
+	return AlertResolved
+}
+
+// StatusAt returns AlertFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+	if as.HasFiringAt(ts) {
+		return AlertFiring
+	}
+	return AlertResolved
+}
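+
+// A brief usage sketch from a consumer's point of view (the alert contents
+// are illustrative):
+//
+//	alerts := model.Alerts{
+//		{Labels: model.LabelSet{"alertname": "HighLatency"}, StartsAt: time.Now()},
+//	}
+//	sort.Sort(alerts)
+//	fmt.Println(alerts.Status()) // "firing" while no EndsAt is set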
diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go
new file mode 100644
index 0000000..fc4de41
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fingerprinting.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// Fingerprint provides a hash-capable representation of a Metric.
+// For our purposes, FNV-1A 64-bit is used.
+type Fingerprint uint64
+
+// FingerprintFromString transforms a string representation into a Fingerprint.
+func FingerprintFromString(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	return Fingerprint(num), err
+}
+
+// ParseFingerprint parses the input string into a fingerprint.
+func ParseFingerprint(s string) (Fingerprint, error) {
+	num, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return 0, err
+	}
+	return Fingerprint(num), nil
+}
+
+func (f Fingerprint) String() string {
+	return fmt.Sprintf("%016x", uint64(f))
+}
+
+// Fingerprints represents a collection of Fingerprint subject to a given
+// natural sorting scheme. It implements sort.Interface.
+type Fingerprints []Fingerprint
+
+// Len implements sort.Interface.
+func (f Fingerprints) Len() int {
+	return len(f)
+}
+
+// Less implements sort.Interface.
+func (f Fingerprints) Less(i, j int) bool {
+	return f[i] < f[j]
+}
+
+// Swap implements sort.Interface.
+func (f Fingerprints) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+// FingerprintSet is a set of Fingerprints.
+type FingerprintSet map[Fingerprint]struct{}
+
+// Equal returns true if both sets contain the same elements (and not more).
+func (s FingerprintSet) Equal(o FingerprintSet) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for k := range s {
+		if _, ok := o[k]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Intersection returns the elements contained in both sets.
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+	myLength, otherLength := len(s), len(o)
+	if myLength == 0 || otherLength == 0 {
+		return FingerprintSet{}
+	}
+
+	subSet := s
+	superSet := o
+
+	if otherLength < myLength {
+		subSet = o
+		superSet = s
+	}
+
+	out := FingerprintSet{}
+
+	for k := range subSet {
+		if _, ok := superSet[k]; ok {
+			out[k] = struct{}{}
+		}
+	}
+
+	return out
+}
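+
+// An illustrative sketch of set operations (the fingerprint values are
+// arbitrary):
+//
+//	a := model.FingerprintSet{1: {}, 2: {}}
+//	b := model.FingerprintSet{2: {}, 3: {}}
+//	fmt.Println(a.Intersection(b)) // map[0000000000000002:{}]
+//	fmt.Println(a.Equal(b))        // false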
diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 0000000..367afec
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
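+
+// A small sketch of how these helpers compose when hashing label pairs (the
+// strings are arbitrary; SeparatorByte is defined elsewhere in this package):
+//
+//	h := hashNew()
+//	h = hashAdd(h, "alertname")
+//	h = hashAddByte(h, SeparatorByte)
+//	h = hashAdd(h, "HighLatency")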
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 0000000..dfeb34b
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,229 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing the alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.
+	MetricNameLabel = "__name__"
+	// MetricTypeLabel is the label name indicating the metric type of a
+	// timeseries as per the PROM-39 proposal.
+	MetricTypeLabel = "__type__"
+	// MetricUnitLabel is the label name indicating the metric unit of a
+	// timeseries as per the PROM-39 proposal.
+	MetricUnitLabel = "__unit__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ScrapeIntervalLabel is the name of the label that holds the scrape interval
+	// used to scrape a target.
+	ScrapeIntervalLabel = "__scrape_interval__"
+
+	// ScrapeTimeoutLabel is the name of the label that holds the scrape
+	// timeout used to scrape a target.
+	ScrapeTimeoutLabel = "__scrape_timeout__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check, but faster than a match
+// against this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric.  It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if
+// NameValidationScheme is set to UTF8Validation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [ValidationScheme.IsValidLabelName] instead.
+func (ln LabelName) IsValid() bool {
+	return NameValidationScheme.IsValidLabelName(string(ln))
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [LegacyValidation.IsValidLabelName] instead.
+func (ln LabelName) IsValidLegacy() bool {
+	return LegacyValidation.IsValidLabelName(string(ln))
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.
+type LabelNames []LabelName
+
+func (l LabelNames) Len() int {
+	return len(l)
+}
+
+func (l LabelNames) Less(i, j int) bool {
+	return l[i] < l[j]
+}
+
+func (l LabelNames) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+func (l LabelNames) String() string {
+	labelStrings := make([]string, 0, len(l))
+	for _, label := range l {
+		labelStrings = append(labelStrings, string(label))
+	}
+	return strings.Join(labelStrings, ", ")
+}
+
+// A LabelValue is an associated value for a LabelName.
+type LabelValue string
+
+// IsValid returns true iff the string is valid UTF-8.
+func (lv LabelValue) IsValid() bool {
+	return utf8.ValidString(string(lv))
+}
+
+// LabelValues is a sortable LabelValue slice. It implements sort.Interface.
+type LabelValues []LabelValue
+
+func (l LabelValues) Len() int {
+	return len(l)
+}
+
+func (l LabelValues) Less(i, j int) bool {
+	return string(l[i]) < string(l[j])
+}
+
+func (l LabelValues) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
+
+// LabelPair pairs a name with a value.
+type LabelPair struct {
+	Name  LabelName
+	Value LabelValue
+}
+
+// LabelPairs is a sortable slice of LabelPair pointers. It implements
+// sort.Interface.
+type LabelPairs []*LabelPair
+
+func (l LabelPairs) Len() int {
+	return len(l)
+}
+
+func (l LabelPairs) Less(i, j int) bool {
+	switch {
+	case l[i].Name > l[j].Name:
+		return false
+	case l[i].Name < l[j].Name:
+		return true
+	case l[i].Value > l[j].Value:
+		return false
+	case l[i].Value < l[j].Value:
+		return true
+	default:
+		return false
+	}
+}
+
+func (l LabelPairs) Swap(i, j int) {
+	l[i], l[j] = l[j], l[i]
+}
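+
+// An illustrative sketch of sorting label pairs from a consumer's point of
+// view (the data is arbitrary):
+//
+//	pairs := model.LabelPairs{
+//		{Name: "method", Value: "POST"},
+//		{Name: "code", Value: "200"},
+//	}
+//	sort.Sort(pairs) // now ordered: code="200", method="POST"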
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
new file mode 100644
index 0000000..9de47b2
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -0,0 +1,158 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+)
+
+// A LabelSet is a collection of LabelName and LabelValue pairs. A LabelSet may
+// or may not be fully qualified, i.e. contain enough label pairs to resolve to
+// a single Metric in the data store. Operations on a LabelSet can therefore
+// yield a vector of Metric entities to which the LabelSet matches.
+type LabelSet map[LabelName]LabelValue
+
+// Validate checks whether all names and values in the label set
+// are valid.
+func (ls LabelSet) Validate() error {
+	for ln, lv := range ls {
+		if !ln.IsValid() {
+			return fmt.Errorf("invalid name %q", ln)
+		}
+		if !lv.IsValid() {
+			return fmt.Errorf("invalid value %q", lv)
+		}
+	}
+	return nil
+}
+
+// Equal returns true iff both label sets have exactly the same key/value pairs.
+func (ls LabelSet) Equal(o LabelSet) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for ln, lv := range ls {
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if olv != lv {
+			return false
+		}
+	}
+	return true
+}
+
+// Before compares the label sets, using the following criteria:
+//
+// If ls has fewer labels than o, it is before o. If it has more, it is not.
+//
+// If the number of labels is the same, the superset of all label names is
+// sorted alphanumerically. The first differing label pair found in that order
+// determines the outcome: If the label does not exist at all in ls, then ls is
+// before o, and vice versa. Otherwise the label value is compared
+// alphanumerically.
+//
+// If ls and o are equal, the method returns false.
+func (ls LabelSet) Before(o LabelSet) bool {
+	if len(ls) < len(o) {
+		return true
+	}
+	if len(ls) > len(o) {
+		return false
+	}
+
+	lns := make(LabelNames, 0, len(ls)+len(o))
+	for ln := range ls {
+		lns = append(lns, ln)
+	}
+	for ln := range o {
+		lns = append(lns, ln)
+	}
+	// It's probably not worth it to de-dup lns.
+	sort.Sort(lns)
+	for _, ln := range lns {
+		mlv, ok := ls[ln]
+		if !ok {
+			return true
+		}
+		olv, ok := o[ln]
+		if !ok {
+			return false
+		}
+		if mlv < olv {
+			return true
+		}
+		if mlv > olv {
+			return false
+		}
+	}
+	return false
+}
+
+// Clone returns a copy of the label set.
+func (ls LabelSet) Clone() LabelSet {
+	lsn := make(LabelSet, len(ls))
+	for ln, lv := range ls {
+		lsn[ln] = lv
+	}
+	return lsn
+}
+
+// Merge is a helper function to non-destructively merge two label sets.
+func (ls LabelSet) Merge(other LabelSet) LabelSet {
+	result := make(LabelSet, len(ls))
+
+	for k, v := range ls {
+		result[k] = v
+	}
+
+	for k, v := range other {
+		result[k] = v
+	}
+
+	return result
+}
+
+// Fingerprint returns the LabelSet's fingerprint.
+func (ls LabelSet) Fingerprint() Fingerprint {
+	return labelSetToFingerprint(ls)
+}
+
+// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (ls LabelSet) FastFingerprint() Fingerprint {
+	return labelSetToFastFingerprint(ls)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
+	var m map[LabelName]LabelValue
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+	// encoding/json only unmarshals maps of the form map[string]T. It treats
+	// LabelName as a string and does not call its UnmarshalJSON method.
+	// Thus, we have to replicate the validation here.
+	for ln := range m {
+		if !ln.IsValid() {
+			return fmt.Errorf("%q is not a valid label name", ln)
+		}
+	}
+	*ls = LabelSet(m)
+	return nil
+}
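+
+// A short usage sketch from a consumer's point of view (label names and
+// values are arbitrary):
+//
+//	base := model.LabelSet{"job": "api"}
+//	extra := model.LabelSet{"instance": "host:9090"}
+//	merged := base.Merge(extra)      // contains both "job" and "instance"
+//	fmt.Println(base.Before(merged)) // true: base has fewer labels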
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 0000000..abb2c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"bytes"
+	"slices"
+	"strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+	var lna [32]string // On stack to avoid memory allocation for sorting names.
+	labelNames := lna[:0]
+	for name := range l {
+		labelNames = append(labelNames, string(name))
+	}
+	slices.Sort(labelNames)
+	var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+	b := bytes.NewBuffer(bytea[:0])
+	b.WriteByte('{')
+	for i, name := range labelNames {
+		if i > 0 {
+			b.WriteString(", ")
+		}
+		b.WriteString(name)
+		b.WriteByte('=')
+		b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+	}
+	b.WriteByte('}')
+	return b.String()
+}
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000..447ab8a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
new file mode 100644
index 0000000..3feebf3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -0,0 +1,593 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	dto "github.com/prometheus/client_model/go"
+	"go.yaml.in/yaml/v2"
+	"google.golang.org/protobuf/proto"
+)
+
+var (
+	// NameValidationScheme determines the global default method of name
+	// validation used by all calls to IsValidMetricName() and
+	// LabelName.IsValid().
+	//
+	// Deprecated: This variable should not be used and might be removed in the
+	// far future. If you wish to stick to the legacy name validation, use the
+	// `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+	// instead. This variable is here as an escape hatch for emergency cases,
+	// given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+	// to delay UTF-8 migrations or to aid in debugging unforeseen results of
+	// the change. In such a case, a temporary assignment to `LegacyValidation`
+	// in the `init()` function of your main.go could be considered.
+	//
+	// Historically we opted for a global variable to feature-gate different
+	// validation schemes in operations that were not otherwise easily adjustable
+	// (e.g. Labels yaml unmarshaling). That could have been a mistake; a separate
+	// Labels structure or package might have been a better choice. Given that
+	// the change was made and many have already upgraded common, we leave this
+	// as-is, with this warning and a learning for the future.
+	NameValidationScheme = UTF8Validation
+
+	// NameEscapingScheme defines the default way that names will be escaped when
+	// presented to systems that do not support UTF-8 names. If the Content-Type
+	// "escaping" term is specified, that will override this value.
+	// NameEscapingScheme should not be set to the NoEscaping value. That string
+	// is used in content negotiation to indicate that a system supports UTF-8 and
+	// has that feature enabled.
+	NameEscapingScheme = UnderscoreEscaping
+)
+
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+	// UnsetValidation represents an undefined ValidationScheme.
+	// Should not be used in practice.
+	UnsetValidation ValidationScheme = iota
+
+	// LegacyValidation is a setting that requires that all metric and label names
+	// conform to the original Prometheus character requirements described by
+	// MetricNameRE and LabelNameRE.
+	LegacyValidation
+
+	// UTF8Validation only requires that metric and label names be valid UTF-8
+	// strings.
+	UTF8Validation
+)
+
+var _ interface {
+	yaml.Marshaler
+	yaml.Unmarshaler
+	json.Marshaler
+	json.Unmarshaler
+	fmt.Stringer
+} = new(ValidationScheme)
+
+// String returns the string representation of s.
+func (s ValidationScheme) String() string {
+	switch s {
+	case UnsetValidation:
+		return "unset"
+	case LegacyValidation:
+		return "legacy"
+	case UTF8Validation:
+		return "utf8"
+	default:
+		panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+	}
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (s ValidationScheme) MarshalYAML() (any, error) {
+	switch s {
+	case UnsetValidation:
+		return "", nil
+	case LegacyValidation, UTF8Validation:
+		return s.String(), nil
+	default:
+		panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+	}
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error {
+	var scheme string
+	if err := unmarshal(&scheme); err != nil {
+		return err
+	}
+	return s.Set(scheme)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s ValidationScheme) MarshalJSON() ([]byte, error) {
+	switch s {
+	case UnsetValidation:
+		return json.Marshal("")
+	case UTF8Validation, LegacyValidation:
+		return json.Marshal(s.String())
+	default:
+		return nil, fmt.Errorf("unhandled ValidationScheme: %d", s)
+	}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error {
+	var repr string
+	if err := json.Unmarshal(bytes, &repr); err != nil {
+		return err
+	}
+	return s.Set(repr)
+}
+
+// Set implements the pflag.Value interface.
+func (s *ValidationScheme) Set(text string) error {
+	switch text {
+	case "":
+		// Don't change the value.
+	case LegacyValidation.String():
+		*s = LegacyValidation
+	case UTF8Validation.String():
+		*s = UTF8Validation
+	default:
+		return fmt.Errorf("unrecognized ValidationScheme: %q", text)
+	}
+	return nil
+}
+
+// IsValidMetricName returns whether metricName is valid according to s.
+func (s ValidationScheme) IsValidMetricName(metricName string) bool {
+	switch s {
+	case LegacyValidation:
+		if len(metricName) == 0 {
+			return false
+		}
+		for i, b := range metricName {
+			if !isValidLegacyRune(b, i) {
+				return false
+			}
+		}
+		return true
+	case UTF8Validation:
+		if len(metricName) == 0 {
+			return false
+		}
+		return utf8.ValidString(metricName)
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String()))
+	}
+}
+
+// IsValidLabelName returns whether labelName is valid according to s.
+func (s ValidationScheme) IsValidLabelName(labelName string) bool {
+	switch s {
+	case LegacyValidation:
+		if len(labelName) == 0 {
+			return false
+		}
+		for i, b := range labelName {
+			// TODO: Apply De Morgan's law. Make sure there are tests for this.
+			if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
+				return false
+			}
+		}
+		return true
+	case UTF8Validation:
+		if len(labelName) == 0 {
+			return false
+		}
+		return utf8.ValidString(labelName)
+	default:
+		panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s))
+	}
+}
+
+// Type implements the pflag.Value interface.
+func (ValidationScheme) Type() string {
+	return "validationScheme"
+}
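
To make the difference between the two schemes concrete, a minimal standalone sketch (assuming the usual github.com/prometheus/common/model import path):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "http.requests.total" // valid UTF-8, but dots are not legacy-valid

	fmt.Println(model.LegacyValidation.IsValidMetricName(name)) // false
	fmt.Println(model.UTF8Validation.IsValidMetricName(name))   // true

	// Set and String cover the flag, YAML, and JSON representations.
	var s model.ValidationScheme
	if err := s.Set("legacy"); err != nil {
		panic(err)
	}
	fmt.Println(s) // legacy
}
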
+
+type EscapingScheme int
+
+const (
+	// NoEscaping indicates that a name will not be escaped. Unescaped names that
+	// do not conform to the legacy validity check will use a new exposition
+	// format syntax that will be officially standardized in future versions.
+	NoEscaping EscapingScheme = iota
+
+	// UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+	UnderscoreEscaping
+
+	// DotsEscaping is similar to UnderscoreEscaping, except that dots are
+	// converted to `_dot_` and pre-existing underscores are converted to `__`.
+	DotsEscaping
+
+	// ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+	// characters with the unicode value, surrounded by underscores. Single
+	// underscores are replaced with double underscores.
+	ValueEncodingEscaping
+)
+
+const (
+	// EscapingKey is the key in an Accept or Content-Type header that defines how
+	// metric and label names that do not conform to the legacy character
+	// requirements should be escaped when being scraped by a legacy prometheus
+	// system. If a system does not explicitly pass an escaping parameter in the
+	// Accept header, the default NameEscapingScheme will be used.
+	EscapingKey = "escaping"
+
+	// Possible values for Escaping Key.
+	AllowUTF8         = "allow-utf-8" // No escaping required.
+	EscapeUnderscores = "underscores"
+	EscapeDots        = "dots"
+	EscapeValues      = "values"
+)
+
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
+// A Metric is similar to a LabelSet, but the key difference is that a Metric is
+// a singleton and refers to one and only one stream of samples.
+type Metric LabelSet
+
+// Equal compares the metrics.
+func (m Metric) Equal(o Metric) bool {
+	return LabelSet(m).Equal(LabelSet(o))
+}
+
+// Before compares the metrics' underlying label sets.
+func (m Metric) Before(o Metric) bool {
+	return LabelSet(m).Before(LabelSet(o))
+}
+
+// Clone returns a copy of the Metric.
+func (m Metric) Clone() Metric {
+	clone := make(Metric, len(m))
+	for k, v := range m {
+		clone[k] = v
+	}
+	return clone
+}
+
+func (m Metric) String() string {
+	metricName, hasName := m[MetricNameLabel]
+	numLabels := len(m) - 1
+	if !hasName {
+		numLabels = len(m)
+	}
+	labelStrings := make([]string, 0, numLabels)
+	for label, value := range m {
+		if label != MetricNameLabel {
+			labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value))
+		}
+	}
+
+	switch numLabels {
+	case 0:
+		if hasName {
+			return string(metricName)
+		}
+		return "{}"
+	default:
+		sort.Strings(labelStrings)
+		return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
+	}
+}
+
+// Fingerprint returns a Metric's Fingerprint.
+func (m Metric) Fingerprint() Fingerprint {
+	return LabelSet(m).Fingerprint()
+}
+
+// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing
+// algorithm, which is, however, more susceptible to hash collisions.
+func (m Metric) FastFingerprint() Fingerprint {
+	return LabelSet(m).FastFingerprint()
+}
+
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// when the legacy validation scheme is selected, and iff it is valid UTF-8
+// when the UTF8Validation scheme is selected.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [ValidationScheme.IsValidMetricName] instead.
+func IsValidMetricName(n LabelValue) bool {
+	return NameValidationScheme.IsValidMetricName(string(n))
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
+// This function, however, does not use MetricNameRE for the check but a much
+// faster hardcoded implementation.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [LegacyValidation.IsValidMetricName] instead.
+func IsValidLegacyMetricName(n string) bool {
+	return LegacyValidation.IsValidMetricName(n)
+}
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+	if v == nil {
+		return nil
+	}
+
+	if scheme == NoEscaping {
+		return v
+	}
+
+	out := &dto.MetricFamily{
+		Help: v.Help,
+		Type: v.Type,
+		Unit: v.Unit,
+	}
+
+	// If the name is nil, copy it as-is; don't try to escape.
+	if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
+		out.Name = v.Name
+	} else {
+		out.Name = proto.String(EscapeName(v.GetName(), scheme))
+	}
+	for _, m := range v.Metric {
+		if !metricNeedsEscaping(m) {
+			out.Metric = append(out.Metric, m)
+			continue
+		}
+
+		escaped := &dto.Metric{
+			Gauge:       m.Gauge,
+			Counter:     m.Counter,
+			Summary:     m.Summary,
+			Untyped:     m.Untyped,
+			Histogram:   m.Histogram,
+			TimestampMs: m.TimestampMs,
+		}
+
+		for _, l := range m.Label {
+			if l.GetName() == MetricNameLabel {
+				if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
+					escaped.Label = append(escaped.Label, l)
+					continue
+				}
+				escaped.Label = append(escaped.Label, &dto.LabelPair{
+					Name:  proto.String(MetricNameLabel),
+					Value: proto.String(EscapeName(l.GetValue(), scheme)),
+				})
+				continue
+			}
+			if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
+				escaped.Label = append(escaped.Label, l)
+				continue
+			}
+			escaped.Label = append(escaped.Label, &dto.LabelPair{
+				Name:  proto.String(EscapeName(l.GetName(), scheme)),
+				Value: l.Value,
+			})
+		}
+		out.Metric = append(out.Metric, escaped)
+	}
+	return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+	for _, l := range m.Label {
+		if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
+			return true
+		}
+		if !IsValidLegacyMetricName(l.GetName()) {
+			return true
+		}
+	}
+	return false
+}
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned (especially NoEscaping, which is by definition a
+// no-op). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	var escaped strings.Builder
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		for i, b := range name {
+			if isValidLegacyRune(b, i) {
+				escaped.WriteRune(b)
+			} else {
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	case DotsEscaping:
+		// Do not early return for legacy valid names, we still escape underscores.
+		for i, b := range name {
+			switch {
+			case b == '_':
+				escaped.WriteString("__")
+			case b == '.':
+				escaped.WriteString("_dot_")
+			case isValidLegacyRune(b, i):
+				escaped.WriteRune(b)
+			default:
+				escaped.WriteString("__")
+			}
+		}
+		return escaped.String()
+	case ValueEncodingEscaping:
+		if IsValidLegacyMetricName(name) {
+			return name
+		}
+		escaped.WriteString("U__")
+		for i, b := range name {
+			switch {
+			case b == '_':
+				escaped.WriteString("__")
+			case isValidLegacyRune(b, i):
+				escaped.WriteRune(b)
+			case !utf8.ValidRune(b):
+				escaped.WriteString("_FFFD_")
+			default:
+				escaped.WriteRune('_')
+				escaped.WriteString(strconv.FormatInt(int64(b), 16))
+				escaped.WriteRune('_')
+			}
+		}
+		return escaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
+
+// lower function taken from strconv.atoi.
+func lower(c byte) byte {
+	return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme if possible. Some schemes are partially or totally non-roundtrippable.
+// If any error is encountered, the original input is returned.
+func UnescapeName(name string, scheme EscapingScheme) string {
+	if len(name) == 0 {
+		return name
+	}
+	switch scheme {
+	case NoEscaping:
+		return name
+	case UnderscoreEscaping:
+		// It is not possible to unescape from underscore replacement.
+		return name
+	case DotsEscaping:
+		name = strings.ReplaceAll(name, "_dot_", ".")
+		name = strings.ReplaceAll(name, "__", "_")
+		return name
+	case ValueEncodingEscaping:
+		escapedName, found := strings.CutPrefix(name, "U__")
+		if !found {
+			return name
+		}
+
+		var unescaped strings.Builder
+	TOP:
+		for i := 0; i < len(escapedName); i++ {
+			// All non-underscores are treated normally.
+			if escapedName[i] != '_' {
+				unescaped.WriteByte(escapedName[i])
+				continue
+			}
+			i++
+			if i >= len(escapedName) {
+				return name
+			}
+			// A double underscore is a single underscore.
+			if escapedName[i] == '_' {
+				unescaped.WriteByte('_')
+				continue
+			}
+			// We think we are in a UTF-8 code, process it.
+			var utf8Val uint
+			for j := 0; i < len(escapedName); j++ {
+				// This is too many characters for a utf8 value based on the MaxRune
+				// value of '\U0010FFFF'.
+				if j >= 6 {
+					return name
+				}
+				// Found a closing underscore, convert to a rune, check validity, and append.
+				if escapedName[i] == '_' {
+					utf8Rune := rune(utf8Val)
+					if !utf8.ValidRune(utf8Rune) {
+						return name
+					}
+					unescaped.WriteRune(utf8Rune)
+					continue TOP
+				}
+				r := lower(escapedName[i])
+				utf8Val *= 16
+				switch {
+				case r >= '0' && r <= '9':
+					utf8Val += uint(r) - '0'
+				case r >= 'a' && r <= 'f':
+					utf8Val += uint(r) - 'a' + 10
+				default:
+					return name
+				}
+				i++
+			}
+			// Didn't find closing underscore, invalid.
+			return name
+		}
+		return unescaped.String()
+	default:
+		panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+	}
+}
+
+func isValidLegacyRune(b rune, i int) bool {
+	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+	switch e {
+	case NoEscaping:
+		return AllowUTF8
+	case UnderscoreEscaping:
+		return EscapeUnderscores
+	case DotsEscaping:
+		return EscapeDots
+	case ValueEncodingEscaping:
+		return EscapeValues
+	default:
+		panic(fmt.Sprintf("unknown format scheme %d", e))
+	}
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+	if s == "" {
+		return NoEscaping, errors.New("got empty string instead of escaping scheme")
+	}
+	switch s {
+	case AllowUTF8:
+		return NoEscaping, nil
+	case EscapeUnderscores:
+		return UnderscoreEscaping, nil
+	case EscapeDots:
+		return DotsEscaping, nil
+	case EscapeValues:
+		return ValueEncodingEscaping, nil
+	default:
+		return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+	}
+}
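
The escaping schemes above are easiest to see side by side. A minimal standalone sketch (same import-path assumption as above) of escaping a dotted name, where only value encoding round-trips:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	name := "my.metric" // '.' is not a valid legacy character

	fmt.Println(model.EscapeName(name, model.UnderscoreEscaping)) // my_metric
	fmt.Println(model.EscapeName(name, model.DotsEscaping))       // my_dot_metric

	escaped := model.EscapeName(name, model.ValueEncodingEscaping)
	fmt.Println(escaped) // U__my_2e_metric

	// Only value encoding round-trips; underscore escaping is lossy.
	fmt.Println(model.UnescapeName(escaped, model.ValueEncodingEscaping)) // my.metric
}
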
diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go
new file mode 100644
index 0000000..a7b9691
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/model.go
@@ -0,0 +1,16 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package model contains common data structures that are shared across
+// Prometheus components and libraries.
+package model
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
new file mode 100644
index 0000000..dc8a002
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -0,0 +1,142 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"sort"
+)
+
+// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is
+// used to separate label names, label values, and other strings from each other
+// when calculating their combined hash value (aka signature aka fingerprint).
+const SeparatorByte byte = 255
+
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
+
+// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
+// given label set. (Collisions are possible but unlikely if the number of label
+// sets the function is applied to is small.)
+func LabelsToSignature(labels map[string]string) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make([]string, 0, len(labels))
+	for labelName := range labels {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Strings(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, labelName)
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, labels[labelName])
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet
+// as a parameter (rather than a label map) and returns a Fingerprint.
+func labelSetToFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	labelNames := make(LabelNames, 0, len(ls))
+	for labelName := range ls {
+		labelNames = append(labelNames, labelName)
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(ls[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return Fingerprint(sum)
+}
+
+// labelSetToFastFingerprint works similarly to labelSetToFingerprint but uses a
+// faster and less allocation-heavy hash function, which is more prone to
+// hash collisions. Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as a
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames in the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as a
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
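
A minimal standalone sketch of the signature helpers (the label values here are made up for illustration), showing that SignatureForLabels sorts its label arguments, so argument order does not affect the result:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		model.MetricNameLabel: "http_requests_total",
		"method":              "GET",
		"code":                "200",
	}

	// Full fingerprint over all labels.
	fmt.Println(m.Fingerprint())

	// Signature restricted to selected labels; the variadic arguments are
	// sorted internally, so their order does not matter.
	a := model.SignatureForLabels(m, "method", "code")
	b := model.SignatureForLabels(m, "code", "method")
	fmt.Println(a == b) // true
}
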
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 0000000..8f91a97
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// A Matcher describes how to match the value of a given label.
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return errors.New("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return errors.New("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %w", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return errors.New("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return errors.New("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return errors.New("start time must be before end time")
+	}
+	if s.CreatedBy == "" {
+		return errors.New("creator information missing")
+	}
+	if s.Comment == "" {
+		return errors.New("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return errors.New("creation timestamp missing")
+	}
+	return nil
+}
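
A minimal standalone sketch of constructing and validating a Silence (all field values are hypothetical):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Silence{
		Matchers: []*model.Matcher{
			{Name: "job", Value: "node", IsRegex: false},
		},
		StartsAt:  time.Now(),
		EndsAt:    time.Now().Add(2 * time.Hour),
		CreatedAt: time.Now(),
		CreatedBy: "ops@example.com",
		Comment:   "planned maintenance",
	}
	fmt.Println(s.Validate()) // <nil>

	s.Matchers = nil
	fmt.Println(s.Validate()) // at least one matcher required
}
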
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 0000000..1730b0f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,359 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// minimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.
+	nanosPerTick = int64(minimumTick / time.Nanosecond)
+
+	// Earliest is the earliest Time representable. Handy for
+	// initializing a high watermark.
+	Earliest = Time(math.MinInt64)
+	// Latest is the latest Time representable. Handy for initializing
+	// a low watermark.
+	Latest = Time(math.MaxInt64)
+)
+
+// Time is the number of milliseconds since the epoch
+// (1970-01-01 00:00 UTC) excluding leap seconds.
+type Time int64
+
+// Interval describes an interval between two timestamps.
+type Interval struct {
+	Start, End Time
+}
+
+// Now returns the current time as a Time.
+func Now() Time {
+	return TimeFromUnixNano(time.Now().UnixNano())
+}
+
+// TimeFromUnix returns the Time equivalent to the Unix Time t
+// provided in seconds.
+func TimeFromUnix(t int64) Time {
+	return Time(t * second)
+}
+
+// TimeFromUnixNano returns the Time equivalent to the Unix Time
+// t provided in nanoseconds.
+func TimeFromUnixNano(t int64) Time {
+	return Time(t / nanosPerTick)
+}
+
+// Equal reports whether two Times represent the same instant.
+func (t Time) Equal(o Time) bool {
+	return t == o
+}
+
+// Before reports whether the Time t is before o.
+func (t Time) Before(o Time) bool {
+	return t < o
+}
+
+// After reports whether the Time t is after o.
+func (t Time) After(o Time) bool {
+	return t > o
+}
+
+// Add returns the Time t + d.
+func (t Time) Add(d time.Duration) Time {
+	return t + Time(d/minimumTick)
+}
+
+// Sub returns the Duration t - o.
+func (t Time) Sub(o Time) time.Duration {
+	return time.Duration(t-o) * minimumTick
+}
+
+// Time returns the time.Time representation of t.
+func (t Time) Time() time.Time {
+	return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)
+}
+
+// Unix returns t as a Unix time, the number of seconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) Unix() int64 {
+	return int64(t) / second
+}
+
+// UnixNano returns t as a Unix time, the number of nanoseconds elapsed
+// since January 1, 1970 UTC.
+func (t Time) UnixNano() int64 {
+	return int64(t) * nanosPerTick
+}
+
+// The number of digits after the dot.
+var dotPrecision = int(math.Log10(float64(second)))
+
+// String returns a string representation of the Time.
+func (t Time) String() string {
+	return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (t Time) MarshalJSON() ([]byte, error) {
+	return []byte(t.String()), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (t *Time) UnmarshalJSON(b []byte) error {
+	p := strings.Split(string(b), ".")
+	switch len(p) {
+	case 1:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		*t = Time(v * second)
+
+	case 2:
+		v, err := strconv.ParseInt(p[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		v *= second
+
+		prec := dotPrecision - len(p[1])
+		if prec < 0 {
+			p[1] = p[1][:dotPrecision]
+		} else if prec > 0 {
+			p[1] += strings.Repeat("0", prec)
+		}
+
+		va, err := strconv.ParseInt(p[1], 10, 32)
+		if err != nil {
+			return err
+		}
+
+		// If the value was something like -0.1, the sign is lost in the
+		// parsing because of the leading zero; this ensures that we capture it.
+		if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 {
+			*t = Time(v+va) * -1
+		} else {
+			*t = Time(v + va)
+		}
+
+	default:
+		return fmt.Errorf("invalid time %q", string(b))
+	}
+	return nil
+}
+
+// Duration wraps time.Duration. It is used to parse the custom duration format
+// from YAML.
+// This type should not propagate beyond the scope of input/output processing.
+type Duration time.Duration
+
+// Set implements pflag/flag.Value.
+func (d *Duration) Set(s string) error {
+	var err error
+	*d, err = ParseDuration(s)
+	return err
+}
+
+// Type implements pflag.Value.
+func (*Duration) Type() string {
+	return "duration"
+}
+
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+	pos  int
+	mult uint64
+}{
+	"ms": {7, uint64(time.Millisecond)},
+	"s":  {6, uint64(time.Second)},
+	"m":  {5, uint64(time.Minute)},
+	"h":  {4, uint64(time.Hour)},
+	"d":  {3, uint64(24 * time.Hour)},
+	"w":  {2, uint64(7 * 24 * time.Hour)},
+	"y":  {1, uint64(365 * 24 * time.Hour)},
+}
+
+// ParseDuration parses a string into a time.Duration, assuming that a year
+// always has 365d, a week always has 7d, and a day always has 24h.
+// Negative durations are not supported.
+func ParseDuration(s string) (Duration, error) {
+	switch s {
+	case "0":
+		// Allow 0 without a unit.
+		return 0, nil
+	case "":
+		return 0, errors.New("empty duration string")
+	}
+
+	orig := s
+	var dur uint64
+	lastUnitPos := 0
+
+	for s != "" {
+		if !isdigit(s[0]) {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		// Consume [0-9]*
+		i := 0
+		for ; i < len(s) && isdigit(s[i]); i++ {
+		}
+		v, err := strconv.ParseUint(s[:i], 10, 0)
+		if err != nil {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		s = s[i:]
+
+		// Consume unit.
+		for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+		}
+		if i == 0 {
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		u := s[:i]
+		s = s[i:]
+		unit, ok := unitMap[u]
+		if !ok {
+			return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+		}
+		if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+			return 0, fmt.Errorf("not a valid duration string: %q", orig)
+		}
+		lastUnitPos = unit.pos
+		// Check if the provided duration overflows time.Duration (> ~290 years).
+		if v > 1<<63/unit.mult {
+			return 0, errors.New("duration out of range")
+		}
+		dur += v * unit.mult
+		if dur > 1<<63-1 {
+			return 0, errors.New("duration out of range")
+		}
+	}
+
+	return Duration(dur), nil
+}
+
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+	if s == "" || s[0] != '-' {
+		return ParseDuration(s)
+	}
+
+	d, err := ParseDuration(s[1:])
+
+	return -d, err
+}
+
+func (d Duration) String() string {
+	var (
+		ms   = int64(time.Duration(d) / time.Millisecond)
+		r    = ""
+		sign = ""
+	)
+
+	if ms == 0 {
+		return "0s"
+	}
+
+	if ms < 0 {
+		sign, ms = "-", -ms
+	}
+
+	f := func(unit string, mult int64, exact bool) {
+		if exact && ms%mult != 0 {
+			return
+		}
+		if v := ms / mult; v > 0 {
+			r += fmt.Sprintf("%d%s", v, unit)
+			ms -= v * mult
+		}
+	}
+
+	// Only format years and weeks if the remainder is zero, as it is often
+	// easier to read 90d than 12w6d.
+	f("y", 1000*60*60*24*365, true)
+	f("w", 1000*60*60*24*7, true)
+
+	f("d", 1000*60*60*24, false)
+	f("h", 1000*60*60, false)
+	f("m", 1000*60, false)
+	f("s", 1000, false)
+	f("ms", 1, false)
+
+	return sign + r
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	return json.Marshal(d.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *Duration) UnmarshalJSON(bytes []byte) error {
+	var s string
+	if err := json.Unmarshal(bytes, &s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
+
+// MarshalText implements the encoding.TextMarshaler interface.
+func (d *Duration) MarshalText() ([]byte, error) {
+	return []byte(d.String()), nil
+}
+
+// UnmarshalText implements the encoding.TextUnmarshaler interface.
+func (d *Duration) UnmarshalText(text []byte) error {
+	var err error
+	*d, err = ParseDuration(string(text))
+	return err
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (d Duration) MarshalYAML() (interface{}, error) {
+	return d.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	dur, err := ParseDuration(s)
+	if err != nil {
+		return err
+	}
+	*d = dur
+	return nil
+}
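
A minimal standalone sketch of the duration and time helpers, showing the biggest-to-smallest unit-ordering rule and the millisecond resolution of model.Time:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	// Units must appear from biggest to smallest, each at most once.
	d, err := model.ParseDuration("1h30m")
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1h30m

	if _, err := model.ParseDuration("30m1h"); err != nil {
		fmt.Println(err) // not a valid duration string: "30m1h"
	}

	// model.Time has millisecond resolution.
	t := model.TimeFromUnix(1700000000)
	fmt.Println(t.Add(1500 * time.Millisecond)) // 1700000001.5
}
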
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
new file mode 100644
index 0000000..a9995a3
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -0,0 +1,365 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which can appear in a real Sample and is thus not suitable for
+// signaling a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
+
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
+type Sample struct {
+	Metric    Metric           `json:"metric"`
+	Value     SampleValue      `json:"value"`
+	Timestamp Time             `json:"timestamp"`
+	Histogram *SampleHistogram `json:"histogram"`
+}
+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality are defined by SampleValue.Equal.
+func (s *Sample) Equal(o *Sample) bool {
+	if s == o {
+		return true
+	}
+
+	if !s.Metric.Equal(o.Metric) {
+		return false
+	}
+	if !s.Timestamp.Equal(o.Timestamp) {
+		return false
+	}
+	if s.Histogram != nil {
+		return s.Histogram.Equal(o.Histogram)
+	}
+	return s.Value.Equal(o.Value)
+}
+
+func (s Sample) String() string {
+	if s.Histogram != nil {
+		return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		})
+	}
+	return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
+		Timestamp: s.Timestamp,
+		Value:     s.Value,
+	})
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Sample) MarshalJSON() ([]byte, error) {
+	if s.Histogram != nil {
+		v := struct {
+			Metric    Metric              `json:"metric"`
+			Histogram SampleHistogramPair `json:"histogram"`
+		}{
+			Metric: s.Metric,
+			Histogram: SampleHistogramPair{
+				Timestamp: s.Timestamp,
+				Histogram: s.Histogram,
+			},
+		}
+		return json.Marshal(&v)
+	}
+	v := struct {
+		Metric Metric     `json:"metric"`
+		Value  SamplePair `json:"value"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+	}
+	return json.Marshal(&v)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Sample) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric    Metric              `json:"metric"`
+		Value     SamplePair          `json:"value"`
+		Histogram SampleHistogramPair `json:"histogram"`
+	}{
+		Metric: s.Metric,
+		Value: SamplePair{
+			Timestamp: s.Timestamp,
+			Value:     s.Value,
+		},
+		Histogram: SampleHistogramPair{
+			Timestamp: s.Timestamp,
+			Histogram: s.Histogram,
+		},
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	s.Metric = v.Metric
+	if v.Histogram.Histogram != nil {
+		s.Timestamp = v.Histogram.Timestamp
+		s.Histogram = v.Histogram.Histogram
+	} else {
+		s.Timestamp = v.Value.Timestamp
+		s.Value = v.Value.Value
+	}
+
+	return nil
+}
+
+// Samples is a sortable Sample slice. It implements sort.Interface.
+type Samples []*Sample
+
+func (s Samples) Len() int {
+	return len(s)
+}
+
+// Less compares first the metrics, then the timestamp.
+func (s Samples) Less(i, j int) bool {
+	switch {
+	case s[i].Metric.Before(s[j].Metric):
+		return true
+	case s[j].Metric.Before(s[i].Metric):
+		return false
+	case s[i].Timestamp.Before(s[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+func (s Samples) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (s Samples) Equal(o Samples) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, sample := range s {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// SampleStream is a stream of Values belonging to an attached Metric.
+type SampleStream struct {
+	Metric     Metric                `json:"metric"`
+	Values     []SamplePair          `json:"values"`
+	Histograms []SampleHistogramPair `json:"histograms"`
+}
+
+func (ss SampleStream) String() string {
+	valuesLength := len(ss.Values)
+	vals := make([]string, valuesLength+len(ss.Histograms))
+	for i, v := range ss.Values {
+		vals[i] = v.String()
+	}
+	for i, v := range ss.Histograms {
+		vals[i+valuesLength] = v.String()
+	}
+	return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
+}
+
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+	switch {
+	case len(ss.Histograms) > 0 && len(ss.Values) > 0:
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Values     []SamplePair          `json:"values"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Values:     ss.Values,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	case len(ss.Histograms) > 0:
+		v := struct {
+			Metric     Metric                `json:"metric"`
+			Histograms []SampleHistogramPair `json:"histograms"`
+		}{
+			Metric:     ss.Metric,
+			Histograms: ss.Histograms,
+		}
+		return json.Marshal(&v)
+	default:
+		v := struct {
+			Metric Metric       `json:"metric"`
+			Values []SamplePair `json:"values"`
+		}{
+			Metric: ss.Metric,
+			Values: ss.Values,
+		}
+		return json.Marshal(&v)
+	}
+}
+
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+	v := struct {
+		Metric     Metric                `json:"metric"`
+		Values     []SamplePair          `json:"values"`
+		Histograms []SampleHistogramPair `json:"histograms"`
+	}{
+		Metric:     ss.Metric,
+		Values:     ss.Values,
+		Histograms: ss.Histograms,
+	}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	ss.Metric = v.Metric
+	ss.Values = v.Values
+	ss.Histograms = v.Histograms
+
+	return nil
+}
+
+// Scalar is a scalar value evaluated at the set timestamp.
+type Scalar struct {
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
+
+func (s Scalar) String() string {
+	return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp)
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s Scalar) MarshalJSON() ([]byte, error) {
+	v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
+	return json.Marshal([...]interface{}{s.Timestamp, v})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *Scalar) UnmarshalJSON(b []byte) error {
+	var f string
+	v := [...]interface{}{&s.Timestamp, &f}
+
+	if err := json.Unmarshal(b, &v); err != nil {
+		return err
+	}
+
+	value, err := strconv.ParseFloat(f, 64)
+	if err != nil {
+		return fmt.Errorf("error parsing sample value: %w", err)
+	}
+	s.Value = SampleValue(value)
+	return nil
+}
+
+// String is a string value evaluated at the set timestamp.
+type String struct {
+	Value     string `json:"value"`
+	Timestamp Time   `json:"timestamp"`
+}
+
+func (s *String) String() string {
+	return s.Value
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s String) MarshalJSON() ([]byte, error) {
+	return json.Marshal([]interface{}{s.Timestamp, s.Value})
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *String) UnmarshalJSON(b []byte) error {
+	v := [...]interface{}{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Vector is essentially an alias for Samples, with the added
+// contract that all Samples in a Vector have the same timestamp.
+type Vector []*Sample
+
+func (vec Vector) String() string {
+	entries := make([]string, len(vec))
+	for i, s := range vec {
+		entries[i] = s.String()
+	}
+	return strings.Join(entries, "\n")
+}
+
+func (vec Vector) Len() int      { return len(vec) }
+func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }
+
+// Less compares first the metrics, then the timestamp.
+func (vec Vector) Less(i, j int) bool {
+	switch {
+	case vec[i].Metric.Before(vec[j].Metric):
+		return true
+	case vec[j].Metric.Before(vec[i].Metric):
+		return false
+	case vec[i].Timestamp.Before(vec[j].Timestamp):
+		return true
+	default:
+		return false
+	}
+}
+
+// Equal compares two sets of samples and returns true if they are equal.
+func (vec Vector) Equal(o Vector) bool {
+	if len(vec) != len(o) {
+		return false
+	}
+
+	for i, sample := range vec {
+		if !sample.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// Matrix is a list of time series.
+type Matrix []*SampleStream
+
+func (m Matrix) Len() int           { return len(m) }
+func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
+func (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }
+
+func (m Matrix) String() string {
+	matCp := make(Matrix, len(m))
+	copy(matCp, m)
+	sort.Sort(matCp)
+
+	strs := make([]string, len(matCp))
+
+	for i, ss := range matCp {
+		strs[i] = ss.String()
+	}
+
+	return strings.Join(strs, "\n")
+}
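
A minimal standalone sketch of the JSON encoding of a float Sample, which serializes the value as a [timestamp, "value"] pair next to the metric:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	s := model.Sample{
		Metric:    model.Metric{model.MetricNameLabel: "up", "job": "node"},
		Value:     1,
		Timestamp: model.TimeFromUnix(1700000000),
	}

	b, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	// {"metric":{"__name__":"up","job":"node"},"value":[1700000000,"1"]}
	fmt.Println(string(b))

	var back model.Sample
	if err := json.Unmarshal(b, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Equal(&s)) // true
}
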
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 0000000..6bfc757
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return errors.New("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the values of v and o are equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality are defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
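
A minimal standalone sketch contrasting SampleValue.Equal with the == operator for NaN values:

package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	a := model.SampleValue(math.NaN())
	b := model.SampleValue(math.NaN())

	// Conventional float semantics: NaN is never == to anything.
	fmt.Println(a == b) // false

	// SampleValue.Equal deliberately treats two NaNs as equal, so that
	// samples carrying NaN values still compare as identical.
	fmt.Println(a.Equal(b)) // true
}
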
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 0000000..91ce5b7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,179 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return errors.New("float value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = FloatString(f)
+	return nil
+}
+
+type HistogramBucket struct {
+	Boundaries int32
+	Lower      FloatString
+	Upper      FloatString
+	Count      FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+	b, err := json.Marshal(s.Boundaries)
+	if err != nil {
+		return nil, err
+	}
+	l, err := json.Marshal(s.Lower)
+	if err != nil {
+		return nil, err
+	}
+	u, err := json.Marshal(s.Upper)
+	if err != nil {
+		return nil, err
+	}
+	c, err := json.Marshal(s.Count)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+	tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+	wantLen := len(tmp)
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	if gotLen := len(tmp); gotLen != wantLen {
+		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+	}
+	return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+	return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (s HistogramBucket) String() string {
+	var sb strings.Builder
+	lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3
+	upperInclusive := s.Boundaries == 0 || s.Boundaries == 3
+	if lowerInclusive {
+		sb.WriteRune('[')
+	} else {
+		sb.WriteRune('(')
+	}
+	fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper)
+	if upperInclusive {
+		sb.WriteRune(']')
+	} else {
+		sb.WriteRune(')')
+	}
+	fmt.Fprintf(&sb, ":%v", s.Count)
+	return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+	if len(s) != len(o) {
+		return false
+	}
+
+	for i, bucket := range s {
+		if !bucket.Equal(o[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+type SampleHistogram struct {
+	Count   FloatString      `json:"count"`
+	Sum     FloatString      `json:"sum"`
+	Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+	return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+	return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+	Timestamp Time
+	// Histogram should never be nil; it is only stored as a pointer for efficiency.
+	Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+	if s.Histogram == nil {
+		return nil, errors.New("histogram is nil")
+	}
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Histogram)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+	tmp := []interface{}{&s.Timestamp, &s.Histogram}
+	wantLen := len(tmp)
+	if err := json.Unmarshal(buf, &tmp); err != nil {
+		return err
+	}
+	if gotLen := len(tmp); gotLen != wantLen {
+		return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+	}
+	if s.Histogram == nil {
+		return errors.New("histogram is null")
+	}
+	return nil
+}
+
+func (s SampleHistogramPair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+	return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
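
A minimal standalone sketch of how the Boundaries field drives the interval notation produced by HistogramBucket.String (the bucket values are made up):

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	// Boundaries encodes inclusivity: 0 = upper inclusive, 1 = lower
	// inclusive, 2 = neither, 3 = both.
	buckets := []model.HistogramBucket{
		{Boundaries: 0, Lower: 1, Upper: 2, Count: 5},
		{Boundaries: 1, Lower: 2, Upper: 4, Count: 3},
		{Boundaries: 3, Lower: 4, Upper: 8, Count: 1},
	}
	for _, b := range buckets {
		fmt.Println(b) // (1,2]:5 then [2,4):3 then [4,8]:1
	}
}
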
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 0000000..078910f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+	Type() ValueType
+	String() string
+}
+
+func (Matrix) Type() ValueType  { return ValMatrix }
+func (Vector) Type() ValueType  { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+	ValNone ValueType = iota
+	ValScalar
+	ValVector
+	ValMatrix
+	ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+	return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	switch s {
+	case "<ValNone>":
+		*et = ValNone
+	case "scalar":
+		*et = ValScalar
+	case "vector":
+		*et = ValVector
+	case "matrix":
+		*et = ValMatrix
+	case "string":
+		*et = ValString
+	default:
+		return fmt.Errorf("unknown value type %q", s)
+	}
+	return nil
+}
+
+func (et ValueType) String() string {
+	switch et {
+	case ValNone:
+		return "<ValNone>"
+	case ValScalar:
+		return "scalar"
+	case ValVector:
+		return "vector"
+	case ValMatrix:
+		return "matrix"
+	case ValString:
+		return "string"
+	}
+	panic("ValueType.String: unhandled value type")
+}
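
A minimal standalone sketch of the usual way to consume a model.Value: switch on Type() and assert to the concrete type:

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

// describe switches on the concrete type behind the model.Value interface.
func describe(v model.Value) string {
	switch v.Type() {
	case model.ValVector:
		return fmt.Sprintf("vector with %d samples", len(v.(model.Vector)))
	case model.ValMatrix:
		return fmt.Sprintf("matrix with %d series", len(v.(model.Matrix)))
	case model.ValScalar, model.ValString:
		return v.Type().String()
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(describe(model.Vector{})) // vector with 0 samples
	fmt.Println(describe(model.Matrix{})) // matrix with 0 series
}
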
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
new file mode 100644
index 0000000..7cc33ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -0,0 +1,2 @@
+/testdata/fixtures/
+/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
new file mode 100644
index 0000000..3c3bf91
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -0,0 +1,45 @@
+version: "2"
+linters:
+  enable:
+    - forbidigo
+    - godot
+    - misspell
+    - revive
+    - testifylint
+  settings:
+    forbidigo:
+      forbid:
+        - pattern: ^fmt\.Print.*$
+          msg: Do not commit print statements.
+    godot:
+      exclude:
+        # Ignore "See: URL".
+        - 'See:'
+      capital: true
+    misspell:
+      locale: US
+  exclusions:
+    generated: lax
+    presets:
+      - comments
+      - common-false-positives
+      - legacy
+      - std-error-handling
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
+formatters:
+  enable:
+    - gofmt
+    - goimports
+  settings:
+    goimports:
+      local-prefixes:
+        - github.com/prometheus/procfs
+  exclusions:
+    generated: lax
+    paths:
+      - third_party$
+      - builtin$
+      - examples$
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..d325872
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
@@ -0,0 +1,3 @@
+# Prometheus Community Code of Conduct
+
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
new file mode 100644
index 0000000..853eb9d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -0,0 +1,121 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you are a new contributor see: [Steps to Contribute](#steps-to-contribute)
+
+* If you have a trivial fix or improvement, go ahead and create a pull request,
+  addressing (with `@...`) a suitable maintainer of this repository (see
+  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+  This will avoid unnecessary work and surely give you and us a good deal
+  of inspiration. Also please see our [non-goals issue](https://github.com/prometheus/docs/issues/149) on areas that the Prometheus community doesn't plan to work on.
+
+* Relevant coding style guidelines are the [Go Code Review
+  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)
+  and the _Formatting and style_ section of Peter Bourgon's [Go: Best
+  Practices for Production
+  Environments](https://peter.bourgon.org/go-in-production/#formatting-and-style).
+
+* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works).
+
+## Steps to Contribute
+
+Should you wish to work on an issue, please claim it first by commenting on the GitHub issue that you want to work on it. This is to prevent duplicated efforts from contributors on the same issue.
+
+Please check the [`help-wanted`](https://github.com/prometheus/procfs/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) label to find issues that are good for getting started. If you have questions about one of the issues, with or without the label, please comment on it and one of the maintainers will clarify. For a quicker response, contact us over [IRC](https://prometheus.io/community).
+
+To quickly compile and test your changes, run:
+```bash
+make test         # Make sure all the tests pass before you commit and push :)
+```
+
+We use [`golangci-lint`](https://github.com/golangci/golangci-lint) for linting the code. If it reports an issue and you think that the warning needs to be disregarded or is a false positive, you can add a special comment `//nolint:linter1[,linter2,...]` before the offending line. Use this sparingly, though; fixing the code to comply with the linter's recommendation is generally the preferred course of action.
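+
+For example, a minimal sketch (the `jitter` helper is hypothetical; the `//nolint` directive and the `gosec` rule ID are standard golangci-lint syntax):
+
+```go
+package example
+
+import (
+	"math/rand"
+	"time"
+)
+
+// jitter returns a small random delay; the weak RNG is fine here because the
+// value is not security-sensitive.
+//
+//nolint:gosec // G404: math/rand is acceptable for non-cryptographic jitter.
+func jitter() time.Duration {
+	return time.Duration(rand.Intn(100)) * time.Millisecond
+}
+```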
+
+## Pull Request Checklist
+
+* Branch from the master branch and, if needed, rebase to the current master branch before submitting your pull request. If it doesn't merge cleanly with master you may be asked to rebase your changes.
+
+* Commits should be as small as possible, while ensuring that each commit is correct independently (i.e., each commit should compile and pass tests).
+
+* If your patch is not getting reviewed or you need a specific person to review it, you can @-reply a reviewer asking for a review in the pull request or a comment, or you can ask for a review on IRC channel [#prometheus](https://webchat.freenode.net/?channels=#prometheus) on irc.freenode.net (for the easiest start, [join via Riot](https://riot.im/app/#/room/#prometheus:matrix.org)).
+
+* Add tests relevant to the fixed bug or new feature.
+
+## Dependency management
+
+The Prometheus project uses [Go modules](https://golang.org/cmd/go/#hdr-Modules__module_versions__and_more) to manage dependencies on external packages. This requires a working Go environment with version 1.12 or greater installed.
+
+All dependencies are vendored in the `vendor/` directory.
+
+To add or update a dependency, use the `go get` command:
+
+```bash
+# Pick the latest tagged release.
+go get example.com/some/module/pkg
+
+# Pick a specific version.
+go get example.com/some/module/pkg@vX.Y.Z
+```
+
+Tidy up the `go.mod` and `go.sum` files and copy the new/updated dependency to the `vendor/` directory:
+
+```bash
+# The GO111MODULE variable can be omitted when the code isn't located in GOPATH.
+GO111MODULE=on go mod tidy
+
+GO111MODULE=on go mod vendor
+```
+
+You have to commit the changes to `go.mod`, `go.sum` and the `vendor/` directory before submitting the pull request.
+
+## API Implementation Guidelines
+
+### Naming and Documentation
+
+Public functions and structs should normally be named according to the file(s) being read and parsed.  For example, 
+the `fs.BuddyInfo()` function reads the file `/proc/buddyinfo`.  In addition, the godoc for each public function
+should contain the path to the file(s) being read and a URL of the linux kernel documentation describing the file(s).
+
+### Reading vs. Parsing
+
+Most functionality in this library consists of reading files and then parsing the text into structured data.  In most
+cases reading and parsing should be separated into different functions/methods with a public `fs.Thing()` method and 
+a private `parseThing(r Reader)` function.  This provides a logical separation and allows parsing to be tested
+directly without the need to read from the filesystem.  Using a `Reader` argument is preferred over other data types
+such as `string` or `*File` because it provides the most flexibility regarding the data source.  When a set of files 
+in a directory needs to be parsed, then a `path` string parameter to the parse function can be used instead.
+
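+A minimal sketch of this pattern (the `Thing` type and the `/proc/thing` path are hypothetical; `fs.proc.Path` is the path helper used throughout this package):
+
+```go
+package procfs
+
+import (
+	"bufio"
+	"io"
+	"os"
+)
+
+// Thing models the contents of a hypothetical /proc/thing file. Per the
+// naming guideline above, the godoc names the file being read.
+type Thing struct {
+	Lines int
+}
+
+// Thing reads /proc/thing.
+func (fs FS) Thing() (Thing, error) {
+	f, err := os.Open(fs.proc.Path("thing"))
+	if err != nil {
+		return Thing{}, err
+	}
+	defer f.Close()
+	return parseThing(f)
+}
+
+// parseThing accepts any io.Reader, so tests can supply fixture data
+// directly without touching the filesystem.
+func parseThing(r io.Reader) (Thing, error) {
+	var t Thing
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		t.Lines++
+	}
+	return t, s.Err()
+}
+```
+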
+### /proc and /sys filesystem I/O 
+
+The `proc` and `sys` filesystems are pseudo file systems and work a bit differently from standard disk I/O.  
+Many of the files are changing continuously and the data being read can in some cases change between subsequent 
+reads in the same file.  Also, most of the files are relatively small (less than a few KBs), and system calls
+to the `stat` function will often return the wrong size.  Therefore, for most files it's recommended to read the 
+full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
+the file.
+
+Note that parsing the file's contents can still be performed one line at a time.  This is done by first reading 
+the full file, and then using a scanner on the `[]byte` or `string` containing the data.
+
+```go
+    data, err := util.ReadFileNoStat("/proc/cpuinfo")
+    if err != nil {
+        return err
+    }
+    reader := bytes.NewReader(data)
+    scanner := bufio.NewScanner(reader)
+    for scanner.Scan() {
+        line := scanner.Text()
+        // ... parse line ...
+    }
+```
+
+The `/sys` filesystem contains many very small files which contain only a single numeric or text value.  These files
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
+not bother to check the size of the file before reading.
+```go
+    data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
+```
+
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE
similarity index 100%
copy from vendor/go.opentelemetry.io/otel/LICENSE
copy to vendor/github.com/prometheus/procfs/LICENSE
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
new file mode 100644
index 0000000..e00f3b3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -0,0 +1,3 @@
+* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
+* Paul Gier <paulgier@gmail.com> @pgier
+* Ben Kochie <superq@gmail.com> @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
new file mode 100644
index 0000000..7edfe4d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -0,0 +1,31 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+include Makefile.common
+
+%/.unpacked: %.ttar
+	@echo ">> extracting fixtures $*"
+	./ttar -C $(dir $*) -x -f $*.ttar
+	touch $@
+
+fixtures: testdata/fixtures/.unpacked
+
+update_fixtures:
+	rm -vf testdata/fixtures/.unpacked
+	./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
+
+.PHONY: build
+build:
+
+.PHONY: test
+test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
new file mode 100644
index 0000000..0ed55c2
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -0,0 +1,283 @@
+# Copyright 2018 The Prometheus Authors
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# A common Makefile that includes rules to be reused in different prometheus projects.
+# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
+
+# Example usage :
+# Create the main Makefile in the root project directory.
+# include Makefile.common
+# customTarget:
+# 	@echo ">> Running customTarget"
+#
+
+# Ensure GOBIN is not set during build so that promu is installed to the correct path
+unexport GOBIN
+
+GO           ?= go
+GOFMT        ?= $(GO)fmt
+FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
+GOOPTS       ?=
+GOHOSTOS     ?= $(shell $(GO) env GOHOSTOS)
+GOHOSTARCH   ?= $(shell $(GO) env GOHOSTARCH)
+
+GO_VERSION        ?= $(shell $(GO) version)
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+PRE_GO_111        ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
+
+PROMU        := $(FIRST_GOPATH)/bin/promu
+pkgs          = ./...
+
+ifeq (arm, $(GOHOSTARCH))
+	GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM)
+	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM)
+else
+	GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)
+endif
+
+GOTEST := $(GO) test
+GOTEST_DIR :=
+ifneq ($(CIRCLE_JOB),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
+	GOTEST_DIR := test-results
+	GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
+endif
+endif
+
+PROMU_VERSION ?= 0.17.0
+PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+
+SKIP_GOLANGCI_LINT :=
+GOLANGCI_LINT :=
+GOLANGCI_LINT_OPTS ?=
+GOLANGCI_LINT_VERSION ?= v2.0.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
+# windows isn't included here because of the path separator being different.
+ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
+	ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
+		# If we're in CI and there is an Actions file, that means the linter
+		# is being run in Actions, so we don't need to run it here.
+		ifneq (,$(SKIP_GOLANGCI_LINT))
+			GOLANGCI_LINT :=
+		else ifeq (,$(CIRCLE_JOB))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+			GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+		endif
+	endif
+endif
+
+PREFIX                  ?= $(shell pwd)
+BIN_DIR                 ?= $(shell pwd)
+DOCKER_IMAGE_TAG        ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
+DOCKERFILE_PATH         ?= ./Dockerfile
+DOCKERBUILD_CONTEXT     ?= ./
+DOCKER_REPO             ?= prom
+
+DOCKER_ARCHS            ?= amd64
+
+BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS))
+PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
+TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
+ifeq ($(GOHOSTARCH),amd64)
+        ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
+                # Only supported on amd64
+                test-flags := -race
+        endif
+endif
+
+# This rule is used to forward a target like "build" to "common-build".  This
+# allows a new "build" target to be defined in a Makefile which includes this
+# one and override "common-build" without override warnings.
+%: common-% ;
+
+.PHONY: common-all
+common-all: precheck style check_license lint yamllint unused build test
+
+.PHONY: common-style
+common-style:
+	@echo ">> checking code style"
+	@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
+	if [ -n "$${fmtRes}" ]; then \
+		echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
+		echo "Please ensure you are using $$($(GO) version) for formatting code."; \
+		exit 1; \
+	fi
+
+.PHONY: common-check_license
+common-check_license:
+	@echo ">> checking license header"
+	@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
+               awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
+       done); \
+       if [ -n "$${licRes}" ]; then \
+               echo "license header checking failed:"; echo "$${licRes}"; \
+               exit 1; \
+       fi
+
+.PHONY: common-deps
+common-deps:
+	@echo ">> getting dependencies"
+	$(GO) mod download
+
+.PHONY: update-go-deps
+update-go-deps:
+	@echo ">> updating Go dependencies"
+	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
+		$(GO) get -d $$m; \
+	done
+	$(GO) mod tidy
+
+.PHONY: common-test-short
+common-test-short: $(GOTEST_DIR)
+	@echo ">> running short tests"
+	$(GOTEST) -short $(GOOPTS) $(pkgs)
+
+.PHONY: common-test
+common-test: $(GOTEST_DIR)
+	@echo ">> running all tests"
+	$(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+
+$(GOTEST_DIR):
+	@mkdir -p $@
+
+.PHONY: common-format
+common-format:
+	@echo ">> formatting code"
+	$(GO) fmt $(pkgs)
+
+.PHONY: common-vet
+common-vet:
+	@echo ">> vetting code"
+	$(GO) vet $(GOOPTS) $(pkgs)
+
+.PHONY: common-lint
+common-lint: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint"
+	$(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+	@echo ">> running golangci-lint fix"
+	$(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-yamllint
+common-yamllint:
+	@echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell command -v yamllint 2> /dev/null))
+	@echo "yamllint not installed so skipping"
+else
+	yamllint .
+endif
+
+# For backward-compatibility.
+.PHONY: common-staticcheck
+common-staticcheck: lint
+
+.PHONY: common-unused
+common-unused:
+	@echo ">> running check for unused/missing packages in go.mod"
+	$(GO) mod tidy
+	@git diff --exit-code -- go.sum go.mod
+
+.PHONY: common-build
+common-build: promu
+	@echo ">> building binaries"
+	$(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+
+.PHONY: common-tarball
+common-tarball: promu
+	@echo ">> building release tarball"
+	$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+	@echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
+.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
+common-docker: $(BUILD_DOCKER_ARCHS)
+$(BUILD_DOCKER_ARCHS): common-docker-%:
+	docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
+		-f $(DOCKERFILE_PATH) \
+		--build-arg ARCH="$*" \
+		--build-arg OS="linux" \
+		$(DOCKERBUILD_CONTEXT)
+
+.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
+common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
+$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
+	docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
+
+DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
+.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
+common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
+$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+	docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+
+.PHONY: common-docker-manifest
+common-docker-manifest:
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+	DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
+
+.PHONY: promu
+promu: $(PROMU)
+
+$(PROMU):
+	$(eval PROMU_TMP := $(shell mktemp -d))
+	curl -s -L $(PROMU_URL) | tar -xvzf - -C $(PROMU_TMP)
+	mkdir -p $(FIRST_GOPATH)/bin
+	cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu
+	rm -r $(PROMU_TMP)
+
+.PHONY: proto
+proto:
+	@echo ">> generating code from proto files"
+	@./scripts/genproto.sh
+
+ifdef GOLANGCI_LINT
+$(GOLANGCI_LINT):
+	mkdir -p $(FIRST_GOPATH)/bin
+	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/$(GOLANGCI_LINT_VERSION)/install.sh \
+		| sed -e '/install -d/d' \
+		| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
+endif
+
+.PHONY: precheck
+precheck::
+
+define PRECHECK_COMMAND_template =
+precheck:: $(1)_precheck
+
+PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1)))
+.PHONY: $(1)_precheck
+$(1)_precheck:
+	@if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \
+		echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \
+		exit 1; \
+	fi
+endef
+
+govulncheck: install-govulncheck
+	govulncheck ./...
+
+install-govulncheck:
+	command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE
new file mode 100644
index 0000000..53c5e9a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/NOTICE
@@ -0,0 +1,7 @@
+procfs provides functions to retrieve system, kernel and process
+metrics from the pseudo-filesystem proc.
+
+Copyright 2014-2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
new file mode 100644
index 0000000..0718239
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -0,0 +1,61 @@
+# procfs
+
+This package provides functions to retrieve system, kernel, and process
+metrics from the pseudo-filesystems /proc and /sys.
+
+*WARNING*: This package is a work in progress. Its API may still break in
+backwards-incompatible ways without warnings. Use it at your own risk.
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
+[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
+
+## Usage
+
+The procfs library is organized by packages based on whether the gathered data is coming from
+/proc, /sys, or both.  Each package contains an `FS` type which represents the path to either /proc, 
+/sys, or both.  For example, CPU statistics are gathered from
+`/proc/stat` and are available via the root procfs package.  First, the proc filesystem mount
+point is initialized, and then the stat information is read.
+
+```go
+fs, err := procfs.NewFS("/proc")
+stats, err := fs.Stat()
+```
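+
+A fuller sketch with error handling (the `BootTime` field is assumed from this package's `Stat` struct; illustrative only):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/prometheus/procfs"
+)
+
+func main() {
+	fs, err := procfs.NewFS("/proc")
+	if err != nil {
+		log.Fatal(err)
+	}
+	stats, err := fs.Stat()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("boot time (unix): %d\n", stats.BootTime)
+}
+```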
+
+Some sub-packages, such as `blockdevice`, require access to both the proc and sys filesystems.
+
+```go
+fs, err := blockdevice.NewFS("/proc", "/sys")
+stats, err := fs.ProcDiskstats()
+```
+
+## Package Organization
+
+The packages in this project are organized according to (1) whether the data comes from the `/proc` or
+`/sys` filesystem and (2) the type of information being retrieved.  For example, most process information
+can be gathered from the functions in the root `procfs` package.  Information about block devices such as disk drives
+is available in the `blockdevice` sub-package.
+
+## Building and Testing
+
+The procfs library is intended to be built as part of another application, so there are no distributable binaries.  
+However, most of the API includes unit tests which can be run with `make test`.
+
+### Updating Test Fixtures
+
+The procfs library includes a set of test fixtures which include many example files from
+the `/proc` and `/sys` filesystems.  These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
+which is extracted automatically during testing.  To add/update the test fixtures, first
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
+
+```bash
+rm -rf testdata/fixtures
+make test
+```
+
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory.  When
+the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
+based on the updated `fixtures` directory.  Finally, verify the changes using
+`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
new file mode 100644
index 0000000..fed02d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -0,0 +1,6 @@
+# Reporting a security issue
+
+The Prometheus security policy, including how to report vulnerabilities, can be
+found here:
+
+<https://prometheus.io/docs/operating/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
new file mode 100644
index 0000000..2e53344
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Learned from include/uapi/linux/if_arp.h.
+const (
+	// Completed entry (ha valid).
+	ATFComplete = 0x02
+	// Permanent entry.
+	ATFPermanent = 0x04
+	// Publish entry.
+	ATFPublish = 0x08
+	// Has requested trailers.
+	ATFUseTrailers = 0x10
+	// Obsoleted: Want to use a netmask (only for proxy entries).
+	ATFNetmask = 0x20
+	// Don't answer these addresses.
+	ATFDontPublish = 0x40
+)
+
+// ARPEntry contains a single row of the columnar data represented in
+// /proc/net/arp.
+type ARPEntry struct {
+	// IP address
+	IPAddr net.IP
+	// MAC address
+	HWAddr net.HardwareAddr
+	// Name of the device
+	Device string
+	// Flags
+	Flags byte
+}
+
+// GatherARPEntries retrieves all the ARP entries, parses the relevant columns,
+// and returns a slice of ARPEntry values.
+func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
+	data, err := os.ReadFile(fs.proc.Path("net/arp"))
+	if err != nil {
+		return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
+	}
+
+	return parseARPEntries(data)
+}
+
+func parseARPEntries(data []byte) ([]ARPEntry, error) {
+	lines := strings.Split(string(data), "\n")
+	entries := make([]ARPEntry, 0)
+	var err error
+	const (
+		expectedDataWidth   = 6
+		expectedHeaderWidth = 9
+	)
+	for _, line := range lines {
+		columns := strings.Fields(line)
+		width := len(columns)
+
+		if width == expectedHeaderWidth || width == 0 {
+			continue
+		} else if width == expectedDataWidth {
+			entry, err := parseARPEntry(columns)
+			if err != nil {
+				return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
+			}
+			entries = append(entries, entry)
+		} else {
+			return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d", ErrFileParse, width, expectedDataWidth)
+		}
+
+	}
+
+	return entries, err
+}
+
+func parseARPEntry(columns []string) (ARPEntry, error) {
+	entry := ARPEntry{Device: columns[5]}
+	ip := net.ParseIP(columns[0])
+	entry.IPAddr = ip
+
+	if mac, err := net.ParseMAC(columns[3]); err == nil {
+		entry.HWAddr = mac
+	} else {
+		return ARPEntry{}, err
+	}
+
+	if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+		entry.Flags = byte(flags)
+	} else {
+		return ARPEntry{}, err
+	}
+
+	return entry, nil
+}
+
+// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+	return entry.Flags&ATFComplete != 0
+}
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
new file mode 100644
index 0000000..8380750
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -0,0 +1,85 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// A BuddyInfo is the details parsed from /proc/buddyinfo.
+// The data is comprised of an array of free fragments of each size.
+// The sizes are 2^n*PAGE_SIZE, where n is the array index.
+type BuddyInfo struct {
+	Node  string
+	Zone  string
+	Sizes []float64
+}
+
+// BuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.
+func (fs FS) BuddyInfo() ([]BuddyInfo, error) {
+	file, err := os.Open(fs.proc.Path("buddyinfo"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseBuddyInfo(file)
+}
+
+func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
+	var (
+		buddyInfo   = []BuddyInfo{}
+		scanner     = bufio.NewScanner(r)
+		bucketCount = -1
+	)
+
+	for scanner.Scan() {
+		var err error
+		line := scanner.Text()
+		parts := strings.Fields(line)
+
+		if len(parts) < 4 {
+			return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
+		}
+
+		node := strings.TrimSuffix(parts[1], ",")
+		zone := strings.TrimSuffix(parts[3], ",")
+		arraySize := len(parts[4:])
+
+		if bucketCount == -1 {
+			bucketCount = arraySize
+		} else {
+			if bucketCount != arraySize {
+				return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
+			}
+		}
+
+		sizes := make([]float64, arraySize)
+		for i := 0; i < arraySize; i++ {
+			sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
+			if err != nil {
+				return nil, fmt.Errorf("%w: Invalid value in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
+			}
+		}
+
+		buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})
+	}
+
+	return buddyInfo, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
new file mode 100644
index 0000000..bf4f3b4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cmdline.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CmdLine returns the command line of the kernel.
+func (fs FS) CmdLine() ([]string, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+
+	return strings.Fields(string(data)), nil
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
new file mode 100644
index 0000000..f0950bb
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -0,0 +1,519 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
+type CPUInfo struct {
+	Processor       uint
+	VendorID        string
+	CPUFamily       string
+	Model           string
+	ModelName       string
+	Stepping        string
+	Microcode       string
+	CPUMHz          float64
+	CacheSize       string
+	PhysicalID      string
+	Siblings        uint
+	CoreID          string
+	CPUCores        uint
+	APICID          string
+	InitialAPICID   string
+	FPU             string
+	FPUException    string
+	CPUIDLevel      uint
+	WP              string
+	Flags           []string
+	Bugs            []string
+	BogoMips        float64
+	CLFlushSize     uint
+	CacheAlignment  uint
+	AddressSizes    string
+	PowerManagement string
+}
+
+var (
+	cpuinfoClockRegexp          = regexp.MustCompile(`([\d.]+)`)
+	cpuinfoS390XProcessorRegexp = regexp.MustCompile(`^processor\s+(\d+):.*`)
+)
+
+// CPUInfo returns information about current system CPUs.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) CPUInfo() ([]CPUInfo, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cpuinfo"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCPUInfo(data)
+}
+
+func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	v, err := strconv.ParseUint(field[1], 0, 32)
+	if err != nil {
+		return nil, err
+	}
+	firstcpu := CPUInfo{Processor: uint(v)}
+	cpuinfo := []CPUInfo{firstcpu}
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "vendor", "vendor_id":
+			cpuinfo[i].VendorID = field[1]
+		case "cpu family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "model":
+			cpuinfo[i].Model = field[1]
+		case "model name":
+			cpuinfo[i].ModelName = field[1]
+		case "stepping":
+			cpuinfo[i].Stepping = field[1]
+		case "microcode":
+			cpuinfo[i].Microcode = field[1]
+		case "cpu MHz":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		case "cache size":
+			cpuinfo[i].CacheSize = field[1]
+		case "physical id":
+			cpuinfo[i].PhysicalID = field[1]
+		case "siblings":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Siblings = uint(v)
+		case "core id":
+			cpuinfo[i].CoreID = field[1]
+		case "cpu cores":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUCores = uint(v)
+		case "apicid":
+			cpuinfo[i].APICID = field[1]
+		case "initial apicid":
+			cpuinfo[i].InitialAPICID = field[1]
+		case "fpu":
+			cpuinfo[i].FPU = field[1]
+		case "fpu_exception":
+			cpuinfo[i].FPUException = field[1]
+		case "cpuid level":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUIDLevel = uint(v)
+		case "wp":
+			cpuinfo[i].WP = field[1]
+		case "flags":
+			cpuinfo[i].Flags = strings.Fields(field[1])
+		case "bugs":
+			cpuinfo[i].Bugs = strings.Fields(field[1])
+		case "bogomips":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		case "clflush size":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CLFlushSize = uint(v)
+		case "cache_alignment":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CacheAlignment = uint(v)
+		case "address sizes":
+			cpuinfo[i].AddressSizes = field[1]
+		case "power management":
+			cpuinfo[i].PowerManagement = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
+	if !match || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	featuresLine := ""
+	commonCPUInfo := CPUInfo{}
+	i := 0
+	if strings.TrimSpace(field[0]) == "Processor" {
+		commonCPUInfo = CPUInfo{ModelName: field[1]}
+		i = -1
+	} else {
+		v, err := strconv.ParseUint(field[1], 0, 32)
+		if err != nil {
+			return nil, err
+		}
+		firstcpu := CPUInfo{Processor: uint(v)}
+		cpuinfo = []CPUInfo{firstcpu}
+	}
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, commonCPUInfo) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "BogoMIPS":
+			if i == -1 {
+				cpuinfo = append(cpuinfo, commonCPUInfo) // There is only one processor
+				i++
+				cpuinfo[i].Processor = 0
+			}
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		case "Features":
+			featuresLine = line
+		case "model name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	fields := strings.SplitN(featuresLine, ": ", 2)
+	if len(fields) == 2 {
+		for i := range cpuinfo {
+			cpuinfo[i].Flags = strings.Fields(fields[1])
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	commonCPUInfo := CPUInfo{VendorID: field[1]}
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "bogomips per cpu":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			commonCPUInfo.BogoMips = v
+		case "features":
+			commonCPUInfo.Flags = strings.Fields(field[1])
+		}
+		if strings.HasPrefix(line, "processor") {
+			match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
+			if len(match) < 2 {
+				return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+			}
+			cpu := commonCPUInfo
+			v, err := strconv.ParseUint(match[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpu.Processor = uint(v)
+			cpuinfo = append(cpuinfo, cpu)
+		}
+		if strings.HasPrefix(line, "cpu number") {
+			break
+		}
+	}
+
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "cpu number":
+			i++
+		case "cpu MHz dynamic":
+			clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
+			v, err := strconv.ParseFloat(clock, 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		case "physical id":
+			cpuinfo[i].PhysicalID = field[1]
+		case "core id":
+			cpuinfo[i].CoreID = field[1]
+		case "cpu cores":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUCores = uint(v)
+		case "siblings":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Siblings = uint(v)
+		}
+	}
+
+	return cpuinfo, nil
+}
+
+func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "cpu model":
+			cpuinfo[i].ModelName = field[1]
+		case "BogoMIPS":
+			v, err := strconv.ParseFloat(field[1], 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].BogoMips = v
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	// find the first "processor" line
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	cpuinfo := []CPUInfo{}
+	systemType := field[1]
+	i := 0
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+			cpuinfo[i].VendorID = systemType
+		case "CPU Family":
+			cpuinfo[i].CPUFamily = field[1]
+		case "Model Name":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	v, err := strconv.ParseUint(field[1], 0, 32)
+	if err != nil {
+		return nil, err
+	}
+	firstcpu := CPUInfo{Processor: uint(v)}
+	cpuinfo := []CPUInfo{firstcpu}
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			i++
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].Processor = uint(v)
+		case "cpu":
+			cpuinfo[i].VendorID = field[1]
+		case "clock":
+			clock := cpuinfoClockRegexp.FindString(strings.TrimSpace(field[1]))
+			v, err := strconv.ParseFloat(clock, 64)
+			if err != nil {
+				return nil, err
+			}
+			cpuinfo[i].CPUMHz = v
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+
+	firstLine := firstNonEmptyLine(scanner)
+	if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
+		return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+	}
+	field := strings.SplitN(firstLine, ": ", 2)
+	v, err := strconv.ParseUint(field[1], 0, 32)
+	if err != nil {
+		return nil, err
+	}
+	firstcpu := CPUInfo{Processor: uint(v)}
+	cpuinfo := []CPUInfo{firstcpu}
+	i := 0
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !strings.Contains(line, ":") {
+			continue
+		}
+		field := strings.SplitN(line, ": ", 2)
+		switch strings.TrimSpace(field[0]) {
+		case "processor":
+			v, err := strconv.ParseUint(field[1], 0, 32)
+			if err != nil {
+				return nil, err
+			}
+			i = int(v)
+			cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+			cpuinfo[i].Processor = uint(v)
+		case "hart":
+			cpuinfo[i].CoreID = field[1]
+		case "isa":
+			cpuinfo[i].ModelName = field[1]
+		}
+	}
+	return cpuinfo, nil
+}
+
+func parseCPUInfoDummy(_ []byte) ([]CPUInfo, error) { // nolint:unused,deadcode
+	return nil, errors.New("not implemented")
+}
+
+// firstNonEmptyLine advances the scanner to the first non-empty line
+// and returns the contents of that line.
+func firstNonEmptyLine(scanner *bufio.Scanner) string {
+	for scanner.Scan() {
+		line := scanner.Text()
+		if strings.TrimSpace(line) != "" {
+			return line
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
new file mode 100644
index 0000000..64cfd53
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (arm || arm64)
+// +build linux
+// +build arm arm64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoARM
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 0000000..d88442f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
new file mode 100644
index 0000000..c11207f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (mips || mipsle || mips64 || mips64le)
+// +build linux
+// +build mips mipsle mips64 mips64le
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoMips
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
new file mode 100644
index 0000000..a6b2b31
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoDummy
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
new file mode 100644
index 0000000..003bc2a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (ppc64 || ppc64le)
+// +build linux
+// +build ppc64 ppc64le
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoPPC
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
new file mode 100644
index 0000000..1c9b731
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (riscv || riscv64)
+// +build linux
+// +build riscv riscv64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoRISCV
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
new file mode 100644
index 0000000..fa3686bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -0,0 +1,19 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoS390X
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
new file mode 100644
index 0000000..a0ef555
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
@@ -0,0 +1,20 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux && (386 || amd64)
+// +build linux
+// +build 386 amd64
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoX86
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
new file mode 100644
index 0000000..5f2a37a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -0,0 +1,154 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Crypto holds info parsed from /proc/crypto.
+type Crypto struct {
+	Alignmask   *uint64
+	Async       bool
+	Blocksize   *uint64
+	Chunksize   *uint64
+	Ctxsize     *uint64
+	Digestsize  *uint64
+	Driver      string
+	Geniv       string
+	Internal    string
+	Ivsize      *uint64
+	Maxauthsize *uint64
+	MaxKeysize  *uint64
+	MinKeysize  *uint64
+	Module      string
+	Name        string
+	Priority    *int64
+	Refcnt      *int64
+	Seedsize    *uint64
+	Selftest    string
+	Type        string
+	Walksize    *uint64
+}
+
+// Crypto parses the crypto file (/proc/crypto) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://kernel.readthedocs.io/en/sphinx-samples/crypto-API.html
+func (fs FS) Crypto() ([]Crypto, error) {
+	path := fs.proc.Path("crypto")
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot read file %s: %w", ErrFileRead, path, err)
+	}
+
+	crypto, err := parseCrypto(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse %s: %w", ErrFileParse, path, err)
+	}
+
+	return crypto, nil
+}
+
+// parseCrypto parses a /proc/crypto stream into Crypto elements.
+func parseCrypto(r io.Reader) ([]Crypto, error) {
+	var out []Crypto
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		text := s.Text()
+		switch {
+		case strings.HasPrefix(text, "name"):
+			// Each crypto element begins with its name.
+			out = append(out, Crypto{})
+		case text == "":
+			continue
+		}
+
+		kv := strings.Split(text, ":")
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
+		}
+
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+
+		// Parse the key/value pair into the currently focused element.
+		c := &out[len(out)-1]
+		if err := c.parseKV(k, v); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return out, nil
+}
+
+// parseKV parses a key/value pair into the appropriate field of c.
+func (c *Crypto) parseKV(k, v string) error {
+	vp := util.NewValueParser(v)
+
+	switch k {
+	case "async":
+		// Interpret literal yes as true.
+		c.Async = v == "yes"
+	case "blocksize":
+		c.Blocksize = vp.PUInt64()
+	case "chunksize":
+		c.Chunksize = vp.PUInt64()
+	case "digestsize":
+		c.Digestsize = vp.PUInt64()
+	case "driver":
+		c.Driver = v
+	case "geniv":
+		c.Geniv = v
+	case "internal":
+		c.Internal = v
+	case "ivsize":
+		c.Ivsize = vp.PUInt64()
+	case "maxauthsize":
+		c.Maxauthsize = vp.PUInt64()
+	case "max keysize":
+		c.MaxKeysize = vp.PUInt64()
+	case "min keysize":
+		c.MinKeysize = vp.PUInt64()
+	case "module":
+		c.Module = v
+	case "name":
+		c.Name = v
+	case "priority":
+		c.Priority = vp.PInt64()
+	case "refcnt":
+		c.Refcnt = vp.PInt64()
+	case "seedsize":
+		c.Seedsize = vp.PUInt64()
+	case "selftest":
+		c.Selftest = v
+	case "type":
+		c.Type = v
+	case "walksize":
+		c.Walksize = vp.PUInt64()
+	}
+
+	return vp.Err()
+}
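
Editor's sketch (not part of the vendored diff): reading /proc/crypto through the API above, assuming procfs is mounted at the default /proc. Pointer-typed fields are nil when the kernel omits a key, so they are checked before dereferencing.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS() // assumes /proc is available
	if err != nil {
		log.Fatal(err)
	}

	entries, err := fs.Crypto()
	if err != nil {
		log.Fatal(err)
	}

	for _, c := range entries {
		// Pointer fields are nil when the kernel omitted that key.
		if c.Priority != nil {
			fmt.Printf("%-24s driver=%s priority=%d\n", c.Name, c.Driver, *c.Priority)
		}
	}
}
```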
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
new file mode 100644
index 0000000..f9d961e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2014 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package procfs provides functions to retrieve system, kernel and process
+// metrics from the pseudo-filesystem proc.
+//
+// Example:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"log"
+//
+//		"github.com/prometheus/procfs"
+//	)
+//
+//	func main() {
+//		p, err := procfs.Self()
+//		if err != nil {
+//			log.Fatalf("could not get process: %s", err)
+//		}
+//
+//		stat, err := p.Stat()
+//		if err != nil {
+//			log.Fatalf("could not get process stat: %s", err)
+//		}
+//
+//		fmt.Printf("command:  %s\n", stat.Comm)
+//		fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+//		fmt.Printf("vsize:    %dB\n", stat.VirtualMemory())
+//		fmt.Printf("rss:      %dB\n", stat.ResidentMemory())
+//	}
+package procfs
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
new file mode 100644
index 0000000..9bdaccc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"github.com/prometheus/procfs/internal/fs"
+)
+
+// FS represents the pseudo-filesystem proc, which provides an interface to
+// kernel data structures.
+type FS struct {
+	proc   fs.FS
+	isReal bool
+}
+
+const (
+	// DefaultMountPoint is the common mount point of the proc filesystem.
+	DefaultMountPoint = fs.DefaultProcMountPoint
+
+	// SectorSize represents the size of a sector in bytes.
+	// It is specific to Linux block I/O operations.
+	SectorSize = 512
+)
+
+// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
+// It will error if the mount point directory can't be read or is a file.
+func NewDefaultFS() (FS, error) {
+	return NewFS(DefaultMountPoint)
+}
+
+// NewFS returns a new proc FS mounted under the given proc mountPoint. It will error
+// if the mount point directory can't be read or is a file.
+func NewFS(mountPoint string) (FS, error) {
+	fs, err := fs.NewFS(mountPoint)
+	if err != nil {
+		return FS{}, err
+	}
+
+	isReal, err := isRealProc(mountPoint)
+	if err != nil {
+		return FS{}, err
+	}
+
+	return FS{fs, isReal}, nil
+}
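
Editor's sketch (not in the diff): NewFS with a non-default mount point. The path /host/proc is hypothetical, e.g. the host's proc bind-mounted into a container.

```go
package main

import (
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// "/host/proc" is a hypothetical bind mount of the host's /proc.
	fs, err := procfs.NewFS("/host/proc")
	if err != nil {
		log.Fatalf("mount point unusable: %v", err)
	}
	_ = fs // fs.LoadAvg(), fs.MDStat(), etc. now read from /host/proc
}
```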
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 0000000..1b5bdbd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc always returns true on platforms whose Statfs_t lacks a Type
+// field, so the proc-filesystem check cannot be performed there.
+func isRealProc(_ string) (bool, error) {
+	return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 0000000..80df79c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+	"syscall"
+)
+
+// isRealProc determines whether supplied mountpoint is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+	stat := syscall.Statfs_t{}
+	err := syscall.Statfs(mountPoint, &stat)
+	if err != nil {
+		return false, err
+	}
+
+	// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+	return stat.Type == 0x9fa0, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
new file mode 100644
index 0000000..7db8633
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -0,0 +1,422 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Fscacheinfo represents fscache statistics.
+type Fscacheinfo struct {
+	// Number of index cookies allocated
+	IndexCookiesAllocated uint64
+	// Number of data storage cookies allocated
+	DataStorageCookiesAllocated uint64
+	// Number of special cookies allocated
+	SpecialCookiesAllocated uint64
+	// Number of objects allocated
+	ObjectsAllocated uint64
+	// Number of object allocation failures
+	ObjectAllocationsFailure uint64
+	// Number of objects that reached the available state
+	ObjectsAvailable uint64
+	// Number of objects that reached the dead state
+	ObjectsDead uint64
+	// Number of objects that didn't have a coherency check
+	ObjectsWithoutCoherencyCheck uint64
+	// Number of objects that passed a coherency check
+	ObjectsWithCoherencyCheck uint64
+	// Number of objects that needed a coherency data update
+	ObjectsNeedCoherencyCheckUpdate uint64
+	// Number of objects that were declared obsolete
+	ObjectsDeclaredObsolete uint64
+	// Number of pages marked as being cached
+	PagesMarkedAsBeingCached uint64
+	// Number of uncache page requests seen
+	UncachePagesRequestSeen uint64
+	// Number of acquire cookie requests seen
+	AcquireCookiesRequestSeen uint64
+	// Number of acq reqs given a NULL parent
+	AcquireRequestsWithNullParent uint64
+	// Number of acq reqs rejected due to no cache available
+	AcquireRequestsRejectedNoCacheAvailable uint64
+	// Number of acq reqs succeeded
+	AcquireRequestsSucceeded uint64
+	// Number of acq reqs rejected due to error
+	AcquireRequestsRejectedDueToError uint64
+	// Number of acq reqs failed on ENOMEM
+	AcquireRequestsFailedDueToEnomem uint64
+	// Number of lookup calls made on cache backends
+	LookupsNumber uint64
+	// Number of negative lookups made
+	LookupsNegative uint64
+	// Number of positive lookups made
+	LookupsPositive uint64
+	// Number of objects created by lookup
+	ObjectsCreatedByLookup uint64
+	// Number of lookups timed out and requeued
+	LookupsTimedOutAndRequed uint64
+	InvalidationsNumber      uint64
+	InvalidationsRunning     uint64
+	// Number of update cookie requests seen
+	UpdateCookieRequestSeen uint64
+	// Number of upd reqs given a NULL parent
+	UpdateRequestsWithNullParent uint64
+	// Number of upd reqs granted CPU time
+	UpdateRequestsRunning uint64
+	// Number of relinquish cookie requests seen
+	RelinquishCookiesRequestSeen uint64
+	// Number of rlq reqs given a NULL parent
+	RelinquishCookiesWithNullParent uint64
+	// Number of rlq reqs waited on completion of creation
+	RelinquishRequestsWaitingCompleteCreation uint64
+	// Number of relinquish requests retried
+	RelinquishRetries uint64
+	// Number of attribute changed requests seen
+	AttributeChangedRequestsSeen uint64
+	// Number of attr changed requests queued
+	AttributeChangedRequestsQueued uint64
+	// Number of attr changed rejected -ENOBUFS
+	AttributeChangedRejectDueToEnobufs uint64
+	// Number of attr changed failed -ENOMEM
+	AttributeChangedFailedDueToEnomem uint64
+	// Number of attr changed ops given CPU time
+	AttributeChangedOps uint64
+	// Number of allocation requests seen
+	AllocationRequestsSeen uint64
+	// Number of successful alloc reqs
+	AllocationOkRequests uint64
+	// Number of alloc reqs that waited on lookup completion
+	AllocationWaitingOnLookup uint64
+	// Number of alloc reqs rejected -ENOBUFS
+	AllocationsRejectedDueToEnobufs uint64
+	// Number of alloc reqs aborted -ERESTARTSYS
+	AllocationsAbortedDueToErestartsys uint64
+	// Number of alloc reqs submitted
+	AllocationOperationsSubmitted uint64
+	// Number of alloc reqs waited for CPU time
+	AllocationsWaitedForCPU uint64
+	// Number of alloc reqs aborted due to object death
+	AllocationsAbortedDueToObjectDeath uint64
+	// Number of retrieval (read) requests seen
+	RetrievalsReadRequests uint64
+	// Number of successful retr reqs
+	RetrievalsOk uint64
+	// Number of retr reqs that waited on lookup completion
+	RetrievalsWaitingLookupCompletion uint64
+	// Number of retr reqs returned -ENODATA
+	RetrievalsReturnedEnodata uint64
+	// Number of retr reqs rejected -ENOBUFS
+	RetrievalsRejectedDueToEnobufs uint64
+	// Number of retr reqs aborted -ERESTARTSYS
+	RetrievalsAbortedDueToErestartsys uint64
+	// Number of retr reqs failed -ENOMEM
+	RetrievalsFailedDueToEnomem uint64
+	// Number of retr reqs submitted
+	RetrievalsRequests uint64
+	// Number of retr reqs waited for CPU time
+	RetrievalsWaitingCPU uint64
+	// Number of retr reqs aborted due to object death
+	RetrievalsAbortedDueToObjectDeath uint64
+	// Number of storage (write) requests seen
+	StoreWriteRequests uint64
+	// Number of successful store reqs
+	StoreSuccessfulRequests uint64
+	// Number of store reqs on a page already pending storage
+	StoreRequestsOnPendingStorage uint64
+	// Number of store reqs rejected -ENOBUFS
+	StoreRequestsRejectedDueToEnobufs uint64
+	// Number of store reqs failed -ENOMEM
+	StoreRequestsFailedDueToEnomem uint64
+	// Number of store reqs submitted
+	StoreRequestsSubmitted uint64
+	// Number of store reqs granted CPU time
+	StoreRequestsRunning uint64
+	// Number of pages given store req processing time
+	StorePagesWithRequestsProcessing uint64
+	// Number of store reqs deleted from tracking tree
+	StoreRequestsDeleted uint64
+	// Number of store reqs over store limit
+	StoreRequestsOverStoreLimit uint64
+	// Number of release reqs against pages with no pending store
+	ReleaseRequestsAgainstPagesWithNoPendingStorage uint64
+	// Number of release reqs against pages stored by time lock granted
+	ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
+	// Number of release reqs ignored due to in-progress store
+	ReleaseRequestsIgnoredDueToInProgressStore uint64
+	// Number of page stores canceled due to release req
+	PageStoresCancelledByReleaseRequests uint64
+	VmscanWaiting                        uint64
+	// Number of times async ops added to pending queues
+	OpsPending uint64
+	// Number of times async ops given CPU time
+	OpsRunning uint64
+	// Number of times async ops queued for processing
+	OpsEnqueued uint64
+	// Number of async ops canceled
+	OpsCancelled uint64
+	// Number of async ops rejected due to object lookup/create failure
+	OpsRejected uint64
+	// Number of async ops initialized
+	OpsInitialised uint64
+	// Number of async ops queued for deferred release
+	OpsDeferred uint64
+	// Number of async ops released (should equal ini=N when idle)
+	OpsReleased uint64
+	// Number of deferred-release async ops garbage collected
+	OpsGarbageCollected uint64
+	// Number of in-progress alloc_object() cache ops
+	CacheopAllocationsinProgress uint64
+	// Number of in-progress lookup_object() cache ops
+	CacheopLookupObjectInProgress uint64
+	// Number of in-progress lookup_complete() cache ops
+	CacheopLookupCompleteInPorgress uint64
+	// Number of in-progress grab_object() cache ops
+	CacheopGrabObjectInProgress uint64
+	CacheopInvalidations        uint64
+	// Number of in-progress update_object() cache ops
+	CacheopUpdateObjectInProgress uint64
+	// Number of in-progress drop_object() cache ops
+	CacheopDropObjectInProgress uint64
+	// Number of in-progress put_object() cache ops
+	CacheopPutObjectInProgress uint64
+	// Number of in-progress attr_changed() cache ops
+	CacheopAttributeChangeInProgress uint64
+	// Number of in-progress sync_cache() cache ops
+	CacheopSyncCacheInProgress uint64
+	// Number of in-progress read_or_alloc_page() cache ops
+	CacheopReadOrAllocPageInProgress uint64
+	// Number of in-progress read_or_alloc_pages() cache ops
+	CacheopReadOrAllocPagesInProgress uint64
+	// Number of in-progress allocate_page() cache ops
+	CacheopAllocatePageInProgress uint64
+	// Number of in-progress allocate_pages() cache ops
+	CacheopAllocatePagesInProgress uint64
+	// Number of in-progress write_page() cache ops
+	CacheopWritePagesInProgress uint64
+	// Number of in-progress uncache_page() cache ops
+	CacheopUncachePagesInProgress uint64
+	// Number of in-progress dissociate_pages() cache ops
+	CacheopDissociatePagesInProgress uint64
+	// Number of object lookups/creations rejected due to lack of space
+	CacheevLookupsAndCreationsRejectedLackSpace uint64
+	// Number of stale objects deleted
+	CacheevStaleObjectsDeleted uint64
+	// Number of objects retired when relinquished
+	CacheevRetiredWhenReliquished uint64
+	// Number of objects culled
+	CacheevObjectsCulled uint64
+}
+
+// Fscacheinfo returns information about current fscache statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/caching/fscache.txt
+func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("fs/fscache/stats"))
+	if err != nil {
+		return Fscacheinfo{}, err
+	}
+
+	m, err := parseFscacheinfo(bytes.NewReader(b))
+	if err != nil {
+		return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
+	}
+
+	return *m, nil
+}
+
+func setFSCacheFields(fields []string, setFields ...*uint64) error {
+	var err error
+	if len(fields) < len(setFields) {
+		return fmt.Errorf("%w: Expected at least %d fields, but got %d", ErrFileParse, len(setFields), len(fields))
+	}
+
+	for i := range setFields {
+		*setFields[i], err = strconv.ParseUint(strings.Split(fields[i], "=")[1], 0, 64)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
+	var m Fscacheinfo
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		if len(fields) < 2 {
+			return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
+		}
+
+		switch fields[0] {
+		case "Cookies:":
+			err := setFSCacheFields(fields[1:], &m.IndexCookiesAllocated, &m.DataStorageCookiesAllocated,
+				&m.SpecialCookiesAllocated)
+			if err != nil {
+				return &m, err
+			}
+		case "Objects:":
+			err := setFSCacheFields(fields[1:], &m.ObjectsAllocated, &m.ObjectAllocationsFailure,
+				&m.ObjectsAvailable, &m.ObjectsDead)
+			if err != nil {
+				return &m, err
+			}
+		case "ChkAux":
+			err := setFSCacheFields(fields[2:], &m.ObjectsWithoutCoherencyCheck, &m.ObjectsWithCoherencyCheck,
+				&m.ObjectsNeedCoherencyCheckUpdate, &m.ObjectsDeclaredObsolete)
+			if err != nil {
+				return &m, err
+			}
+		case "Pages":
+			err := setFSCacheFields(fields[2:], &m.PagesMarkedAsBeingCached, &m.UncachePagesRequestSeen)
+			if err != nil {
+				return &m, err
+			}
+		case "Acquire:":
+			err := setFSCacheFields(fields[1:], &m.AcquireCookiesRequestSeen, &m.AcquireRequestsWithNullParent,
+				&m.AcquireRequestsRejectedNoCacheAvailable, &m.AcquireRequestsSucceeded, &m.AcquireRequestsRejectedDueToError,
+				&m.AcquireRequestsFailedDueToEnomem)
+			if err != nil {
+				return &m, err
+			}
+		case "Lookups:":
+			err := setFSCacheFields(fields[1:], &m.LookupsNumber, &m.LookupsNegative, &m.LookupsPositive,
+				&m.ObjectsCreatedByLookup, &m.LookupsTimedOutAndRequed)
+			if err != nil {
+				return &m, err
+			}
+		case "Invals":
+			err := setFSCacheFields(fields[2:], &m.InvalidationsNumber, &m.InvalidationsRunning)
+			if err != nil {
+				return &m, err
+			}
+		case "Updates:":
+			err := setFSCacheFields(fields[1:], &m.UpdateCookieRequestSeen, &m.UpdateRequestsWithNullParent,
+				&m.UpdateRequestsRunning)
+			if err != nil {
+				return &m, err
+			}
+		case "Relinqs:":
+			err := setFSCacheFields(fields[1:], &m.RelinquishCookiesRequestSeen, &m.RelinquishCookiesWithNullParent,
+				&m.RelinquishRequestsWaitingCompleteCreation, &m.RelinquishRetries)
+			if err != nil {
+				return &m, err
+			}
+		case "AttrChg:":
+			err := setFSCacheFields(fields[1:], &m.AttributeChangedRequestsSeen, &m.AttributeChangedRequestsQueued,
+				&m.AttributeChangedRejectDueToEnobufs, &m.AttributeChangedFailedDueToEnomem, &m.AttributeChangedOps)
+			if err != nil {
+				return &m, err
+			}
+		case "Allocs":
+			if strings.Split(fields[2], "=")[0] == "n" {
+				err := setFSCacheFields(fields[2:], &m.AllocationRequestsSeen, &m.AllocationOkRequests,
+					&m.AllocationWaitingOnLookup, &m.AllocationsRejectedDueToEnobufs, &m.AllocationsAbortedDueToErestartsys)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[2:], &m.AllocationOperationsSubmitted, &m.AllocationsWaitedForCPU,
+					&m.AllocationsAbortedDueToObjectDeath)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "Retrvls:":
+			if strings.Split(fields[1], "=")[0] == "n" {
+				err := setFSCacheFields(fields[1:], &m.RetrievalsReadRequests, &m.RetrievalsOk, &m.RetrievalsWaitingLookupCompletion,
+					&m.RetrievalsReturnedEnodata, &m.RetrievalsRejectedDueToEnobufs, &m.RetrievalsAbortedDueToErestartsys,
+					&m.RetrievalsFailedDueToEnomem)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[1:], &m.RetrievalsRequests, &m.RetrievalsWaitingCPU, &m.RetrievalsAbortedDueToObjectDeath)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "Stores":
+			if strings.Split(fields[2], "=")[0] == "n" {
+				err := setFSCacheFields(fields[2:], &m.StoreWriteRequests, &m.StoreSuccessfulRequests,
+					&m.StoreRequestsOnPendingStorage, &m.StoreRequestsRejectedDueToEnobufs, &m.StoreRequestsFailedDueToEnomem)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[2:], &m.StoreRequestsSubmitted, &m.StoreRequestsRunning,
+					&m.StorePagesWithRequestsProcessing, &m.StoreRequestsDeleted, &m.StoreRequestsOverStoreLimit)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "VmScan":
+			err := setFSCacheFields(fields[2:], &m.ReleaseRequestsAgainstPagesWithNoPendingStorage,
+				&m.ReleaseRequestsAgainstPagesStoredByTimeLockGranted, &m.ReleaseRequestsIgnoredDueToInProgressStore,
+				&m.PageStoresCancelledByReleaseRequests, &m.VmscanWaiting)
+			if err != nil {
+				return &m, err
+			}
+		case "Ops":
+			if strings.Split(fields[2], "=")[0] == "pend" {
+				err := setFSCacheFields(fields[2:], &m.OpsPending, &m.OpsRunning, &m.OpsEnqueued, &m.OpsCancelled, &m.OpsRejected)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[2:], &m.OpsInitialised, &m.OpsDeferred, &m.OpsReleased, &m.OpsGarbageCollected)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "CacheOp:":
+			if strings.Split(fields[1], "=")[0] == "alo" {
+				err := setFSCacheFields(fields[1:], &m.CacheopAllocationsinProgress, &m.CacheopLookupObjectInProgress,
+					&m.CacheopLookupCompleteInPorgress, &m.CacheopGrabObjectInProgress)
+				if err != nil {
+					return &m, err
+				}
+			} else if strings.Split(fields[1], "=")[0] == "inv" {
+				err := setFSCacheFields(fields[1:], &m.CacheopInvalidations, &m.CacheopUpdateObjectInProgress,
+					&m.CacheopDropObjectInProgress, &m.CacheopPutObjectInProgress, &m.CacheopAttributeChangeInProgress,
+					&m.CacheopSyncCacheInProgress)
+				if err != nil {
+					return &m, err
+				}
+			} else {
+				err := setFSCacheFields(fields[1:], &m.CacheopReadOrAllocPageInProgress, &m.CacheopReadOrAllocPagesInProgress,
+					&m.CacheopAllocatePageInProgress, &m.CacheopAllocatePagesInProgress, &m.CacheopWritePagesInProgress,
+					&m.CacheopUncachePagesInProgress, &m.CacheopDissociatePagesInProgress)
+				if err != nil {
+					return &m, err
+				}
+			}
+		case "CacheEv:":
+			err := setFSCacheFields(fields[1:], &m.CacheevLookupsAndCreationsRejectedLackSpace, &m.CacheevStaleObjectsDeleted,
+				&m.CacheevRetiredWhenReliquished, &m.CacheevObjectsCulled)
+			if err != nil {
+				return &m, err
+			}
+		}
+	}
+
+	return &m, nil
+}
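
Editor's sketch (not in the diff): reading the fscache counters. This assumes a kernel built with fscache; otherwise /proc/fs/fscache/stats is absent and the call errors.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	info, err := fs.Fscacheinfo() // errors if fscache is not enabled
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("index cookies allocated:", info.IndexCookiesAllocated)
	fmt.Println("positive lookups:", info.LookupsPositive)
}
```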
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
new file mode 100644
index 0000000..3a43e83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+const (
+	// DefaultProcMountPoint is the common mount point of the proc filesystem.
+	DefaultProcMountPoint = "/proc"
+
+	// DefaultSysMountPoint is the common mount point of the sys filesystem.
+	DefaultSysMountPoint = "/sys"
+
+	// DefaultConfigfsMountPoint is the common mount point of the configfs.
+	DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+	// DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+	DefaultSelinuxMountPoint = "/sys/fs/selinux"
+)
+
+// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
+// interface to kernel data structures.
+type FS string
+
+// NewFS returns a new FS mounted under the given mountPoint. It will error
+// if the mount point can't be read.
+func NewFS(mountPoint string) (FS, error) {
+	info, err := os.Stat(mountPoint)
+	if err != nil {
+		return "", fmt.Errorf("could not read %q: %w", mountPoint, err)
+	}
+	if !info.IsDir() {
+		return "", fmt.Errorf("mount point %q is not a directory", mountPoint)
+	}
+
+	return FS(mountPoint), nil
+}
+
+// Path appends the given path elements to the filesystem path, adding separators
+// as necessary.
+func (fs FS) Path(p ...string) string {
+	return filepath.Join(append([]string{string(fs)}, p...)...)
+}
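
Editor's sketch (not in the diff): because this package is internal, it can only be exercised from inside the procfs module, so the illustration is written as an in-package test.

```go
package fs

import "testing"

// TestPathJoinsSketch is illustrative only, not an upstream test.
func TestPathJoinsSketch(t *testing.T) {
	proc := FS("/proc")
	if got := proc.Path("net", "ip_vs"); got != "/proc/net/ip_vs" {
		t.Fatalf("unexpected path: %s", got)
	}
	if got := proc.Path("sys", "kernel", "random"); got != "/proc/sys/kernel/random" {
		t.Fatalf("unexpected path: %s", got)
	}
}
```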
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
new file mode 100644
index 0000000..5a7d2df
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -0,0 +1,126 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"errors"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// ParseUint32s parses a slice of strings into a slice of uint32s.
+func ParseUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
+
+// ParseUint64s parses a slice of strings into a slice of uint64s.
+func ParseUint64s(ss []string) ([]uint64, error) {
+	us := make([]uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, u)
+	}
+
+	return us, nil
+}
+
+// ParsePInt64s parses a slice of strings into a slice of int64 pointers.
+func ParsePInt64s(ss []string) ([]*int64, error) {
+	us := make([]*int64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseInt(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
+// ParseHexUint64s parses a slice of hexadecimal strings into a slice of uint64 pointers.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+	us := make([]*uint64, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, &u)
+	}
+
+	return us, nil
+}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ReadIntFromFile reads a file and attempts to parse an int64 from it.
+func ReadIntFromFile(path string) (int64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// ParseBool parses a string into a boolean pointer.
+func ParseBool(b string) *bool {
+	var truth bool
+	switch b {
+	case "enabled":
+		truth = true
+	case "disabled":
+		truth = false
+	default:
+		return nil
+	}
+	return &truth
+}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+	data, err := os.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	hexString := strings.TrimSpace(string(data))
+	if !strings.HasPrefix(hexString, "0x") {
+		return 0, errors.New("invalid format: hex string does not start with '0x'")
+	}
+	return strconv.ParseUint(hexString[2:], 16, 64)
+}
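
Editor's sketch (not in the diff), again as an in-package test since util is internal: ParseBool maps exactly two inputs to a value, and everything else yields nil.

```go
package util

import "testing"

// TestParseBoolSketch is illustrative only, not an upstream test.
func TestParseBoolSketch(t *testing.T) {
	if v := ParseBool("enabled"); v == nil || !*v {
		t.Fatal(`ParseBool("enabled") should yield true`)
	}
	if v := ParseBool("disabled"); v == nil || *v {
		t.Fatal(`ParseBool("disabled") should yield false`)
	}
	if v := ParseBool("unknown"); v != nil {
		t.Fatal(`ParseBool("unknown") should yield nil`)
	}
}
```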
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
new file mode 100644
index 0000000..71b7a70
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"io"
+	"os"
+)
+
+// ReadFileNoStat uses io.ReadAll to read contents of entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
+// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
+// Reads at most 1 MiB (1024 KiB). For files larger than this, a scanner
+// should be used.
+func ReadFileNoStat(filename string) ([]byte, error) {
+	const maxBufferSize = 1024 * 1024
+
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	reader := io.LimitReader(f, maxBufferSize)
+	return io.ReadAll(reader)
+}
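
Editor's sketch (not in the diff): why ReadFileNoStat exists. stat(2) reports size 0 for most proc files, so size-hinted readers misbehave; this in-package test reads one such file.

```go
package util

import "testing"

// TestReadFileNoStatSketch is illustrative only, not an upstream test.
// Most /proc files report size 0 via stat(2), which is why this helper
// avoids os.ReadFile's stat-based size hint.
func TestReadFileNoStatSketch(t *testing.T) {
	data, err := ReadFileNoStat("/proc/self/cmdline")
	if err != nil {
		t.Skipf("procfs not available: %v", err)
	}
	if len(data) == 0 {
		t.Fatal("expected a non-empty command line")
	}
}
```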
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
new file mode 100644
index 0000000..d5404a6
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
+
+package util
+
+import (
+	"bytes"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+)
+
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+//
+// Note that this function will not read files larger than 128 bytes.
+func SysReadFile(file string) (string, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	// On some machines, hwmon drivers are broken and return EAGAIN.  This causes
+	// Go's os.ReadFile implementation to poll forever.
+	//
+	// Since we either want to read data or bail immediately, do the simplest
+	// possible read using syscall directly.
+	const sysFileBufferSize = 128
+	b := make([]byte, sysFileBufferSize)
+	n, err := syscall.Read(int(f.Fd()), b)
+	if err != nil {
+		return "", err
+	}
+
+	return string(bytes.TrimSpace(b[:n])), nil
+}
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(strings.TrimSpace(data), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+	data, err := SysReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseInt(strings.TrimSpace(data), 10, 64)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
new file mode 100644
index 0000000..1d86f5e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -0,0 +1,27 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
+
+package util
+
+import (
+	"fmt"
+)
+
+// SysReadFile is implemented here as a no-op for builds that do not support
+// the read syscall. For example Windows, or Linux on Google App Engine.
+func SysReadFile(file string) (string, error) {
+	return "", fmt.Errorf("not supported on this platform")
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/valueparser.go b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
new file mode 100644
index 0000000..fe2355d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/valueparser.go
@@ -0,0 +1,91 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package util
+
+import (
+	"strconv"
+)
+
+// TODO(mdlayher): util packages are an anti-pattern and this should be moved
+// somewhere else that is more focused in the future.
+
+// A ValueParser enables parsing a single string into a variety of data types
+// in a concise and safe way. The Err method must be invoked after invoking
+// any other methods to ensure a value was successfully parsed.
+type ValueParser struct {
+	v   string
+	err error
+}
+
+// NewValueParser creates a ValueParser using the input string.
+func NewValueParser(v string) *ValueParser {
+	return &ValueParser{v: v}
+}
+
+// Int interprets the underlying value as an int and returns that value.
+func (vp *ValueParser) Int() int { return int(vp.int64()) }
+
+// PInt64 interprets the underlying value as an int64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PInt64() *int64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	v := vp.int64()
+	return &v
+}
+
+// int64 interprets the underlying value as an int64 and returns that value.
+// TODO: export if/when necessary.
+func (vp *ValueParser) int64() int64 {
+	if vp.err != nil {
+		return 0
+	}
+
+	// A base value of zero makes ParseInt infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseInt(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return 0
+	}
+
+	return v
+}
+
+// PUInt64 interprets the underlying value as an uint64 and returns a pointer to
+// that value.
+func (vp *ValueParser) PUInt64() *uint64 {
+	if vp.err != nil {
+		return nil
+	}
+
+	// A base value of zero makes ParseInt infer the correct base using the
+	// string's prefix, if any.
+	const base = 0
+	v, err := strconv.ParseUint(vp.v, base, 64)
+	if err != nil {
+		vp.err = err
+		return nil
+	}
+
+	return &v
+}
+
+// Err returns the last error, if any, encountered by the ValueParser.
+func (vp *ValueParser) Err() error {
+	return vp.err
+}
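
Editor's sketch (not in the diff): the Err-after-use contract of ValueParser, as an in-package test. Base 0 means a 0x prefix selects hexadecimal.

```go
package util

import "testing"

// TestValueParserSketch is illustrative only, not an upstream test.
func TestValueParserSketch(t *testing.T) {
	vp := NewValueParser("0x10")
	v := vp.PUInt64()
	if err := vp.Err(); err != nil { // always check Err after parsing
		t.Fatal(err)
	}
	if *v != 16 { // base 0 lets the 0x prefix select hexadecimal
		t.Fatalf("got %d, want 16", *v)
	}
}
```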
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
new file mode 100644
index 0000000..bc3a20c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -0,0 +1,241 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.
+type IPVSStats struct {
+	// Total count of connections.
+	Connections uint64
+	// Total incoming packets processed.
+	IncomingPackets uint64
+	// Total outgoing packets processed.
+	OutgoingPackets uint64
+	// Total incoming traffic.
+	IncomingBytes uint64
+	// Total outgoing traffic.
+	OutgoingBytes uint64
+}
+
+// IPVSBackendStatus holds current metrics of one virtual / real address pair.
+type IPVSBackendStatus struct {
+	// The local (virtual) IP address.
+	LocalAddress net.IP
+	// The remote (real) IP address.
+	RemoteAddress net.IP
+	// The local (virtual) port.
+	LocalPort uint16
+	// The remote (real) port.
+	RemotePort uint16
+	// The local firewall mark
+	LocalMark string
+	// The transport protocol (TCP, UDP).
+	Proto string
+	// The current number of active connections for this virtual/real address pair.
+	ActiveConn uint64
+	// The current number of inactive connections for this virtual/real address pair.
+	InactConn uint64
+	// The current weight of this virtual/real address pair.
+	Weight uint64
+}
+
+// IPVSStats reads the IPVS statistics from the specified `proc` filesystem.
+func (fs FS) IPVSStats() (IPVSStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("net/ip_vs_stats"))
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return parseIPVSStats(bytes.NewReader(data))
+}
+
+// parseIPVSStats performs the actual parsing of `ip_vs_stats`.
+func parseIPVSStats(r io.Reader) (IPVSStats, error) {
+	var (
+		statContent []byte
+		statLines   []string
+		statFields  []string
+		stats       IPVSStats
+	)
+
+	statContent, err := io.ReadAll(r)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	statLines = strings.SplitN(string(statContent), "\n", 4)
+	if len(statLines) != 4 {
+		return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short")
+	}
+
+	statFields = strings.Fields(statLines[2])
+	if len(statFields) != 5 {
+		return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields")
+	}
+
+	stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+	stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)
+	if err != nil {
+		return IPVSStats{}, err
+	}
+
+	return stats, nil
+}
+
+// IPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem.
+func (fs FS) IPVSBackendStatus() ([]IPVSBackendStatus, error) {
+	file, err := os.Open(fs.proc.Path("net/ip_vs"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	return parseIPVSBackendStatus(file)
+}
+
+func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
+	var (
+		status       []IPVSBackendStatus
+		scanner      = bufio.NewScanner(file)
+		proto        string
+		localMark    string
+		localAddress net.IP
+		localPort    uint16
+		err          error
+	)
+
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		if len(fields) == 0 {
+			continue
+		}
+		switch {
+		case fields[0] == "IP" || fields[0] == "Prot" || (len(fields) > 1 && fields[1] == "RemoteAddress:Port"):
+			continue
+		case fields[0] == "TCP" || fields[0] == "UDP":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = ""
+			localAddress, localPort, err = parseIPPort(fields[1])
+			if err != nil {
+				return nil, err
+			}
+		case fields[0] == "FWM":
+			if len(fields) < 2 {
+				continue
+			}
+			proto = fields[0]
+			localMark = fields[1]
+			localAddress = nil
+			localPort = 0
+		case fields[0] == "->":
+			if len(fields) < 6 {
+				continue
+			}
+			remoteAddress, remotePort, err := parseIPPort(fields[1])
+			if err != nil {
+				return nil, err
+			}
+			weight, err := strconv.ParseUint(fields[3], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			activeConn, err := strconv.ParseUint(fields[4], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			inactConn, err := strconv.ParseUint(fields[5], 10, 64)
+			if err != nil {
+				return nil, err
+			}
+			status = append(status, IPVSBackendStatus{
+				LocalAddress:  localAddress,
+				LocalPort:     localPort,
+				LocalMark:     localMark,
+				RemoteAddress: remoteAddress,
+				RemotePort:    remotePort,
+				Proto:         proto,
+				Weight:        weight,
+				ActiveConn:    activeConn,
+				InactConn:     inactConn,
+			})
+		}
+	}
+	return status, nil
+}
+
+func parseIPPort(s string) (net.IP, uint16, error) {
+	var (
+		ip  net.IP
+		err error
+	)
+
+	switch len(s) {
+	case 13:
+		ip, err = hex.DecodeString(s[0:8])
+		if err != nil {
+			return nil, 0, err
+		}
+	case 46:
+		ip = net.ParseIP(s[1:40])
+		if ip == nil {
+			return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s", ErrFileParse, s[1:40])
+		}
+	default:
+		return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s", ErrFileParse, s)
+	}
+
+	portString := s[len(s)-4:]
+	if len(portString) != 4 {
+		return nil, 0,
+			fmt.Errorf("%w: Unexpected port string format %s", ErrFileParse, portString)
+	}
+	port, err := strconv.ParseUint(portString, 16, 16)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return ip, uint16(port), nil
+}
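
Editor's sketch (not in the diff): reading IPVS totals and per-backend status. Both calls fail unless the ip_vs kernel module is loaded.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	stats, err := fs.IPVSStats() // requires the ip_vs module
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("total connections:", stats.Connections)

	backends, err := fs.IPVSBackendStatus()
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range backends {
		fmt.Printf("%s %s:%d -> %s:%d weight=%d active=%d\n",
			b.Proto, b.LocalAddress, b.LocalPort,
			b.RemoteAddress, b.RemotePort, b.Weight, b.ActiveConn)
	}
}
```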
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
new file mode 100644
index 0000000..db88566
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -0,0 +1,63 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"os"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// KernelRandom contains information about the kernel's random number generator.
+type KernelRandom struct {
+	// EntropyAvaliable gives the available entropy, in bits.
+	EntropyAvaliable *uint64
+	// PoolSize gives the size of the entropy pool, in bits.
+	PoolSize *uint64
+	// URandomMinReseedSeconds is the number of seconds after which the DRNG will be reseeded.
+	URandomMinReseedSeconds *uint64
+	// WriteWakeupThreshold the number of bits of entropy below which we wake up processes
+	// that do a select(2) or poll(2) for write access to /dev/random.
+	WriteWakeupThreshold *uint64
+	// ReadWakeupThreshold is the number of bits of entropy required for waking up processes that sleep
+	// waiting for entropy from /dev/random.
+	ReadWakeupThreshold *uint64
+}
+
+// KernelRandom returns values from /proc/sys/kernel/random.
+func (fs FS) KernelRandom() (KernelRandom, error) {
+	random := KernelRandom{}
+
+	for file, p := range map[string]**uint64{
+		"entropy_avail":           &random.EntropyAvaliable,
+		"poolsize":                &random.PoolSize,
+		"urandom_min_reseed_secs": &random.URandomMinReseedSeconds,
+		"write_wakeup_threshold":  &random.WriteWakeupThreshold,
+		"read_wakeup_threshold":   &random.ReadWakeupThreshold,
+	} {
+		val, err := util.ReadUintFromFile(fs.proc.Path("sys", "kernel", "random", file))
+		if os.IsNotExist(err) {
+			continue
+		}
+		if err != nil {
+			return random, err
+		}
+		*p = &val
+	}
+
+	return random, nil
+}
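
Editor's sketch (not in the diff): missing files under /proc/sys/kernel/random simply leave the corresponding pointer nil, so callers check before dereferencing. EntropyAvaliable keeps the upstream field spelling.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	random, err := fs.KernelRandom()
	if err != nil {
		log.Fatal(err)
	}
	if random.EntropyAvaliable != nil { // upstream field spelling
		fmt.Println("entropy available:", *random.EntropyAvaliable)
	}
	if random.PoolSize != nil {
		fmt.Println("pool size:", *random.PoolSize)
	}
}
```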
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
new file mode 100644
index 0000000..332e76c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -0,0 +1,62 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// LoadAvg represents an entry in /proc/loadavg.
+type LoadAvg struct {
+	Load1  float64
+	Load5  float64
+	Load15 float64
+}
+
+// LoadAvg returns loadavg from /proc.
+func (fs FS) LoadAvg() (*LoadAvg, error) {
+	path := fs.proc.Path("loadavg")
+
+	data, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, err
+	}
+	return parseLoad(data)
+}
+
+// parseLoad parses the contents of /proc/loadavg and returns the 1m, 5m and 15m load averages.
+func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
+	loads := make([]float64, 3)
+	parts := strings.Fields(string(loadavgBytes))
+	if len(parts) < 3 {
+		return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
+	}
+
+	var err error
+	for i, load := range parts[0:3] {
+		loads[i], err = strconv.ParseFloat(load, 64)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse load %q: %w", ErrFileParse, load, err)
+		}
+	}
+	return &LoadAvg{
+		Load1:  loads[0],
+		Load5:  loads[1],
+		Load15: loads[2],
+	}, nil
+}
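
Editor's sketch (not in the diff): the three load averages come back as plain float64 values, no pointers involved.

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}

	avg, err := fs.LoadAvg()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("load: %.2f (1m) %.2f (5m) %.2f (15m)\n", avg.Load1, avg.Load5, avg.Load15)
}
```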
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
new file mode 100644
index 0000000..67a9d2b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -0,0 +1,276 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	statusLineRE         = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
+	recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
+	recoveryLinePctRE    = regexp.MustCompile(`= (.+)%`)
+	recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
+	recoveryLineSpeedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
+	componentDeviceRE    = regexp.MustCompile(`(.*)\[\d+\]`)
+)
+
+// MDStat holds info parsed from /proc/mdstat.
+type MDStat struct {
+	// Name of the device.
+	Name string
+	// activity-state of the device.
+	ActivityState string
+	// Number of active disks.
+	DisksActive int64
+	// Total number of disks the device requires.
+	DisksTotal int64
+	// Number of failed disks.
+	DisksFailed int64
+	// Number of "down" disks. (the _ indicator in the status line)
+	DisksDown int64
+	// Spare disks in the device.
+	DisksSpare int64
+	// Number of blocks the device holds.
+	BlocksTotal int64
+	// Number of blocks on the device that are in sync.
+	BlocksSynced int64
+	// Number of blocks on the device that need to be synced.
+	BlocksToBeSynced int64
+	// progress percentage of current sync
+	BlocksSyncedPct float64
+	// estimated finishing time for current sync (in minutes)
+	BlocksSyncedFinishTime float64
+	// current sync speed (in Kilobytes/sec)
+	BlocksSyncedSpeed float64
+	// Name of md component devices
+	Devices []string
+}
+
+// MDStat parses an mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://raid.wiki.kernel.org/index.php/Mdstat
+func (fs FS) MDStat() ([]MDStat, error) {
+	data, err := os.ReadFile(fs.proc.Path("mdstat"))
+	if err != nil {
+		return nil, err
+	}
+	mdstat, err := parseMDStat(data)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
+	}
+	return mdstat, nil
+}
+
+// parseMDStat parses data from mdstat file (/proc/mdstat) and returns a slice of
+// structs containing the relevant info.
+func parseMDStat(mdStatData []byte) ([]MDStat, error) {
+	mdStats := []MDStat{}
+	lines := strings.Split(string(mdStatData), "\n")
+
+	for i, line := range lines {
+		if strings.TrimSpace(line) == "" || line[0] == ' ' ||
+			strings.HasPrefix(line, "Personalities") ||
+			strings.HasPrefix(line, "unused") {
+			continue
+		}
+
+		deviceFields := strings.Fields(line)
+		if len(deviceFields) < 3 {
+			return nil, fmt.Errorf("%w: Expected 3+ fields, got %q", ErrFileParse, line)
+		}
+		mdName := deviceFields[0] // mdx
+		state := deviceFields[2]  // active or inactive
+
+		if len(lines) <= i+3 {
+			return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
+		}
+
+		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
+		fail := int64(strings.Count(line, "(F)"))
+		spare := int64(strings.Count(line, "(S)"))
+		active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
+
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse md device lines for %q: %w", ErrFileParse, mdName, err)
+		}
+
+		syncLineIdx := i + 2
+		if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line
+			syncLineIdx++
+		}
+
+		// If device is syncing at the moment, get the number of currently
+		// synced bytes, otherwise that number equals the size of the device.
+		blocksSynced := size
+		blocksToBeSynced := size
+		speed := float64(0)
+		finish := float64(0)
+		pct := float64(0)
+		recovering := strings.Contains(lines[syncLineIdx], "recovery")
+		resyncing := strings.Contains(lines[syncLineIdx], "resync")
+		checking := strings.Contains(lines[syncLineIdx], "check")
+
+		// Append recovery and resyncing state info.
+		if recovering || resyncing || checking {
+			if recovering {
+				state = "recovering"
+			} else if checking {
+				state = "checking"
+			} else {
+				state = "resyncing"
+			}
+
+			// Handle case when resync=PENDING or resync=DELAYED.
+			if strings.Contains(lines[syncLineIdx], "PENDING") ||
+				strings.Contains(lines[syncLineIdx], "DELAYED") {
+				blocksSynced = 0
+			} else {
+				blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
+				if err != nil {
+					return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
+				}
+			}
+		}
+
+		mdStats = append(mdStats, MDStat{
+			Name:                   mdName,
+			ActivityState:          state,
+			DisksActive:            active,
+			DisksFailed:            fail,
+			DisksDown:              down,
+			DisksSpare:             spare,
+			DisksTotal:             total,
+			BlocksTotal:            size,
+			BlocksSynced:           blocksSynced,
+			BlocksToBeSynced:       blocksToBeSynced,
+			BlocksSyncedPct:        pct,
+			BlocksSyncedFinishTime: finish,
+			BlocksSyncedSpeed:      speed,
+			Devices:                evalComponentDevices(deviceFields),
+		})
+	}
+
+	return mdStats, nil
+}
+
+func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+	statusFields := strings.Fields(statusLine)
+	if len(statusFields) < 1 {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q", ErrFileParse, statusLine)
+	}
+
+	sizeStr := statusFields[0]
+	size, err = strconv.ParseInt(sizeStr, 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+	}
+
+	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
+		// In the device deviceLine, only disks have a number associated with them in [].
+		total = int64(strings.Count(deviceLine, "["))
+		return total, total, 0, size, nil
+	}
+
+	if strings.Contains(deviceLine, "inactive") {
+		return 0, 0, 0, size, nil
+	}
+
+	matches := statusLineRE.FindStringSubmatch(statusLine)
+	if len(matches) != 5 {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Could not find all substring matches in %s", ErrFileParse, statusLine)
+	}
+
+	total, err = strconv.ParseInt(matches[2], 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+	}
+
+	active, err = strconv.ParseInt(matches[3], 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active disks %q: %w", ErrFileParse, matches[3], err)
+	}
+	down = int64(strings.Count(matches[4], "_"))
+
+	return active, total, down, size, nil
+}
+
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
+	matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s", ErrFileParse, recoveryLine)
+	}
+
+	blocks := strings.Split(matches[1], "/")
+	blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
+	if err != nil {
+		return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
+	}
+
+	blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+	if err != nil {
+		return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery to be synced blocks %q: %w", ErrFileParse, blocks[1], err)
+	}
+
+	// Get percentage complete
+	matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+	}
+	pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+	}
+
+	// Get time expected left to complete
+	matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+	}
+	finish, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+	}
+
+	// Get recovery speed
+	matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine matching speed: %s", ErrFileParse, recoveryLine)
+	}
+	speed, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+	}
+
+	return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
+}
+
+func evalComponentDevices(deviceFields []string) []string {
+	mdComponentDevices := make([]string, 0)
+	if len(deviceFields) > 3 {
+		for _, field := range deviceFields[4:] {
+			match := componentDeviceRE.FindStringSubmatch(field)
+			if match == nil {
+				continue
+			}
+			mdComponentDevices = append(mdComponentDevices, match[1])
+		}
+	}
+
+	return mdComponentDevices
+}
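
For illustration, a standalone sketch (not part of the vendored file) of how evalComponentDevices treats the fields of a typical /proc/mdstat device line; the regular expression here is an assumption standing in for the package's unexported componentDeviceRE:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Assumed stand-in for componentDeviceRE: captures "sdb1" from "sdb1[1]".
var componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)

func main() {
	// A typical device line from /proc/mdstat.
	deviceFields := strings.Fields("md0 : active raid1 sdb1[1] sda1[0]")
	// Component devices start at field 4, mirroring evalComponentDevices.
	for _, field := range deviceFields[4:] {
		if m := componentDeviceRE.FindStringSubmatch(field); m != nil {
			fmt.Println(m[1]) // prints "sdb1" then "sda1"
		}
	}
}
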
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
new file mode 100644
index 0000000..4b2c405
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -0,0 +1,389 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Meminfo represents memory statistics.
+type Meminfo struct {
+	// Total usable ram (i.e. physical ram minus a few reserved
+	// bits and the kernel binary code)
+	MemTotal *uint64
+	// The sum of LowFree+HighFree
+	MemFree *uint64
+	// An estimate of how much memory is available for starting
+	// new applications, without swapping. Calculated from
+	// MemFree, SReclaimable, the size of the file LRU lists, and
+	// the low watermarks in each zone.  The estimate takes into
+	// account that the system needs some page cache to function
+	// well, and that not all reclaimable slab will be
+	// reclaimable, due to items being in use. The impact of those
+	// factors will vary from system to system.
+	MemAvailable *uint64
+	// Relatively temporary storage for raw disk blocks shouldn't
+	// get tremendously large (20MB or so)
+	Buffers *uint64
+	Cached  *uint64
+	// Memory that once was swapped out, is swapped back in but
+	// still also is in the swapfile (if memory is needed it
+	// doesn't need to be swapped out AGAIN because it is already
+	// in the swapfile. This saves I/O)
+	SwapCached *uint64
+	// Memory that has been used more recently and usually not
+	// reclaimed unless absolutely necessary.
+	Active *uint64
+	// Memory which has been less recently used.  It is more
+	// eligible to be reclaimed for other purposes
+	Inactive     *uint64
+	ActiveAnon   *uint64
+	InactiveAnon *uint64
+	ActiveFile   *uint64
+	InactiveFile *uint64
+	Unevictable  *uint64
+	Mlocked      *uint64
+	// total amount of swap space available
+	SwapTotal *uint64
+	// Memory which has been evicted from RAM, and is temporarily
+	// on the disk
+	SwapFree *uint64
+	// Memory which is waiting to get written back to the disk
+	Dirty *uint64
+	// Memory which is actively being written back to the disk
+	Writeback *uint64
+	// Non-file backed pages mapped into userspace page tables
+	AnonPages *uint64
+	// files which have been mapped, such as libraries
+	Mapped *uint64
+	Shmem  *uint64
+	// in-kernel data structures cache
+	Slab *uint64
+	// Part of Slab, that might be reclaimed, such as caches
+	SReclaimable *uint64
+	// Part of Slab, that cannot be reclaimed on memory pressure
+	SUnreclaim  *uint64
+	KernelStack *uint64
+	// amount of memory dedicated to the lowest level of page
+	// tables.
+	PageTables *uint64
+	// NFS pages sent to the server, but not yet committed to
+	// stable storage
+	NFSUnstable *uint64
+	// Memory used for block device "bounce buffers"
+	Bounce *uint64
+	// Memory used by FUSE for temporary writeback buffers
+	WritebackTmp *uint64
+	// Based on the overcommit ratio ('vm.overcommit_ratio'),
+	// this is the total amount of memory currently available to
+	// be allocated on the system. This limit is only adhered to
+	// if strict overcommit accounting is enabled (mode 2 in
+	// 'vm.overcommit_memory').
+	// The CommitLimit is calculated with the following formula:
+	// CommitLimit = ([total RAM pages] - [total huge TLB pages]) *
+	//                overcommit_ratio / 100 + [total swap pages]
+	// For example, on a system with 1G of physical RAM and 7G
+	// of swap with a `vm.overcommit_ratio` of 30 it would
+	// yield a CommitLimit of 7.3G.
+	// For more details, see the memory overcommit documentation
+	// in vm/overcommit-accounting.
+	CommitLimit *uint64
+	// The amount of memory presently allocated on the system.
+	// The committed memory is a sum of all of the memory which
+	// has been allocated by processes, even if it has not been
+	// "used" by them as of yet. A process which malloc()'s 1G
+	// of memory, but only touches 300M of it will show up as
+	// using 1G. This 1G is memory which has been "committed" to
+	// by the VM and can be used at any time by the allocating
+	// application. With strict overcommit enabled on the system
+	// (mode 2 in 'vm.overcommit_memory'), allocations which would
+	// exceed the CommitLimit (detailed above) will not be permitted.
+	// This is useful if one needs to guarantee that processes will
+	// not fail due to lack of memory once that memory has been
+	// successfully allocated.
+	CommittedAS *uint64
+	// total size of vmalloc memory area
+	VmallocTotal *uint64
+	// amount of vmalloc area which is used
+	VmallocUsed *uint64
+	// largest contiguous block of vmalloc area which is free
+	VmallocChunk      *uint64
+	Percpu            *uint64
+	HardwareCorrupted *uint64
+	AnonHugePages     *uint64
+	ShmemHugePages    *uint64
+	ShmemPmdMapped    *uint64
+	CmaTotal          *uint64
+	CmaFree           *uint64
+	HugePagesTotal    *uint64
+	HugePagesFree     *uint64
+	HugePagesRsvd     *uint64
+	HugePagesSurp     *uint64
+	Hugepagesize      *uint64
+	DirectMap4k       *uint64
+	DirectMap2M       *uint64
+	DirectMap1G       *uint64
+
+	// The struct fields below are the byte-normalized counterparts to the
+	// existing struct fields. Values are normalized using the optional
+	// unit field in the meminfo line.
+	MemTotalBytes          *uint64
+	MemFreeBytes           *uint64
+	MemAvailableBytes      *uint64
+	BuffersBytes           *uint64
+	CachedBytes            *uint64
+	SwapCachedBytes        *uint64
+	ActiveBytes            *uint64
+	InactiveBytes          *uint64
+	ActiveAnonBytes        *uint64
+	InactiveAnonBytes      *uint64
+	ActiveFileBytes        *uint64
+	InactiveFileBytes      *uint64
+	UnevictableBytes       *uint64
+	MlockedBytes           *uint64
+	SwapTotalBytes         *uint64
+	SwapFreeBytes          *uint64
+	DirtyBytes             *uint64
+	WritebackBytes         *uint64
+	AnonPagesBytes         *uint64
+	MappedBytes            *uint64
+	ShmemBytes             *uint64
+	SlabBytes              *uint64
+	SReclaimableBytes      *uint64
+	SUnreclaimBytes        *uint64
+	KernelStackBytes       *uint64
+	PageTablesBytes        *uint64
+	NFSUnstableBytes       *uint64
+	BounceBytes            *uint64
+	WritebackTmpBytes      *uint64
+	CommitLimitBytes       *uint64
+	CommittedASBytes       *uint64
+	VmallocTotalBytes      *uint64
+	VmallocUsedBytes       *uint64
+	VmallocChunkBytes      *uint64
+	PercpuBytes            *uint64
+	HardwareCorruptedBytes *uint64
+	AnonHugePagesBytes     *uint64
+	ShmemHugePagesBytes    *uint64
+	ShmemPmdMappedBytes    *uint64
+	CmaTotalBytes          *uint64
+	CmaFreeBytes           *uint64
+	HugepagesizeBytes      *uint64
+	DirectMap4kBytes       *uint64
+	DirectMap2MBytes       *uint64
+	DirectMap1GBytes       *uint64
+}
+
+// Meminfo returns information about current kernel/system memory statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Meminfo() (Meminfo, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("meminfo"))
+	if err != nil {
+		return Meminfo{}, err
+	}
+
+	m, err := parseMemInfo(bytes.NewReader(b))
+	if err != nil {
+		return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
+	}
+
+	return *m, nil
+}
+
+func parseMemInfo(r io.Reader) (*Meminfo, error) {
+	var m Meminfo
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		// Guard against short lines before indexing fields[1] below.
+		if len(fields) < 2 {
+			return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+		}
+		var val, valBytes uint64
+
+		val, err := strconv.ParseUint(fields[1], 0, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		switch len(fields) {
+		case 2:
+			// No unit present, use the parsed value as bytes directly.
+			valBytes = val
+		case 3:
+			// Unit present in optional 3rd field, convert it to
+			// bytes. The only unit supported within the Linux
+			// kernel is `kB`.
+			if fields[2] != "kB" {
+				return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+			}
+
+			valBytes = 1024 * val
+
+		default:
+			return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+		}
+
+		switch fields[0] {
+		case "MemTotal:":
+			m.MemTotal = &val
+			m.MemTotalBytes = &valBytes
+		case "MemFree:":
+			m.MemFree = &val
+			m.MemFreeBytes = &valBytes
+		case "MemAvailable:":
+			m.MemAvailable = &val
+			m.MemAvailableBytes = &valBytes
+		case "Buffers:":
+			m.Buffers = &val
+			m.BuffersBytes = &valBytes
+		case "Cached:":
+			m.Cached = &val
+			m.CachedBytes = &valBytes
+		case "SwapCached:":
+			m.SwapCached = &val
+			m.SwapCachedBytes = &valBytes
+		case "Active:":
+			m.Active = &val
+			m.ActiveBytes = &valBytes
+		case "Inactive:":
+			m.Inactive = &val
+			m.InactiveBytes = &valBytes
+		case "Active(anon):":
+			m.ActiveAnon = &val
+			m.ActiveAnonBytes = &valBytes
+		case "Inactive(anon):":
+			m.InactiveAnon = &val
+			m.InactiveAnonBytes = &valBytes
+		case "Active(file):":
+			m.ActiveFile = &val
+			m.ActiveFileBytes = &valBytes
+		case "Inactive(file):":
+			m.InactiveFile = &val
+			m.InactiveFileBytes = &valBytes
+		case "Unevictable:":
+			m.Unevictable = &val
+			m.UnevictableBytes = &valBytes
+		case "Mlocked:":
+			m.Mlocked = &val
+			m.MlockedBytes = &valBytes
+		case "SwapTotal:":
+			m.SwapTotal = &val
+			m.SwapTotalBytes = &valBytes
+		case "SwapFree:":
+			m.SwapFree = &val
+			m.SwapFreeBytes = &valBytes
+		case "Dirty:":
+			m.Dirty = &val
+			m.DirtyBytes = &valBytes
+		case "Writeback:":
+			m.Writeback = &val
+			m.WritebackBytes = &valBytes
+		case "AnonPages:":
+			m.AnonPages = &val
+			m.AnonPagesBytes = &valBytes
+		case "Mapped:":
+			m.Mapped = &val
+			m.MappedBytes = &valBytes
+		case "Shmem:":
+			m.Shmem = &val
+			m.ShmemBytes = &valBytes
+		case "Slab:":
+			m.Slab = &val
+			m.SlabBytes = &valBytes
+		case "SReclaimable:":
+			m.SReclaimable = &val
+			m.SReclaimableBytes = &valBytes
+		case "SUnreclaim:":
+			m.SUnreclaim = &val
+			m.SUnreclaimBytes = &valBytes
+		case "KernelStack:":
+			m.KernelStack = &val
+			m.KernelStackBytes = &valBytes
+		case "PageTables:":
+			m.PageTables = &val
+			m.PageTablesBytes = &valBytes
+		case "NFS_Unstable:":
+			m.NFSUnstable = &val
+			m.NFSUnstableBytes = &valBytes
+		case "Bounce:":
+			m.Bounce = &val
+			m.BounceBytes = &valBytes
+		case "WritebackTmp:":
+			m.WritebackTmp = &val
+			m.WritebackTmpBytes = &valBytes
+		case "CommitLimit:":
+			m.CommitLimit = &val
+			m.CommitLimitBytes = &valBytes
+		case "Committed_AS:":
+			m.CommittedAS = &val
+			m.CommittedASBytes = &valBytes
+		case "VmallocTotal:":
+			m.VmallocTotal = &val
+			m.VmallocTotalBytes = &valBytes
+		case "VmallocUsed:":
+			m.VmallocUsed = &val
+			m.VmallocUsedBytes = &valBytes
+		case "VmallocChunk:":
+			m.VmallocChunk = &val
+			m.VmallocChunkBytes = &valBytes
+		case "Percpu:":
+			m.Percpu = &val
+			m.PercpuBytes = &valBytes
+		case "HardwareCorrupted:":
+			m.HardwareCorrupted = &val
+			m.HardwareCorruptedBytes = &valBytes
+		case "AnonHugePages:":
+			m.AnonHugePages = &val
+			m.AnonHugePagesBytes = &valBytes
+		case "ShmemHugePages:":
+			m.ShmemHugePages = &val
+			m.ShmemHugePagesBytes = &valBytes
+		case "ShmemPmdMapped:":
+			m.ShmemPmdMapped = &val
+			m.ShmemPmdMappedBytes = &valBytes
+		case "CmaTotal:":
+			m.CmaTotal = &val
+			m.CmaTotalBytes = &valBytes
+		case "CmaFree:":
+			m.CmaFree = &val
+			m.CmaFreeBytes = &valBytes
+		case "HugePages_Total:":
+			m.HugePagesTotal = &val
+		case "HugePages_Free:":
+			m.HugePagesFree = &val
+		case "HugePages_Rsvd:":
+			m.HugePagesRsvd = &val
+		case "HugePages_Surp:":
+			m.HugePagesSurp = &val
+		case "Hugepagesize:":
+			m.Hugepagesize = &val
+			m.HugepagesizeBytes = &valBytes
+		case "DirectMap4k:":
+			m.DirectMap4k = &val
+			m.DirectMap4kBytes = &valBytes
+		case "DirectMap2M:":
+			m.DirectMap2M = &val
+			m.DirectMap2MBytes = &valBytes
+		case "DirectMap1G:":
+			m.DirectMap1G = &val
+			m.DirectMap1GBytes = &valBytes
+		}
+	}
+
+	return &m, nil
+}
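
A minimal usage sketch for the accessor above, assuming procfs is mounted at /proc; the pointer-typed fields let callers distinguish a line the kernel did not report from a genuine zero:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	mi, err := fs.Meminfo()
	if err != nil {
		panic(err)
	}
	// Nil means the kernel did not report the field at all.
	if mi.MemAvailableBytes != nil {
		fmt.Printf("MemAvailable: %d bytes\n", *mi.MemAvailableBytes)
	}
}
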
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
new file mode 100644
index 0000000..a704c5e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -0,0 +1,180 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A MountInfo is a type that describes the details and options
+// for each mount, parsed from /proc/self/mountinfo.
+// The fields of each entry in /proc/self/mountinfo
+// are described in the following man page:
+// http://man7.org/linux/man-pages/man5/proc.5.html
+type MountInfo struct {
+	// Unique ID for the mount
+	MountID int
+	// The ID of the parent mount
+	ParentID int
+	// The value of `st_dev` for the files on this FS
+	MajorMinorVer string
+	// The pathname of the directory in the FS that forms
+	// the root for this mount
+	Root string
+	// The pathname of the mount point relative to the root
+	MountPoint string
+	// Mount options
+	Options map[string]string
+	// Zero or more optional fields
+	OptionalFields map[string]string
+	// The Filesystem type
+	FSType string
+	// FS specific information or "none"
+	Source string
+	// Superblock options
+	SuperOptions map[string]string
+}
+
+// Reads each line of the mountinfo file, and returns a list of formatted MountInfo structs.
+func parseMountInfo(info []byte) ([]*MountInfo, error) {
+	mounts := []*MountInfo{}
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseMountInfoString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		mounts = append(mounts, parsedMounts)
+	}
+
+	err := scanner.Err()
+	return mounts, err
+}
+
+// Parses a mountinfo file line, and converts it to a MountInfo struct.
+// An important check here is for the hyphen separator; if it is missing,
+// the line is malformed.
+func parseMountInfoString(mountString string) (*MountInfo, error) {
+	var err error
+
+	mountInfo := strings.Split(mountString, " ")
+	mountInfoLength := len(mountInfo)
+	if mountInfoLength < 10 {
+		return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
+	}
+
+	if mountInfo[mountInfoLength-4] != "-" {
+		return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
+	}
+
+	mount := &MountInfo{
+		MajorMinorVer:  mountInfo[2],
+		Root:           mountInfo[3],
+		MountPoint:     mountInfo[4],
+		Options:        mountOptionsParser(mountInfo[5]),
+		OptionalFields: nil,
+		FSType:         mountInfo[mountInfoLength-3],
+		Source:         mountInfo[mountInfoLength-2],
+		SuperOptions:   mountOptionsParser(mountInfo[mountInfoLength-1]),
+	}
+
+	mount.MountID, err = strconv.Atoi(mountInfo[0])
+	if err != nil {
+		return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
+	}
+	mount.ParentID, err = strconv.Atoi(mountInfo[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
+	}
+	// Has optional fields, which is a space separated list of values.
+	// Example: shared:2 master:7
+	if mountInfo[6] != "" {
+		mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
+		if err != nil {
+			return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
+		}
+	}
+	return mount, nil
+}
+
+// mountOptionsIsValidField checks a string against the list of valid optional-field keys.
+func mountOptionsIsValidField(s string) bool {
+	switch s {
+	case
+		"shared",
+		"master",
+		"propagate_from",
+		"unbindable":
+		return true
+	}
+	return false
+}
+
+// mountOptionsParseOptionalFields parses a list of optional-field strings into a map of strings.
+func mountOptionsParseOptionalFields(o []string) (map[string]string, error) {
+	optionalFields := make(map[string]string)
+	for _, field := range o {
+		optionSplit := strings.SplitN(field, ":", 2)
+		value := ""
+		if len(optionSplit) == 2 {
+			value = optionSplit[1]
+		}
+		if mountOptionsIsValidField(optionSplit[0]) {
+			optionalFields[optionSplit[0]] = value
+		}
+	}
+	return optionalFields, nil
+}
+
+// mountOptionsParser parses the mount options and superblock options.
+func mountOptionsParser(mountOptions string) map[string]string {
+	opts := make(map[string]string)
+	options := strings.Split(mountOptions, ",")
+	for _, opt := range options {
+		splitOption := strings.Split(opt, "=")
+		if len(splitOption) < 2 {
+			key := splitOption[0]
+			opts[key] = ""
+		} else {
+			key, value := splitOption[0], splitOption[1]
+			opts[key] = value
+		}
+	}
+	return opts
+}
+
+// GetMounts retrieves mountinfo information from `/proc/self/mountinfo`.
+func GetMounts() ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
+
+// GetProcMounts retrieves mountinfo information from a process's `/proc/<pid>/mountinfo`.
+func GetProcMounts(pid int) ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
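
A short sketch of GetMounts, which reads the calling process's own mountinfo (imports as in the Meminfo sketch above):

func printMounts() error {
	mounts, err := procfs.GetMounts()
	if err != nil {
		return err
	}
	for _, m := range mounts {
		// Options and SuperOptions are pre-split into key/value maps.
		fmt.Println(m.MountPoint, m.FSType, m.Source)
	}
	return nil
}
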
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
new file mode 100644
index 0000000..50caa73
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -0,0 +1,710 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// While implementing parsing of /proc/[pid]/mountstats, this blog was used
+// heavily as a reference:
+//   https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex
+//
+// Special thanks to Chris Siebenmann for all of his posts explaining the
+// various statistics available for NFS.
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Constants shared between multiple functions.
+const (
+	deviceEntryLen = 8
+
+	fieldBytesLen  = 8
+	fieldEventsLen = 27
+
+	statVersion10 = "1.0"
+	statVersion11 = "1.1"
+
+	fieldTransport10TCPLen = 10
+	fieldTransport10UDPLen = 7
+
+	fieldTransport11TCPLen = 13
+	fieldTransport11UDPLen = 10
+
+	// Kernel version >= 4.14 MaxLen
+	// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+	fieldTransport11RDMAMaxLen = 28
+
+	// Kernel version <= 4.2 MinLen
+	// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+	fieldTransport11RDMAMinLen = 20
+)
+
+// A Mount is a device mount parsed from /proc/[pid]/mountstats.
+type Mount struct {
+	// Name of the device.
+	Device string
+	// The mount point of the device.
+	Mount string
+	// The filesystem type used by the device.
+	Type string
+	// If available additional statistics related to this Mount.
+	// Use a type assertion to determine if additional statistics are available.
+	Stats MountStats
+}
+
+// A MountStats is a type which contains detailed statistics for a specific
+// type of Mount.
+type MountStats interface {
+	mountStats()
+}
+
+// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.
+type MountStatsNFS struct {
+	// The version of statistics provided.
+	StatVersion string
+	// The mount options of the NFS mount.
+	Opts map[string]string
+	// The age of the NFS mount.
+	Age time.Duration
+	// Statistics related to byte counters for various operations.
+	Bytes NFSBytesStats
+	// Statistics related to various NFS event occurrences.
+	Events NFSEventsStats
+	// Statistics broken down by filesystem operation.
+	Operations []NFSOperationStats
+	// Statistics about the NFS RPC transport.
+	Transport []NFSTransportStats
+}
+
+// mountStats implements MountStats.
+func (m MountStatsNFS) mountStats() {}
+
+// A NFSBytesStats contains statistics about the number of bytes read and written
+// by an NFS client to and from an NFS server.
+type NFSBytesStats struct {
+	// Number of bytes read using the read() syscall.
+	Read uint64
+	// Number of bytes written using the write() syscall.
+	Write uint64
+	// Number of bytes read using the read() syscall in O_DIRECT mode.
+	DirectRead uint64
+	// Number of bytes written using the write() syscall in O_DIRECT mode.
+	DirectWrite uint64
+	// Number of bytes read from the NFS server, in total.
+	ReadTotal uint64
+	// Number of bytes written to the NFS server, in total.
+	WriteTotal uint64
+	// Number of pages read directly via mmap()'d files.
+	ReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	WritePages uint64
+}
+
+// A NFSEventsStats contains statistics about NFS event occurrences.
+type NFSEventsStats struct {
+	// Number of times cached inode attributes are re-validated from the server.
+	InodeRevalidate uint64
+	// Number of times cached dentry nodes are re-validated from the server.
+	DnodeRevalidate uint64
+	// Number of times an inode cache is cleared.
+	DataInvalidate uint64
+	// Number of times cached inode attributes are invalidated.
+	AttributeInvalidate uint64
+	// Number of times files or directories have been open()'d.
+	VFSOpen uint64
+	// Number of times a directory lookup has occurred.
+	VFSLookup uint64
+	// Number of times permissions have been checked.
+	VFSAccess uint64
+	// Number of updates (and potential writes) to pages.
+	VFSUpdatePage uint64
+	// Number of pages read directly via mmap()'d files.
+	VFSReadPage uint64
+	// Number of times a group of pages have been read.
+	VFSReadPages uint64
+	// Number of pages written directly via mmap()'d files.
+	VFSWritePage uint64
+	// Number of times a group of pages have been written.
+	VFSWritePages uint64
+	// Number of times directory entries have been read with getdents().
+	VFSGetdents uint64
+	// Number of times attributes have been set on inodes.
+	VFSSetattr uint64
+	// Number of pending writes that have been forcefully flushed to the server.
+	VFSFlush uint64
+	// Number of times fsync() has been called on directories and files.
+	VFSFsync uint64
+	// Number of times locking has been attempted on a file.
+	VFSLock uint64
+	// Number of times files have been closed and released.
+	VFSFileRelease uint64
+	// Unknown.  Possibly unused.
+	CongestionWait uint64
+	// Number of times files have been truncated.
+	Truncation uint64
+	// Number of times a file has been grown due to writes beyond its existing end.
+	WriteExtension uint64
+	// Number of times a file was removed while still open by another process.
+	SillyRename uint64
+	// Number of times the NFS server gave less data than expected while reading.
+	ShortRead uint64
+	// Number of times the NFS server wrote less data than expected while writing.
+	ShortWrite uint64
+	// Number of times the NFS server indicated EJUKEBOX; retrieving data from
+	// offline storage.
+	JukeboxDelay uint64
+	// Number of NFS v4.1+ pNFS reads.
+	PNFSRead uint64
+	// Number of NFS v4.1+ pNFS writes.
+	PNFSWrite uint64
+}
+
+// A NFSOperationStats contains statistics for a single operation.
+type NFSOperationStats struct {
+	// The name of the operation.
+	Operation string
+	// Number of requests performed for this operation.
+	Requests uint64
+	// Number of times an actual RPC request has been transmitted for this operation.
+	Transmissions uint64
+	// Number of times a request has had a major timeout.
+	MajorTimeouts uint64
+	// Number of bytes sent for this operation, including RPC headers and payload.
+	BytesSent uint64
+	// Number of bytes received for this operation, including RPC headers and payload.
+	BytesReceived uint64
+	// Duration all requests spent queued for transmission before they were sent.
+	CumulativeQueueMilliseconds uint64
+	// Duration it took to get a reply back after the request was transmitted.
+	CumulativeTotalResponseMilliseconds uint64
+	// Duration from when a request was enqueued to when it was completely handled.
+	CumulativeTotalRequestMilliseconds uint64
+	// The count of operations that complete with tk_status < 0.  These statuses usually indicate error conditions.
+	Errors uint64
+}
+
+// A NFSTransportStats contains statistics for the NFS mount RPC requests and
+// responses.
+type NFSTransportStats struct {
+	// The transport protocol used for the NFS mount.
+	Protocol string
+	// The local port used for the NFS mount.
+	Port uint64
+	// Number of times the client has had to establish a connection from scratch
+	// to the NFS server.
+	Bind uint64
+	// Number of times the client has made a TCP connection to the NFS server.
+	Connect uint64
+	// Duration (in jiffies, a kernel internal unit of time) the NFS mount has
+	// spent waiting for connections to the server to be established.
+	ConnectIdleTime uint64
+	// Duration since the NFS mount last saw any RPC traffic.
+	IdleTimeSeconds uint64
+	// Number of RPC requests for this mount sent to the NFS server.
+	Sends uint64
+	// Number of RPC responses for this mount received from the NFS server.
+	Receives uint64
+	// Number of times the NFS server sent a response with a transaction ID
+	// unknown to this client.
+	BadTransactionIDs uint64
+	// A running counter, incremented on each request as the current difference
+	// between sends and receives.
+	CumulativeActiveRequests uint64
+	// A running counter, incremented on each request by the current backlog
+	// queue size.
+	CumulativeBacklog uint64
+
+	// Stats below only available with stat version 1.1.
+
+	// Maximum number of simultaneously active RPC requests ever used.
+	MaximumRPCSlotsUsed uint64
+	// A running counter, incremented on each request as the current size of the
+	// sending queue.
+	CumulativeSendingQueue uint64
+	// A running counter, incremented on each request as the current size of the
+	// pending queue.
+	CumulativePendingQueue uint64
+
+	// Stats below only available with stat version 1.1.
+	// Transport over RDMA
+
+	// accessed when sending a call
+	ReadChunkCount   uint64
+	WriteChunkCount  uint64
+	ReplyChunkCount  uint64
+	TotalRdmaRequest uint64
+
+	// rarely accessed error counters
+	PullupCopyCount      uint64
+	HardwayRegisterCount uint64
+	FailedMarshalCount   uint64
+	BadReplyCount        uint64
+	MrsRecovered         uint64
+	MrsOrphaned          uint64
+	MrsAllocated         uint64
+	EmptySendctxQ        uint64
+
+	// accessed when receiving a reply
+	TotalRdmaReply    uint64
+	FixupCopyCount    uint64
+	ReplyWaitsForSend uint64
+	LocalInvNeeded    uint64
+	NomsgCallCount    uint64
+	BcallCount        uint64
+}
+
+// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
+// of Mount structures containing detailed information about each mount.
+// If available, statistics for each mount are parsed as well.
+func parseMountStats(r io.Reader) ([]*Mount, error) {
+	const (
+		device            = "device"
+		statVersionPrefix = "statvers="
+
+		nfs3Type = "nfs"
+		nfs4Type = "nfs4"
+	)
+
+	var mounts []*Mount
+
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Only look for device entries in this function
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 || ss[0] != device {
+			continue
+		}
+
+		m, err := parseMount(ss)
+		if err != nil {
+			return nil, err
+		}
+
+		// Does this mount also possess statistics information?
+		if len(ss) > deviceEntryLen {
+			// Only NFSv3 and v4 are supported for parsing statistics
+			if m.Type != nfs3Type && m.Type != nfs4Type {
+				return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
+			}
+
+			statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
+
+			stats, err := parseMountStatsNFS(s, statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			m.Stats = stats
+		}
+
+		mounts = append(mounts, m)
+	}
+
+	return mounts, s.Err()
+}
+
+// parseMount parses an entry in /proc/[pid]/mountstats in the format:
+//
+//	device [device] mounted on [mount] with fstype [type]
+func parseMount(ss []string) (*Mount, error) {
+	if len(ss) < deviceEntryLen {
+		return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
+	}
+
+	// Check for specific words appearing at specific indices to ensure
+	// the format is consistent with what we expect
+	format := []struct {
+		i int
+		s string
+	}{
+		{i: 0, s: "device"},
+		{i: 2, s: "mounted"},
+		{i: 3, s: "on"},
+		{i: 5, s: "with"},
+		{i: 6, s: "fstype"},
+	}
+
+	for _, f := range format {
+		if ss[f.i] != f.s {
+			return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
+		}
+	}
+
+	return &Mount{
+		Device: ss[1],
+		Mount:  ss[4],
+		Type:   ss[7],
+	}, nil
+}
+
+// parseMountStatsNFS parses a MountStatsNFS by scanning additional information
+// related to NFS statistics.
+func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {
+	// Field indicators for parsing specific types of data
+	const (
+		fieldOpts       = "opts:"
+		fieldAge        = "age:"
+		fieldBytes      = "bytes:"
+		fieldEvents     = "events:"
+		fieldPerOpStats = "per-op"
+		fieldTransport  = "xprt:"
+	)
+
+	stats := &MountStatsNFS{
+		StatVersion: statVersion,
+	}
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			break
+		}
+
+		switch ss[0] {
+		case fieldOpts:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+			}
+			if stats.Opts == nil {
+				stats.Opts = map[string]string{}
+			}
+			for _, opt := range strings.Split(ss[1], ",") {
+				split := strings.Split(opt, "=")
+				if len(split) == 2 {
+					stats.Opts[split[0]] = split[1]
+				} else {
+					stats.Opts[opt] = ""
+				}
+			}
+		case fieldAge:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+			}
+			// Age integer is in seconds
+			d, err := time.ParseDuration(ss[1] + "s")
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Age = d
+		case fieldBytes:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
+			}
+			bstats, err := parseNFSBytesStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Bytes = *bstats
+		case fieldEvents:
+			if len(ss) < 2 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
+			}
+			estats, err := parseNFSEventsStats(ss[1:])
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Events = *estats
+		case fieldTransport:
+			if len(ss) < 3 {
+				return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
+			}
+
+			tstats, err := parseNFSTransportStats(ss[1:], statVersion)
+			if err != nil {
+				return nil, err
+			}
+
+			stats.Transport = append(stats.Transport, *tstats)
+		}
+
+		// When encountering "per-operation statistics", we must break this
+		// loop and parse them separately to ensure we can terminate parsing
+		// before reaching another device entry; hence why this 'if' statement
+		// is not just another switch case
+		if ss[0] == fieldPerOpStats {
+			break
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	// NFS per-operation stats appear last before the next device entry
+	perOpStats, err := parseNFSOperationStats(s)
+	if err != nil {
+		return nil, err
+	}
+
+	stats.Operations = perOpStats
+
+	return stats, nil
+}
+
+// parseNFSBytesStats parses a NFSBytesStats line using an input set of
+// integer fields.
+func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
+	if len(ss) != fieldBytesLen {
+		return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
+	}
+
+	ns := make([]uint64, 0, fieldBytesLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSBytesStats{
+		Read:        ns[0],
+		Write:       ns[1],
+		DirectRead:  ns[2],
+		DirectWrite: ns[3],
+		ReadTotal:   ns[4],
+		WriteTotal:  ns[5],
+		ReadPages:   ns[6],
+		WritePages:  ns[7],
+	}, nil
+}
+
+// parseNFSEventsStats parses a NFSEventsStats line using an input set of
+// integer fields.
+func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
+	if len(ss) != fieldEventsLen {
+		return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
+	}
+
+	ns := make([]uint64, 0, fieldEventsLen)
+	for _, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns = append(ns, n)
+	}
+
+	return &NFSEventsStats{
+		InodeRevalidate:     ns[0],
+		DnodeRevalidate:     ns[1],
+		DataInvalidate:      ns[2],
+		AttributeInvalidate: ns[3],
+		VFSOpen:             ns[4],
+		VFSLookup:           ns[5],
+		VFSAccess:           ns[6],
+		VFSUpdatePage:       ns[7],
+		VFSReadPage:         ns[8],
+		VFSReadPages:        ns[9],
+		VFSWritePage:        ns[10],
+		VFSWritePages:       ns[11],
+		VFSGetdents:         ns[12],
+		VFSSetattr:          ns[13],
+		VFSFlush:            ns[14],
+		VFSFsync:            ns[15],
+		VFSLock:             ns[16],
+		VFSFileRelease:      ns[17],
+		CongestionWait:      ns[18],
+		Truncation:          ns[19],
+		WriteExtension:      ns[20],
+		SillyRename:         ns[21],
+		ShortRead:           ns[22],
+		ShortWrite:          ns[23],
+		JukeboxDelay:        ns[24],
+		PNFSRead:            ns[25],
+		PNFSWrite:           ns[26],
+	}, nil
+}
+
+// parseNFSOperationStats parses a slice of NFSOperationStats by scanning
+// additional information about per-operation statistics until an empty
+// line is reached.
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
+	const (
+		// Minimum number of expected fields in each per-operation statistics set
+		minFields = 9
+	)
+
+	var ops []NFSOperationStats
+
+	for s.Scan() {
+		ss := strings.Fields(string(s.Bytes()))
+		if len(ss) == 0 {
+			// Must break when reading a blank line after per-operation stats to
+			// enable top-level function to parse the next device entry
+			break
+		}
+
+		if len(ss) < minFields {
+			return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
+		}
+
+		// Skip string operation name for integers
+		ns := make([]uint64, 0, minFields-1)
+		for _, st := range ss[1:] {
+			n, err := strconv.ParseUint(st, 10, 64)
+			if err != nil {
+				return nil, err
+			}
+
+			ns = append(ns, n)
+		}
+		opStats := NFSOperationStats{
+			Operation:                           strings.TrimSuffix(ss[0], ":"),
+			Requests:                            ns[0],
+			Transmissions:                       ns[1],
+			MajorTimeouts:                       ns[2],
+			BytesSent:                           ns[3],
+			BytesReceived:                       ns[4],
+			CumulativeQueueMilliseconds:         ns[5],
+			CumulativeTotalResponseMilliseconds: ns[6],
+			CumulativeTotalRequestMilliseconds:  ns[7],
+		}
+
+		if len(ns) > 8 {
+			opStats.Errors = ns[8]
+		}
+
+		ops = append(ops, opStats)
+	}
+
+	return ops, s.Err()
+}
+
+// parseNFSTransportStats parses a NFSTransportStats line using an input set of
+// integer fields matched to a specific stats version.
+func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {
+	// Extract the protocol field. It is the only string value in the line
+	protocol := ss[0]
+	ss = ss[1:]
+
+	switch statVersion {
+	case statVersion10:
+		var expectedLength int
+		switch protocol {
+		case "tcp":
+			expectedLength = fieldTransport10TCPLen
+		case "udp":
+			expectedLength = fieldTransport10UDPLen
+		default:
+			return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
+		}
+		if len(ss) != expectedLength {
+			return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
+		}
+	case statVersion11:
+		var expectedLength int
+		switch protocol {
+		case "tcp":
+			expectedLength = fieldTransport11TCPLen
+		case "udp":
+			expectedLength = fieldTransport11UDPLen
+		case "rdma":
+			expectedLength = fieldTransport11RDMAMinLen
+		default:
+			return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
+		}
+		if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+			(protocol == "rdma" && len(ss) < expectedLength) {
+			return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
+		}
+	default:
+		return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
+	}
+
+	// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
+	// in a v1.0 response. Since the RDMA stats are the longest, we size the
+	// slice to the RDMA maximum length here.
+	//
+	// Note: slice length must be set to length of v1.1 stats to avoid a panic when
+	// only v1.0 stats are present.
+	// See: https://github.com/prometheus/node_exporter/issues/571.
+	//
+	// Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+	ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
+	for i, s := range ss {
+		n, err := strconv.ParseUint(s, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+
+		ns[i] = n
+	}
+
+	// The fields differ depending on the transport protocol (TCP or UDP)
+	// From https://utcc.utoronto.ca/%7Ecks/space/blog/linux/NFSMountstatsXprt
+	//
+	// For the udp RPC transport there is no connection count, connect idle time,
+	// or idle time (fields #3, #4, and #5); all other fields are the same. So
+	// we set them to 0 here.
+	switch protocol {
+	case "udp":
+		ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+	case "tcp":
+		ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+	case "rdma":
+		ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
+	}
+
+	return &NFSTransportStats{
+		// NFS xprt over tcp or udp
+		Protocol:                 protocol,
+		Port:                     ns[0],
+		Bind:                     ns[1],
+		Connect:                  ns[2],
+		ConnectIdleTime:          ns[3],
+		IdleTimeSeconds:          ns[4],
+		Sends:                    ns[5],
+		Receives:                 ns[6],
+		BadTransactionIDs:        ns[7],
+		CumulativeActiveRequests: ns[8],
+		CumulativeBacklog:        ns[9],
+
+		// NFS xprt over tcp or udp
+		// And statVersion 1.1
+		MaximumRPCSlotsUsed:    ns[10],
+		CumulativeSendingQueue: ns[11],
+		CumulativePendingQueue: ns[12],
+
+		// NFS xprt over rdma
+		// And stat Version 1.1
+		ReadChunkCount:       ns[13],
+		WriteChunkCount:      ns[14],
+		ReplyChunkCount:      ns[15],
+		TotalRdmaRequest:     ns[16],
+		PullupCopyCount:      ns[17],
+		HardwayRegisterCount: ns[18],
+		FailedMarshalCount:   ns[19],
+		BadReplyCount:        ns[20],
+		MrsRecovered:         ns[21],
+		MrsOrphaned:          ns[22],
+		MrsAllocated:         ns[23],
+		EmptySendctxQ:        ns[24],
+		TotalRdmaReply:       ns[25],
+		FixupCopyCount:       ns[26],
+		ReplyWaitsForSend:    ns[27],
+		LocalInvNeeded:       ns[28],
+		NomsgCallCount:       ns[29],
+		BcallCount:           ns[30],
+	}, nil
+}
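
Because Mount.Stats is an interface, callers reach the NFS details with a type assertion, as the comment on Mount notes. A hedged sketch, assuming procfs's Proc.MountStats accessor (defined outside this hunk); per parseMountStats above, the dynamic type stored in Stats is *MountStatsNFS:

func printNFSMounts() error {
	p, err := procfs.Self()
	if err != nil {
		return err
	}
	mounts, err := p.MountStats()
	if err != nil {
		return err
	}
	for _, m := range mounts {
		// Only NFSv3/v4 mounts carry detailed statistics.
		if nfs, ok := m.Stats.(*procfs.MountStatsNFS); ok {
			fmt.Println(m.Device, nfs.Age, nfs.Bytes.ReadTotal)
		}
	}
	return nil
}
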
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
new file mode 100644
index 0000000..316df5f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -0,0 +1,118 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
+// and contains netfilter conntrack statistics at one CPU core.
+type ConntrackStatEntry struct {
+	Entries       uint64
+	Searched      uint64
+	Found         uint64
+	New           uint64
+	Invalid       uint64
+	Ignore        uint64
+	Delete        uint64
+	DeleteList    uint64
+	Insert        uint64
+	InsertFailed  uint64
+	Drop          uint64
+	EarlyDrop     uint64
+	SearchRestart uint64
+}
+
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
+func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
+	return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
+}
+
+// Parses a slice of ConntrackStatEntries from the given filepath.
+func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
+	// This file is small and can be read with one syscall.
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		// Do not wrap this error so the caller can detect os.IsNotExist and
+		// similar conditions.
+		return nil, err
+	}
+
+	stat, err := parseConntrackStat(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
+	}
+
+	return stat, nil
+}
+
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
+func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
+	var entries []ConntrackStatEntry
+
+	scanner := bufio.NewScanner(r)
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		conntrackEntry, err := parseConntrackStatEntry(fields)
+		if err != nil {
+			return nil, err
+		}
+		entries = append(entries, *conntrackEntry)
+	}
+
+	return entries, nil
+}
+
+// Parses a ConntrackStatEntry from the given slice of fields.
+func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
+	entries, err := util.ParseHexUint64s(fields)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
+	}
+	numEntries := len(entries)
+	if numEntries < 16 || numEntries > 17 {
+		return nil,
+			fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
+	}
+
+	stats := &ConntrackStatEntry{
+		Entries:      *entries[0],
+		Searched:     *entries[1],
+		Found:        *entries[2],
+		New:          *entries[3],
+		Invalid:      *entries[4],
+		Ignore:       *entries[5],
+		Delete:       *entries[6],
+		DeleteList:   *entries[7],
+		Insert:       *entries[8],
+		InsertFailed: *entries[9],
+		Drop:         *entries[10],
+		EarlyDrop:    *entries[11],
+	}
+
+	// Ignore missing search_restart on Linux < 2.6.35.
+	if numEntries == 17 {
+		stats.SearchRestart = *entries[16]
+	}
+
+	return stats, nil
+}
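
A usage sketch; each ConntrackStatEntry is per CPU core, so system-wide totals require summing (fs created as in the Meminfo sketch):

func printConntrackFound(fs procfs.FS) error {
	entries, err := fs.ConntrackStat()
	if err != nil {
		return err
	}
	var found uint64
	for _, e := range entries { // one entry per CPU core
		found += e.Found
	}
	fmt.Println("conntrack found, all CPUs:", found)
	return nil
}
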
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
new file mode 100644
index 0000000..e66208a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -0,0 +1,205 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// NetDevLine is a single line parsed from /proc/net/dev or /proc/[pid]/net/dev.
+type NetDevLine struct {
+	Name         string `json:"name"`          // The name of the interface.
+	RxBytes      uint64 `json:"rx_bytes"`      // Cumulative count of bytes received.
+	RxPackets    uint64 `json:"rx_packets"`    // Cumulative count of packets received.
+	RxErrors     uint64 `json:"rx_errors"`     // Cumulative count of receive errors encountered.
+	RxDropped    uint64 `json:"rx_dropped"`    // Cumulative count of packets dropped while receiving.
+	RxFIFO       uint64 `json:"rx_fifo"`       // Cumulative count of FIFO buffer errors.
+	RxFrame      uint64 `json:"rx_frame"`      // Cumulative count of packet framing errors.
+	RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver.
+	RxMulticast  uint64 `json:"rx_multicast"`  // Cumulative count of multicast frames received by the device driver.
+	TxBytes      uint64 `json:"tx_bytes"`      // Cumulative count of bytes transmitted.
+	TxPackets    uint64 `json:"tx_packets"`    // Cumulative count of packets transmitted.
+	TxErrors     uint64 `json:"tx_errors"`     // Cumulative count of transmit errors encountered.
+	TxDropped    uint64 `json:"tx_dropped"`    // Cumulative count of packets dropped while transmitting.
+	TxFIFO       uint64 `json:"tx_fifo"`       // Cumulative count of FIFO buffer errors.
+	TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface.
+	TxCarrier    uint64 `json:"tx_carrier"`    // Cumulative count of carrier losses detected by the device driver.
+	TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver.
+}
+
+// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys
+// are interface names.
+type NetDev map[string]NetDevLine
+
+// NetDev returns kernel/system statistics read from /proc/net/dev.
+func (fs FS) NetDev() (NetDev, error) {
+	return newNetDev(fs.proc.Path("net/dev"))
+}
+
+// NetDev returns kernel/system statistics read from /proc/[pid]/net/dev.
+func (p Proc) NetDev() (NetDev, error) {
+	return newNetDev(p.path("net/dev"))
+}
+
+// newNetDev creates a new NetDev from the contents of the given file.
+func newNetDev(file string) (NetDev, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return NetDev{}, err
+	}
+	defer f.Close()
+
+	netDev := NetDev{}
+	s := bufio.NewScanner(f)
+	for n := 0; s.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line, err := netDev.parseLine(s.Text())
+		if err != nil {
+			return netDev, err
+		}
+
+		netDev[line.Name] = *line
+	}
+
+	return netDev, s.Err()
+}
+
+// parseLine parses a single line from the /proc/net/dev file. Header lines
+// must be filtered prior to calling this method.
+func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
+	idx := strings.LastIndex(rawLine, ":")
+	if idx == -1 {
+		return nil, errors.New("invalid net/dev line, missing colon")
+	}
+	fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
+	// Guard against short lines before indexing the 16 counter fields below.
+	if len(fields) < 16 {
+		return nil, errors.New("invalid net/dev line, missing fields")
+	}
+
+	var err error
+	line := &NetDevLine{}
+
+	// Interface Name
+	line.Name = strings.TrimSpace(rawLine[:idx])
+	if line.Name == "" {
+		return nil, errors.New("invalid net/dev line, empty interface name")
+	}
+
+	// RX
+	line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	// TX
+	line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+// Total aggregates the values across interfaces and returns a new NetDevLine.
+// The Name field will be a sorted comma separated list of interface names.
+func (netDev NetDev) Total() NetDevLine {
+	total := NetDevLine{}
+
+	names := make([]string, 0, len(netDev))
+	for _, ifc := range netDev {
+		names = append(names, ifc.Name)
+		total.RxBytes += ifc.RxBytes
+		total.RxPackets += ifc.RxPackets
+		total.RxErrors += ifc.RxErrors
+		total.RxDropped += ifc.RxDropped
+		total.RxFIFO += ifc.RxFIFO
+		total.RxFrame += ifc.RxFrame
+		total.RxCompressed += ifc.RxCompressed
+		total.RxMulticast += ifc.RxMulticast
+		total.TxBytes += ifc.TxBytes
+		total.TxPackets += ifc.TxPackets
+		total.TxErrors += ifc.TxErrors
+		total.TxDropped += ifc.TxDropped
+		total.TxFIFO += ifc.TxFIFO
+		total.TxCollisions += ifc.TxCollisions
+		total.TxCarrier += ifc.TxCarrier
+		total.TxCompressed += ifc.TxCompressed
+	}
+	sort.Strings(names)
+	total.Name = strings.Join(names, ", ")
+
+	return total
+}
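
A sketch of NetDev and its Total helper (fs as in the Meminfo sketch):

func printNetDevTotal(fs procfs.FS) error {
	nd, err := fs.NetDev()
	if err != nil {
		return err
	}
	total := nd.Total() // aggregates counters across all interfaces
	fmt.Printf("%s: rx=%d tx=%d bytes\n", total.Name, total.RxBytes, total.TxBytes)
	return nil
}
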
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 0000000..f50b38e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+	return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+	netDevSNMP6 := make(NetDevSNMP6)
+
+	// The net/dev_snmp6 folders contain one file per interface
+	ifaceFiles, err := os.ReadDir(dir)
+	if err != nil {
+		// On systems with IPv6 disabled, this directory won't exist; the
+		// error is returned as-is so callers can detect that case with
+		// errors.Is(err, os.ErrNotExist).
+		return netDevSNMP6, err
+	}
+
+	for _, iFaceFile := range ifaceFiles {
+		f, err := os.Open(dir + "/" + iFaceFile.Name())
+		if err != nil {
+			return netDevSNMP6, err
+		}
+		defer f.Close()
+
+		netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+		if err != nil {
+			return netDevSNMP6, err
+		}
+	}
+
+	return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+	m := make(map[string]uint64)
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		key, val := stat[0], stat[1]
+
+		// Expect stat name to contain "6" or be "ifIndex"
+		if strings.Contains(key, "6") || key == "ifIndex" {
+			v, err := strconv.ParseUint(val, 10, 64)
+			if err != nil {
+				return m, err
+			}
+
+			m[key] = v
+		}
+	}
+	return m, scanner.Err()
+}
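
And a sketch for the per-interface IPv6 counters; the outer map key is the interface name, the inner key the stat name (e.g. Ip6InReceives):

func printSNMP6(fs procfs.FS) error {
	snmp6, err := fs.NetDevSNMP6()
	if err != nil {
		return err // e.g. the directory is absent when IPv6 is disabled
	}
	for iface, stats := range snmp6 {
		fmt.Println(iface, "Ip6InReceives:", stats["Ip6InReceives"])
	}
	return nil
}
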
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
new file mode 100644
index 0000000..19e3378
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -0,0 +1,248 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+)
+
+const (
+	// Maximum size limit used by io.LimitReader while reading the content of the
+	// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
+	// as each line represents a single used socket.
+	// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
+	// With e.g. 150 bytes per line and the maximum of 65535 sockets, the reader
+	// needs to handle about 150 B * 65535 =~ 10 MB for a single IP.
+	readLimit = 4294967296 // bytes -> 4 GiB
+)
+
+// This contains generic data structures for both udp and tcp sockets.
+type (
+	// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
+	NetIPSocket []*netIPSocketLine
+
+	// NetIPSocketSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetIPSocket it does not collect
+	// the parsed lines into a slice.
+	NetIPSocketSummary struct {
+		// TxQueueLength shows the total queue length of all parsed tx_queue lengths.
+		TxQueueLength uint64
+		// RxQueueLength shows the total queue length of all parsed rx_queue lengths.
+		RxQueueLength uint64
+		// UsedSockets shows the total number of parsed lines representing the
+		// number of used sockets.
+		UsedSockets uint64
+		// Drops shows the total number of dropped packets of all UDP sockets.
+		Drops *uint64
+	}
+
+	// netIPSocketLine is a single parsed line from /proc/net/{t,u}dp{,6}.
+	// Fields which are not used by IPSocket are skipped.
+	// Drops is non-nil for udp{,6}, but nil for tcp{,6}.
+	// For the proc file format details, see https://linux.die.net/man/5/proc.
+	netIPSocketLine struct {
+		Sl        uint64
+		LocalAddr net.IP
+		LocalPort uint64
+		RemAddr   net.IP
+		RemPort   uint64
+		St        uint64
+		TxQueue   uint64
+		RxQueue   uint64
+		UID       uint64
+		Inode     uint64
+		Drops     *uint64
+	}
+)
+
+func newNetIPSocket(file string) (NetIPSocket, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var netIPSocket NetIPSocket
+	isUDP := strings.Contains(file, "udp")
+
+	lr := io.LimitReader(f, readLimit)
+	s := bufio.NewScanner(lr)
+	s.Scan() // skip first line with headers
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		line, err := parseNetIPSocketLine(fields, isUDP)
+		if err != nil {
+			return nil, err
+		}
+		netIPSocket = append(netIPSocket, line)
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return netIPSocket, nil
+}
+
+// newNetIPSocketSummary creates a new NetIPSocketSummary from the contents of the given file.
+func newNetIPSocketSummary(file string) (*NetIPSocketSummary, error) {
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	var netIPSocketSummary NetIPSocketSummary
+	var udpPacketDrops uint64
+	isUDP := strings.Contains(file, "udp")
+
+	lr := io.LimitReader(f, readLimit)
+	s := bufio.NewScanner(lr)
+	s.Scan() // skip first line with headers
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+		line, err := parseNetIPSocketLine(fields, isUDP)
+		if err != nil {
+			return nil, err
+		}
+		netIPSocketSummary.TxQueueLength += line.TxQueue
+		netIPSocketSummary.RxQueueLength += line.RxQueue
+		netIPSocketSummary.UsedSockets++
+		if isUDP {
+			udpPacketDrops += *line.Drops
+			netIPSocketSummary.Drops = &udpPacketDrops
+		}
+	}
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+	return &netIPSocketSummary, nil
+}
+
+// The /proc/net/{t,u}dp{,6} files encode IPv4 addresses in network byte order.
+// IPv6 addresses are written as four 32-bit words of four bytes each, and the
+// bytes within each word appear in reverse order.
+
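+// parseIP converts one hex-encoded address field into a net.IP. For example,
+// the IPv4 loopback address 127.0.0.1 appears in the file as "0100007F":
+// hex-decoding yields the bytes {0x01, 0x00, 0x00, 0x7F}, which are reversed
+// into net.IP{0x7F, 0x00, 0x00, 0x01}.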
+func parseIP(hexIP string) (net.IP, error) {
+	var byteIP []byte
+	byteIP, err := hex.DecodeString(hexIP)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
+	}
+	switch len(byteIP) {
+	case 4:
+		return net.IP{byteIP[3], byteIP[2], byteIP[1], byteIP[0]}, nil
+	case 16:
+		i := net.IP{
+			byteIP[3], byteIP[2], byteIP[1], byteIP[0],
+			byteIP[7], byteIP[6], byteIP[5], byteIP[4],
+			byteIP[11], byteIP[10], byteIP[9], byteIP[8],
+			byteIP[15], byteIP[14], byteIP[13], byteIP[12],
+		}
+		return i, nil
+	default:
+		return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
+	}
+}
+
+// parseNetIPSocketLine parses a single line, represented by a list of fields.
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
+	line := &netIPSocketLine{}
+	if len(fields) < 10 {
+		return nil, fmt.Errorf(
+			"%w: Less than 10 columns found %q",
+			ErrFileParse,
+			strings.Join(fields, " "),
+		)
+	}
+	var err error // parse error
+
+	// sl
+	s := strings.Split(fields[0], ":")
+	if len(s) != 2 {
+		return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
+	}
+
+	if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
+	}
+	// local_address
+	l := strings.Split(fields[1], ":")
+	if len(l) != 2 {
+		return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
+	}
+	if line.LocalAddr, err = parseIP(l[0]); err != nil {
+		return nil, err
+	}
+	if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
+	}
+
+	// remote_address
+	r := strings.Split(fields[2], ":")
+	if len(r) != 2 {
+		return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
+	}
+	if line.RemAddr, err = parseIP(r[0]); err != nil {
+		return nil, err
+	}
+	if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
+	}
+
+	// st
+	if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
+	}
+
+	// tx_queue and rx_queue
+	q := strings.Split(fields[4], ":")
+	if len(q) != 2 {
+		return nil, fmt.Errorf(
+			"%w: Missing colon for tx/rx queues in socket line %q",
+			ErrFileParse,
+			fields[4],
+		)
+	}
+	if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
+	}
+	if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
+	}
+
+	// uid
+	if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+	}
+
+	// inode
+	if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
+		return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+	}
+
+	// drops
+	if isUDP {
+		// Guard against short lines; the drops column only exists for udp{,6}.
+		if len(fields) < 13 {
+			return nil, fmt.Errorf("%w: Missing drops field in %q", ErrFileParse, strings.Join(fields, " "))
+		}
+		drops, err := strconv.ParseUint(fields[12], 0, 64)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, fields[12], err)
+		}
+		line.Drops = &drops
+	}
+
+	return line, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
new file mode 100644
index 0000000..8d4b1ac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -0,0 +1,183 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// NetProtocolStats stores the contents from /proc/net/protocols.
+type NetProtocolStats map[string]NetProtocolStatLine
+
+// NetProtocolStatLine contains a single line parsed from /proc/net/protocols.
+// The first eight columns are parsed into named fields; the remaining columns
+// only provide the set of capabilities implemented by each protocol.
+type NetProtocolStatLine struct {
+	Name         string // 0 The name of the protocol
+	Size         uint64 // 1 The size, in bytes, of a given protocol structure. e.g. sizeof(struct tcp_sock) or sizeof(struct unix_sock)
+	Sockets      int64  // 2 Number of sockets in use by this protocol
+	Memory       int64  // 3 Number of 4KB pages allocated by all sockets of this protocol
+	Pressure     int    // 4 This is either yes (1), no (0), or NI (-1, not implemented)
+	MaxHeader    uint64 // 5 Protocol specific max header size
+	Slab         bool   // 6 Indicates whether or not memory is allocated from the SLAB
+	ModuleName   string // 7 The name of the module that implemented this protocol or "kernel" if not from a module
+	Capabilities NetProtocolCapabilities
+}
+
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
+type NetProtocolCapabilities struct {
+	Close               bool // 8
+	Connect             bool // 9
+	Disconnect          bool // 10
+	Accept              bool // 11
+	IoCtl               bool // 12
+	Init                bool // 13
+	Destroy             bool // 14
+	Shutdown            bool // 15
+	SetSockOpt          bool // 16
+	GetSockOpt          bool // 17
+	SendMsg             bool // 18
+	RecvMsg             bool // 19
+	SendPage            bool // 20
+	Bind                bool // 21
+	BacklogRcv          bool // 22
+	Hash                bool // 23
+	UnHash              bool // 24
+	GetPort             bool // 25
+	EnterMemoryPressure bool // 26
+}
+
+// NetProtocols reads stats from /proc/net/protocols and returns a map of
+// NetProtocolStatLine entries. As of this writing no official Linux
+// documentation exists, but the source is fairly self-explanatory and the
+// format appears stable since its introduction in 2.6.12-rc2:
+// Linux 2.6.12-rc2 - https://elixir.bootlin.com/linux/v2.6.12-rc2/source/net/core/sock.c#L1452
+// Linux 5.10 - https://elixir.bootlin.com/linux/v5.10.4/source/net/core/sock.c#L3586
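+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	stats, err := fs.NetProtocols()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("TCP sockets in use:", stats["TCP"].Sockets)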
+func (fs FS) NetProtocols() (NetProtocolStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("net/protocols"))
+	if err != nil {
+		return NetProtocolStats{}, err
+	}
+	return parseNetProtocols(bufio.NewScanner(bytes.NewReader(data)))
+}
+
+func parseNetProtocols(s *bufio.Scanner) (NetProtocolStats, error) {
+	nps := NetProtocolStats{}
+
+	// Skip the header line
+	s.Scan()
+
+	for s.Scan() {
+		line, err := nps.parseLine(s.Text())
+		if err != nil {
+			return NetProtocolStats{}, err
+		}
+
+		nps[line.Name] = *line
+	}
+	return nps, nil
+}
+
+func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, error) {
+	line := &NetProtocolStatLine{Capabilities: NetProtocolCapabilities{}}
+	var err error
+	const enabled = "yes"
+	const disabled = "no"
+
+	fields := strings.Fields(rawLine)
+	if len(fields) < 8 {
+		return nil, fmt.Errorf("%w: Less than 8 columns found in %q", ErrFileParse, rawLine)
+	}
+	line.Name = fields[0]
+	line.Size, err = strconv.ParseUint(fields[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.Sockets, err = strconv.ParseInt(fields[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	line.Memory, err = strconv.ParseInt(fields[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	switch fields[4] {
+	case enabled:
+		line.Pressure = 1
+	case disabled:
+		line.Pressure = 0
+	default:
+		line.Pressure = -1
+	}
+	line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	switch fields[6] {
+	case enabled:
+		line.Slab = true
+	case disabled:
+		line.Slab = false
+	default:
+		return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
+	}
+	line.ModuleName = fields[7]
+
+	err = line.Capabilities.parseCapabilities(fields[8:])
+	if err != nil {
+		return nil, err
+	}
+
+	return line, nil
+}
+
+func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) error {
+	// The capabilities are all bools so we can loop over to map them
+	capabilityFields := [...]*bool{
+		&pc.Close,
+		&pc.Connect,
+		&pc.Disconnect,
+		&pc.Accept,
+		&pc.IoCtl,
+		&pc.Init,
+		&pc.Destroy,
+		&pc.Shutdown,
+		&pc.SetSockOpt,
+		&pc.GetSockOpt,
+		&pc.SendMsg,
+		&pc.RecvMsg,
+		&pc.SendPage,
+		&pc.Bind,
+		&pc.BacklogRcv,
+		&pc.Hash,
+		&pc.UnHash,
+		&pc.GetPort,
+		&pc.EnterMemoryPressure,
+	}
+
+	for i := 0; i < len(capabilities); i++ {
+		switch capabilities[i] {
+		case "y":
+			*capabilityFields[i] = true
+		case "n":
+			*capabilityFields[i] = false
+		default:
+			return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 0000000..deb7029
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+const (
+	blackholeRepresentation string = "*"
+	blackholeIfaceName      string = "blackhole"
+	routeLineColumns        int    = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+	Iface       string
+	Destination uint32
+	Gateway     uint32
+	Flags       uint32
+	RefCnt      uint32
+	Use         uint32
+	Metric      uint32
+	Mask        uint32
+	MTU         uint32
+	Window      uint32
+	IRTT        uint32
+}
+
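+// NetRoute returns the routing table entries parsed from net/route.
+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	routes, err := fs.NetRoute()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, rt := range routes {
+//		fmt.Println(rt.Iface, rt.Metric)
+//	}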
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+	return readNetRoute(fs.proc.Path("net", "route"))
+}
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+	b, err := util.ReadFileNoStat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	routelines, err := parseNetRoute(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+	}
+	return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+	var routelines []NetRouteLine
+
+	scanner := bufio.NewScanner(r)
+	scanner.Scan()
+	for scanner.Scan() {
+		fields := strings.Fields(scanner.Text())
+		routeline, err := parseNetRouteLine(fields)
+		if err != nil {
+			return nil, err
+		}
+		routelines = append(routelines, *routeline)
+	}
+	return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+	if len(fields) != routeLineColumns {
+		return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+	}
+	iface := fields[0]
+	if iface == blackholeRepresentation {
+		iface = blackholeIfaceName
+	}
+	destination, err := strconv.ParseUint(fields[1], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	gateway, err := strconv.ParseUint(fields[2], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	flags, err := strconv.ParseUint(fields[3], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	use, err := strconv.ParseUint(fields[5], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	metric, err := strconv.ParseUint(fields[6], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	mask, err := strconv.ParseUint(fields[7], 16, 32)
+	if err != nil {
+		return nil, err
+	}
+	mtu, err := strconv.ParseUint(fields[8], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	window, err := strconv.ParseUint(fields[9], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	irtt, err := strconv.ParseUint(fields[10], 10, 32)
+	if err != nil {
+		return nil, err
+	}
+	routeline := &NetRouteLine{
+		Iface:       iface,
+		Destination: uint32(destination),
+		Gateway:     uint32(gateway),
+		Flags:       uint32(flags),
+		RefCnt:      uint32(refcnt),
+		Use:         uint32(use),
+		Metric:      uint32(metric),
+		Mask:        uint32(mask),
+		MTU:         uint32(mtu),
+		Window:      uint32(window),
+		IRTT:        uint32(irtt),
+	}
+	return routeline, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
new file mode 100644
index 0000000..fae62b1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -0,0 +1,162 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// A NetSockstat contains the output of /proc/net/sockstat{,6} for IPv4 or IPv6,
+// respectively.
+type NetSockstat struct {
+	// Used is non-nil for IPv4 sockstat results, but nil for IPv6.
+	Used      *int
+	Protocols []NetSockstatProtocol
+}
+
+// A NetSockstatProtocol contains statistics about a given socket protocol.
+// Pointer fields indicate that the value may or may not be present on any
+// given protocol.
+type NetSockstatProtocol struct {
+	Protocol string
+	InUse    int
+	Orphan   *int
+	TW       *int
+	Alloc    *int
+	Mem      *int
+	Memory   *int
+}
+
+// NetSockstat retrieves IPv4 socket statistics.
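+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	stat, err := fs.NetSockstat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if stat.Used != nil {
+//		fmt.Println("sockets used:", *stat.Used)
+//	}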
+func (fs FS) NetSockstat() (*NetSockstat, error) {
+	return readSockstat(fs.proc.Path("net", "sockstat"))
+}
+
+// NetSockstat6 retrieves IPv6 socket statistics.
+//
+// If IPv6 is disabled on this kernel, the returned error can be checked with
+// os.IsNotExist.
+func (fs FS) NetSockstat6() (*NetSockstat, error) {
+	return readSockstat(fs.proc.Path("net", "sockstat6"))
+}
+
+// readSockstat opens and parses a NetSockstat from the input file.
+func readSockstat(name string) (*NetSockstat, error) {
+	// This file is small and can be read with one syscall.
+	b, err := util.ReadFileNoStat(name)
+	if err != nil {
+		// Do not wrap this error so the caller can detect os.IsNotExist and
+		// similar conditions.
+		return nil, err
+	}
+
+	stat, err := parseSockstat(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
+	}
+
+	return stat, nil
+}
+
+// parseSockstat reads the contents of a sockstat file and parses a NetSockstat.
+func parseSockstat(r io.Reader) (*NetSockstat, error) {
+	var stat NetSockstat
+	s := bufio.NewScanner(r)
+	for s.Scan() {
+		// Expect a minimum of a protocol and one key/value pair.
+		fields := strings.Split(s.Text(), " ")
+		if len(fields) < 3 {
+			return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
+		}
+
+		// The remaining fields are key/value pairs.
+		kvs, err := parseSockstatKVs(fields[1:])
+		if err != nil {
+			return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
+		}
+
+		// The first field is the protocol. We must trim its colon suffix.
+		proto := strings.TrimSuffix(fields[0], ":")
+		switch proto {
+		case "sockets":
+			// Special case: IPv4 has a sockets "used" key/value pair that we
+			// embed at the top level of the structure.
+			used := kvs["used"]
+			stat.Used = &used
+		default:
+			// Parse all other lines as individual protocols.
+			nsp := parseSockstatProtocol(kvs)
+			nsp.Protocol = proto
+			stat.Protocols = append(stat.Protocols, nsp)
+		}
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, err
+	}
+
+	return &stat, nil
+}
+
+// parseSockstatKVs parses a string slice into a map of key/value pairs.
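+// For example, for the line "TCP: inuse 5 orphan 0 tw 1" the key/value
+// fields are ["inuse" "5" "orphan" "0" "tw" "1"], which parse to
+// map[string]int{"inuse": 5, "orphan": 0, "tw": 1}.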
+func parseSockstatKVs(kvs []string) (map[string]int, error) {
+	if len(kvs)%2 != 0 {
+		return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
+	}
+
+	// Iterate two values at a time to gather key/value pairs.
+	out := make(map[string]int, len(kvs)/2)
+	for i := 0; i < len(kvs); i += 2 {
+		vp := util.NewValueParser(kvs[i+1])
+		out[kvs[i]] = vp.Int()
+
+		if err := vp.Err(); err != nil {
+			return nil, err
+		}
+	}
+
+	return out, nil
+}
+
+// parseSockstatProtocol parses a NetSockstatProtocol from the input kvs map.
+func parseSockstatProtocol(kvs map[string]int) NetSockstatProtocol {
+	var nsp NetSockstatProtocol
+	for k, v := range kvs {
+		// Capture the range variable to ensure we get unique pointers for
+		// each of the optional fields.
+		v := v
+		switch k {
+		case "inuse":
+			nsp.InUse = v
+		case "orphan":
+			nsp.Orphan = &v
+		case "tw":
+			nsp.TW = &v
+		case "alloc":
+			nsp.Alloc = &v
+		case "mem":
+			nsp.Mem = &v
+		case "memory":
+			nsp.Memory = &v
+		}
+	}
+
+	return nsp
+}
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
new file mode 100644
index 0000000..71c8059
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// For the proc file format details,
+// See:
+// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
+type SoftnetStat struct {
+	// Number of processed packets.
+	Processed uint32
+	// Number of dropped packets.
+	Dropped uint32
+	// Number of times processing packets ran out of quota.
+	TimeSqueezed uint32
+	// Number of collisions that occurred while obtaining the device lock during transmission.
+	CPUCollision uint32
+	// Number of times the CPU was woken up via received_rps.
+	ReceivedRps uint32
+	// Number of times the flow limit has been reached.
+	FlowLimitCount uint32
+	// Softnet backlog status.
+	SoftnetBacklogLen uint32
+	// CPU id owning this softnet_data.
+	Index uint32
+	// Width is the number of columns parsed from this row.
+	Width int
+}
+
+var softNetProcFile = "net/softnet_stat"
+
+// NetSoftnetStat reads data from /proc/net/softnet_stat.
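+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	entries, err := fs.NetSoftnetStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, e := range entries {
+//		fmt.Printf("cpu %d: processed=%d dropped=%d\n", e.Index, e.Processed, e.Dropped)
+//	}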
+func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path(softNetProcFile))
+	if err != nil {
+		return nil, err
+	}
+
+	entries, err := parseSoftnet(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
+	}
+
+	return entries, nil
+}
+
+func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
+	const minColumns = 9
+
+	s := bufio.NewScanner(r)
+
+	var stats []SoftnetStat
+	cpuIndex := 0
+	for s.Scan() {
+		columns := strings.Fields(s.Text())
+		width := len(columns)
+		softnetStat := SoftnetStat{}
+
+		if width < minColumns {
+			return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
+		}
+
+		// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+		if width >= minColumns {
+			us, err := parseHexUint32s(columns[0:9])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.Processed = us[0]
+			softnetStat.Dropped = us[1]
+			softnetStat.TimeSqueezed = us[2]
+			softnetStat.CPUCollision = us[8]
+		}
+
+		// Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+		if width >= 10 {
+			us, err := parseHexUint32s(columns[9:10])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.ReceivedRps = us[0]
+		}
+
+		// Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+		if width >= 11 {
+			us, err := parseHexUint32s(columns[10:11])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.FlowLimitCount = us[0]
+		}
+
+		// Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+		if width >= 13 {
+			us, err := parseHexUint32s(columns[11:13])
+			if err != nil {
+				return nil, err
+			}
+
+			softnetStat.SoftnetBacklogLen = us[0]
+			softnetStat.Index = us[1]
+		} else {
+			// For older kernels, create the Index based on the scan line number.
+			softnetStat.Index = uint32(cpuIndex)
+		}
+		softnetStat.Width = width
+		stats = append(stats, softnetStat)
+		cpuIndex++
+	}
+
+	return stats, nil
+}
+
+func parseHexUint32s(ss []string) ([]uint32, error) {
+	us := make([]uint32, 0, len(ss))
+	for _, s := range ss {
+		u, err := strconv.ParseUint(s, 16, 32)
+		if err != nil {
+			return nil, err
+		}
+
+		us = append(us, uint32(u))
+	}
+
+	return us, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
new file mode 100644
index 0000000..0396d72
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -0,0 +1,68 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+	// NetTCP represents the contents of /proc/net/tcp{,6} file without the header.
+	NetTCP []*netIPSocketLine
+
+	// NetTCPSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetTCP it does not collect
+	// the parsed lines into a slice.
+	NetTCPSummary NetIPSocketSummary
+)
+
+// NetTCP returns the IPv4 kernel/networking statistics for TCP sockets
+// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
+func (fs FS) NetTCP() (NetTCP, error) {
+	return newNetTCP(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6 returns the IPv6 kernel/networking statistics for TCP sockets
+// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
+func (fs FS) NetTCP6() (NetTCP, error) {
+	return newNetTCP(fs.proc.Path("net/tcp6"))
+}
+
+// NetTCPSummary returns already computed statistics like the total queue lengths
+// for TCP sockets read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
+func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
+	return newNetTCPSummary(fs.proc.Path("net/tcp"))
+}
+
+// NetTCP6Summary returns already computed statistics like the total queue lengths
+// for TCP sockets read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
+func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
+	return newNetTCPSummary(fs.proc.Path("net/tcp6"))
+}
+
+// newNetTCP creates a new NetTCP{,6} from the contents of the given file.
+func newNetTCP(file string) (NetTCP, error) {
+	n, err := newNetIPSocket(file)
+	n1 := NetTCP(n)
+	return n1, err
+}
+
+func newNetTCPSummary(file string) (*NetTCPSummary, error) {
+	n, err := newNetIPSocketSummary(file)
+	if n == nil {
+		return nil, err
+	}
+	n1 := NetTCPSummary(*n)
+	return &n1, err
+}
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 0000000..13994c1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+	// number of TX sessions currently installed where host handles cryptography
+	TLSCurrTxSw int
+	// number of RX sessions currently installed where host handles cryptography
+	TLSCurrRxSw int
+	// number of TX sessions currently installed where NIC handles cryptography
+	TLSCurrTxDevice int
+	// number of RX sessions currently installed where NIC handles cryptography
+	TLSCurrRxDevice int
+	// number of TX sessions opened with host cryptography
+	TLSTxSw int
+	// number of RX sessions opened with host cryptography
+	TLSRxSw int
+	// number of TX sessions opened with NIC cryptography
+	TLSTxDevice int
+	// number of RX sessions opened with NIC cryptography
+	TLSRxDevice int
+	// record decryption failed (e.g. due to incorrect authentication tag)
+	TLSDecryptError int
+	// number of RX resyncs sent to NICs handling cryptography
+	TLSRxDeviceResync int
+	// number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+	TLSDecryptRetry int
+	// number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+	TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
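+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	stat, err := NewTLSStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("TX sessions with host crypto:", stat.TLSTxSw)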
+func NewTLSStat() (TLSStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return TLSStat{}, err
+	}
+
+	return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics from the mounted proc filesystem.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+	file, err := os.Open(fs.proc.Path("net/tls_stat"))
+	if err != nil {
+		return TLSStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		tlsstat = TLSStat{}
+		s       = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return TLSStat{}, err
+		}
+
+		switch name {
+		case "TlsCurrTxSw":
+			tlsstat.TLSCurrTxSw = value
+		case "TlsCurrRxSw":
+			tlsstat.TLSCurrRxSw = value
+		case "TlsCurrTxDevice":
+			tlsstat.TLSCurrTxDevice = value
+		case "TlsCurrRxDevice":
+			tlsstat.TLSCurrRxDevice = value
+		case "TlsTxSw":
+			tlsstat.TLSTxSw = value
+		case "TlsRxSw":
+			tlsstat.TLSRxSw = value
+		case "TlsTxDevice":
+			tlsstat.TLSTxDevice = value
+		case "TlsRxDevice":
+			tlsstat.TLSRxDevice = value
+		case "TlsDecryptError":
+			tlsstat.TLSDecryptError = value
+		case "TlsRxDeviceResync":
+			tlsstat.TLSRxDeviceResync = value
+		case "TlsDecryptRetry":
+			tlsstat.TLSDecryptRetry = value
+		case "TlsRxNoPadViolation":
+			tlsstat.TLSRxNoPadViolation = value
+		}
+
+	}
+
+	return tlsstat, s.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/net_udp.go b/vendor/github.com/prometheus/procfs/net_udp.go
new file mode 100644
index 0000000..9ac3daf
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_udp.go
@@ -0,0 +1,64 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+type (
+	// NetUDP represents the contents of /proc/net/udp{,6} file without the header.
+	NetUDP []*netIPSocketLine
+
+	// NetUDPSummary provides already computed values like the total queue lengths or
+	// the total number of used sockets. In contrast to NetUDP it does not collect
+	// the parsed lines into a slice.
+	NetUDPSummary NetIPSocketSummary
+)
+
+// NetUDP returns the IPv4 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp.
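+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	socks, err := fs.NetUDP()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, s := range socks {
+//		fmt.Printf("%s:%d rx_queue=%d\n", s.LocalAddr, s.LocalPort, s.RxQueue)
+//	}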
+func (fs FS) NetUDP() (NetUDP, error) {
+	return newNetUDP(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6 returns the IPv6 kernel/networking statistics for UDP datagrams
+// read from /proc/net/udp6.
+func (fs FS) NetUDP6() (NetUDP, error) {
+	return newNetUDP(fs.proc.Path("net/udp6"))
+}
+
+// NetUDPSummary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp.
+func (fs FS) NetUDPSummary() (*NetUDPSummary, error) {
+	return newNetUDPSummary(fs.proc.Path("net/udp"))
+}
+
+// NetUDP6Summary returns already computed statistics like the total queue lengths
+// for UDP datagrams read from /proc/net/udp6.
+func (fs FS) NetUDP6Summary() (*NetUDPSummary, error) {
+	return newNetUDPSummary(fs.proc.Path("net/udp6"))
+}
+
+// newNetUDP creates a new NetUDP{,6} from the contents of the given file.
+func newNetUDP(file string) (NetUDP, error) {
+	n, err := newNetIPSocket(file)
+	n1 := NetUDP(n)
+	return n1, err
+}
+
+func newNetUDPSummary(file string) (*NetUDPSummary, error) {
+	n, err := newNetIPSocketSummary(file)
+	if n == nil {
+		return nil, err
+	}
+	n1 := NetUDPSummary(*n)
+	return &n1, err
+}
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
new file mode 100644
index 0000000..d7e0cac
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -0,0 +1,257 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// For the proc file format details,
+// see https://elixir.bootlin.com/linux/v4.17/source/net/unix/af_unix.c#L2815
+// and https://elixir.bootlin.com/linux/latest/source/include/uapi/linux/net.h#L48.
+
+// Constants for the various /proc/net/unix enumerations.
+// TODO: match against x/sys/unix or similar?
+const (
+	netUnixTypeStream    = 1
+	netUnixTypeDgram     = 2
+	netUnixTypeSeqpacket = 5
+
+	netUnixFlagDefault = 0
+	netUnixFlagListen  = 1 << 16
+
+	netUnixStateUnconnected  = 1
+	netUnixStateConnecting   = 2
+	netUnixStateConnected    = 3
+	netUnixStateDisconnected = 4
+)
+
+// NetUNIXType is the type of the type field.
+type NetUNIXType uint64
+
+// NetUNIXFlags is the type of the flags field.
+type NetUNIXFlags uint64
+
+// NetUNIXState is the type of the state field.
+type NetUNIXState uint64
+
+// NetUNIXLine represents a line of /proc/net/unix.
+type NetUNIXLine struct {
+	KernelPtr string
+	RefCount  uint64
+	Protocol  uint64
+	Flags     NetUNIXFlags
+	Type      NetUNIXType
+	State     NetUNIXState
+	Inode     uint64
+	Path      string
+}
+
+// NetUNIX holds the data read from /proc/net/unix.
+type NetUNIX struct {
+	Rows []*NetUNIXLine
+}
+
+// NetUNIX returns data read from /proc/net/unix.
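+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	nu, err := fs.NetUNIX()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, row := range nu.Rows {
+//		fmt.Println(row.Type, row.Path)
+//	}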
+func (fs FS) NetUNIX() (*NetUNIX, error) {
+	return readNetUNIX(fs.proc.Path("net/unix"))
+}
+
+// readNetUNIX reads data in /proc/net/unix format from the specified file.
+func readNetUNIX(file string) (*NetUNIX, error) {
+	// This file could be quite large and a streaming read is desirable versus
+	// reading the entire contents at once.
+	f, err := os.Open(file)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseNetUNIX(f)
+}
+
+// parseNetUNIX creates a NetUNIX structure from the incoming stream.
+func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
+	// Begin scanning by checking for the existence of Inode.
+	s := bufio.NewScanner(r)
+	s.Scan()
+
+	// The man page of proc(5) does not mention an Inode field, but in
+	// practice it exists. This code works for both cases.
+	hasInode := strings.Contains(s.Text(), "Inode")
+
+	// Expect a minimum number of fields, but Inode and Path are optional:
+	// Num       RefCount Protocol Flags    Type St Inode Path
+	minFields := 6
+	if hasInode {
+		minFields++
+	}
+
+	var nu NetUNIX
+	for s.Scan() {
+		line := s.Text()
+		item, err := nu.parseLine(line, hasInode, minFields)
+		if err != nil {
+			return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
+		}
+
+		nu.Rows = append(nu.Rows, item)
+	}
+
+	if err := s.Err(); err != nil {
+		return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
+	}
+
+	return &nu, nil
+}
+
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
+	fields := strings.Fields(line)
+
+	l := len(fields)
+	if l < minFields {
+		return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
+	}
+
+	// Field offsets are as follows:
+	// Num       RefCount Protocol Flags    Type St Inode Path
+
+	kernelPtr := strings.TrimSuffix(fields[0], ":")
+
+	users, err := u.parseUsers(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
+	}
+
+	flags, err := u.parseFlags(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
+	}
+
+	typ, err := u.parseType(fields[4])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
+	}
+
+	state, err := u.parseState(fields[5])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
+	}
+
+	var inode uint64
+	if hasInode {
+		inode, err = u.parseInode(fields[6])
+		if err != nil {
+			return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
+		}
+	}
+
+	n := &NetUNIXLine{
+		KernelPtr: kernelPtr,
+		RefCount:  users,
+		Type:      typ,
+		Flags:     flags,
+		State:     state,
+		Inode:     inode,
+	}
+
+	// Path field is optional.
+	if l > minFields {
+		// Path occurs at either index 6 or 7 depending on whether inode is
+		// already present.
+		pathIdx := 7
+		if !hasInode {
+			pathIdx--
+		}
+
+		n.Path = fields[pathIdx]
+	}
+
+	return n, nil
+}
+
+func (u NetUNIX) parseUsers(s string) (uint64, error) {
+	return strconv.ParseUint(s, 16, 32)
+}
+
+func (u NetUNIX) parseType(s string) (NetUNIXType, error) {
+	typ, err := strconv.ParseUint(s, 16, 16)
+	if err != nil {
+		return 0, err
+	}
+
+	return NetUNIXType(typ), nil
+}
+
+func (u NetUNIX) parseFlags(s string) (NetUNIXFlags, error) {
+	flags, err := strconv.ParseUint(s, 16, 32)
+	if err != nil {
+		return 0, err
+	}
+
+	return NetUNIXFlags(flags), nil
+}
+
+func (u NetUNIX) parseState(s string) (NetUNIXState, error) {
+	st, err := strconv.ParseInt(s, 16, 8)
+	if err != nil {
+		return 0, err
+	}
+
+	return NetUNIXState(st), nil
+}
+
+func (u NetUNIX) parseInode(s string) (uint64, error) {
+	return strconv.ParseUint(s, 10, 64)
+}
+
+func (t NetUNIXType) String() string {
+	switch t {
+	case netUnixTypeStream:
+		return "stream"
+	case netUnixTypeDgram:
+		return "dgram"
+	case netUnixTypeSeqpacket:
+		return "seqpacket"
+	}
+	return "unknown"
+}
+
+func (f NetUNIXFlags) String() string {
+	switch f {
+	case netUnixFlagListen:
+		return "listen"
+	default:
+		return "default"
+	}
+}
+
+func (s NetUNIXState) String() string {
+	switch s {
+	case netUnixStateUnconnected:
+		return "unconnected"
+	case netUnixStateConnecting:
+		return "connecting"
+	case netUnixStateConnected:
+		return "connected"
+	case netUnixStateDisconnected:
+		return "disconnected"
+	}
+	return "unknown"
+}
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 0000000..7c597bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+	Name string
+
+	// Status is the current 4-digit hex value status of the interface.
+	Status uint64
+
+	// QualityLink is the link quality.
+	QualityLink int
+
+	// QualityLevel is the signal level (dBm).
+	QualityLevel int
+
+	// QualityNoise is the signal noise baseline (dBm).
+	QualityNoise int
+
+	// DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+	DiscardedNwid int
+
+	// DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+	DiscardedCrypt int
+
+	// DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+	DiscardedFrag int
+
+	// DiscardedRetry is the number of discarded packets that reached max MAC retries.
+	DiscardedRetry int
+
+	// DiscardedMisc is the number of discarded packets for other reasons.
+	DiscardedMisc int
+
+	// MissedBeacon is the number of missed beacons/superframe.
+	MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
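+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ifaces, err := fs.Wireless()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, w := range ifaces {
+//		fmt.Printf("%s: link=%d level=%d dBm\n", w.Name, w.QualityLink, w.QualityLevel)
+//	}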
+func (fs FS) Wireless() ([]*Wireless, error) {
+	b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+	if err != nil {
+		return nil, err
+	}
+
+	m, err := parseWireless(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+	}
+
+	return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-|   Quality        |   Discarded packets               | Missed | WE
+face | tus | link level noise |  nwid  crypt   frag  retry   misc | beacon | 22
+ eth1: 0000    5.  -256.  -10.       0      1      0     3      0        0
+ eth2: 0000    5.  -256.  -20.       0      2      0     4      0        0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+	var (
+		interfaces []*Wireless
+		scanner    = bufio.NewScanner(r)
+	)
+
+	for n := 0; scanner.Scan(); n++ {
+		// Skip the 2 header lines.
+		if n < 2 {
+			continue
+		}
+
+		line := scanner.Text()
+
+		parts := strings.Split(line, ":")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+		}
+
+		name := strings.TrimSpace(parts[0])
+		stats := strings.Fields(parts[1])
+
+		if len(stats) < 10 {
+			return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+		}
+
+		status, err := strconv.ParseUint(stats[0], 16, 16)
+		if err != nil {
+			return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+		}
+
+		qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+		}
+
+		qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+		}
+
+		qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+		if err != nil {
+			return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+		}
+
+		dnwid, err := strconv.Atoi(stats[4])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+		}
+
+		dcrypt, err := strconv.Atoi(stats[5])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+		}
+
+		dfrag, err := strconv.Atoi(stats[6])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+		}
+
+		dretry, err := strconv.Atoi(stats[7])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+		}
+
+		dmisc, err := strconv.Atoi(stats[8])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+		}
+
+		mbeacon, err := strconv.Atoi(stats[9])
+		if err != nil {
+			return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+		}
+
+		w := &Wireless{
+			Name:           name,
+			Status:         status,
+			QualityLink:    qlink,
+			QualityLevel:   qlevel,
+			QualityNoise:   qnoise,
+			DiscardedNwid:  dnwid,
+			DiscardedCrypt: dcrypt,
+			DiscardedFrag:  dfrag,
+			DiscardedRetry: dretry,
+			DiscardedMisc:  dmisc,
+			MissedBeacon:   mbeacon,
+		}
+
+		interfaces = append(interfaces, w)
+	}
+
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+	}
+
+	return interfaces, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
new file mode 100644
index 0000000..932ef20
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -0,0 +1,189 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+	// All errors which are not matched by others
+	XfrmInError int
+	// No buffer is left
+	XfrmInBufferError int
+	// Header Error
+	XfrmInHdrError int
+	// No state found
+	// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+	XfrmInNoStates int
+	// Transformation protocol specific error
+	// e.g. SA Key is wrong
+	XfrmInStateProtoError int
+	// Transformation mode specific error
+	XfrmInStateModeError int
+	// Sequence error
+	// e.g. sequence number is out of window
+	XfrmInStateSeqError int
+	// State is expired
+	XfrmInStateExpired int
+	// State has mismatch option
+	// e.g. UDP encapsulation type is mismatched
+	XfrmInStateMismatch int
+	// State is invalid
+	XfrmInStateInvalid int
+	// No matching template for states
+	// e.g. Inbound SAs are correct but SP rule is wrong
+	XfrmInTmplMismatch int
+	// No policy is found for states
+	// e.g. Inbound SAs are correct but no SP is found
+	XfrmInNoPols int
+	// Policy discards
+	XfrmInPolBlock int
+	// Policy error
+	XfrmInPolError int
+	// All errors which are not matched by others
+	XfrmOutError int
+	// Bundle generation error
+	XfrmOutBundleGenError int
+	// Bundle check error
+	XfrmOutBundleCheckError int
+	// No state was found
+	XfrmOutNoStates int
+	// Transformation protocol specific error
+	XfrmOutStateProtoError int
+	// Transformation mode specific error
+	XfrmOutStateModeError int
+	// Sequence error
+	// i.e. sequence number overflow
+	XfrmOutStateSeqError int
+	// State is expired
+	XfrmOutStateExpired int
+	// Policy discards
+	XfrmOutPolBlock int
+	// Policy is dead
+	XfrmOutPolDead int
+	// Policy Error
+	XfrmOutPolError int
+	// Forward routing of a packet is not allowed
+	XfrmFwdHdrError int
+	// State is invalid, perhaps expired
+	XfrmOutStateInvalid int
+	// State hasn’t been fully acquired before use
+	XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
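+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller):
+//
+//	stat, err := NewXfrmStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("XfrmInError:", stat.XfrmInError)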
+func NewXfrmStat() (XfrmStat, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return XfrmStat{}, err
+	}
+
+	return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+	file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
+	if err != nil {
+		return XfrmStat{}, err
+	}
+	defer file.Close()
+
+	var (
+		x = XfrmStat{}
+		s = bufio.NewScanner(file)
+	)
+
+	for s.Scan() {
+		fields := strings.Fields(s.Text())
+
+		if len(fields) != 2 {
+			return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+		}
+
+		name := fields[0]
+		value, err := strconv.Atoi(fields[1])
+		if err != nil {
+			return XfrmStat{}, err
+		}
+
+		switch name {
+		case "XfrmInError":
+			x.XfrmInError = value
+		case "XfrmInBufferError":
+			x.XfrmInBufferError = value
+		case "XfrmInHdrError":
+			x.XfrmInHdrError = value
+		case "XfrmInNoStates":
+			x.XfrmInNoStates = value
+		case "XfrmInStateProtoError":
+			x.XfrmInStateProtoError = value
+		case "XfrmInStateModeError":
+			x.XfrmInStateModeError = value
+		case "XfrmInStateSeqError":
+			x.XfrmInStateSeqError = value
+		case "XfrmInStateExpired":
+			x.XfrmInStateExpired = value
+		case "XfrmInStateInvalid":
+			x.XfrmInStateInvalid = value
+		case "XfrmInTmplMismatch":
+			x.XfrmInTmplMismatch = value
+		case "XfrmInNoPols":
+			x.XfrmInNoPols = value
+		case "XfrmInPolBlock":
+			x.XfrmInPolBlock = value
+		case "XfrmInPolError":
+			x.XfrmInPolError = value
+		case "XfrmOutError":
+			x.XfrmOutError = value
+		case "XfrmInStateMismatch":
+			x.XfrmInStateMismatch = value
+		case "XfrmOutBundleGenError":
+			x.XfrmOutBundleGenError = value
+		case "XfrmOutBundleCheckError":
+			x.XfrmOutBundleCheckError = value
+		case "XfrmOutNoStates":
+			x.XfrmOutNoStates = value
+		case "XfrmOutStateProtoError":
+			x.XfrmOutStateProtoError = value
+		case "XfrmOutStateModeError":
+			x.XfrmOutStateModeError = value
+		case "XfrmOutStateSeqError":
+			x.XfrmOutStateSeqError = value
+		case "XfrmOutStateExpired":
+			x.XfrmOutStateExpired = value
+		case "XfrmOutPolBlock":
+			x.XfrmOutPolBlock = value
+		case "XfrmOutPolDead":
+			x.XfrmOutPolDead = value
+		case "XfrmOutPolError":
+			x.XfrmOutPolError = value
+		case "XfrmFwdHdrError":
+			x.XfrmFwdHdrError = value
+		case "XfrmOutStateInvalid":
+			x.XfrmOutStateInvalid = value
+		case "XfrmAcquireError":
+			x.XfrmAcquireError = value
+		}
+
+	}
+
+	return x, s.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
new file mode 100644
index 0000000..742dff4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// NetStat contains statistics for all the counters from one file.
+type NetStat struct {
+	Stats    map[string][]uint64
+	Filename string
+}
+
+// NetStat retrieves stats from `/proc/net/stat/`.
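+//
+// A usage sketch (assumes /proc mounted at DefaultMountPoint; fmt and log are
+// imports of the caller, and the stat file names, e.g. "nf_conntrack",
+// depend on the running kernel):
+//
+//	fs, err := NewFS(DefaultMountPoint)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	stats, err := fs.NetStat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, ns := range stats {
+//		fmt.Println(ns.Filename, len(ns.Stats), "counters")
+//	}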
+func (fs FS) NetStat() ([]NetStat, error) {
+	statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
+	if err != nil {
+		return nil, err
+	}
+
+	var netStatsTotal []NetStat
+
+	for _, filePath := range statFiles {
+		procNetstat, err := parseNetstat(filePath)
+		if err != nil {
+			return nil, err
+		}
+		procNetstat.Filename = filepath.Base(filePath)
+
+		netStatsTotal = append(netStatsTotal, procNetstat)
+	}
+	return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from one file under `/proc/net/stat/`
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+	netStat := NetStat{
+		Stats: make(map[string][]uint64),
+	}
+	file, err := os.Open(filePath)
+	if err != nil {
+		return netStat, err
+	}
+	defer file.Close()
+
+	scanner := bufio.NewScanner(file)
+	scanner.Scan()
+
+	// The first line is always the header for the stats that follow.
+	headers := strings.Fields(scanner.Text())
+
+	// Other strings represent per-CPU counters
+	for scanner.Scan() {
+		for num, counter := range strings.Fields(scanner.Text()) {
+			value, err := strconv.ParseUint(counter, 16, 64)
+			if err != nil {
+				return NetStat{}, err
+			}
+			netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
+		}
+	}
+
+	return netStat, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
new file mode 100644
index 0000000..368187f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -0,0 +1,338 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Proc provides information about a running process.
+type Proc struct {
+	// The process ID.
+	PID int
+
+	fs FS
+}
+
+// Procs represents a list of Proc structs.
+type Procs []Proc
+
+var (
+	ErrFileParse  = errors.New("error parsing file")
+	ErrFileRead   = errors.New("error reading file")
+	ErrMountPoint = errors.New("error accessing mount point")
+)
+
+func (p Procs) Len() int           { return len(p) }
+func (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
+
+// Self returns a process for the current process read via /proc/self.
+func Self() (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil || errors.Unwrap(err) == ErrMountPoint {
+		return Proc{}, err
+	}
+	return fs.Self()
+}
+
+// NewProc returns a process for the given pid under /proc.
+func NewProc(pid int) (Proc, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Proc(pid)
+}
+
+// AllProcs returns a list of all currently available processes under /proc.
+func AllProcs() (Procs, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Procs{}, err
+	}
+	return fs.AllProcs()
+}
+
+// Self returns a process for the current process.
+func (fs FS) Self() (Proc, error) {
+	p, err := os.Readlink(fs.proc.Path("self"))
+	if err != nil {
+		return Proc{}, err
+	}
+	pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
+	if err != nil {
+		return Proc{}, err
+	}
+	return fs.Proc(pid)
+}
+
+// NewProc returns a process for the given pid.
+//
+// Deprecated: Use fs.Proc() instead.
+func (fs FS) NewProc(pid int) (Proc, error) {
+	return fs.Proc(pid)
+}
+
+// Proc returns a process for the given pid.
+func (fs FS) Proc(pid int) (Proc, error) {
+	if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: pid, fs: fs}, nil
+}
+
+// AllProcs returns a list of all currently available processes.
+func (fs FS) AllProcs() (Procs, error) {
+	d, err := os.Open(fs.proc.Path())
+	if err != nil {
+		return Procs{}, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return Procs{}, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
+	}
+
+	p := Procs{}
+	for _, n := range names {
+		pid, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			continue
+		}
+		p = append(p, Proc{PID: int(pid), fs: fs})
+	}
+
+	return p, nil
+}
+
+// CmdLine returns the command line of a process.
+func (p Proc) CmdLine() ([]string, error) {
+	data, err := util.ReadFileNoStat(p.path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(data) < 1 {
+		return []string{}, nil
+	}
+
+	return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
+}
+
+// Wchan returns the wchan (wait channel) of a process.
+func (p Proc) Wchan() (string, error) {
+	f, err := os.Open(p.path("wchan"))
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+
+	data, err := io.ReadAll(f)
+	if err != nil {
+		return "", err
+	}
+
+	wchan := string(data)
+	if wchan == "" || wchan == "0" {
+		return "", nil
+	}
+
+	return wchan, nil
+}
+
+// Comm returns the command name of a process.
+func (p Proc) Comm() (string, error) {
+	data, err := util.ReadFileNoStat(p.path("comm"))
+	if err != nil {
+		return "", err
+	}
+
+	return strings.TrimSpace(string(data)), nil
+}
+
+// Executable returns the absolute path of the executable command of a process.
+func (p Proc) Executable() (string, error) {
+	exe, err := os.Readlink(p.path("exe"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return exe, err
+}
+
+// Cwd returns the absolute path to the current working directory of the process.
+func (p Proc) Cwd() (string, error) {
+	wd, err := os.Readlink(p.path("cwd"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return wd, err
+}
+
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
+func (p Proc) RootDir() (string, error) {
+	rdir, err := os.Readlink(p.path("root"))
+	if os.IsNotExist(err) {
+		return "", nil
+	}
+
+	return rdir, err
+}
+
+// FileDescriptors returns the currently open file descriptors of a process.
+func (p Proc) FileDescriptors() ([]uintptr, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	fds := make([]uintptr, len(names))
+	for i, n := range names {
+		fd, err := strconv.ParseInt(n, 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
+		}
+		fds[i] = uintptr(fd)
+	}
+
+	return fds, nil
+}
+
+// FileDescriptorTargets returns the targets of all file descriptors of a process.
+// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.
+func (p Proc) FileDescriptorTargets() ([]string, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	targets := make([]string, len(names))
+
+	for i, name := range names {
+		target, err := os.Readlink(p.path("fd", name))
+		if err == nil {
+			targets[i] = target
+		}
+	}
+
+	return targets, nil
+}
+
+// FileDescriptorsLen returns the number of currently open file descriptors of
+// a process.
+func (p Proc) FileDescriptorsLen() (int, error) {
+	// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+	if p.fs.isReal {
+		stat, err := os.Stat(p.path("fd"))
+		if err != nil {
+			return 0, err
+		}
+
+		size := stat.Size()
+		if size > 0 {
+			return int(size), nil
+		}
+	}
+
+	fds, err := p.fileDescriptors()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(fds), nil
+}
+
+// MountStats retrieves statistics and configuration for mount points in a
+// process's namespace.
+func (p Proc) MountStats() ([]*Mount, error) {
+	f, err := os.Open(p.path("mountstats"))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseMountStats(f)
+}
+
+// MountInfo retrieves mount information for mount points in a
+// process's namespace.
+// It supplies information missing in `/proc/self/mounts` and
+// fixes various other problems with that file too.
+func (p Proc) MountInfo() ([]*MountInfo, error) {
+	data, err := util.ReadFileNoStat(p.path("mountinfo"))
+	if err != nil {
+		return nil, err
+	}
+	return parseMountInfo(data)
+}
+
+func (p Proc) fileDescriptors() ([]string, error) {
+	d, err := os.Open(p.path("fd"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
+	}
+
+	return names, nil
+}
+
+func (p Proc) path(pa ...string) string {
+	return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+}
+
+// FileDescriptorsInfo retrieves information about all file descriptors of
+// the process.
+func (p Proc) FileDescriptorsInfo() (ProcFDInfos, error) {
+	names, err := p.fileDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	var fdinfos ProcFDInfos
+
+	for _, n := range names {
+		fdinfo, err := p.FDInfo(n)
+		if err != nil {
+			continue
+		}
+		fdinfos = append(fdinfos, *fdinfo)
+	}
+
+	return fdinfos, nil
+}
+
+// Schedstat returns task scheduling information for the process.
+func (p Proc) Schedstat() (ProcSchedstat, error) {
+	contents, err := os.ReadFile(p.path("schedstat"))
+	if err != nil {
+		return ProcSchedstat{}, err
+	}
+	return parseProcSchedstat(string(contents))
+}
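A small sketch of the Proc accessors defined above, reading the calling process via /proc/self (assumes a Linux host; FileDescriptorsLen takes the stat-based fast path only on kernels >= 6.2):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // resolves /proc/self to a Proc
	if err != nil {
		log.Fatal(err)
	}
	cmdline, err := p.CmdLine()
	if err != nil {
		log.Fatal(err)
	}
	nfds, err := p.FileDescriptorsLen()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("pid=%d cmdline=%v open_fds=%d\n", p.PID, cmdline, nfds)
}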
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
new file mode 100644
index 0000000..4a64347
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -0,0 +1,98 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2; v1 has one hierarchy per available resource
+// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
+// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
+// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
+// *this specific* hierarchy, you can locate the relevant pseudo-files needed to read/set the data for this PID
+// in this hierarchy.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type Cgroup struct {
+	// HierarchyID that can be matched to a named hierarchy using /proc/cgroups. Cgroups V2 only has one
+	// hierarchy, so HierarchyID is always 0. For cgroups v1 this is a unique ID number
+	HierarchyID int
+	// Controllers using this hierarchy of processes. Controllers are also known as subsystems. For
+	// Cgroups V2 this may be empty, as all active controllers use the same hierarchy
+	Controllers []string
+	// Path of this control group, relative to the mount point of the cgroupfs representing this specific
+	// hierarchy
+	Path string
+}
+
+// parseCgroupString parses each line of the /proc/[pid]/cgroup file
+// Line format is hierarchyID:[controller1,controller2]:path.
+func parseCgroupString(cgroupStr string) (*Cgroup, error) {
+	var err error
+
+	fields := strings.SplitN(cgroupStr, ":", 3)
+	if len(fields) < 3 {
+		return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
+	}
+
+	cgroup := &Cgroup{
+		Path:        fields[2],
+		Controllers: nil,
+	}
+	cgroup.HierarchyID, err = strconv.Atoi(fields[0])
+	if err != nil {
+		return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
+	}
+	if fields[1] != "" {
+		ssNames := strings.Split(fields[1], ",")
+		cgroup.Controllers = append(cgroup.Controllers, ssNames...)
+	}
+	return cgroup, nil
+}
+
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
+func parseCgroups(data []byte) ([]Cgroup, error) {
+	var cgroups []Cgroup
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		mountString := scanner.Text()
+		parsedMounts, err := parseCgroupString(mountString)
+		if err != nil {
+			return nil, err
+		}
+		cgroups = append(cgroups, *parsedMounts)
+	}
+
+	err := scanner.Err()
+	return cgroups, err
+}
+
+// Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
+// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
+func (p Proc) Cgroups() ([]Cgroup, error) {
+	data, err := util.ReadFileNoStat(p.path("cgroup"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroups(data)
+}
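A short usage sketch for Proc.Cgroups (assumes a Linux host with cgroupfs mounted; on a pure cgroup v2 host a single entry with HierarchyID 0 is expected):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n",
			cg.HierarchyID, cg.Controllers, cg.Path)
	}
}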
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 0000000..5dd4938
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+	// The name of the controller. A controller is also known as a subsystem.
+	SubsysName string
+	// The unique ID of the cgroup hierarchy on which this controller is mounted.
+	Hierarchy int
+	// The number of control groups in this hierarchy using this controller.
+	Cgroups int
+	// This field contains the value 1 if this controller is enabled, or 0 if it has been disabled
+	Enabled int
+}
+
+// parseCgroupSummaryString parses one line of the /proc/cgroups file.
+// Line format is `subsys_name	hierarchy	num_cgroups	enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+	var err error
+
+	fields := strings.Fields(CgroupSummaryStr)
+	// require at least 4 fields
+	if len(fields) < 4 {
+		return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
+	}
+
+	CgroupSummary := &CgroupSummary{
+		SubsysName: fields[0],
+	}
+	CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
+	}
+	CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
+	}
+	CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
+	}
+	return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroups file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+	var CgroupSummarys []CgroupSummary
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		CgroupSummaryString := scanner.Text()
+		// ignore comment lines
+		if strings.HasPrefix(CgroupSummaryString, "#") {
+			continue
+		}
+		CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+		if err != nil {
+			return nil, err
+		}
+		CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+	}
+
+	err := scanner.Err()
+	return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+	if err != nil {
+		return nil, err
+	}
+	return parseCgroupSummary(data)
+}
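And a sketch of the matching filesystem-level call, FS.CgroupSummarys, which reports the controllers compiled into the running kernel (assumes the default /proc mount):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		fmt.Printf("%s: hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}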
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
new file mode 100644
index 0000000..57a8989
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -0,0 +1,37 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Environ reads process environments from `/proc/<pid>/environ`.
+func (p Proc) Environ() ([]string, error) {
+	environments := make([]string, 0)
+
+	data, err := util.ReadFileNoStat(p.path("environ"))
+	if err != nil {
+		return environments, err
+	}
+
+	environments = strings.Split(string(data), "\000")
+	if len(environments) > 0 {
+		environments = environments[:len(environments)-1]
+	}
+
+	return environments, nil
+}
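A minimal sketch of Proc.Environ; note that reading another PID's environ file generally requires ptrace-level privileges, so the example sticks to /proc/self:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	env, err := p.Environ() // one "KEY=value" string per variable
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d environment variables\n", len(env))
}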
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
new file mode 100644
index 0000000..fa761b3
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -0,0 +1,138 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"regexp"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	rPos          = regexp.MustCompile(`^pos:\s+(\d+)$`)
+	rFlags        = regexp.MustCompile(`^flags:\s+(\d+)$`)
+	rMntID        = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+	rIno          = regexp.MustCompile(`^ino:\s+(\d+)$`)
+	rInotify      = regexp.MustCompile(`^inotify`)
+	rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
+)
+
+// ProcFDInfo represents file descriptor information.
+type ProcFDInfo struct {
+	// File descriptor
+	FD string
+	// File offset
+	Pos string
+	// File access mode and status flags
+	Flags string
+	// Mount point ID
+	MntID string
+	// Inode number
+	Ino string
+	// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
+	InotifyInfos []InotifyInfo
+}
+
+// FDInfo constructor. On kernels older than 3.8, InotifyInfos will always be empty.
+func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
+	data, err := util.ReadFileNoStat(p.path("fdinfo", fd))
+	if err != nil {
+		return nil, err
+	}
+
+	var text, pos, flags, mntid, ino string
+	var inotify []InotifyInfo
+
+	scanner := bufio.NewScanner(bytes.NewReader(data))
+	for scanner.Scan() {
+		text = scanner.Text()
+		if rPos.MatchString(text) {
+			pos = rPos.FindStringSubmatch(text)[1]
+		} else if rFlags.MatchString(text) {
+			flags = rFlags.FindStringSubmatch(text)[1]
+		} else if rMntID.MatchString(text) {
+			mntid = rMntID.FindStringSubmatch(text)[1]
+		} else if rIno.MatchString(text) {
+			ino = rIno.FindStringSubmatch(text)[1]
+		} else if rInotify.MatchString(text) {
+			newInotify, err := parseInotifyInfo(text)
+			if err != nil {
+				return nil, err
+			}
+			inotify = append(inotify, *newInotify)
+		}
+	}
+
+	i := &ProcFDInfo{
+		FD:           fd,
+		Pos:          pos,
+		Flags:        flags,
+		MntID:        mntid,
+		Ino:          ino,
+		InotifyInfos: inotify,
+	}
+
+	return i, nil
+}
+
+// InotifyInfo represents a single inotify line in the fdinfo file.
+type InotifyInfo struct {
+	// Watch descriptor number
+	WD string
+	// Inode number
+	Ino string
+	// Device ID
+	Sdev string
+	// Mask of events being monitored
+	Mask string
+}
+
+// InotifyInfo constructor. Only available on kernel 3.8+.
+func parseInotifyInfo(line string) (*InotifyInfo, error) {
+	m := rInotifyParts.FindStringSubmatch(line)
+	if len(m) >= 4 {
+		var mask string
+		if len(m) == 5 {
+			mask = m[4]
+		}
+		i := &InotifyInfo{
+			WD:   m[1],
+			Ino:  m[2],
+			Sdev: m[3],
+			Mask: mask,
+		}
+		return i, nil
+	}
+	return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
+}
+
+// ProcFDInfos represents a list of ProcFDInfo structs.
+type ProcFDInfos []ProcFDInfo
+
+func (p ProcFDInfos) Len() int           { return len(p) }
+func (p ProcFDInfos) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
+
+// InotifyWatchLen returns the total number of inotify watches.
+func (p ProcFDInfos) InotifyWatchLen() (int, error) {
+	length := 0
+	for _, f := range p {
+		length += len(f.InotifyInfos)
+	}
+
+	return length, nil
+}
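A usage sketch combining Proc.FileDescriptorsInfo (from proc.go above) with the fdinfo parsing added here; on kernels older than 3.8 the inotify counts are simply zero:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	fdinfos, err := p.FileDescriptorsInfo() // one ProcFDInfo per open fd
	if err != nil {
		log.Fatal(err)
	}
	watches, _ := fdinfos.InotifyWatchLen() // total inotify watches across all fds
	fmt.Printf("fds=%d inotify_watches=%d\n", len(fdinfos), watches)
}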
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 0000000..86b4b45
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+	// Info is the type of interrupt.
+	Info string
+	// Devices is the name of the device that is located at that IRQ
+	Devices string
+	// Values is the number of interrupts per CPU.
+	Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+	data, err := util.ReadFileNoStat(p.path("interrupts"))
+	if err != nil {
+		return nil, err
+	}
+	return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+	var (
+		interrupts = Interrupts{}
+		scanner    = bufio.NewScanner(r)
+	)
+
+	if !scanner.Scan() {
+		return nil, errors.New("interrupts empty")
+	}
+	cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+	for scanner.Scan() {
+		parts := strings.Fields(scanner.Text())
+		if len(parts) == 0 { // skip empty lines
+			continue
+		}
+		if len(parts) < 2 {
+			return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+		}
+		intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+		if len(parts) == 2 {
+			interrupts[intName] = Interrupt{
+				Info:    "",
+				Devices: "",
+				Values: []string{
+					parts[1],
+				},
+			}
+			continue
+		}
+
+		intr := Interrupt{
+			Values: parts[1 : cpuNum+1],
+		}
+
+		if _, err := strconv.Atoi(intName); err == nil { // numeral interrupt
+			intr.Info = parts[cpuNum+1]
+			intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+		} else {
+			intr.Info = strings.Join(parts[cpuNum+1:], " ")
+		}
+		interrupts[intName] = intr
+	}
+
+	return interrupts, scanner.Err()
+}
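A brief sketch of Proc.Interrupts; the map key is the IRQ number (or a symbolic name such as NMI), and Values holds one count per CPU column from the header line:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	for irq, i := range interrupts {
		fmt.Printf("%s: info=%q devices=%q per_cpu=%v\n", irq, i.Info, i.Devices, i.Values)
	}
}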
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
new file mode 100644
index 0000000..d15b66d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -0,0 +1,59 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcIO models the content of /proc/<pid>/io.
+type ProcIO struct {
+	// Chars read.
+	RChar uint64
+	// Chars written.
+	WChar uint64
+	// Read syscalls.
+	SyscR uint64
+	// Write syscalls.
+	SyscW uint64
+	// Bytes read.
+	ReadBytes uint64
+	// Bytes written.
+	WriteBytes uint64
+	// Bytes written, but taking into account truncation. See
+	// Documentation/filesystems/proc.txt in the kernel sources for
+	// detailed explanation.
+	CancelledWriteBytes int64
+}
+
+// IO creates a new ProcIO instance from a given Proc instance.
+func (p Proc) IO() (ProcIO, error) {
+	pio := ProcIO{}
+
+	data, err := util.ReadFileNoStat(p.path("io"))
+	if err != nil {
+		return pio, err
+	}
+
+	ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
+		"read_bytes: %d\nwrite_bytes: %d\n" +
+		"cancelled_write_bytes: %d\n" //nolint:misspell
+
+	_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
+		&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
+
+	return pio, err
+}
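A minimal sketch of Proc.IO; reading another process's io file typically requires the same privileges as ptrace, so /proc/self is used here:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	pio, err := p.IO()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("read_bytes=%d write_bytes=%d syscr=%d syscw=%d\n",
		pio.ReadBytes, pio.WriteBytes, pio.SyscR, pio.SyscW)
}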
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
new file mode 100644
index 0000000..9530b14
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -0,0 +1,160 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+// ProcLimits represents the soft limits for each of the process's resource
+// limits. For more information see getrlimit(2):
+// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
+type ProcLimits struct {
+	// CPU time limit in seconds.
+	CPUTime uint64
+	// Maximum size of files that the process may create.
+	FileSize uint64
+	// Maximum size of the process's data segment (initialized data,
+	// uninitialized data, and heap).
+	DataSize uint64
+	// Maximum size of the process stack in bytes.
+	StackSize uint64
+	// Maximum size of a core file.
+	CoreFileSize uint64
+	// Limit of the process's resident set in pages.
+	ResidentSet uint64
+	// Maximum number of processes that can be created for the real user ID of
+	// the calling process.
+	Processes uint64
+	// Value one greater than the maximum file descriptor number that can be
+	// opened by this process.
+	OpenFiles uint64
+	// Maximum number of bytes of memory that may be locked into RAM.
+	LockedMemory uint64
+	// Maximum size of the process's virtual memory address space in bytes.
+	AddressSpace uint64
+	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
+	// this process may establish.
+	FileLocks uint64
+	// Limit of signals that may be queued for the real user ID of the calling
+	// process.
+	PendingSignals uint64
+	// Limit on the number of bytes that can be allocated for POSIX message
+	// queues for the real user ID of the calling process.
+	MsqqueueSize uint64
+	// Limit of the nice priority set using setpriority(2) or nice(2).
+	NicePriority uint64
+	// Limit of the real-time priority set using sched_setscheduler(2) or
+	// sched_setparam(2).
+	RealtimePriority uint64
+	// Limit (in microseconds) on the amount of CPU time that a process
+	// scheduled under a real-time scheduling policy may consume without making
+	// a blocking system call.
+	RealtimeTimeout uint64
+}
+
+const (
+	limitsFields    = 4
+	limitsUnlimited = "unlimited"
+)
+
+var (
+	limitsMatch = regexp.MustCompile(`(Max \w+\s{0,1}?\w*\s{0,1}\w*)\s{2,}(\w+)\s+(\w+)`)
+)
+
+// NewLimits returns the current soft limits of the process.
+//
+// Deprecated: Use p.Limits() instead.
+func (p Proc) NewLimits() (ProcLimits, error) {
+	return p.Limits()
+}
+
+// Limits returns the current soft limits of the process.
+func (p Proc) Limits() (ProcLimits, error) {
+	f, err := os.Open(p.path("limits"))
+	if err != nil {
+		return ProcLimits{}, err
+	}
+	defer f.Close()
+
+	var (
+		l = ProcLimits{}
+		s = bufio.NewScanner(f)
+	)
+
+	s.Scan() // Skip limits header
+
+	for s.Scan() {
+		fields := limitsMatch.FindStringSubmatch(s.Text())
+		if len(fields) != limitsFields {
+			return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
+		}
+
+		switch fields[1] {
+		case "Max cpu time":
+			l.CPUTime, err = parseUint(fields[2])
+		case "Max file size":
+			l.FileSize, err = parseUint(fields[2])
+		case "Max data size":
+			l.DataSize, err = parseUint(fields[2])
+		case "Max stack size":
+			l.StackSize, err = parseUint(fields[2])
+		case "Max core file size":
+			l.CoreFileSize, err = parseUint(fields[2])
+		case "Max resident set":
+			l.ResidentSet, err = parseUint(fields[2])
+		case "Max processes":
+			l.Processes, err = parseUint(fields[2])
+		case "Max open files":
+			l.OpenFiles, err = parseUint(fields[2])
+		case "Max locked memory":
+			l.LockedMemory, err = parseUint(fields[2])
+		case "Max address space":
+			l.AddressSpace, err = parseUint(fields[2])
+		case "Max file locks":
+			l.FileLocks, err = parseUint(fields[2])
+		case "Max pending signals":
+			l.PendingSignals, err = parseUint(fields[2])
+		case "Max msgqueue size":
+			l.MsqqueueSize, err = parseUint(fields[2])
+		case "Max nice priority":
+			l.NicePriority, err = parseUint(fields[2])
+		case "Max realtime priority":
+			l.RealtimePriority, err = parseUint(fields[2])
+		case "Max realtime timeout":
+			l.RealtimeTimeout, err = parseUint(fields[2])
+		}
+		if err != nil {
+			return ProcLimits{}, err
+		}
+	}
+
+	return l, s.Err()
+}
+
+func parseUint(s string) (uint64, error) {
+	if s == limitsUnlimited {
+		return 18446744073709551615, nil // math.MaxUint64: the sentinel for "unlimited"
+	}
+	i, err := strconv.ParseUint(s, 10, 64)
+	if err != nil {
+		return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
+	}
+	return i, nil
+}
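A usage sketch for Proc.Limits, checking for the "unlimited" sentinel that parseUint maps to math.MaxUint64:

package main

import (
	"fmt"
	"log"
	"math"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	l, err := p.Limits()
	if err != nil {
		log.Fatal(err)
	}
	if l.OpenFiles == math.MaxUint64 {
		fmt.Println("open files: unlimited")
	} else {
		fmt.Printf("open files: %d\n", l.OpenFiles)
	}
}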
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
new file mode 100644
index 0000000..7e75c28
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -0,0 +1,211 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js
+
+package procfs
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
+type ProcMapPermissions struct {
+	// mapping has the [R]ead flag set
+	Read bool
+	// mapping has the [W]rite flag set
+	Write bool
+	// mapping has the [X]ecutable flag set
+	Execute bool
+	// mapping has the [S]hared flag set
+	Shared bool
+	// mapping is marked as [P]rivate (copy on write)
+	Private bool
+}
+
+// ProcMap contains the process memory-mappings of the process
+// read from `/proc/[pid]/maps`.
+type ProcMap struct {
+	// The start address of current mapping.
+	StartAddr uintptr
+	// The end address of the current mapping
+	EndAddr uintptr
+	// The permissions for this mapping
+	Perms *ProcMapPermissions
+	// The current offset into the file/fd (e.g., shared libs)
+	Offset int64
+	// Device owner of this mapping (major:minor) in Mkdev format.
+	Dev uint64
+	// The inode of the device above
+	Inode uint64
+	// The file or pseudofile (or empty==anonymous)
+	Pathname string
+}
+
+// parseDevice parses the device token of a line and converts it to a dev_t
+// (mkdev) like structure.
+func parseDevice(s string) (uint64, error) {
+	i := strings.Index(s, ":")
+	if i == -1 {
+		return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
+	}
+
+	major, err := strconv.ParseUint(s[0:i], 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	minor, err := strconv.ParseUint(s[i+1:], 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	return unix.Mkdev(uint32(major), uint32(minor)), nil
+}
+
+// parseAddress converts a hex-string to a uintptr.
+func parseAddress(s string) (uintptr, error) {
+	a, err := strconv.ParseUint(s, 16, 0)
+	if err != nil {
+		return 0, err
+	}
+
+	return uintptr(a), nil
+}
+
+// parseAddresses parses the start-end address.
+func parseAddresses(s string) (uintptr, uintptr, error) {
+	idx := strings.Index(s, "-")
+	if idx == -1 {
+		return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
+	}
+
+	saddr, err := parseAddress(s[0:idx])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	eaddr, err := parseAddress(s[idx+1:])
+	if err != nil {
+		return 0, 0, err
+	}
+
+	return saddr, eaddr, nil
+}
+
+// parsePermissions parses a token and returns any that are set.
+func parsePermissions(s string) (*ProcMapPermissions, error) {
+	if len(s) < 4 {
+		return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
+	}
+
+	perms := ProcMapPermissions{}
+	for _, ch := range s {
+		switch ch {
+		case 'r':
+			perms.Read = true
+		case 'w':
+			perms.Write = true
+		case 'x':
+			perms.Execute = true
+		case 'p':
+			perms.Private = true
+		case 's':
+			perms.Shared = true
+		}
+	}
+
+	return &perms, nil
+}
+
+// parseProcMap will attempt to parse a single line within a proc/[pid]/maps
+// buffer.
+func parseProcMap(text string) (*ProcMap, error) {
+	fields := strings.Fields(text)
+	if len(fields) < 5 {
+		return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
+	}
+
+	saddr, eaddr, err := parseAddresses(fields[0])
+	if err != nil {
+		return nil, err
+	}
+
+	perms, err := parsePermissions(fields[1])
+	if err != nil {
+		return nil, err
+	}
+
+	offset, err := strconv.ParseInt(fields[2], 16, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	device, err := parseDevice(fields[3])
+	if err != nil {
+		return nil, err
+	}
+
+	inode, err := strconv.ParseUint(fields[4], 10, 0)
+	if err != nil {
+		return nil, err
+	}
+
+	pathname := ""
+
+	if len(fields) >= 5 {
+		pathname = strings.Join(fields[5:], " ")
+	}
+
+	return &ProcMap{
+		StartAddr: saddr,
+		EndAddr:   eaddr,
+		Perms:     perms,
+		Offset:    offset,
+		Dev:       device,
+		Inode:     inode,
+		Pathname:  pathname,
+	}, nil
+}
+
+// ProcMaps reads from /proc/[pid]/maps to get the memory-mappings of the
+// process.
+func (p Proc) ProcMaps() ([]*ProcMap, error) {
+	file, err := os.Open(p.path("maps"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	maps := []*ProcMap{}
+	scan := bufio.NewScanner(file)
+
+	for scan.Scan() {
+		m, err := parseProcMap(scan.Text())
+		if err != nil {
+			return nil, err
+		}
+
+		maps = append(maps, m)
+	}
+
+	return maps, nil
+}
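A short sketch of Proc.ProcMaps, counting executable mappings of the calling process (assumes one of the Unix-like platforms named in the build tags above):

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	maps, err := p.ProcMaps()
	if err != nil {
		log.Fatal(err)
	}
	var exec int
	for _, m := range maps {
		if m.Perms.Execute {
			exec++
		}
	}
	fmt.Printf("mappings=%d executable=%d\n", len(maps), exec)
}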
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
new file mode 100644
index 0000000..4248c17
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -0,0 +1,443 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcNetstat models the content of /proc/<pid>/net/netstat.
+type ProcNetstat struct {
+	// The process ID.
+	PID int
+	TcpExt
+	IpExt
+}
+
+type TcpExt struct { // nolint:revive
+	SyncookiesSent            *float64
+	SyncookiesRecv            *float64
+	SyncookiesFailed          *float64
+	EmbryonicRsts             *float64
+	PruneCalled               *float64
+	RcvPruned                 *float64
+	OfoPruned                 *float64
+	OutOfWindowIcmps          *float64
+	LockDroppedIcmps          *float64
+	ArpFilter                 *float64
+	TW                        *float64
+	TWRecycled                *float64
+	TWKilled                  *float64
+	PAWSActive                *float64
+	PAWSEstab                 *float64
+	DelayedACKs               *float64
+	DelayedACKLocked          *float64
+	DelayedACKLost            *float64
+	ListenOverflows           *float64
+	ListenDrops               *float64
+	TCPHPHits                 *float64
+	TCPPureAcks               *float64
+	TCPHPAcks                 *float64
+	TCPRenoRecovery           *float64
+	TCPSackRecovery           *float64
+	TCPSACKReneging           *float64
+	TCPSACKReorder            *float64
+	TCPRenoReorder            *float64
+	TCPTSReorder              *float64
+	TCPFullUndo               *float64
+	TCPPartialUndo            *float64
+	TCPDSACKUndo              *float64
+	TCPLossUndo               *float64
+	TCPLostRetransmit         *float64
+	TCPRenoFailures           *float64
+	TCPSackFailures           *float64
+	TCPLossFailures           *float64
+	TCPFastRetrans            *float64
+	TCPSlowStartRetrans       *float64
+	TCPTimeouts               *float64
+	TCPLossProbes             *float64
+	TCPLossProbeRecovery      *float64
+	TCPRenoRecoveryFail       *float64
+	TCPSackRecoveryFail       *float64
+	TCPRcvCollapsed           *float64
+	TCPDSACKOldSent           *float64
+	TCPDSACKOfoSent           *float64
+	TCPDSACKRecv              *float64
+	TCPDSACKOfoRecv           *float64
+	TCPAbortOnData            *float64
+	TCPAbortOnClose           *float64
+	TCPAbortOnMemory          *float64
+	TCPAbortOnTimeout         *float64
+	TCPAbortOnLinger          *float64
+	TCPAbortFailed            *float64
+	TCPMemoryPressures        *float64
+	TCPMemoryPressuresChrono  *float64
+	TCPSACKDiscard            *float64
+	TCPDSACKIgnoredOld        *float64
+	TCPDSACKIgnoredNoUndo     *float64
+	TCPSpuriousRTOs           *float64
+	TCPMD5NotFound            *float64
+	TCPMD5Unexpected          *float64
+	TCPMD5Failure             *float64
+	TCPSackShifted            *float64
+	TCPSackMerged             *float64
+	TCPSackShiftFallback      *float64
+	TCPBacklogDrop            *float64
+	PFMemallocDrop            *float64
+	TCPMinTTLDrop             *float64
+	TCPDeferAcceptDrop        *float64
+	IPReversePathFilter       *float64
+	TCPTimeWaitOverflow       *float64
+	TCPReqQFullDoCookies      *float64
+	TCPReqQFullDrop           *float64
+	TCPRetransFail            *float64
+	TCPRcvCoalesce            *float64
+	TCPRcvQDrop               *float64
+	TCPOFOQueue               *float64
+	TCPOFODrop                *float64
+	TCPOFOMerge               *float64
+	TCPChallengeACK           *float64
+	TCPSYNChallenge           *float64
+	TCPFastOpenActive         *float64
+	TCPFastOpenActiveFail     *float64
+	TCPFastOpenPassive        *float64
+	TCPFastOpenPassiveFail    *float64
+	TCPFastOpenListenOverflow *float64
+	TCPFastOpenCookieReqd     *float64
+	TCPFastOpenBlackhole      *float64
+	TCPSpuriousRtxHostQueues  *float64
+	BusyPollRxPackets         *float64
+	TCPAutoCorking            *float64
+	TCPFromZeroWindowAdv      *float64
+	TCPToZeroWindowAdv        *float64
+	TCPWantZeroWindowAdv      *float64
+	TCPSynRetrans             *float64
+	TCPOrigDataSent           *float64
+	TCPHystartTrainDetect     *float64
+	TCPHystartTrainCwnd       *float64
+	TCPHystartDelayDetect     *float64
+	TCPHystartDelayCwnd       *float64
+	TCPACKSkippedSynRecv      *float64
+	TCPACKSkippedPAWS         *float64
+	TCPACKSkippedSeq          *float64
+	TCPACKSkippedFinWait2     *float64
+	TCPACKSkippedTimeWait     *float64
+	TCPACKSkippedChallenge    *float64
+	TCPWinProbe               *float64
+	TCPKeepAlive              *float64
+	TCPMTUPFail               *float64
+	TCPMTUPSuccess            *float64
+	TCPWqueueTooBig           *float64
+}
+
+type IpExt struct { // nolint:revive
+	InNoRoutes      *float64
+	InTruncatedPkts *float64
+	InMcastPkts     *float64
+	OutMcastPkts    *float64
+	InBcastPkts     *float64
+	OutBcastPkts    *float64
+	InOctets        *float64
+	OutOctets       *float64
+	InMcastOctets   *float64
+	OutMcastOctets  *float64
+	InBcastOctets   *float64
+	OutBcastOctets  *float64
+	InCsumErrors    *float64
+	InNoECTPkts     *float64
+	InECT1Pkts      *float64
+	InECT0Pkts      *float64
+	InCEPkts        *float64
+	ReasmOverlaps   *float64
+}
+
+func (p Proc) Netstat() (ProcNetstat, error) {
+	filename := p.path("net/netstat")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		return ProcNetstat{PID: p.PID}, err
+	}
+	procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
+	procNetstat.PID = p.PID
+	return procNetstat, err
+}
+
+// parseProcNetstat parses the metrics from proc/<pid>/net/netstat file
+// and returns a ProcNetstat structure.
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+	var (
+		scanner     = bufio.NewScanner(r)
+		procNetstat = ProcNetstat{}
+	)
+
+	for scanner.Scan() {
+		nameParts := strings.Split(scanner.Text(), " ")
+		scanner.Scan()
+		valueParts := strings.Split(scanner.Text(), " ")
+		// Remove trailing :.
+		protocol := strings.TrimSuffix(nameParts[0], ":")
+		if len(nameParts) != len(valueParts) {
+			return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
+		}
+		for i := 1; i < len(nameParts); i++ {
+			value, err := strconv.ParseFloat(valueParts[i], 64)
+			if err != nil {
+				return procNetstat, err
+			}
+			key := nameParts[i]
+
+			switch protocol {
+			case "TcpExt":
+				switch key {
+				case "SyncookiesSent":
+					procNetstat.SyncookiesSent = &value
+				case "SyncookiesRecv":
+					procNetstat.SyncookiesRecv = &value
+				case "SyncookiesFailed":
+					procNetstat.SyncookiesFailed = &value
+				case "EmbryonicRsts":
+					procNetstat.EmbryonicRsts = &value
+				case "PruneCalled":
+					procNetstat.PruneCalled = &value
+				case "RcvPruned":
+					procNetstat.RcvPruned = &value
+				case "OfoPruned":
+					procNetstat.OfoPruned = &value
+				case "OutOfWindowIcmps":
+					procNetstat.OutOfWindowIcmps = &value
+				case "LockDroppedIcmps":
+					procNetstat.LockDroppedIcmps = &value
+				case "ArpFilter":
+					procNetstat.ArpFilter = &value
+				case "TW":
+					procNetstat.TW = &value
+				case "TWRecycled":
+					procNetstat.TWRecycled = &value
+				case "TWKilled":
+					procNetstat.TWKilled = &value
+				case "PAWSActive":
+					procNetstat.PAWSActive = &value
+				case "PAWSEstab":
+					procNetstat.PAWSEstab = &value
+				case "DelayedACKs":
+					procNetstat.DelayedACKs = &value
+				case "DelayedACKLocked":
+					procNetstat.DelayedACKLocked = &value
+				case "DelayedACKLost":
+					procNetstat.DelayedACKLost = &value
+				case "ListenOverflows":
+					procNetstat.ListenOverflows = &value
+				case "ListenDrops":
+					procNetstat.ListenDrops = &value
+				case "TCPHPHits":
+					procNetstat.TCPHPHits = &value
+				case "TCPPureAcks":
+					procNetstat.TCPPureAcks = &value
+				case "TCPHPAcks":
+					procNetstat.TCPHPAcks = &value
+				case "TCPRenoRecovery":
+					procNetstat.TCPRenoRecovery = &value
+				case "TCPSackRecovery":
+					procNetstat.TCPSackRecovery = &value
+				case "TCPSACKReneging":
+					procNetstat.TCPSACKReneging = &value
+				case "TCPSACKReorder":
+					procNetstat.TCPSACKReorder = &value
+				case "TCPRenoReorder":
+					procNetstat.TCPRenoReorder = &value
+				case "TCPTSReorder":
+					procNetstat.TCPTSReorder = &value
+				case "TCPFullUndo":
+					procNetstat.TCPFullUndo = &value
+				case "TCPPartialUndo":
+					procNetstat.TCPPartialUndo = &value
+				case "TCPDSACKUndo":
+					procNetstat.TCPDSACKUndo = &value
+				case "TCPLossUndo":
+					procNetstat.TCPLossUndo = &value
+				case "TCPLostRetransmit":
+					procNetstat.TCPLostRetransmit = &value
+				case "TCPRenoFailures":
+					procNetstat.TCPRenoFailures = &value
+				case "TCPSackFailures":
+					procNetstat.TCPSackFailures = &value
+				case "TCPLossFailures":
+					procNetstat.TCPLossFailures = &value
+				case "TCPFastRetrans":
+					procNetstat.TCPFastRetrans = &value
+				case "TCPSlowStartRetrans":
+					procNetstat.TCPSlowStartRetrans = &value
+				case "TCPTimeouts":
+					procNetstat.TCPTimeouts = &value
+				case "TCPLossProbes":
+					procNetstat.TCPLossProbes = &value
+				case "TCPLossProbeRecovery":
+					procNetstat.TCPLossProbeRecovery = &value
+				case "TCPRenoRecoveryFail":
+					procNetstat.TCPRenoRecoveryFail = &value
+				case "TCPSackRecoveryFail":
+					procNetstat.TCPSackRecoveryFail = &value
+				case "TCPRcvCollapsed":
+					procNetstat.TCPRcvCollapsed = &value
+				case "TCPDSACKOldSent":
+					procNetstat.TCPDSACKOldSent = &value
+				case "TCPDSACKOfoSent":
+					procNetstat.TCPDSACKOfoSent = &value
+				case "TCPDSACKRecv":
+					procNetstat.TCPDSACKRecv = &value
+				case "TCPDSACKOfoRecv":
+					procNetstat.TCPDSACKOfoRecv = &value
+				case "TCPAbortOnData":
+					procNetstat.TCPAbortOnData = &value
+				case "TCPAbortOnClose":
+					procNetstat.TCPAbortOnClose = &value
+				case "TCPDeferAcceptDrop":
+					procNetstat.TCPDeferAcceptDrop = &value
+				case "IPReversePathFilter":
+					procNetstat.IPReversePathFilter = &value
+				case "TCPTimeWaitOverflow":
+					procNetstat.TCPTimeWaitOverflow = &value
+				case "TCPReqQFullDoCookies":
+					procNetstat.TCPReqQFullDoCookies = &value
+				case "TCPReqQFullDrop":
+					procNetstat.TCPReqQFullDrop = &value
+				case "TCPRetransFail":
+					procNetstat.TCPRetransFail = &value
+				case "TCPRcvCoalesce":
+					procNetstat.TCPRcvCoalesce = &value
+				case "TCPRcvQDrop":
+					procNetstat.TCPRcvQDrop = &value
+				case "TCPOFOQueue":
+					procNetstat.TCPOFOQueue = &value
+				case "TCPOFODrop":
+					procNetstat.TCPOFODrop = &value
+				case "TCPOFOMerge":
+					procNetstat.TCPOFOMerge = &value
+				case "TCPChallengeACK":
+					procNetstat.TCPChallengeACK = &value
+				case "TCPSYNChallenge":
+					procNetstat.TCPSYNChallenge = &value
+				case "TCPFastOpenActive":
+					procNetstat.TCPFastOpenActive = &value
+				case "TCPFastOpenActiveFail":
+					procNetstat.TCPFastOpenActiveFail = &value
+				case "TCPFastOpenPassive":
+					procNetstat.TCPFastOpenPassive = &value
+				case "TCPFastOpenPassiveFail":
+					procNetstat.TCPFastOpenPassiveFail = &value
+				case "TCPFastOpenListenOverflow":
+					procNetstat.TCPFastOpenListenOverflow = &value
+				case "TCPFastOpenCookieReqd":
+					procNetstat.TCPFastOpenCookieReqd = &value
+				case "TCPFastOpenBlackhole":
+					procNetstat.TCPFastOpenBlackhole = &value
+				case "TCPSpuriousRtxHostQueues":
+					procNetstat.TCPSpuriousRtxHostQueues = &value
+				case "BusyPollRxPackets":
+					procNetstat.BusyPollRxPackets = &value
+				case "TCPAutoCorking":
+					procNetstat.TCPAutoCorking = &value
+				case "TCPFromZeroWindowAdv":
+					procNetstat.TCPFromZeroWindowAdv = &value
+				case "TCPToZeroWindowAdv":
+					procNetstat.TCPToZeroWindowAdv = &value
+				case "TCPWantZeroWindowAdv":
+					procNetstat.TCPWantZeroWindowAdv = &value
+				case "TCPSynRetrans":
+					procNetstat.TCPSynRetrans = &value
+				case "TCPOrigDataSent":
+					procNetstat.TCPOrigDataSent = &value
+				case "TCPHystartTrainDetect":
+					procNetstat.TCPHystartTrainDetect = &value
+				case "TCPHystartTrainCwnd":
+					procNetstat.TCPHystartTrainCwnd = &value
+				case "TCPHystartDelayDetect":
+					procNetstat.TCPHystartDelayDetect = &value
+				case "TCPHystartDelayCwnd":
+					procNetstat.TCPHystartDelayCwnd = &value
+				case "TCPACKSkippedSynRecv":
+					procNetstat.TCPACKSkippedSynRecv = &value
+				case "TCPACKSkippedPAWS":
+					procNetstat.TCPACKSkippedPAWS = &value
+				case "TCPACKSkippedSeq":
+					procNetstat.TCPACKSkippedSeq = &value
+				case "TCPACKSkippedFinWait2":
+					procNetstat.TCPACKSkippedFinWait2 = &value
+				case "TCPACKSkippedTimeWait":
+					procNetstat.TCPACKSkippedTimeWait = &value
+				case "TCPACKSkippedChallenge":
+					procNetstat.TCPACKSkippedChallenge = &value
+				case "TCPWinProbe":
+					procNetstat.TCPWinProbe = &value
+				case "TCPKeepAlive":
+					procNetstat.TCPKeepAlive = &value
+				case "TCPMTUPFail":
+					procNetstat.TCPMTUPFail = &value
+				case "TCPMTUPSuccess":
+					procNetstat.TCPMTUPSuccess = &value
+				case "TCPWqueueTooBig":
+					procNetstat.TCPWqueueTooBig = &value
+				}
+			case "IpExt":
+				switch key {
+				case "InNoRoutes":
+					procNetstat.InNoRoutes = &value
+				case "InTruncatedPkts":
+					procNetstat.InTruncatedPkts = &value
+				case "InMcastPkts":
+					procNetstat.InMcastPkts = &value
+				case "OutMcastPkts":
+					procNetstat.OutMcastPkts = &value
+				case "InBcastPkts":
+					procNetstat.InBcastPkts = &value
+				case "OutBcastPkts":
+					procNetstat.OutBcastPkts = &value
+				case "InOctets":
+					procNetstat.InOctets = &value
+				case "OutOctets":
+					procNetstat.OutOctets = &value
+				case "InMcastOctets":
+					procNetstat.InMcastOctets = &value
+				case "OutMcastOctets":
+					procNetstat.OutMcastOctets = &value
+				case "InBcastOctets":
+					procNetstat.InBcastOctets = &value
+				case "OutBcastOctets":
+					procNetstat.OutBcastOctets = &value
+				case "InCsumErrors":
+					procNetstat.InCsumErrors = &value
+				case "InNoECTPkts":
+					procNetstat.InNoECTPkts = &value
+				case "InECT1Pkts":
+					procNetstat.InECT1Pkts = &value
+				case "InECT0Pkts":
+					procNetstat.InECT0Pkts = &value
+				case "InCEPkts":
+					procNetstat.InCEPkts = &value
+				case "ReasmOverlaps":
+					procNetstat.ReasmOverlaps = &value
+				}
+			}
+		}
+	}
+	return procNetstat, scanner.Err()
+}
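A sketch of Proc.Netstat illustrating the pointer-valued fields: they are *float64 so that a counter absent on the running kernel (nil) is distinguishable from one that is present but zero:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}
	if ns.TcpExt.ListenDrops != nil {
		fmt.Printf("ListenDrops=%v\n", *ns.TcpExt.ListenDrops)
	}
	if ns.IpExt.InOctets != nil {
		fmt.Printf("InOctets=%v\n", *ns.IpExt.InOctets)
	}
}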
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
new file mode 100644
index 0000000..0f8f847
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+)
+
+// Namespace represents a single namespace of a process.
+type Namespace struct {
+	Type  string // Namespace type.
+	Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match.
+}
+
+// Namespaces contains all of the namespaces that the process is contained in.
+type Namespaces map[string]Namespace
+
+// Namespaces reads from /proc/<pid>/ns/* to get the namespaces of which the
+// process is a member.
+func (p Proc) Namespaces() (Namespaces, error) {
+	d, err := os.Open(p.path("ns"))
+	if err != nil {
+		return nil, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
+	}
+
+	ns := make(Namespaces, len(names))
+	for _, name := range names {
+		target, err := os.Readlink(p.path("ns", name))
+		if err != nil {
+			return nil, err
+		}
+
+		fields := strings.SplitN(target, ":", 2)
+		if len(fields) != 2 {
+			return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
+		}
+
+		typ := fields[0]
+		inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
+		if err != nil {
+			return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
+		}
+
+		ns[name] = Namespace{typ, uint32(inode)}
+	}
+
+	return ns, nil
+}
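A minimal sketch of Proc.Namespaces; two processes are in the same namespace exactly when the inodes reported here match:

package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	namespaces, err := p.Namespaces()
	if err != nil {
		log.Fatal(err)
	}
	for name, ns := range namespaces {
		fmt.Printf("%s: type=%s inode=%d\n", name, ns.Type, ns.Inode)
	}
}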
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
new file mode 100644
index 0000000..ccd35f1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -0,0 +1,102 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+// The PSI / pressure interface is described at
+//   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/accounting/psi.txt
+// Each resource (cpu, io, memory, ...) is exposed as a single file.
+// Each file may contain up to two lines, one for "some" pressure and one for "full" pressure.
+// Each line contains several averages (over n seconds) and a total in µs.
+//
+// Example io pressure file:
+// > some avg10=0.06 avg60=0.21 avg300=0.99 total=8537362
+// > full avg10=0.00 avg60=0.13 avg300=0.96 total=8183134
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
+
+// PSILine is a single line of values as returned by `/proc/pressure/*`.
+//
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
+type PSILine struct {
+	Avg10  float64
+	Avg60  float64
+	Avg300 float64
+	Total  uint64
+}
+
+// PSIStats represent pressure stall information from /proc/pressure/*
+//
+// "Some" indicates the share of time in which at least some tasks are stalled.
+// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
+type PSIStats struct {
+	Some *PSILine
+	Full *PSILine
+}
+
+// PSIStatsForResource reads pressure stall information for the specified
+// resource from /proc/pressure/<resource>. At time of writing this can be
+// either "cpu", "memory" or "io".
+func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
+	if err != nil {
+		return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
+	}
+
+	return parsePSIStats(bytes.NewReader(data))
+}
+
+// parsePSIStats parses the specified file for pressure stall information.
+func parsePSIStats(r io.Reader) (PSIStats, error) {
+	psiStats := PSIStats{}
+
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		l := scanner.Text()
+		prefix := strings.Split(l, " ")[0]
+		switch prefix {
+		case "some":
+			psi := PSILine{}
+			_, err := fmt.Sscanf(l, fmt.Sprintf("some %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+			if err != nil {
+				return PSIStats{}, err
+			}
+			psiStats.Some = &psi
+		case "full":
+			psi := PSILine{}
+			_, err := fmt.Sscanf(l, fmt.Sprintf("full %s", lineFormat), &psi.Avg10, &psi.Avg60, &psi.Avg300, &psi.Total)
+			if err != nil {
+				return PSIStats{}, err
+			}
+			psiStats.Full = &psi
+		default:
+			// Ignore lines with an unknown prefix and move on: should new
+			// measurement types be added in the future, they are skipped
+			// rather than causing retrieval to fail.
+			continue
+		}
+	}
+
+	return psiStats, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
new file mode 100644
index 0000000..9a297af
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -0,0 +1,164 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	// Match the header line before each mapped zone in `/proc/pid/smaps`.
+	procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
+)
+
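+// ProcSMapsRollup models memory mappings of a process summed over all mapped
+// zones, as read from /proc/[pid]/smaps_rollup; all sizes are in bytes.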
+type ProcSMapsRollup struct {
+	// Amount of the mapping that is currently resident in RAM.
+	Rss uint64
+	// Process's proportional share of this mapping.
+	Pss uint64
+	// Size in bytes of clean shared pages.
+	SharedClean uint64
+	// Size in bytes of dirty shared pages.
+	SharedDirty uint64
+	// Size in bytes of clean private pages.
+	PrivateClean uint64
+	// Size in bytes of dirty private pages.
+	PrivateDirty uint64
+	// Amount of memory currently marked as referenced or accessed.
+	Referenced uint64
+	// Amount of memory that does not belong to any file.
+	Anonymous uint64
+	// Amount of would-be-anonymous memory currently on swap.
+	Swap uint64
+	// Process's proportional memory on swap.
+	SwapPss uint64
+}
+
+// ProcSMapsRollup reads from /proc/[pid]/smaps_rollup to get summed memory
+// information of the process.
+//
+// If smaps_rollup does not exist (it requires kernel >= 4.15), the content of
+// /proc/pid/smaps is read and summed instead.
+func (p Proc) ProcSMapsRollup() (ProcSMapsRollup, error) {
+	data, err := util.ReadFileNoStat(p.path("smaps_rollup"))
+	if err != nil && os.IsNotExist(err) {
+		return p.procSMapsRollupManual()
+	}
+	if err != nil {
+		return ProcSMapsRollup{}, err
+	}
+
+	lines := strings.Split(string(data), "\n")
+	smaps := ProcSMapsRollup{}
+
+	// Skip the first line, which doesn't contain information we need.
+	lines = lines[1:]
+	for _, line := range lines {
+		if line == "" {
+			continue
+		}
+
+		if err := smaps.parseLine(line); err != nil {
+			return ProcSMapsRollup{}, err
+		}
+	}
+
+	return smaps, nil
+}
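+
+// Illustrative usage (a sketch; NewDefaultFS and Self are assumed from this
+// package's public API):
+//
+//	fs, _ := procfs.NewDefaultFS()
+//	p, err := fs.Self()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	rollup, err := p.ProcSMapsRollup()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("PSS: %d bytes\n", rollup.Pss)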
+
+// Read /proc/pid/smaps and do the roll-up in Go code.
+func (p Proc) procSMapsRollupManual() (ProcSMapsRollup, error) {
+	file, err := os.Open(p.path("smaps"))
+	if err != nil {
+		return ProcSMapsRollup{}, err
+	}
+	defer file.Close()
+
+	smaps := ProcSMapsRollup{}
+	scan := bufio.NewScanner(file)
+
+	for scan.Scan() {
+		line := scan.Text()
+
+		if procSMapsHeaderLine.MatchString(line) {
+			continue
+		}
+
+		if err := smaps.parseLine(line); err != nil {
+			return ProcSMapsRollup{}, err
+		}
+	}
+
+	return smaps, nil
+}
+
+func (s *ProcSMapsRollup) parseLine(line string) error {
+	kv := strings.SplitN(line, ":", 2)
+	if len(kv) != 2 {
+		return errors.New("invalid net/dev line, missing colon")
+	}
+
+	k := kv[0]
+	if k == "VmFlags" {
+		return nil
+	}
+
+	v := strings.TrimSpace(kv[1])
+	v = strings.TrimSuffix(v, " kB")
+
+	vKBytes, err := strconv.ParseUint(v, 10, 64)
+	if err != nil {
+		return err
+	}
+	vBytes := vKBytes * 1024
+
+	s.addValue(k, vBytes)
+
+	return nil
+}
+
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
+	switch k {
+	case "Rss":
+		s.Rss += vUintBytes
+	case "Pss":
+		s.Pss += vUintBytes
+	case "Shared_Clean":
+		s.SharedClean += vUintBytes
+	case "Shared_Dirty":
+		s.SharedDirty += vUintBytes
+	case "Private_Clean":
+		s.PrivateClean += vUintBytes
+	case "Private_Dirty":
+		s.PrivateDirty += vUintBytes
+	case "Referenced":
+		s.Referenced += vUintBytes
+	case "Anonymous":
+		s.Anonymous += vUintBytes
+	case "Swap":
+		s.Swap += vUintBytes
+	case "SwapPss":
+		s.SwapPss += vUintBytes
+	}
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
new file mode 100644
index 0000000..4bdc90b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -0,0 +1,353 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp models the content of /proc/<pid>/net/snmp.
+type ProcSnmp struct {
+	// The process ID.
+	PID int
+	Ip
+	Icmp
+	IcmpMsg
+	Tcp
+	Udp
+	UdpLite
+}
+
+type Ip struct { // nolint:revive
+	Forwarding      *float64
+	DefaultTTL      *float64
+	InReceives      *float64
+	InHdrErrors     *float64
+	InAddrErrors    *float64
+	ForwDatagrams   *float64
+	InUnknownProtos *float64
+	InDiscards      *float64
+	InDelivers      *float64
+	OutRequests     *float64
+	OutDiscards     *float64
+	OutNoRoutes     *float64
+	ReasmTimeout    *float64
+	ReasmReqds      *float64
+	ReasmOKs        *float64
+	ReasmFails      *float64
+	FragOKs         *float64
+	FragFails       *float64
+	FragCreates     *float64
+}
+
+type Icmp struct { // nolint:revive
+	InMsgs           *float64
+	InErrors         *float64
+	InCsumErrors     *float64
+	InDestUnreachs   *float64
+	InTimeExcds      *float64
+	InParmProbs      *float64
+	InSrcQuenchs     *float64
+	InRedirects      *float64
+	InEchos          *float64
+	InEchoReps       *float64
+	InTimestamps     *float64
+	InTimestampReps  *float64
+	InAddrMasks      *float64
+	InAddrMaskReps   *float64
+	OutMsgs          *float64
+	OutErrors        *float64
+	OutDestUnreachs  *float64
+	OutTimeExcds     *float64
+	OutParmProbs     *float64
+	OutSrcQuenchs    *float64
+	OutRedirects     *float64
+	OutEchos         *float64
+	OutEchoReps      *float64
+	OutTimestamps    *float64
+	OutTimestampReps *float64
+	OutAddrMasks     *float64
+	OutAddrMaskReps  *float64
+}
+
+type IcmpMsg struct {
+	InType3  *float64
+	OutType3 *float64
+}
+
+type Tcp struct { // nolint:revive
+	RtoAlgorithm *float64
+	RtoMin       *float64
+	RtoMax       *float64
+	MaxConn      *float64
+	ActiveOpens  *float64
+	PassiveOpens *float64
+	AttemptFails *float64
+	EstabResets  *float64
+	CurrEstab    *float64
+	InSegs       *float64
+	OutSegs      *float64
+	RetransSegs  *float64
+	InErrs       *float64
+	OutRsts      *float64
+	InCsumErrors *float64
+}
+
+type Udp struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+type UdpLite struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
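+// Snmp returns the network protocol counters of the process, read from
+// /proc/<pid>/net/snmp.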
+func (p Proc) Snmp() (ProcSnmp, error) {
+	filename := p.path("net/snmp")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		return ProcSnmp{PID: p.PID}, err
+	}
+	procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
+	procSnmp.PID = p.PID
+	return procSnmp, err
+}
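+
+// Illustrative usage (a sketch; p is a Proc, e.g. from fs.Proc(pid), and all
+// counter fields are pointers, so nil means the counter was absent):
+//
+//	snmp, err := p.Snmp()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if snmp.Ip.InReceives != nil {
+//		fmt.Printf("IP InReceives: %.0f\n", *snmp.Ip.InReceives)
+//	}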
+
+// parseSnmp parses the metrics from a /proc/<pid>/net/snmp file
+// and returns a ProcSnmp struct holding those metrics (e.g. Ip.Forwarding).
+func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
+	var (
+		scanner  = bufio.NewScanner(r)
+		procSnmp = ProcSnmp{}
+	)
+
+	for scanner.Scan() {
+		nameParts := strings.Split(scanner.Text(), " ")
+		scanner.Scan()
+		valueParts := strings.Split(scanner.Text(), " ")
+		// Remove trailing :.
+		protocol := strings.TrimSuffix(nameParts[0], ":")
+		if len(nameParts) != len(valueParts) {
+			return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+				ErrFileParse, fileName, protocol)
+		}
+		for i := 1; i < len(nameParts); i++ {
+			value, err := strconv.ParseFloat(valueParts[i], 64)
+			if err != nil {
+				return procSnmp, err
+			}
+			key := nameParts[i]
+
+			switch protocol {
+			case "Ip":
+				switch key {
+				case "Forwarding":
+					procSnmp.Forwarding = &value
+				case "DefaultTTL":
+					procSnmp.DefaultTTL = &value
+				case "InReceives":
+					procSnmp.InReceives = &value
+				case "InHdrErrors":
+					procSnmp.InHdrErrors = &value
+				case "InAddrErrors":
+					procSnmp.InAddrErrors = &value
+				case "ForwDatagrams":
+					procSnmp.ForwDatagrams = &value
+				case "InUnknownProtos":
+					procSnmp.InUnknownProtos = &value
+				case "InDiscards":
+					procSnmp.InDiscards = &value
+				case "InDelivers":
+					procSnmp.InDelivers = &value
+				case "OutRequests":
+					procSnmp.OutRequests = &value
+				case "OutDiscards":
+					procSnmp.OutDiscards = &value
+				case "OutNoRoutes":
+					procSnmp.OutNoRoutes = &value
+				case "ReasmTimeout":
+					procSnmp.ReasmTimeout = &value
+				case "ReasmReqds":
+					procSnmp.ReasmReqds = &value
+				case "ReasmOKs":
+					procSnmp.ReasmOKs = &value
+				case "ReasmFails":
+					procSnmp.ReasmFails = &value
+				case "FragOKs":
+					procSnmp.FragOKs = &value
+				case "FragFails":
+					procSnmp.FragFails = &value
+				case "FragCreates":
+					procSnmp.FragCreates = &value
+				}
+			case "Icmp":
+				switch key {
+				case "InMsgs":
+					procSnmp.InMsgs = &value
+				case "InErrors":
+					procSnmp.Icmp.InErrors = &value
+				case "InCsumErrors":
+					procSnmp.Icmp.InCsumErrors = &value
+				case "InDestUnreachs":
+					procSnmp.InDestUnreachs = &value
+				case "InTimeExcds":
+					procSnmp.InTimeExcds = &value
+				case "InParmProbs":
+					procSnmp.InParmProbs = &value
+				case "InSrcQuenchs":
+					procSnmp.InSrcQuenchs = &value
+				case "InRedirects":
+					procSnmp.InRedirects = &value
+				case "InEchos":
+					procSnmp.InEchos = &value
+				case "InEchoReps":
+					procSnmp.InEchoReps = &value
+				case "InTimestamps":
+					procSnmp.InTimestamps = &value
+				case "InTimestampReps":
+					procSnmp.InTimestampReps = &value
+				case "InAddrMasks":
+					procSnmp.InAddrMasks = &value
+				case "InAddrMaskReps":
+					procSnmp.InAddrMaskReps = &value
+				case "OutMsgs":
+					procSnmp.OutMsgs = &value
+				case "OutErrors":
+					procSnmp.OutErrors = &value
+				case "OutDestUnreachs":
+					procSnmp.OutDestUnreachs = &value
+				case "OutTimeExcds":
+					procSnmp.OutTimeExcds = &value
+				case "OutParmProbs":
+					procSnmp.OutParmProbs = &value
+				case "OutSrcQuenchs":
+					procSnmp.OutSrcQuenchs = &value
+				case "OutRedirects":
+					procSnmp.OutRedirects = &value
+				case "OutEchos":
+					procSnmp.OutEchos = &value
+				case "OutEchoReps":
+					procSnmp.OutEchoReps = &value
+				case "OutTimestamps":
+					procSnmp.OutTimestamps = &value
+				case "OutTimestampReps":
+					procSnmp.OutTimestampReps = &value
+				case "OutAddrMasks":
+					procSnmp.OutAddrMasks = &value
+				case "OutAddrMaskReps":
+					procSnmp.OutAddrMaskReps = &value
+				}
+			case "IcmpMsg":
+				switch key {
+				case "InType3":
+					procSnmp.InType3 = &value
+				case "OutType3":
+					procSnmp.OutType3 = &value
+				}
+			case "Tcp":
+				switch key {
+				case "RtoAlgorithm":
+					procSnmp.RtoAlgorithm = &value
+				case "RtoMin":
+					procSnmp.RtoMin = &value
+				case "RtoMax":
+					procSnmp.RtoMax = &value
+				case "MaxConn":
+					procSnmp.MaxConn = &value
+				case "ActiveOpens":
+					procSnmp.ActiveOpens = &value
+				case "PassiveOpens":
+					procSnmp.PassiveOpens = &value
+				case "AttemptFails":
+					procSnmp.AttemptFails = &value
+				case "EstabResets":
+					procSnmp.EstabResets = &value
+				case "CurrEstab":
+					procSnmp.CurrEstab = &value
+				case "InSegs":
+					procSnmp.InSegs = &value
+				case "OutSegs":
+					procSnmp.OutSegs = &value
+				case "RetransSegs":
+					procSnmp.RetransSegs = &value
+				case "InErrs":
+					procSnmp.InErrs = &value
+				case "OutRsts":
+					procSnmp.OutRsts = &value
+				case "InCsumErrors":
+					procSnmp.Tcp.InCsumErrors = &value
+				}
+			case "Udp":
+				switch key {
+				case "InDatagrams":
+					procSnmp.Udp.InDatagrams = &value
+				case "NoPorts":
+					procSnmp.Udp.NoPorts = &value
+				case "InErrors":
+					procSnmp.Udp.InErrors = &value
+				case "OutDatagrams":
+					procSnmp.Udp.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp.Udp.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp.Udp.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp.Udp.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp.Udp.IgnoredMulti = &value
+				}
+			case "UdpLite":
+				switch key {
+				case "InDatagrams":
+					procSnmp.UdpLite.InDatagrams = &value
+				case "NoPorts":
+					procSnmp.UdpLite.NoPorts = &value
+				case "InErrors":
+					procSnmp.UdpLite.InErrors = &value
+				case "OutDatagrams":
+					procSnmp.UdpLite.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp.UdpLite.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp.UdpLite.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp.UdpLite.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp.UdpLite.IgnoredMulti = &value
+				}
+			}
+		}
+	}
+	return procSnmp, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
new file mode 100644
index 0000000..fb7fd39
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
+type ProcSnmp6 struct {
+	// The process ID.
+	PID int
+	Ip6
+	Icmp6
+	Udp6
+	UdpLite6
+}
+
+type Ip6 struct { // nolint:revive
+	InReceives       *float64
+	InHdrErrors      *float64
+	InTooBigErrors   *float64
+	InNoRoutes       *float64
+	InAddrErrors     *float64
+	InUnknownProtos  *float64
+	InTruncatedPkts  *float64
+	InDiscards       *float64
+	InDelivers       *float64
+	OutForwDatagrams *float64
+	OutRequests      *float64
+	OutDiscards      *float64
+	OutNoRoutes      *float64
+	ReasmTimeout     *float64
+	ReasmReqds       *float64
+	ReasmOKs         *float64
+	ReasmFails       *float64
+	FragOKs          *float64
+	FragFails        *float64
+	FragCreates      *float64
+	InMcastPkts      *float64
+	OutMcastPkts     *float64
+	InOctets         *float64
+	OutOctets        *float64
+	InMcastOctets    *float64
+	OutMcastOctets   *float64
+	InBcastOctets    *float64
+	OutBcastOctets   *float64
+	InNoECTPkts      *float64
+	InECT1Pkts       *float64
+	InECT0Pkts       *float64
+	InCEPkts         *float64
+}
+
+type Icmp6 struct {
+	InMsgs                    *float64
+	InErrors                  *float64
+	OutMsgs                   *float64
+	OutErrors                 *float64
+	InCsumErrors              *float64
+	InDestUnreachs            *float64
+	InPktTooBigs              *float64
+	InTimeExcds               *float64
+	InParmProblems            *float64
+	InEchos                   *float64
+	InEchoReplies             *float64
+	InGroupMembQueries        *float64
+	InGroupMembResponses      *float64
+	InGroupMembReductions     *float64
+	InRouterSolicits          *float64
+	InRouterAdvertisements    *float64
+	InNeighborSolicits        *float64
+	InNeighborAdvertisements  *float64
+	InRedirects               *float64
+	InMLDv2Reports            *float64
+	OutDestUnreachs           *float64
+	OutPktTooBigs             *float64
+	OutTimeExcds              *float64
+	OutParmProblems           *float64
+	OutEchos                  *float64
+	OutEchoReplies            *float64
+	OutGroupMembQueries       *float64
+	OutGroupMembResponses     *float64
+	OutGroupMembReductions    *float64
+	OutRouterSolicits         *float64
+	OutRouterAdvertisements   *float64
+	OutNeighborSolicits       *float64
+	OutNeighborAdvertisements *float64
+	OutRedirects              *float64
+	OutMLDv2Reports           *float64
+	InType1                   *float64
+	InType134                 *float64
+	InType135                 *float64
+	InType136                 *float64
+	InType143                 *float64
+	OutType133                *float64
+	OutType135                *float64
+	OutType136                *float64
+	OutType143                *float64
+}
+
+type Udp6 struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+	IgnoredMulti *float64
+}
+
+type UdpLite6 struct { // nolint:revive
+	InDatagrams  *float64
+	NoPorts      *float64
+	InErrors     *float64
+	OutDatagrams *float64
+	RcvbufErrors *float64
+	SndbufErrors *float64
+	InCsumErrors *float64
+}
+
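+// Snmp6 returns the IPv6 protocol counters of the process, read from
+// /proc/<pid>/net/snmp6. On systems with IPv6 disabled, the file is absent
+// and an empty result is returned without error.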
+func (p Proc) Snmp6() (ProcSnmp6, error) {
+	filename := p.path("net/snmp6")
+	data, err := util.ReadFileNoStat(filename)
+	if err != nil {
+		// On systems with IPv6 disabled, this file won't exist;
+		// return an empty result without error.
+		if errors.Is(err, os.ErrNotExist) {
+			return ProcSnmp6{PID: p.PID}, nil
+		}
+
+		return ProcSnmp6{PID: p.PID}, err
+	}
+
+	procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
+	procSnmp6.PID = p.PID
+	return procSnmp6, err
+}
+
+// parseSNMP6Stats parses the metrics from a /proc/<pid>/net/snmp6 file
+// and returns a ProcSnmp6 struct holding those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
+	var (
+		scanner   = bufio.NewScanner(r)
+		procSnmp6 = ProcSnmp6{}
+	)
+
+	for scanner.Scan() {
+		stat := strings.Fields(scanner.Text())
+		if len(stat) < 2 {
+			continue
+		}
+		// Expect to have "6" in metric name, skip line otherwise
+		if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
+			protocol := stat[0][:sixIndex+1]
+			key := stat[0][sixIndex+1:]
+			value, err := strconv.ParseFloat(stat[1], 64)
+			if err != nil {
+				return procSnmp6, err
+			}
+
+			switch protocol {
+			case "Ip6":
+				switch key {
+				case "InReceives":
+					procSnmp6.InReceives = &value
+				case "InHdrErrors":
+					procSnmp6.InHdrErrors = &value
+				case "InTooBigErrors":
+					procSnmp6.InTooBigErrors = &value
+				case "InNoRoutes":
+					procSnmp6.InNoRoutes = &value
+				case "InAddrErrors":
+					procSnmp6.InAddrErrors = &value
+				case "InUnknownProtos":
+					procSnmp6.InUnknownProtos = &value
+				case "InTruncatedPkts":
+					procSnmp6.InTruncatedPkts = &value
+				case "InDiscards":
+					procSnmp6.InDiscards = &value
+				case "InDelivers":
+					procSnmp6.InDelivers = &value
+				case "OutForwDatagrams":
+					procSnmp6.OutForwDatagrams = &value
+				case "OutRequests":
+					procSnmp6.OutRequests = &value
+				case "OutDiscards":
+					procSnmp6.OutDiscards = &value
+				case "OutNoRoutes":
+					procSnmp6.OutNoRoutes = &value
+				case "ReasmTimeout":
+					procSnmp6.ReasmTimeout = &value
+				case "ReasmReqds":
+					procSnmp6.ReasmReqds = &value
+				case "ReasmOKs":
+					procSnmp6.ReasmOKs = &value
+				case "ReasmFails":
+					procSnmp6.ReasmFails = &value
+				case "FragOKs":
+					procSnmp6.FragOKs = &value
+				case "FragFails":
+					procSnmp6.FragFails = &value
+				case "FragCreates":
+					procSnmp6.FragCreates = &value
+				case "InMcastPkts":
+					procSnmp6.InMcastPkts = &value
+				case "OutMcastPkts":
+					procSnmp6.OutMcastPkts = &value
+				case "InOctets":
+					procSnmp6.InOctets = &value
+				case "OutOctets":
+					procSnmp6.OutOctets = &value
+				case "InMcastOctets":
+					procSnmp6.InMcastOctets = &value
+				case "OutMcastOctets":
+					procSnmp6.OutMcastOctets = &value
+				case "InBcastOctets":
+					procSnmp6.InBcastOctets = &value
+				case "OutBcastOctets":
+					procSnmp6.OutBcastOctets = &value
+				case "InNoECTPkts":
+					procSnmp6.InNoECTPkts = &value
+				case "InECT1Pkts":
+					procSnmp6.InECT1Pkts = &value
+				case "InECT0Pkts":
+					procSnmp6.InECT0Pkts = &value
+				case "InCEPkts":
+					procSnmp6.InCEPkts = &value
+				}
+			case "Icmp6":
+				switch key {
+				case "InMsgs":
+					procSnmp6.InMsgs = &value
+				case "InErrors":
+					procSnmp6.Icmp6.InErrors = &value
+				case "OutMsgs":
+					procSnmp6.OutMsgs = &value
+				case "OutErrors":
+					procSnmp6.OutErrors = &value
+				case "InCsumErrors":
+					procSnmp6.Icmp6.InCsumErrors = &value
+				case "InDestUnreachs":
+					procSnmp6.InDestUnreachs = &value
+				case "InPktTooBigs":
+					procSnmp6.InPktTooBigs = &value
+				case "InTimeExcds":
+					procSnmp6.InTimeExcds = &value
+				case "InParmProblems":
+					procSnmp6.InParmProblems = &value
+				case "InEchos":
+					procSnmp6.InEchos = &value
+				case "InEchoReplies":
+					procSnmp6.InEchoReplies = &value
+				case "InGroupMembQueries":
+					procSnmp6.InGroupMembQueries = &value
+				case "InGroupMembResponses":
+					procSnmp6.InGroupMembResponses = &value
+				case "InGroupMembReductions":
+					procSnmp6.InGroupMembReductions = &value
+				case "InRouterSolicits":
+					procSnmp6.InRouterSolicits = &value
+				case "InRouterAdvertisements":
+					procSnmp6.InRouterAdvertisements = &value
+				case "InNeighborSolicits":
+					procSnmp6.InNeighborSolicits = &value
+				case "InNeighborAdvertisements":
+					procSnmp6.InNeighborAdvertisements = &value
+				case "InRedirects":
+					procSnmp6.InRedirects = &value
+				case "InMLDv2Reports":
+					procSnmp6.InMLDv2Reports = &value
+				case "OutDestUnreachs":
+					procSnmp6.OutDestUnreachs = &value
+				case "OutPktTooBigs":
+					procSnmp6.OutPktTooBigs = &value
+				case "OutTimeExcds":
+					procSnmp6.OutTimeExcds = &value
+				case "OutParmProblems":
+					procSnmp6.OutParmProblems = &value
+				case "OutEchos":
+					procSnmp6.OutEchos = &value
+				case "OutEchoReplies":
+					procSnmp6.OutEchoReplies = &value
+				case "OutGroupMembQueries":
+					procSnmp6.OutGroupMembQueries = &value
+				case "OutGroupMembResponses":
+					procSnmp6.OutGroupMembResponses = &value
+				case "OutGroupMembReductions":
+					procSnmp6.OutGroupMembReductions = &value
+				case "OutRouterSolicits":
+					procSnmp6.OutRouterSolicits = &value
+				case "OutRouterAdvertisements":
+					procSnmp6.OutRouterAdvertisements = &value
+				case "OutNeighborSolicits":
+					procSnmp6.OutNeighborSolicits = &value
+				case "OutNeighborAdvertisements":
+					procSnmp6.OutNeighborAdvertisements = &value
+				case "OutRedirects":
+					procSnmp6.OutRedirects = &value
+				case "OutMLDv2Reports":
+					procSnmp6.OutMLDv2Reports = &value
+				case "InType1":
+					procSnmp6.InType1 = &value
+				case "InType134":
+					procSnmp6.InType134 = &value
+				case "InType135":
+					procSnmp6.InType135 = &value
+				case "InType136":
+					procSnmp6.InType136 = &value
+				case "InType143":
+					procSnmp6.InType143 = &value
+				case "OutType133":
+					procSnmp6.OutType133 = &value
+				case "OutType135":
+					procSnmp6.OutType135 = &value
+				case "OutType136":
+					procSnmp6.OutType136 = &value
+				case "OutType143":
+					procSnmp6.OutType143 = &value
+				}
+			case "Udp6":
+				switch key {
+				case "InDatagrams":
+					procSnmp6.Udp6.InDatagrams = &value
+				case "NoPorts":
+					procSnmp6.Udp6.NoPorts = &value
+				case "InErrors":
+					procSnmp6.Udp6.InErrors = &value
+				case "OutDatagrams":
+					procSnmp6.Udp6.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp6.Udp6.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp6.Udp6.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp6.Udp6.InCsumErrors = &value
+				case "IgnoredMulti":
+					procSnmp6.IgnoredMulti = &value
+				}
+			case "UdpLite6":
+				switch key {
+				case "InDatagrams":
+					procSnmp6.UdpLite6.InDatagrams = &value
+				case "NoPorts":
+					procSnmp6.UdpLite6.NoPorts = &value
+				case "InErrors":
+					procSnmp6.UdpLite6.InErrors = &value
+				case "OutDatagrams":
+					procSnmp6.UdpLite6.OutDatagrams = &value
+				case "RcvbufErrors":
+					procSnmp6.UdpLite6.RcvbufErrors = &value
+				case "SndbufErrors":
+					procSnmp6.UdpLite6.SndbufErrors = &value
+				case "InCsumErrors":
+					procSnmp6.UdpLite6.InCsumErrors = &value
+				}
+			}
+		}
+	}
+	return procSnmp6, scanner.Err()
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
new file mode 100644
index 0000000..06a8d93
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -0,0 +1,229 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
+// which required cgo. However, that caused a lot of problems regarding
+// cross-compilation. Alternatives such as running a binary to determine the
+// value, or trying to derive it in some other way were all problematic.  After
+// much research it was determined that USER_HZ is actually hardcoded to 100 on
+// all Go-supported platforms as of the time of this writing. This is why we
+// decided to hardcode it here as well. It is not impossible that there could
+// be systems with exceptions, but they should be very exotic edge cases, and
+// in that case, the worst outcome will be two misreported metrics.
+//
+// See also the following discussions:
+//
+// - https://github.com/prometheus/node_exporter/issues/52
+// - https://github.com/prometheus/procfs/pull/2
+// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
+const userHZ = 100
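+
+// For example, a process whose UTime is 250 clock ticks has spent
+// 250/userHZ = 2.5 seconds scheduled in user mode.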
+
+// ProcStat provides status information about the process,
+// read from /proc/[pid]/stat.
+type ProcStat struct {
+	// The process ID.
+	PID int
+	// The filename of the executable.
+	Comm string
+	// The process state.
+	State string
+	// The PID of the parent of this process.
+	PPID int
+	// The process group ID of the process.
+	PGRP int
+	// The session ID of the process.
+	Session int
+	// The controlling terminal of the process.
+	TTY int
+	// The ID of the foreground process group of the controlling terminal of
+	// the process.
+	TPGID int
+	// The kernel flags word of the process.
+	Flags uint
+	// The number of minor faults the process has made which have not required
+	// loading a memory page from disk.
+	MinFlt uint
+	// The number of minor faults that the process's waited-for children have
+	// made.
+	CMinFlt uint
+	// The number of major faults the process has made which have required
+	// loading a memory page from disk.
+	MajFlt uint
+	// The number of major faults that the process's waited-for children have
+	// made.
+	CMajFlt uint
+	// Amount of time that this process has been scheduled in user mode,
+	// measured in clock ticks.
+	UTime uint
+	// Amount of time that this process has been scheduled in kernel mode,
+	// measured in clock ticks.
+	STime uint
+	// Amount of time that this process's waited-for children have been
+	// scheduled in user mode, measured in clock ticks.
+	CUTime int
+	// Amount of time that this process's waited-for children have been
+	// scheduled in kernel mode, measured in clock ticks.
+	CSTime int
+	// For processes running a real-time scheduling policy, this is the negated
+	// scheduling priority, minus one.
+	Priority int
+	// The nice value, a value in the range 19 (low priority) to -20 (high
+	// priority).
+	Nice int
+	// Number of threads in this process.
+	NumThreads int
+	// The time the process started after system boot, the value is expressed
+	// in clock ticks.
+	Starttime uint64
+	// Virtual memory size in bytes.
+	VSize uint
+	// Resident set size in pages.
+	RSS int
+	// Soft limit in bytes on the rss of the process.
+	RSSLimit uint64
+	// CPU number last executed on.
+	Processor uint
+	// Real-time scheduling priority, a number in the range 1 to 99 for processes
+	// scheduled under a real-time policy, or 0, for non-real-time processes.
+	RTPriority uint
+	// Scheduling policy.
+	Policy uint
+	// Aggregated block I/O delays, measured in clock ticks (centiseconds).
+	DelayAcctBlkIOTicks uint64
+	// Guest time of the process (time spent running a virtual CPU for a guest
+	// operating system), measured in clock ticks.
+	GuestTime int
+	// Guest time of the process's children, measured in clock ticks.
+	CGuestTime int
+
+	proc FS
+}
+
+// NewStat returns the current status information of the process.
+//
+// Deprecated: Use p.Stat() instead.
+func (p Proc) NewStat() (ProcStat, error) {
+	return p.Stat()
+}
+
+// Stat returns the current status information of the process.
+func (p Proc) Stat() (ProcStat, error) {
+	data, err := util.ReadFileNoStat(p.path("stat"))
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	var (
+		ignoreInt64  int64
+		ignoreUint64 uint64
+
+		s = ProcStat{PID: p.PID, proc: p.fs}
+		l = bytes.Index(data, []byte("("))
+		r = bytes.LastIndex(data, []byte(")"))
+	)
+
+	if l < 0 || r < 0 {
+		return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
+	}
+
+	s.Comm = string(data[l+1 : r])
+
+	// Check the following resources for the details about the particular stat
+	// fields and their data types:
+	// * https://man7.org/linux/man-pages/man5/proc.5.html
+	// * https://man7.org/linux/man-pages/man3/scanf.3.html
+	_, err = fmt.Fscan(
+		bytes.NewBuffer(data[r+2:]),
+		&s.State,
+		&s.PPID,
+		&s.PGRP,
+		&s.Session,
+		&s.TTY,
+		&s.TPGID,
+		&s.Flags,
+		&s.MinFlt,
+		&s.CMinFlt,
+		&s.MajFlt,
+		&s.CMajFlt,
+		&s.UTime,
+		&s.STime,
+		&s.CUTime,
+		&s.CSTime,
+		&s.Priority,
+		&s.Nice,
+		&s.NumThreads,
+		&ignoreInt64,
+		&s.Starttime,
+		&s.VSize,
+		&s.RSS,
+		&s.RSSLimit,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreInt64,
+		&s.Processor,
+		&s.RTPriority,
+		&s.Policy,
+		&s.DelayAcctBlkIOTicks,
+		&s.GuestTime,
+		&s.CGuestTime,
+	)
+	if err != nil {
+		return ProcStat{}, err
+	}
+
+	return s, nil
+}
+
+// VirtualMemory returns the virtual memory size in bytes.
+func (s ProcStat) VirtualMemory() uint {
+	return s.VSize
+}
+
+// ResidentMemory returns the resident memory size in bytes.
+func (s ProcStat) ResidentMemory() int {
+	return s.RSS * os.Getpagesize()
+}
+
+// StartTime returns the unix timestamp of the process start time in seconds.
+func (s ProcStat) StartTime() (float64, error) {
+	stat, err := s.proc.Stat()
+	if err != nil {
+		return 0, err
+	}
+	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
+}
+
+// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 {
+	return float64(s.UTime+s.STime) / userHZ
+}
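+
+// Illustrative usage (a sketch; p is a Proc, e.g. obtained via fs.Proc(pid)):
+//
+//	stat, err := p.Stat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("resident: %d bytes, cpu: %.2fs\n",
+//		stat.ResidentMemory(), stat.CPUTime())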
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
new file mode 100644
index 0000000..dd8aa56
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -0,0 +1,242 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bytes"
+	"math/bits"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// ProcStatus provides status information about the process,
+// read from /proc/[pid]/status.
+type ProcStatus struct {
+	// The process ID.
+	PID int
+	// The process name.
+	Name string
+
+	// Thread group ID.
+	TGID int
+	// PIDs of the process in each of the PID namespaces it belongs to.
+	NSpids []uint64
+
+	// Peak virtual memory size.
+	VmPeak uint64 // nolint:revive
+	// Virtual memory size.
+	VmSize uint64 // nolint:revive
+	// Locked memory size.
+	VmLck uint64 // nolint:revive
+	// Pinned memory size.
+	VmPin uint64 // nolint:revive
+	// Peak resident set size.
+	VmHWM uint64 // nolint:revive
+	// Resident set size (sum of RssAnon, RssFile and RssShmem).
+	VmRSS uint64 // nolint:revive
+	// Size of resident anonymous memory.
+	RssAnon uint64 // nolint:revive
+	// Size of resident file mappings.
+	RssFile uint64 // nolint:revive
+	// Size of resident shared memory.
+	RssShmem uint64 // nolint:revive
+	// Size of data segments.
+	VmData uint64 // nolint:revive
+	// Size of stack segments.
+	VmStk uint64 // nolint:revive
+	// Size of text segments.
+	VmExe uint64 // nolint:revive
+	// Shared library code size.
+	VmLib uint64 // nolint:revive
+	// Page table entries size.
+	VmPTE uint64 // nolint:revive
+	// Size of second-level page tables.
+	VmPMD uint64 // nolint:revive
+	// Swapped-out virtual memory size by anonymous private.
+	VmSwap uint64 // nolint:revive
+	// Size of hugetlb memory portions
+	HugetlbPages uint64
+
+	// Number of voluntary context switches.
+	VoluntaryCtxtSwitches uint64
+	// Number of involuntary context switches.
+	NonVoluntaryCtxtSwitches uint64
+
+	// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
+	UIDs [4]uint64
+	// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
+	GIDs [4]uint64
+
+	// CpusAllowedList: list of CPU cores the process is allowed to run on.
+	CpusAllowedList []uint64
+}
+
+// NewStatus returns the current status information of the process.
+func (p Proc) NewStatus() (ProcStatus, error) {
+	data, err := util.ReadFileNoStat(p.path("status"))
+	if err != nil {
+		return ProcStatus{}, err
+	}
+
+	s := ProcStatus{PID: p.PID}
+
+	lines := strings.Split(string(data), "\n")
+	for _, line := range lines {
+		if !bytes.Contains([]byte(line), []byte(":")) {
+			continue
+		}
+
+		kv := strings.SplitN(line, ":", 2)
+
+		// removes spaces
+		k := strings.TrimSpace(kv[0])
+		v := strings.TrimSpace(kv[1])
+		// removes "kB"
+		v = strings.TrimSuffix(v, " kB")
+
+		// Convert the value to an integer when possible; the error can be
+		// ignored because vKBytes is unused when the value is a string.
+		vKBytes, _ := strconv.ParseUint(v, 10, 64)
+		// convert kB to B
+		vBytes := vKBytes * 1024
+
+		err = s.fillStatus(k, v, vKBytes, vBytes)
+		if err != nil {
+			return ProcStatus{}, err
+		}
+	}
+
+	return s, nil
+}
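+
+// Illustrative usage (a sketch; p is a Proc, e.g. from fs.Proc(pid)):
+//
+//	status, err := p.NewStatus()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("%s: VmRSS=%d bytes, ctxt switches=%d\n",
+//		status.Name, status.VmRSS, status.TotalCtxtSwitches())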
+
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
+	switch k {
+	case "Tgid":
+		s.TGID = int(vUint)
+	case "Name":
+		s.Name = vString
+	case "Uid":
+		var err error
+		for i, v := range strings.Split(vString, "\t") {
+			s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+			if err != nil {
+				return err
+			}
+		}
+	case "Gid":
+		var err error
+		for i, v := range strings.Split(vString, "\t") {
+			s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+			if err != nil {
+				return err
+			}
+		}
+	case "NSpid":
+		nspids, err := calcNSPidsList(vString)
+		if err != nil {
+			return err
+		}
+		s.NSpids = nspids
+	case "VmPeak":
+		s.VmPeak = vUintBytes
+	case "VmSize":
+		s.VmSize = vUintBytes
+	case "VmLck":
+		s.VmLck = vUintBytes
+	case "VmPin":
+		s.VmPin = vUintBytes
+	case "VmHWM":
+		s.VmHWM = vUintBytes
+	case "VmRSS":
+		s.VmRSS = vUintBytes
+	case "RssAnon":
+		s.RssAnon = vUintBytes
+	case "RssFile":
+		s.RssFile = vUintBytes
+	case "RssShmem":
+		s.RssShmem = vUintBytes
+	case "VmData":
+		s.VmData = vUintBytes
+	case "VmStk":
+		s.VmStk = vUintBytes
+	case "VmExe":
+		s.VmExe = vUintBytes
+	case "VmLib":
+		s.VmLib = vUintBytes
+	case "VmPTE":
+		s.VmPTE = vUintBytes
+	case "VmPMD":
+		s.VmPMD = vUintBytes
+	case "VmSwap":
+		s.VmSwap = vUintBytes
+	case "HugetlbPages":
+		s.HugetlbPages = vUintBytes
+	case "voluntary_ctxt_switches":
+		s.VoluntaryCtxtSwitches = vUint
+	case "nonvoluntary_ctxt_switches":
+		s.NonVoluntaryCtxtSwitches = vUint
+	case "Cpus_allowed_list":
+		s.CpusAllowedList = calcCpusAllowedList(vString)
+	}
+
+	return nil
+}
+
+// TotalCtxtSwitches returns the total number of context switches.
+func (s ProcStatus) TotalCtxtSwitches() uint64 {
+	return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
+}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+	s := strings.Split(cpuString, ",")
+
+	var g []uint64
+
+	for _, cpu := range s {
+		// parse cpu ranges, example: 1-3=[1,2,3]
+		if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+			startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+			endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+			for i := startCPU; i <= endCPU; i++ {
+				g = append(g, i)
+			}
+		} else if len(l) == 1 {
+			cpu, _ := strconv.ParseUint(l[0], 10, 64)
+			g = append(g, cpu)
+		}
+	}
+
+	sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+	return g
+}
+
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+	s := strings.Split(nspidsString, "\t")
+	var nspids []uint64
+
+	for _, nspid := range s {
+		nspid, err := strconv.ParseUint(nspid, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		nspids = append(nspids, nspid)
+	}
+
+	return nspids, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 0000000..3810d1a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+	return strings.ReplaceAll(sysctl, ".", "/")
+}
+
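+// SysctlStrings reads the given sysctl (dot-separated, e.g.
+// "kernel.hostname") from /proc/sys and returns its whitespace-separated
+// fields as strings.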
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+	value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+	if err != nil {
+		return nil, err
+	}
+	return strings.Fields(value), nil
+}
+
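+// SysctlInts reads the given sysctl from /proc/sys and parses each
+// whitespace-separated field as an int.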
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+	fields, err := fs.SysctlStrings(sysctl)
+	if err != nil {
+		return nil, err
+	}
+
+	values := make([]int, len(fields))
+	for i, f := range fields {
+		vp := util.NewValueParser(f)
+		values[i] = vp.Int()
+		if err := vp.Err(); err != nil {
+			return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+		}
+	}
+	return values, nil
+}
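+
+// Illustrative usage (a sketch; the sysctl name is just an example):
+//
+//	vals, err := fs.SysctlInts("kernel.pid_max")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("pid_max:", vals[0])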
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
new file mode 100644
index 0000000..5f7f32d
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -0,0 +1,121 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"errors"
+	"os"
+	"regexp"
+	"strconv"
+)
+
+var (
+	cpuLineRE  = regexp.MustCompile(`cpu(\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+) (\d+)`)
+	procLineRE = regexp.MustCompile(`(\d+) (\d+) (\d+)`)
+)
+
+// Schedstat contains scheduler statistics from /proc/schedstat
+//
+// See
+// https://www.kernel.org/doc/Documentation/scheduler/sched-stats.txt
+// for a detailed description of what these numbers mean.
+//
+// Note the current kernel documentation claims some of the time units are in
+// jiffies when they are actually in nanoseconds since 2.6.23 with the
+// introduction of CFS. A fix to the documentation is pending. See
+// https://lore.kernel.org/patchwork/project/lkml/list/?series=403473
+type Schedstat struct {
+	CPUs []*SchedstatCPU
+}
+
+// SchedstatCPU contains the values from one "cpu<N>" line.
+type SchedstatCPU struct {
+	CPUNum string
+
+	RunningNanoseconds uint64
+	WaitingNanoseconds uint64
+	RunTimeslices      uint64
+}
+
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
+type ProcSchedstat struct {
+	RunningNanoseconds uint64
+	WaitingNanoseconds uint64
+	RunTimeslices      uint64
+}
+
+// Schedstat reads data from `/proc/schedstat`.
+func (fs FS) Schedstat() (*Schedstat, error) {
+	file, err := os.Open(fs.proc.Path("schedstat"))
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	stats := &Schedstat{}
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		match := cpuLineRE.FindStringSubmatch(scanner.Text())
+		if match != nil {
+			cpu := &SchedstatCPU{}
+			cpu.CPUNum = match[1]
+
+			cpu.RunningNanoseconds, err = strconv.ParseUint(match[8], 10, 64)
+			if err != nil {
+				continue
+			}
+
+			cpu.WaitingNanoseconds, err = strconv.ParseUint(match[9], 10, 64)
+			if err != nil {
+				continue
+			}
+
+			cpu.RunTimeslices, err = strconv.ParseUint(match[10], 10, 64)
+			if err != nil {
+				continue
+			}
+
+			stats.CPUs = append(stats.CPUs, cpu)
+		}
+	}
+
+	return stats, nil
+}
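+
+// Illustrative usage (a sketch):
+//
+//	sched, err := fs.Schedstat()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, cpu := range sched.CPUs {
+//		fmt.Printf("cpu%s: ran %dns, waited %dns\n",
+//			cpu.CPUNum, cpu.RunningNanoseconds, cpu.WaitingNanoseconds)
+//	}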
+
+func parseProcSchedstat(contents string) (ProcSchedstat, error) {
+	var (
+		stats ProcSchedstat
+		err   error
+	)
+	match := procLineRE.FindStringSubmatch(contents)
+
+	if match != nil {
+		stats.RunningNanoseconds, err = strconv.ParseUint(match[1], 10, 64)
+		if err != nil {
+			return stats, err
+		}
+
+		stats.WaitingNanoseconds, err = strconv.ParseUint(match[2], 10, 64)
+		if err != nil {
+			return stats, err
+		}
+
+		stats.RunTimeslices, err = strconv.ParseUint(match[3], 10, 64)
+		return stats, err
+	}
+
+	return stats, errors.New("could not parse schedstat")
+}
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
new file mode 100644
index 0000000..8611c90
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -0,0 +1,151 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+var (
+	slabSpace  = regexp.MustCompile(`\s+`)
+	slabVer    = regexp.MustCompile(`slabinfo -`)
+	slabHeader = regexp.MustCompile(`# name`)
+)
+
+// Slab represents a slab pool in the kernel.
+type Slab struct {
+	Name         string
+	ObjActive    int64
+	ObjNum       int64
+	ObjSize      int64
+	ObjPerSlab   int64
+	PagesPerSlab int64
+	// tunables
+	Limit        int64
+	Batch        int64
+	SharedFactor int64
+	SlabActive   int64
+	SlabNum      int64
+	SharedAvail  int64
+}
+
+// SlabInfo represents info for all slabs.
+type SlabInfo struct {
+	Slabs []*Slab
+}
+
+func shouldParseSlab(line string) bool {
+	if slabVer.MatchString(line) {
+		return false
+	}
+	if slabHeader.MatchString(line) {
+		return false
+	}
+	return true
+}
+
+// parseV21SlabEntry is used to parse a line from /proc/slabinfo version 2.1.
+func parseV21SlabEntry(line string) (*Slab, error) {
+	// First cleanup whitespace.
+	l := slabSpace.ReplaceAllString(line, " ")
+	s := strings.Split(l, " ")
+	if len(s) != 16 {
+		return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
+	}
+	var err error
+	i := &Slab{Name: s[0]}
+	i.ObjActive, err = strconv.ParseInt(s[1], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.ObjNum, err = strconv.ParseInt(s[2], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.ObjSize, err = strconv.ParseInt(s[3], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.ObjPerSlab, err = strconv.ParseInt(s[4], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.PagesPerSlab, err = strconv.ParseInt(s[5], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.Limit, err = strconv.ParseInt(s[8], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.Batch, err = strconv.ParseInt(s[9], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SharedFactor, err = strconv.ParseInt(s[10], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SlabActive, err = strconv.ParseInt(s[13], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SlabNum, err = strconv.ParseInt(s[14], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	i.SharedAvail, err = strconv.ParseInt(s[15], 10, 64)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// parseSlabInfo21 is used to parse a slabinfo 2.1 file.
+func parseSlabInfo21(r *bytes.Reader) (SlabInfo, error) {
+	scanner := bufio.NewScanner(r)
+	s := SlabInfo{Slabs: []*Slab{}}
+	for scanner.Scan() {
+		line := scanner.Text()
+		if !shouldParseSlab(line) {
+			continue
+		}
+		slab, err := parseV21SlabEntry(line)
+		if err != nil {
+			return s, err
+		}
+		s.Slabs = append(s.Slabs, slab)
+	}
+	return s, nil
+}
+
+// SlabInfo reads data from `/proc/slabinfo`.
+func (fs FS) SlabInfo() (SlabInfo, error) {
+	// TODO: Consider passing options to allow for parsing different
+	// slabinfo versions. However, slabinfo 2.1 has been stable since
+	// kernel 2.6.10 and later.
+	data, err := util.ReadFileNoStat(fs.proc.Path("slabinfo"))
+	if err != nil {
+		return SlabInfo{}, err
+	}
+
+	return parseSlabInfo21(bytes.NewReader(data))
+}
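+
+// Illustrative usage (a sketch; note that /proc/slabinfo is typically only
+// readable by root):
+//
+//	info, err := fs.SlabInfo()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, s := range info.Slabs {
+//		fmt.Printf("%s: %d/%d objects active\n", s.Name, s.ObjActive, s.ObjNum)
+//	}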
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
new file mode 100644
index 0000000..403e6ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Softirqs represents the softirq statistics.
+type Softirqs struct {
+	Hi      []uint64
+	Timer   []uint64
+	NetTx   []uint64
+	NetRx   []uint64
+	Block   []uint64
+	IRQPoll []uint64
+	Tasklet []uint64
+	Sched   []uint64
+	HRTimer []uint64
+	RCU     []uint64
+}
+
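+// Softirqs returns the per-CPU softirq counters read from /proc/softirqs.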
+func (fs FS) Softirqs() (Softirqs, error) {
+	fileName := fs.proc.Path("softirqs")
+	data, err := util.ReadFileNoStat(fileName)
+	if err != nil {
+		return Softirqs{}, err
+	}
+
+	reader := bytes.NewReader(data)
+
+	return parseSoftirqs(reader)
+}
+
+func parseSoftirqs(r io.Reader) (Softirqs, error) {
+	var (
+		softirqs = Softirqs{}
+		scanner  = bufio.NewScanner(r)
+	)
+
+	if !scanner.Scan() {
+		return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
+	}
+
+	for scanner.Scan() {
+		parts := strings.Fields(scanner.Text())
+		var err error
+
+		// require at least one cpu
+		if len(parts) < 2 {
+			continue
+		}
+		switch parts[0] {
+		case "HI:":
+			perCPU := parts[1:]
+			softirqs.Hi = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "TIMER:":
+			perCPU := parts[1:]
+			softirqs.Timer = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "NET_TX:":
+			perCPU := parts[1:]
+			softirqs.NetTx = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "NET_RX:":
+			perCPU := parts[1:]
+			softirqs.NetRx = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "BLOCK:":
+			perCPU := parts[1:]
+			softirqs.Block = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "IRQ_POLL:":
+			perCPU := parts[1:]
+			softirqs.IRQPoll = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "TASKLET:":
+			perCPU := parts[1:]
+			softirqs.Tasklet = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "SCHED:":
+			perCPU := parts[1:]
+			softirqs.Sched = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "HRTIMER:":
+			perCPU := parts[1:]
+			softirqs.HRTimer = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case "RCU:":
+			perCPU := parts[1:]
+			softirqs.RCU = make([]uint64, len(perCPU))
+			for i, count := range perCPU {
+				if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
+	}
+
+	return softirqs, nil
+}
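+
+// Illustrative usage (a sketch):
+//
+//	sirqs, err := fs.Softirqs()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for i, n := range sirqs.NetRx {
+//		fmt.Printf("cpu%d NET_RX: %d\n", i, n)
+//	}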
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
new file mode 100644
index 0000000..e36b41c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -0,0 +1,258 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/fs"
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CPUStat shows how much time the CPU spends in various states.
+type CPUStat struct {
+	User      float64
+	Nice      float64
+	System    float64
+	Idle      float64
+	Iowait    float64
+	IRQ       float64
+	SoftIRQ   float64
+	Steal     float64
+	Guest     float64
+	GuestNice float64
+}
+
+// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
+type SoftIRQStat struct {
+	Hi          uint64
+	Timer       uint64
+	NetTx       uint64
+	NetRx       uint64
+	Block       uint64
+	BlockIoPoll uint64
+	Tasklet     uint64
+	Sched       uint64
+	Hrtimer     uint64
+	Rcu         uint64
+}
+
+// Stat represents kernel/system statistics.
+type Stat struct {
+	// Boot time in seconds since the Epoch.
+	BootTime uint64
+	// Summed up cpu statistics.
+	CPUTotal CPUStat
+	// Per-CPU statistics.
+	CPU map[int64]CPUStat
+	// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+	IRQTotal uint64
+	// Number of times a numbered IRQ was triggered.
+	IRQ []uint64
+	// Number of times a context switch happened.
+	ContextSwitches uint64
+	// Number of times a process was created.
+	ProcessCreated uint64
+	// Number of processes currently running.
+	ProcessesRunning uint64
+	// Number of processes currently blocked (waiting for IO).
+	ProcessesBlocked uint64
+	// Number of times a softirq was scheduled.
+	SoftIRQTotal uint64
+	// Detailed softirq statistics.
+	SoftIRQ SoftIRQStat
+}
+
+// parseCPUStat parses a CPU statistics line and returns the CPUStat struct plus the CPU id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+	cpuStat := CPUStat{}
+	var cpu string
+
+	count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+		&cpu,
+		&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+		&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+		&cpuStat.Guest, &cpuStat.GuestNice)
+
+	if err != nil && err != io.EOF {
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
+	}
+	if count == 0 {
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
+	}
+
+	cpuStat.User /= userHZ
+	cpuStat.Nice /= userHZ
+	cpuStat.System /= userHZ
+	cpuStat.Idle /= userHZ
+	cpuStat.Iowait /= userHZ
+	cpuStat.IRQ /= userHZ
+	cpuStat.SoftIRQ /= userHZ
+	cpuStat.Steal /= userHZ
+	cpuStat.Guest /= userHZ
+	cpuStat.GuestNice /= userHZ
+
+	if cpu == "cpu" {
+		return cpuStat, -1, nil
+	}
+
+	cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+	if err != nil {
+		return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
+	}
+
+	return cpuStat, cpuID, nil
+}
+
+// parseSoftIRQStat parses a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+	softIRQStat := SoftIRQStat{}
+	var total uint64
+	var prefix string
+
+	_, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+		&prefix, &total,
+		&softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+		&softIRQStat.Block, &softIRQStat.BlockIoPoll,
+		&softIRQStat.Tasklet, &softIRQStat.Sched,
+		&softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+	if err != nil {
+		return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
+	}
+
+	return softIRQStat, total, nil
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: Use fs.Stat() instead.
+func NewStat() (Stat, error) {
+	fs, err := NewFS(fs.DefaultProcMountPoint)
+	if err != nil {
+		return Stat{}, err
+	}
+	return fs.Stat()
+}
+
+// NewStat returns information about current cpu/process statistics.
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+//
+// Deprecated: Use fs.Stat() instead.
+func (fs FS) NewStat() (Stat, error) {
+	return fs.Stat()
+}
+
+// Stat returns information about current cpu/process statistics.
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+func (fs FS) Stat() (Stat, error) {
+	fileName := fs.proc.Path("stat")
+	data, err := util.ReadFileNoStat(fileName)
+	if err != nil {
+		return Stat{}, err
+	}
+	procStat, err := parseStat(bytes.NewReader(data), fileName)
+	if err != nil {
+		return Stat{}, err
+	}
+	return procStat, nil
+}
+
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+	var (
+		scanner = bufio.NewScanner(r)
+		stat    = Stat{
+			CPU: make(map[int64]CPUStat),
+		}
+		err error
+	)
+
+	// Increase default scanner buffer to handle very long `intr` lines.
+	buf := make([]byte, 0, 8*1024)
+	scanner.Buffer(buf, 1024*1024)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+		parts := strings.Fields(line)
+		// require at least <key> <value>
+		if len(parts) < 2 {
+			continue
+		}
+		switch {
+		case parts[0] == "btime":
+			if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "intr":
+			if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
+			}
+			numberedIRQs := parts[2:]
+			stat.IRQ = make([]uint64, len(numberedIRQs))
+			for i, count := range numberedIRQs {
+				if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+					return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
+				}
+			}
+		case parts[0] == "ctxt":
+			if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "processes":
+			if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "procs_running":
+			if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "procs_blocked":
+			if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+				return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
+			}
+		case parts[0] == "softirq":
+			softIRQStats, total, err := parseSoftIRQStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			stat.SoftIRQTotal = total
+			stat.SoftIRQ = softIRQStats
+		case strings.HasPrefix(parts[0], "cpu"):
+			cpuStat, cpuID, err := parseCPUStat(line)
+			if err != nil {
+				return Stat{}, err
+			}
+			if cpuID == -1 {
+				stat.CPUTotal = cpuStat
+			} else {
+				stat.CPU[cpuID] = cpuStat
+			}
+		}
+	}
+
+	if err := scanner.Err(); err != nil {
+		return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
+	}
+
+	return stat, nil
+}
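
A short consumer sketch for the Stat accessor defined above (again assuming the standard "/proc" mount):

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewFS("/proc")
		if err != nil {
			log.Fatal(err)
		}
		stat, err := fs.Stat()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("boot time: %d, context switches: %d\n", stat.BootTime, stat.ContextSwitches)
		// Per-CPU values are keyed by CPU id; CPUTotal holds the summed "cpu" line.
		for id, cpu := range stat.CPU {
			fmt.Printf("cpu%d user: %.2fs system: %.2fs\n", id, cpu.User, cpu.System)
		}
	}
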
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
new file mode 100644
index 0000000..65fec83
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -0,0 +1,89 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Swap represents an entry in /proc/swaps.
+type Swap struct {
+	Filename string
+	Type     string
+	Size     int
+	Used     int
+	Priority int
+}
+
+// Swaps returns a slice of all configured swap devices on the system.
+func (fs FS) Swaps() ([]*Swap, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("swaps"))
+	if err != nil {
+		return nil, err
+	}
+	return parseSwaps(data)
+}
+
+func parseSwaps(info []byte) ([]*Swap, error) {
+	swaps := []*Swap{}
+	scanner := bufio.NewScanner(bytes.NewReader(info))
+	scanner.Scan() // ignore header line
+	for scanner.Scan() {
+		swapString := scanner.Text()
+		parsedSwap, err := parseSwapString(swapString)
+		if err != nil {
+			return nil, err
+		}
+		swaps = append(swaps, parsedSwap)
+	}
+
+	err := scanner.Err()
+	return swaps, err
+}
+
+func parseSwapString(swapString string) (*Swap, error) {
+	var err error
+
+	swapFields := strings.Fields(swapString)
+	swapLength := len(swapFields)
+	if swapLength < 5 {
+		return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
+	}
+
+	swap := &Swap{
+		Filename: swapFields[0],
+		Type:     swapFields[1],
+	}
+
+	swap.Size, err = strconv.Atoi(swapFields[2])
+	if err != nil {
+		return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
+	}
+	swap.Used, err = strconv.Atoi(swapFields[3])
+	if err != nil {
+		return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
+	}
+	swap.Priority, err = strconv.Atoi(swapFields[4])
+	if err != nil {
+		return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
+	}
+
+	return swap, nil
+}
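
For reference, a sketch of how Swaps() might be consumed; Size and Used follow the units of /proc/swaps (kilobytes):

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewFS("/proc")
		if err != nil {
			log.Fatal(err)
		}
		swaps, err := fs.Swaps()
		if err != nil {
			log.Fatal(err)
		}
		for _, s := range swaps {
			fmt.Printf("%s (%s): %d/%d kB used, priority %d\n",
				s.Filename, s.Type, s.Used, s.Size, s.Priority)
		}
	}
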
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 0000000..80e0e94
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// This file provides access to /proc/PID/task/TID files, which hold
+// thread-specific values. Since these files have the same structure as the
+// /proc/PID/ ones, the data structures and parsers for the latter can be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+	fs, err := NewFS(DefaultMountPoint)
+	if err != nil {
+		return Procs{}, err
+	}
+	return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+	taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+	d, err := os.Open(taskPath)
+	if err != nil {
+		return Procs{}, err
+	}
+	defer d.Close()
+
+	names, err := d.Readdirnames(-1)
+	if err != nil {
+		return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+	}
+
+	t := Procs{}
+	for _, n := range names {
+		tid, err := strconv.ParseInt(n, 10, 64)
+		if err != nil {
+			continue
+		}
+
+		t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+	}
+
+	return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+	taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+	if _, err := os.Stat(taskPath); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+	tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+	if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+		return Proc{}, err
+	}
+	return Proc{PID: tid, fs: tfs}, nil
+}
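
A sketch of enumerating the current process's threads with the helpers above (Proc.Comm is defined elsewhere in this package; treat its use here as an assumption):

	package main

	import (
		"fmt"
		"log"
		"os"

		"github.com/prometheus/procfs"
	)

	func main() {
		threads, err := procfs.AllThreads(os.Getpid())
		if err != nil {
			log.Fatal(err)
		}
		for _, t := range threads {
			comm, err := t.Comm() // thread name, read from task/TID/comm
			if err != nil {
				continue
			}
			fmt.Printf("tid %d: %s\n", t.PID, comm)
		}
	}
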
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100644
index 0000000..19ef02b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,413 @@
+#!/usr/bin/env bash
+
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+#              - stores only filename, content, and mode
+#              - not designed for untrusted input
+#
+# Note: must work with bash version 3.2 (macOS)
+
+# Copyright 2017 Roger Luethi
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+ARG_STRING="$*"
+
+#------------------------------------------------------------------------------
+# Not all sed implementations can work on null bytes. In order to make ttar
+# work out of the box on macOS, use Python as a stream editor.
+
+USE_PYTHON=0
+
+PYTHON_CREATE_FILTER=$(cat << 'PCF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'EOF', r'\EOF', line)
+    line = re.sub(r'NULLBYTE', r'\NULLBYTE', line)
+    line = re.sub('\x00', r'NULLBYTE', line)
+    sys.stdout.write(line)
+PCF
+)
+
+PYTHON_EXTRACT_FILTER=$(cat << 'PEF'
+#!/usr/bin/env python
+
+import re
+import sys
+
+for line in sys.stdin:
+    line = re.sub(r'(?<!\\)NULLBYTE', '\x00', line)
+    line = re.sub(r'\\NULLBYTE', 'NULLBYTE', line)
+    line = re.sub(r'([^\\])EOF', r'\1', line)
+    line = re.sub(r'\\EOF', 'EOF', line)
+    sys.stdout.write(line)
+PEF
+)
+
+function test_environment {
+    if [[ "$(echo "a" | sed 's/a/\x0/' | wc -c)" -ne 2 ]]; then
+        echo "WARNING sed unable to handle null bytes, using Python (slow)."
+        if ! which python >/dev/null; then
+            echo "ERROR Python not found. Aborting."
+            exit 2
+        fi
+        USE_PYTHON=1
+    fi
+}
+
+#------------------------------------------------------------------------------
+
+function usage {
+    bname=$(basename "$0")
+    cat << USAGE
+Usage:   $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+         $bname            -t -f <ARCHIVE>           (list archive contents)
+         $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)
+
+Options:
+         -C <DIR>           (change directory)
+         -v                 (verbose)
+         --recursive-unlink (recursively delete existing directory if path
+                             collides with file or directory to extract)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+         $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+    if [ "${VERBOSE:-}" == "yes" ]; then
+        echo >&7 "$@"
+    fi
+}
+
+function set_cmd {
+    if [ -n "$CMD" ]; then
+        echo "ERROR: more than one command given"
+        echo
+        usage 2
+    fi
+    CMD=$1
+}
+
+unset VERBOSE
+unset RECURSIVE_UNLINK
+
+while getopts :cf:-:htxvC: opt; do
+    case $opt in
+        c)
+            set_cmd "create"
+            ;;
+        f)
+            ARCHIVE=$OPTARG
+            ;;
+        h)
+            usage 0
+            ;;
+        t)
+            set_cmd "list"
+            ;;
+        x)
+            set_cmd "extract"
+            ;;
+        v)
+            VERBOSE=yes
+            exec 7>&1
+            ;;
+        C)
+            CDIR=$OPTARG
+            ;;
+        -)
+            case $OPTARG in
+                recursive-unlink)
+                    RECURSIVE_UNLINK="yes"
+                    ;;
+                *)
+                    echo -e "Error: invalid option -$OPTARG"
+                    echo
+                    usage 1
+                    ;;
+            esac
+            ;;
+        *)
+            echo >&2 "ERROR: invalid option -$OPTARG"
+            echo
+            usage 1
+            ;;
+    esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+    echo >&2 "ERROR: no command given"
+    echo
+    usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+    echo >&2 "ERROR: no archive name given"
+    echo
+    usage 1
+fi
+
+function list {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ ! -e "$ttar_file" ]; then
+        echo >&2 "ERROR: file not found ($ttar_file)"
+        echo
+        usage 1
+    fi
+    while read -r line; do
+        line_no=$(( line_no + 1 ))
+        if [ $size -gt 0 ]; then
+            size=$(( size - 1 ))
+            continue
+        fi
+        if [[ $line =~ ^Path:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+        elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+            size=${BASH_REMATCH[1]}
+            echo "$path"
+        elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            echo "$path/"
+        elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+            echo  "$path -> ${BASH_REMATCH[1]}"
+        fi
+    done < "$ttar_file"
+}
+
+function extract {
+    local path=""
+    local size=0
+    local line_no=0
+    local ttar_file=$1
+    if [ -n "${2:-}" ]; then
+        echo >&2 "ERROR: too many arguments."
+        echo
+        usage 1
+    fi
+    if [ ! -e "$ttar_file" ]; then
+        echo >&2 "ERROR: file not found ($ttar_file)"
+        echo
+        usage 1
+    fi
+    while IFS= read -r line; do
+        line_no=$(( line_no + 1 ))
+        local eof_without_newline
+        if [ "$size" -gt 0 ]; then
+            if [[ "$line" =~ [^\\]EOF ]]; then
+                # An EOF not preceded by a backslash indicates that the line
+                # does not end with a newline
+                eof_without_newline=1
+            else
+                eof_without_newline=0
+            fi
+            # Replace NULLBYTE with null byte if at beginning of line
+            # Replace NULLBYTE with null byte unless preceded by backslash
+            # Remove one backslash in front of NULLBYTE (if any)
+            # Remove EOF unless preceded by backslash
+            # Remove one backslash in front of EOF
+            if [ $USE_PYTHON -eq 1 ]; then
+                echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path"
+            else
+                # The repeated pattern makes up for sed's lack of negative
+                # lookbehind assertions (for consecutive null bytes).
+                echo -n "$line" | \
+                    sed -e 's/^NULLBYTE/\x0/g;
+                            s/\([^\\]\)NULLBYTE/\1\x0/g;
+                            s/\([^\\]\)NULLBYTE/\1\x0/g;
+                            s/\\NULLBYTE/NULLBYTE/g;
+                            s/\([^\\]\)EOF/\1/g;
+                            s/\\EOF/EOF/g;
+                    ' >> "$path"
+            fi
+            if [[ "$eof_without_newline" -eq 0 ]]; then
+                echo >> "$path"
+            fi
+            size=$(( size - 1 ))
+            continue
+        fi
+        if [[ $line =~ ^Path:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            if [ -L "$path" ]; then
+                rm "$path"
+            elif [ -d "$path" ]; then
+                if [ "${RECURSIVE_UNLINK:-}" == "yes" ]; then
+                    rm -r "$path"
+                else
+                    # Safe because symlinks to directories are dealt with above
+                    rmdir "$path"
+                fi
+            elif [ -e "$path" ]; then
+                rm "$path"
+            fi
+        elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+            size=${BASH_REMATCH[1]}
+            # Create file even if it is zero-length.
+            touch "$path"
+            vecho "    $path"
+        elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+            mode=${BASH_REMATCH[1]}
+            chmod "$mode" "$path"
+            vecho "$mode"
+        elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+            path=${BASH_REMATCH[1]}
+            mkdir -p "$path"
+            vecho "    $path/"
+        elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+            ln -s "${BASH_REMATCH[1]}" "$path"
+            vecho "    $path -> ${BASH_REMATCH[1]}"
+        elif [[ $line =~ ^# ]]; then
+            # Ignore comments between files
+            continue
+        else
+            echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+            exit 1
+        fi
+    done < "$ttar_file"
+}
+
+function div {
+    echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+         "- - - - - -"
+}
+
+function get_mode {
+    local mfile=$1
+    if [ -z "${STAT_OPTION:-}" ]; then
+        if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+            # GNU stat
+            STAT_OPTION='-c'
+            STAT_FORMAT='%a'
+        else
+            # BSD stat
+            STAT_OPTION='-f'
+            # Octal output, user/group/other (omit file type, sticky bit)
+            STAT_FORMAT='%OLp'
+        fi
+    fi
+    stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+    shopt -s nullglob
+    local mode
+    local eof_without_newline
+    while (( "$#" )); do
+        file=$1
+        if [ -L "$file" ]; then
+            echo "Path: $file"
+            symlinkTo=$(readlink "$file")
+            echo "SymlinkTo: $symlinkTo"
+            vecho "    $file -> $symlinkTo"
+            div
+        elif [ -d "$file" ]; then
+            # Strip trailing slash (if there is one)
+            file=${file%/}
+            echo "Directory: $file"
+            mode=$(get_mode "$file")
+            echo "Mode: $mode"
+            vecho "$mode $file/"
+            div
+            # Find all files and dirs, including hidden/dot files
+            for x in "$file/"{*,.[^.]*}; do
+                _create "$x"
+            done
+        elif [ -f "$file" ]; then
+            echo "Path: $file"
+            lines=$(wc -l "$file"|awk '{print $1}')
+            eof_without_newline=0
+            if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \
+                    [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then
+                eof_without_newline=1
+                lines=$((lines+1))
+            fi
+            echo "Lines: $lines"
+            # Add backslash in front of EOF
+            # Add backslash in front of NULLBYTE
+            # Replace null byte with NULLBYTE
+            if [ $USE_PYTHON -eq 1 ]; then
+                < "$file" python -c "$PYTHON_CREATE_FILTER"
+            else
+                < "$file" \
+                    sed 's/EOF/\\EOF/g;
+                            s/NULLBYTE/\\NULLBYTE/g;
+                            s/\x0/NULLBYTE/g;
+                    '
+            fi
+            if [[ "$eof_without_newline" -eq 1 ]]; then
+                # Finish line with EOF to indicate that the original line did
+                # not end with a linefeed
+                echo "EOF"
+            fi
+            mode=$(get_mode "$file")
+            echo "Mode: $mode"
+            vecho "$mode $file"
+            div
+        else
+            echo >&2 "ERROR: file not found ($file in $(pwd))"
+            exit 2
+        fi
+        shift
+    done
+}
+
+function create {
+    ttar_file=$1
+    shift
+    if [ -z "${1:-}" ]; then
+        echo >&2 "ERROR: missing arguments."
+        echo
+        usage 1
+    fi
+    if [ -e "$ttar_file" ]; then
+        rm "$ttar_file"
+    fi
+    exec > "$ttar_file"
+    echo "# Archive created by ttar $ARG_STRING"
+    _create "$@"
+}
+
+test_environment
+
+if [ -n "${CDIR:-}" ]; then
+    if [[ "$ARCHIVE" != /* ]]; then
+        # Relative path: preserve the archive's location before changing
+        # directory
+        ARCHIVE="$(pwd)/$ARCHIVE"
+    fi
+    cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
new file mode 100644
index 0000000..51c49d8
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -0,0 +1,212 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// The VM interface is described at
+//
+//	https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// Each setting is exposed as a single file.
+// Each file contains one line with a single numerical value, except lowmem_reserve_ratio, which holds an array,
+// and numa_zonelist_order (deprecated), which is a string.
+type VM struct {
+	AdminReserveKbytes        *int64   // /proc/sys/vm/admin_reserve_kbytes
+	BlockDump                 *int64   // /proc/sys/vm/block_dump
+	CompactUnevictableAllowed *int64   // /proc/sys/vm/compact_unevictable_allowed
+	DirtyBackgroundBytes      *int64   // /proc/sys/vm/dirty_background_bytes
+	DirtyBackgroundRatio      *int64   // /proc/sys/vm/dirty_background_ratio
+	DirtyBytes                *int64   // /proc/sys/vm/dirty_bytes
+	DirtyExpireCentisecs      *int64   // /proc/sys/vm/dirty_expire_centisecs
+	DirtyRatio                *int64   // /proc/sys/vm/dirty_ratio
+	DirtytimeExpireSeconds    *int64   // /proc/sys/vm/dirtytime_expire_seconds
+	DirtyWritebackCentisecs   *int64   // /proc/sys/vm/dirty_writeback_centisecs
+	DropCaches                *int64   // /proc/sys/vm/drop_caches
+	ExtfragThreshold          *int64   // /proc/sys/vm/extfrag_threshold
+	HugetlbShmGroup           *int64   // /proc/sys/vm/hugetlb_shm_group
+	LaptopMode                *int64   // /proc/sys/vm/laptop_mode
+	LegacyVaLayout            *int64   // /proc/sys/vm/legacy_va_layout
+	LowmemReserveRatio        []*int64 // /proc/sys/vm/lowmem_reserve_ratio
+	MaxMapCount               *int64   // /proc/sys/vm/max_map_count
+	MemoryFailureEarlyKill    *int64   // /proc/sys/vm/memory_failure_early_kill
+	MemoryFailureRecovery     *int64   // /proc/sys/vm/memory_failure_recovery
+	MinFreeKbytes             *int64   // /proc/sys/vm/min_free_kbytes
+	MinSlabRatio              *int64   // /proc/sys/vm/min_slab_ratio
+	MinUnmappedRatio          *int64   // /proc/sys/vm/min_unmapped_ratio
+	MmapMinAddr               *int64   // /proc/sys/vm/mmap_min_addr
+	NrHugepages               *int64   // /proc/sys/vm/nr_hugepages
+	NrHugepagesMempolicy      *int64   // /proc/sys/vm/nr_hugepages_mempolicy
+	NrOvercommitHugepages     *int64   // /proc/sys/vm/nr_overcommit_hugepages
+	NumaStat                  *int64   // /proc/sys/vm/numa_stat
+	NumaZonelistOrder         string   // /proc/sys/vm/numa_zonelist_order
+	OomDumpTasks              *int64   // /proc/sys/vm/oom_dump_tasks
+	OomKillAllocatingTask     *int64   // /proc/sys/vm/oom_kill_allocating_task
+	OvercommitKbytes          *int64   // /proc/sys/vm/overcommit_kbytes
+	OvercommitMemory          *int64   // /proc/sys/vm/overcommit_memory
+	OvercommitRatio           *int64   // /proc/sys/vm/overcommit_ratio
+	PageCluster               *int64   // /proc/sys/vm/page-cluster
+	PanicOnOom                *int64   // /proc/sys/vm/panic_on_oom
+	PercpuPagelistFraction    *int64   // /proc/sys/vm/percpu_pagelist_fraction
+	StatInterval              *int64   // /proc/sys/vm/stat_interval
+	Swappiness                *int64   // /proc/sys/vm/swappiness
+	UserReserveKbytes         *int64   // /proc/sys/vm/user_reserve_kbytes
+	VfsCachePressure          *int64   // /proc/sys/vm/vfs_cache_pressure
+	WatermarkBoostFactor      *int64   // /proc/sys/vm/watermark_boost_factor
+	WatermarkScaleFactor      *int64   // /proc/sys/vm/watermark_scale_factor
+	ZoneReclaimMode           *int64   // /proc/sys/vm/zone_reclaim_mode
+}
+
+// VM reads the VM statistics from the specified `proc` filesystem.
+func (fs FS) VM() (*VM, error) {
+	path := fs.proc.Path("sys/vm")
+	file, err := os.Stat(path)
+	if err != nil {
+		return nil, err
+	}
+	if !file.Mode().IsDir() {
+		return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
+	}
+
+	files, err := os.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+
+	var vm VM
+	for _, f := range files {
+		if f.IsDir() {
+			continue
+		}
+
+		name := filepath.Join(path, f.Name())
+		// Ignore read errors: some files in /proc/sys/vm are write-only.
+		value, err := util.SysReadFile(name)
+		if err != nil {
+			continue
+		}
+		vp := util.NewValueParser(value)
+
+		switch f.Name() {
+		case "admin_reserve_kbytes":
+			vm.AdminReserveKbytes = vp.PInt64()
+		case "block_dump":
+			vm.BlockDump = vp.PInt64()
+		case "compact_unevictable_allowed":
+			vm.CompactUnevictableAllowed = vp.PInt64()
+		case "dirty_background_bytes":
+			vm.DirtyBackgroundBytes = vp.PInt64()
+		case "dirty_background_ratio":
+			vm.DirtyBackgroundRatio = vp.PInt64()
+		case "dirty_bytes":
+			vm.DirtyBytes = vp.PInt64()
+		case "dirty_expire_centisecs":
+			vm.DirtyExpireCentisecs = vp.PInt64()
+		case "dirty_ratio":
+			vm.DirtyRatio = vp.PInt64()
+		case "dirtytime_expire_seconds":
+			vm.DirtytimeExpireSeconds = vp.PInt64()
+		case "dirty_writeback_centisecs":
+			vm.DirtyWritebackCentisecs = vp.PInt64()
+		case "drop_caches":
+			vm.DropCaches = vp.PInt64()
+		case "extfrag_threshold":
+			vm.ExtfragThreshold = vp.PInt64()
+		case "hugetlb_shm_group":
+			vm.HugetlbShmGroup = vp.PInt64()
+		case "laptop_mode":
+			vm.LaptopMode = vp.PInt64()
+		case "legacy_va_layout":
+			vm.LegacyVaLayout = vp.PInt64()
+		case "lowmem_reserve_ratio":
+			stringSlice := strings.Fields(value)
+			pint64Slice := make([]*int64, 0, len(stringSlice))
+			for _, value := range stringSlice {
+				vp := util.NewValueParser(value)
+				pint64Slice = append(pint64Slice, vp.PInt64())
+			}
+			vm.LowmemReserveRatio = pint64Slice
+		case "max_map_count":
+			vm.MaxMapCount = vp.PInt64()
+		case "memory_failure_early_kill":
+			vm.MemoryFailureEarlyKill = vp.PInt64()
+		case "memory_failure_recovery":
+			vm.MemoryFailureRecovery = vp.PInt64()
+		case "min_free_kbytes":
+			vm.MinFreeKbytes = vp.PInt64()
+		case "min_slab_ratio":
+			vm.MinSlabRatio = vp.PInt64()
+		case "min_unmapped_ratio":
+			vm.MinUnmappedRatio = vp.PInt64()
+		case "mmap_min_addr":
+			vm.MmapMinAddr = vp.PInt64()
+		case "nr_hugepages":
+			vm.NrHugepages = vp.PInt64()
+		case "nr_hugepages_mempolicy":
+			vm.NrHugepagesMempolicy = vp.PInt64()
+		case "nr_overcommit_hugepages":
+			vm.NrOvercommitHugepages = vp.PInt64()
+		case "numa_stat":
+			vm.NumaStat = vp.PInt64()
+		case "numa_zonelist_order":
+			vm.NumaZonelistOrder = value
+		case "oom_dump_tasks":
+			vm.OomDumpTasks = vp.PInt64()
+		case "oom_kill_allocating_task":
+			vm.OomKillAllocatingTask = vp.PInt64()
+		case "overcommit_kbytes":
+			vm.OvercommitKbytes = vp.PInt64()
+		case "overcommit_memory":
+			vm.OvercommitMemory = vp.PInt64()
+		case "overcommit_ratio":
+			vm.OvercommitRatio = vp.PInt64()
+		case "page-cluster":
+			vm.PageCluster = vp.PInt64()
+		case "panic_on_oom":
+			vm.PanicOnOom = vp.PInt64()
+		case "percpu_pagelist_fraction":
+			vm.PercpuPagelistFraction = vp.PInt64()
+		case "stat_interval":
+			vm.StatInterval = vp.PInt64()
+		case "swappiness":
+			vm.Swappiness = vp.PInt64()
+		case "user_reserve_kbytes":
+			vm.UserReserveKbytes = vp.PInt64()
+		case "vfs_cache_pressure":
+			vm.VfsCachePressure = vp.PInt64()
+		case "watermark_boost_factor":
+			vm.WatermarkBoostFactor = vp.PInt64()
+		case "watermark_scale_factor":
+			vm.WatermarkScaleFactor = vp.PInt64()
+		case "zone_reclaim_mode":
+			vm.ZoneReclaimMode = vp.PInt64()
+		}
+		if err := vp.Err(); err != nil {
+			return nil, err
+		}
+	}
+
+	return &vm, nil
+}
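
Because most VM fields are *int64, a nil pointer signals that the corresponding file was missing or unreadable, so consumers check before dereferencing. A brief sketch:

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewFS("/proc")
		if err != nil {
			log.Fatal(err)
		}
		vm, err := fs.VM()
		if err != nil {
			log.Fatal(err)
		}
		// Fields are nil when the corresponding file was absent or unreadable.
		if vm.Swappiness != nil {
			fmt.Printf("vm.swappiness = %d\n", *vm.Swappiness)
		}
		for i, ratio := range vm.LowmemReserveRatio {
			if ratio != nil {
				fmt.Printf("lowmem_reserve_ratio[%d] = %d\n", i, *ratio)
			}
		}
	}
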
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
new file mode 100644
index 0000000..e54d94b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+package procfs
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// Zoneinfo holds info parsed from /proc/zoneinfo.
+type Zoneinfo struct {
+	Node                       string
+	Zone                       string
+	NrFreePages                *int64
+	Min                        *int64
+	Low                        *int64
+	High                       *int64
+	Scanned                    *int64
+	Spanned                    *int64
+	Present                    *int64
+	Managed                    *int64
+	NrActiveAnon               *int64
+	NrInactiveAnon             *int64
+	NrIsolatedAnon             *int64
+	NrAnonPages                *int64
+	NrAnonTransparentHugepages *int64
+	NrActiveFile               *int64
+	NrInactiveFile             *int64
+	NrIsolatedFile             *int64
+	NrFilePages                *int64
+	NrSlabReclaimable          *int64
+	NrSlabUnreclaimable        *int64
+	NrMlockStack               *int64
+	NrKernelStack              *int64
+	NrMapped                   *int64
+	NrDirty                    *int64
+	NrWriteback                *int64
+	NrUnevictable              *int64
+	NrShmem                    *int64
+	NrDirtied                  *int64
+	NrWritten                  *int64
+	NumaHit                    *int64
+	NumaMiss                   *int64
+	NumaForeign                *int64
+	NumaInterleave             *int64
+	NumaLocal                  *int64
+	NumaOther                  *int64
+	Protection                 []*int64
+}
+
+var nodeZoneRE = regexp.MustCompile(`(\d+), zone\s+(\w+)`)
+
+// Zoneinfo parses a zoneinfo file (/proc/zoneinfo) and returns a slice of
+// structs containing the relevant info.  More information available here:
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
+	data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
+	if err != nil {
+		return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
+	}
+	zoneinfo, err := parseZoneinfo(data)
+	if err != nil {
+		return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
+	}
+	return zoneinfo, nil
+}
+
+func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
+	zoneinfo := []Zoneinfo{}
+
+	zoneinfoBlocks := bytes.Split(zoneinfoData, []byte("\nNode"))
+	for _, block := range zoneinfoBlocks {
+		var zoneinfoElement Zoneinfo
+		lines := strings.Split(string(block), "\n")
+		for _, line := range lines {
+
+			if nodeZone := nodeZoneRE.FindStringSubmatch(line); nodeZone != nil {
+				zoneinfoElement.Node = nodeZone[1]
+				zoneinfoElement.Zone = nodeZone[2]
+				continue
+			}
+			if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
+				continue
+			}
+			parts := strings.Fields(strings.TrimSpace(line))
+			if len(parts) < 2 {
+				continue
+			}
+			vp := util.NewValueParser(parts[1])
+			switch parts[0] {
+			case "nr_free_pages":
+				zoneinfoElement.NrFreePages = vp.PInt64()
+			case "min":
+				zoneinfoElement.Min = vp.PInt64()
+			case "low":
+				zoneinfoElement.Low = vp.PInt64()
+			case "high":
+				zoneinfoElement.High = vp.PInt64()
+			case "scanned":
+				zoneinfoElement.Scanned = vp.PInt64()
+			case "spanned":
+				zoneinfoElement.Spanned = vp.PInt64()
+			case "present":
+				zoneinfoElement.Present = vp.PInt64()
+			case "managed":
+				zoneinfoElement.Managed = vp.PInt64()
+			case "nr_active_anon":
+				zoneinfoElement.NrActiveAnon = vp.PInt64()
+			case "nr_inactive_anon":
+				zoneinfoElement.NrInactiveAnon = vp.PInt64()
+			case "nr_isolated_anon":
+				zoneinfoElement.NrIsolatedAnon = vp.PInt64()
+			case "nr_anon_pages":
+				zoneinfoElement.NrAnonPages = vp.PInt64()
+			case "nr_anon_transparent_hugepages":
+				zoneinfoElement.NrAnonTransparentHugepages = vp.PInt64()
+			case "nr_active_file":
+				zoneinfoElement.NrActiveFile = vp.PInt64()
+			case "nr_inactive_file":
+				zoneinfoElement.NrInactiveFile = vp.PInt64()
+			case "nr_isolated_file":
+				zoneinfoElement.NrIsolatedFile = vp.PInt64()
+			case "nr_file_pages":
+				zoneinfoElement.NrFilePages = vp.PInt64()
+			case "nr_slab_reclaimable":
+				zoneinfoElement.NrSlabReclaimable = vp.PInt64()
+			case "nr_slab_unreclaimable":
+				zoneinfoElement.NrSlabUnreclaimable = vp.PInt64()
+			case "nr_mlock_stack":
+				zoneinfoElement.NrMlockStack = vp.PInt64()
+			case "nr_kernel_stack":
+				zoneinfoElement.NrKernelStack = vp.PInt64()
+			case "nr_mapped":
+				zoneinfoElement.NrMapped = vp.PInt64()
+			case "nr_dirty":
+				zoneinfoElement.NrDirty = vp.PInt64()
+			case "nr_writeback":
+				zoneinfoElement.NrWriteback = vp.PInt64()
+			case "nr_unevictable":
+				zoneinfoElement.NrUnevictable = vp.PInt64()
+			case "nr_shmem":
+				zoneinfoElement.NrShmem = vp.PInt64()
+			case "nr_dirtied":
+				zoneinfoElement.NrDirtied = vp.PInt64()
+			case "nr_written":
+				zoneinfoElement.NrWritten = vp.PInt64()
+			case "numa_hit":
+				zoneinfoElement.NumaHit = vp.PInt64()
+			case "numa_miss":
+				zoneinfoElement.NumaMiss = vp.PInt64()
+			case "numa_foreign":
+				zoneinfoElement.NumaForeign = vp.PInt64()
+			case "numa_interleave":
+				zoneinfoElement.NumaInterleave = vp.PInt64()
+			case "numa_local":
+				zoneinfoElement.NumaLocal = vp.PInt64()
+			case "numa_other":
+				zoneinfoElement.NumaOther = vp.PInt64()
+			case "protection:":
+				protectionParts := strings.Split(line, ":")
+				protectionValues := strings.Replace(protectionParts[1], "(", "", 1)
+				protectionValues = strings.Replace(protectionValues, ")", "", 1)
+				protectionValues = strings.TrimSpace(protectionValues)
+				protectionStringMap := strings.Split(protectionValues, ", ")
+				val, err := util.ParsePInt64s(protectionStringMap)
+				if err == nil {
+					zoneinfoElement.Protection = val
+				}
+			}
+
+		}
+
+		zoneinfo = append(zoneinfo, zoneinfoElement)
+	}
+	return zoneinfo, nil
+}
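
The same nil-pointer convention applies to the Zoneinfo fields; a sketch of iterating zones:

	package main

	import (
		"fmt"
		"log"

		"github.com/prometheus/procfs"
	)

	func main() {
		fs, err := procfs.NewFS("/proc")
		if err != nil {
			log.Fatal(err)
		}
		zones, err := fs.Zoneinfo()
		if err != nil {
			log.Fatal(err)
		}
		for _, z := range zones {
			if z.NrFreePages == nil {
				continue
			}
			fmt.Printf("node %s, zone %s: %d free pages\n", z.Node, z.Zone, *z.NrFreePages)
		}
	}
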
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
index 27ddfee..6492bfe 100644
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -7,6 +7,15 @@
 
 Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
 
+Archived as of April 1, 2025
+-----
+This repository is no longer maintained. The authors recommend you explore the
+following newer, more widely adopted libraries for your Go instrumentation
+needs:
+
+* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics)
+* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus)
+
 Usage
 -----
 
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 95d8e59..ffb24e8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -7,10 +7,13 @@
 	"time"
 )
 
-type CompareType int
+// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
+type CompareType = compareResult
+
+type compareResult int
 
 const (
-	compareLess CompareType = iota - 1
+	compareLess compareResult = iota - 1
 	compareEqual
 	compareGreater
 )
@@ -28,6 +31,8 @@
 	uint32Type = reflect.TypeOf(uint32(1))
 	uint64Type = reflect.TypeOf(uint64(1))
 
+	uintptrType = reflect.TypeOf(uintptr(1))
+
 	float32Type = reflect.TypeOf(float32(1))
 	float64Type = reflect.TypeOf(float64(1))
 
@@ -37,7 +42,7 @@
 	bytesType = reflect.TypeOf([]byte{})
 )
 
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
 	obj1Value := reflect.ValueOf(obj1)
 	obj2Value := reflect.ValueOf(obj2)
 
@@ -308,11 +313,11 @@
 	case reflect.Struct:
 		{
 			// All structs enter here. We're not interested in most types.
-			if !canConvert(obj1Value, timeType) {
+			if !obj1Value.CanConvert(timeType) {
 				break
 			}
 
-			// time.Time can compared!
+			// time.Time can be compared!
 			timeObj1, ok := obj1.(time.Time)
 			if !ok {
 				timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@@ -323,12 +328,18 @@
 				timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
 			}
 
-			return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+			if timeObj1.Before(timeObj2) {
+				return compareLess, true
+			}
+			if timeObj1.Equal(timeObj2) {
+				return compareEqual, true
+			}
+			return compareGreater, true
 		}
 	case reflect.Slice:
 		{
 			// We only care about the []byte type.
-			if !canConvert(obj1Value, bytesType) {
+			if !obj1Value.CanConvert(bytesType) {
 				break
 			}
 
@@ -343,7 +354,27 @@
 				bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
 			}
 
-			return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+			return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
+		}
+	case reflect.Uintptr:
+		{
+			uintptrObj1, ok := obj1.(uintptr)
+			if !ok {
+				uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
+			}
+			uintptrObj2, ok := obj2.(uintptr)
+			if !ok {
+				uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
+			}
+			if uintptrObj1 > uintptrObj2 {
+				return compareGreater, true
+			}
+			if uintptrObj1 == uintptrObj2 {
+				return compareEqual, true
+			}
+			if uintptrObj1 < uintptrObj2 {
+				return compareLess, true
+			}
 		}
 	}
 
@@ -352,79 +383,85 @@
 
 // Greater asserts that the first element is greater than the second
 //
-//    assert.Greater(t, 2, 1)
-//    assert.Greater(t, float64(2), float64(1))
-//    assert.Greater(t, "b", "a")
+//	assert.Greater(t, 2, 1)
+//	assert.Greater(t, float64(2), float64(1))
+//	assert.Greater(t, "b", "a")
 func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
 }
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
 //
-//    assert.GreaterOrEqual(t, 2, 1)
-//    assert.GreaterOrEqual(t, 2, 2)
-//    assert.GreaterOrEqual(t, "b", "a")
-//    assert.GreaterOrEqual(t, "b", "b")
+//	assert.GreaterOrEqual(t, 2, 1)
+//	assert.GreaterOrEqual(t, 2, 2)
+//	assert.GreaterOrEqual(t, "b", "a")
+//	assert.GreaterOrEqual(t, "b", "b")
 func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
 }
 
 // Less asserts that the first element is less than the second
 //
-//    assert.Less(t, 1, 2)
-//    assert.Less(t, float64(1), float64(2))
-//    assert.Less(t, "a", "b")
+//	assert.Less(t, 1, 2)
+//	assert.Less(t, float64(1), float64(2))
+//	assert.Less(t, "a", "b")
 func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
 }
 
 // LessOrEqual asserts that the first element is less than or equal to the second
 //
-//    assert.LessOrEqual(t, 1, 2)
-//    assert.LessOrEqual(t, 2, 2)
-//    assert.LessOrEqual(t, "a", "b")
-//    assert.LessOrEqual(t, "b", "b")
+//	assert.LessOrEqual(t, 1, 2)
+//	assert.LessOrEqual(t, 2, 2)
+//	assert.LessOrEqual(t, "a", "b")
+//	assert.LessOrEqual(t, "b", "b")
 func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
+	return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
 }
 
 // Positive asserts that the specified element is positive
 //
-//    assert.Positive(t, 1)
-//    assert.Positive(t, 1.23)
+//	assert.Positive(t, 1)
+//	assert.Positive(t, 1.23)
 func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not positive", e)
+	return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
 }
 
 // Negative asserts that the specified element is negative
 //
-//    assert.Negative(t, -1)
-//    assert.Negative(t, -1.23)
+//	assert.Negative(t, -1)
+//	assert.Negative(t, -1.23)
 func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+	failMessage := fmt.Sprintf("\"%v\" is not negative", e)
+	return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
 }
 
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
@@ -437,17 +474,17 @@
 
 	compareResult, isComparable := compare(e1, e2, e1Kind)
 	if !isComparable {
-		return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+		return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
 	}
 
 	if !containsValue(allowedComparesResults, compareResult) {
-		return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+		return Fail(t, failMessage, msgAndArgs...)
 	}
 
 	return true
 }
 
-func containsValue(values []CompareType, value CompareType) bool {
+func containsValue(values []compareResult, value compareResult) bool {
 	for _, v := range values {
 		if v == value {
 			return true
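
For context, a hypothetical test exercising the public entry points whose internals changed above, including the newly supported uintptr comparison:

	package example

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	func TestOrdering(t *testing.T) {
		assert.Greater(t, 2, 1)
		assert.LessOrEqual(t, "a", "b")
		// uintptr operands are handled by the reflect.Uintptr case added above.
		assert.Less(t, uintptr(1), uintptr(2))
	}
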
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
deleted file mode 100644
index da86790..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.17
-// +build go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-//       merged/removed with assertion_compare_go1.17_test.go and
-//       assertion_compare_legacy.go
-
-package assert
-
-import "reflect"
-
-// Wrapper around reflect.Value.CanConvert, for compatibility
-// reasons.
-func canConvert(value reflect.Value, to reflect.Type) bool {
-	return value.CanConvert(to)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
deleted file mode 100644
index 1701af2..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-//       merged/removed with assertion_compare_go1.17_test.go and
-//       assertion_compare_can_convert.go
-
-package assert
-
-import "reflect"
-
-// Older versions of Go does not have the reflect.Value.CanConvert
-// method.
-func canConvert(value reflect.Value, to reflect.Type) bool {
-	return false
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 7880b8f..c592f6a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
 
 package assert
 
@@ -22,9 +19,9 @@
 // Containsf asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-//    assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-//    assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//	assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+//	assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+//	assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
 func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -53,10 +50,19 @@
 	return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
 }
 
-// Emptyf asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
 //
-//  assert.Emptyf(t, obj, "error message %s", "formatted")
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -66,7 +72,7 @@
 
 // Equalf asserts that two objects are equal.
 //
-//    assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//	assert.Equalf(t, 123, 123, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -81,8 +87,8 @@
 // EqualErrorf asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   assert.EqualErrorf(t, err,  expectedErrorString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	assert.EqualErrorf(t, err,  expectedErrorString, "error message %s", "formatted")
 func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -90,10 +96,27 @@
 	return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
 }
 
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
 //
-//    assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+//	 assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
+//
+//	assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
 func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -103,10 +126,8 @@
 
 // Errorf asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.Errorf(t, err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedErrorf, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	assert.Errorf(t, err, "error message %s", "formatted")
 func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -126,8 +147,8 @@
 // ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   assert.ErrorContainsf(t, err,  expectedErrorSubString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	assert.ErrorContainsf(t, err,  expectedErrorSubString, "error message %s", "formatted")
 func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -147,7 +168,7 @@
 // Eventuallyf asserts that given condition will be met in waitFor time,
 // periodically checking target function each tick.
 //
-//    assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -155,9 +176,34 @@
 	return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
 }
 
+// EventuallyWithTf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	assert.EventuallyWithTf(t, func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
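To make the CollectT flow concrete, here is a minimal, hypothetical test sketch (TestWorkerReady and its variables are illustrative; only the EventuallyWithTf signature added above is assumed). Failures recorded against the CollectT fail only the current tick, and the format arguments follow the tick duration:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestWorkerReady(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond)
		ready.Store(true)
	}()

	// Each tick runs the condition with a fresh CollectT; the assertion
	// succeeds on the first tick that records no failures.
	assert.EventuallyWithTf(t, func(c *assert.CollectT) {
		assert.True(c, ready.Load(), "flag not set yet")
	}, time.Second, 10*time.Millisecond, "worker never became ready (%s)", "timed out")
}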
 // Exactlyf asserts that two objects are equal in value and type.
 //
-//    assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//	assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
 func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -183,7 +229,7 @@
 
 // Falsef asserts that the specified value is false.
 //
-//    assert.Falsef(t, myBool, "error message %s", "formatted")
+//	assert.Falsef(t, myBool, "error message %s", "formatted")
 func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -202,9 +248,9 @@
 
 // Greaterf asserts that the first element is greater than the second
 //
-//    assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-//    assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
-//    assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+//	assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+//	assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+//	assert.Greaterf(t, "b", "a", "error message %s", "formatted")
 func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -214,10 +260,10 @@
 
 // GreaterOrEqualf asserts that the first element is greater than or equal to the second
 //
-//    assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
-//    assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
-//    assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
-//    assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+//	assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
 func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -228,7 +274,7 @@
 // HTTPBodyContainsf asserts that a specified handler returns a
 // body that contains a string.
 //
-//  assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -241,7 +287,7 @@
 // HTTPBodyNotContainsf asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -253,7 +299,7 @@
 
 // HTTPErrorf asserts that a specified handler returns an error status code.
 //
-//  assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -265,7 +311,7 @@
 
 // HTTPRedirectf asserts that a specified handler returns a redirect status code.
 //
-//  assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -277,7 +323,7 @@
 
 // HTTPStatusCodef asserts that a specified handler returns a specified status code.
 //
-//  assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//	assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@@ -289,7 +335,7 @@
 
 // HTTPSuccessf asserts that a specified handler returns a success status code.
 //
-//  assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//	assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
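These HTTP helpers invoke the handler directly (no server is started), building the request from method, url and values. A small hypothetical sketch (pingHandler is illustrative):

package example_test

import (
	"fmt"
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

// pingHandler is a stand-in handler for this sketch.
func pingHandler(w http.ResponseWriter, r *http.Request) {
	if r.URL.Query().Get("name") == "" {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	fmt.Fprintf(w, "pong %s", r.URL.Query().Get("name"))
}

func TestPingHandler(t *testing.T) {
	params := url.Values{"name": []string{"go"}}
	assert.HTTPSuccessf(t, pingHandler, "GET", "/ping", params, "want a 2xx for %q", params.Encode())
	assert.HTTPBodyContainsf(t, pingHandler, "GET", "/ping", params, "body should echo %s", "pong")
	assert.HTTPStatusCodef(t, pingHandler, "GET", "/ping", nil, http.StatusBadRequest, "missing %q param", "name")
}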
@@ -301,7 +347,7 @@
 
 // Implementsf asserts that an object implements the specified interface.
 //
-//    assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+//	assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
 func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -311,7 +357,7 @@
 
 // InDeltaf asserts that the two numerals are within delta of each other.
 //
-// 	 assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+//	assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
 func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -353,9 +399,9 @@
 
 // IsDecreasingf asserts that the collection is decreasing
 //
-//    assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-//    assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-//    assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+//	assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+//	assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+//	assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
 func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -365,9 +411,9 @@
 
 // IsIncreasingf asserts that the collection is increasing
 //
-//    assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-//    assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-//    assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+//	assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+//	assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+//	assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
 func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -377,9 +423,9 @@
 
 // IsNonDecreasingf asserts that the collection is not decreasing
 //
-//    assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-//    assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-//    assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+//	assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+//	assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+//	assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
 func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -389,9 +435,9 @@
 
 // IsNonIncreasingf asserts that the collection is not increasing
 //
-//    assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-//    assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-//    assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+//	assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+//	assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+//	assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
 func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -399,7 +445,19 @@
 	return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
 }
 
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+//	assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
+}
+
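A quick hypothetical sketch contrasting IsTypef with the new IsNotTypef (Cat and Dog are illustrative types); both compare dynamic types only, never field values:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type Cat struct{ Name string }
type Dog struct{ Name string }

func TestTypeChecks(t *testing.T) {
	var pet interface{} = &Cat{Name: "Mia"}

	// Same dynamic type (*Cat), whatever the field values are.
	assert.IsTypef(t, &Cat{}, pet, "want a *Cat, got %T", pet)

	// Different dynamic type: *Dog vs *Cat.
	assert.IsNotTypef(t, &Dog{}, pet, "a %T is not a dog", pet)
}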
 // IsTypef asserts that the specified objects are of the same type.
+//
+//	assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
 func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -409,7 +467,7 @@
 
 // JSONEqf asserts that two JSON strings are equivalent.
 //
-//  assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//	assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
 func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -420,7 +478,7 @@
 // Lenf asserts that the specified object has specific length.
 // Lenf also fails if the object has a type that len() does not accept.
 //
-//    assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//	assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
 func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -430,9 +488,9 @@
 
 // Lessf asserts that the first element is less than the second
 //
-//    assert.Lessf(t, 1, 2, "error message %s", "formatted")
-//    assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-//    assert.Lessf(t, "a", "b", "error message %s", "formatted")
+//	assert.Lessf(t, 1, 2, "error message %s", "formatted")
+//	assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+//	assert.Lessf(t, "a", "b", "error message %s", "formatted")
 func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -442,10 +500,10 @@
 
 // LessOrEqualf asserts that the first element is less than or equal to the second
 //
-//    assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-//    assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-//    assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-//    assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+//	assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+//	assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+//	assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+//	assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
 func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -455,8 +513,8 @@
 
 // Negativef asserts that the specified element is negative
 //
-//    assert.Negativef(t, -1, "error message %s", "formatted")
-//    assert.Negativef(t, -1.23, "error message %s", "formatted")
+//	assert.Negativef(t, -1, "error message %s", "formatted")
+//	assert.Negativef(t, -1.23, "error message %s", "formatted")
 func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -467,7 +525,7 @@
 // Neverf asserts that the given condition is never satisfied within waitFor time,
 // periodically checking the target function each tick.
 //
-//    assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -477,7 +535,7 @@
 
 // Nilf asserts that the specified object is nil.
 //
-//    assert.Nilf(t, err, "error message %s", "formatted")
+//	assert.Nilf(t, err, "error message %s", "formatted")
 func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -496,10 +554,10 @@
 
 // NoErrorf asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.NoErrorf(t, err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	actualObj, err := SomeFunction()
+//	if assert.NoErrorf(t, err, "error message %s", "formatted") {
+//		assert.Equal(t, expectedObj, actualObj)
+//	}
 func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -519,9 +577,9 @@
 // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-//    assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-//    assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//	assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+//	assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+//	assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
 func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -529,12 +587,28 @@
 	return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
 }
 
-// NotEmptyf asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
 //
-//  if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-//    assert.Equal(t, "two", obj[1])
-//  }
+//	assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+//	assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+//	assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
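Because NotElementsMatchf treats the lists as multisets, duplicate counts matter. A hypothetical sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotElementsMatch(t *testing.T) {
	// The same multiset in a different order would NOT satisfy this
	// assertion, so compare lists whose element counts genuinely differ.
	assert.NotElementsMatchf(t, []int{1, 1, 2, 3}, []int{1, 2, 3},
		"duplicate counts differ (%s)", "details")

	// Entirely different elements also satisfy it.
	assert.NotElementsMatchf(t, []int{1, 2, 3}, []int{1, 2, 4},
		"element sets differ (%s)", "details")
}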
+// NotEmptyf asserts that the specified object is NOT [Empty].
+//
+//	if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -544,7 +618,7 @@
 
 // NotEqualf asserts that the specified values are NOT equal.
 //
-//    assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//	assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -557,7 +631,7 @@
 
 // NotEqualValuesf asserts that two objects are not equal even when converted to the same type
 //
-//    assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+//	assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
 func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -565,7 +639,16 @@
 	return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If one does match, the assertion fails and target is set to that error value.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
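A hypothetical sketch of the two negative error-chain assertions (timeoutError and errSentinel are illustrative); NotErrorAsf is the inverse of errors.As, and NotErrorIsf the inverse of errors.Is:

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// timeoutError is an illustrative error type.
type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return e.op + ": timeout" }

var errSentinel = errors.New("sentinel")

func TestErrorChainNegatives(t *testing.T) {
	err := fmt.Errorf("read failed: %w", errors.New("connection reset"))

	// No *timeoutError anywhere in err's chain, so this passes; on a
	// match it would fail and fill in target.
	var target *timeoutError
	assert.NotErrorAsf(t, err, &target, "unexpected timeout in chain (%s)", "details")

	// errSentinel was never wrapped into err, so errors.Is finds nothing.
	assert.NotErrorIsf(t, err, errSentinel, "sentinel must not appear (%s)", "details")
}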
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -574,9 +657,19 @@
 	return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
 }
 
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+//	assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
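NotImplementsf pairs naturally with Implementsf; a minimal sketch using only standard-library types:

package example_test

import (
	"io"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestImplementsPair(t *testing.T) {
	// *strings.Reader implements io.Reader but has no Close method.
	r := strings.NewReader("data")
	assert.Implementsf(t, (*io.Reader)(nil), r, "should satisfy %s", "io.Reader")
	assert.NotImplementsf(t, (*io.Closer)(nil), r, "must not satisfy %s", "io.Closer")
}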
 // NotNilf asserts that the specified object is not nil.
 //
-//    assert.NotNilf(t, err, "error message %s", "formatted")
+//	assert.NotNilf(t, err, "error message %s", "formatted")
 func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -586,7 +679,7 @@
 
 // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//	assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
 func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -596,8 +689,8 @@
 
 // NotRegexpf asserts that a specified regexp does not match a string.
 //
-//  assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-//  assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//	assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//	assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
 func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -607,7 +700,7 @@
 
 // NotSamef asserts that two pointers do not reference the same object.
 //
-//    assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//	assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -618,10 +711,15 @@
 	return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//	assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+//	assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+//	assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
 func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -639,7 +737,7 @@
 
 // Panicsf asserts that the code inside the specified PanicTestFunc panics.
 //
-//   assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//	assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
 func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -651,7 +749,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -662,7 +760,7 @@
 // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -672,8 +770,8 @@
 
 // Positivef asserts that the specified element is positive
 //
-//    assert.Positivef(t, 1, "error message %s", "formatted")
-//    assert.Positivef(t, 1.23, "error message %s", "formatted")
+//	assert.Positivef(t, 1, "error message %s", "formatted")
+//	assert.Positivef(t, 1.23, "error message %s", "formatted")
 func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -683,8 +781,8 @@
 
 // Regexpf asserts that a specified regexp matches a string.
 //
-//  assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-//  assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//	assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//	assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
 func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -694,7 +792,7 @@
 
 // Samef asserts that two pointers reference the same object.
 //
-//    assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//	assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -705,10 +803,15 @@
 	return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
 }
 
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//	assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+//	assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+//	assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
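The map semantics described above in one hypothetical sketch: map-vs-map compares key-value pairs, while map-vs-slice checks keys only:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetWithMaps(t *testing.T) {
	conf := map[string]int{"x": 1, "y": 2}

	// Map vs map: the key-value pair must be present.
	assert.Subsetf(t, conf, map[string]int{"x": 1}, "missing pair (%s)", "details")

	// Map vs slice: only the keys are checked.
	assert.Subsetf(t, conf, []string{"x"}, "missing key (%s)", "details")

	// "z": 3 is not in conf, so the negative assertion holds.
	assert.NotSubsetf(t, conf, map[string]int{"z": 3}, "unexpected pair (%s)", "details")
}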
@@ -718,7 +821,7 @@
 
 // Truef asserts that the specified value is true.
 //
-//    assert.Truef(t, myBool, "error message %s", "formatted")
+//	assert.Truef(t, myBool, "error message %s", "formatted")
 func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -728,7 +831,7 @@
 
 // WithinDurationf asserts that the two times are within duration delta of each other.
 //
-//   assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//	assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
 func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -738,7 +841,7 @@
 
 // WithinRangef asserts that a time is within a time range (inclusive).
 //
-//   assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+//	assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
 func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 339515b..58db928 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
 
 package assert
 
@@ -30,9 +27,9 @@
 // Contains asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    a.Contains("Hello World", "World")
-//    a.Contains(["Hello", "World"], "World")
-//    a.Contains({"Hello": "World"}, "Hello")
+//	a.Contains("Hello World", "World")
+//	a.Contains(["Hello", "World"], "World")
+//	a.Contains({"Hello": "World"}, "Hello")
 func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -43,9 +40,9 @@
 // Containsf asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    a.Containsf("Hello World", "World", "error message %s", "formatted")
-//    a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
-//    a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//	a.Containsf("Hello World", "World", "error message %s", "formatted")
+//	a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+//	a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
 func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -95,10 +92,19 @@
 	return ElementsMatchf(a.t, listA, listB, msg, args...)
 }
 
-// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
 //
-//  a.Empty(obj)
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	a.Empty(obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -106,10 +112,19 @@
 	return Empty(a.t, object, msgAndArgs...)
 }
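A hypothetical sketch of the "empty" rules spelled out above (zero values, zero-length collections, zero-value arrays, and nil or empty-pointing pointers):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmptyRules(t *testing.T) {
	a := assert.New(t)

	a.Empty("")               // zero value
	a.Empty([]int{})          // zero-length slice
	a.Empty(map[string]int{}) // zero-length map
	a.Empty([2]int{0, 0})     // array of zero values

	var p *int
	a.Empty(p) // nil pointer

	zero := 0
	a.Empty(&zero) // pointer to an "empty" value

	a.NotEmpty([]int{1})
}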
 
-// Emptyf asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
 //
-//  a.Emptyf(obj, "error message %s", "formatted")
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+//	a.Emptyf(obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -119,7 +134,7 @@
 
 // Equal asserts that two objects are equal.
 //
-//    a.Equal(123, 123)
+//	a.Equal(123, 123)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -134,8 +149,8 @@
 // EqualError asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   a.EqualError(err,  expectedErrorString)
+//	actualObj, err := SomeFunction()
+//	a.EqualError(err,  expectedErrorString)
 func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -146,8 +161,8 @@
 // EqualErrorf asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   a.EqualErrorf(err,  expectedErrorString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	a.EqualErrorf(err,  expectedErrorString, "error message %s", "formatted")
 func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -155,10 +170,44 @@
 	return EqualErrorf(a.t, theError, errString, msg, args...)
 }
 
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
 //
-//    a.EqualValues(uint32(123), int32(123))
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 a.EqualExportedValues(S{1, 2}, S{1, 3}) => true
+//	 a.EqualExportedValues(S{1, 2}, S{2, 3}) => false
+func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualExportedValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+//	 type S struct {
+//		Exported     	int
+//		notExported   	int
+//	 }
+//	 a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+//	 a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EqualExportedValuesf(a.t, expected, actual, msg, args...)
+}
+
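A hypothetical sketch (the user type is illustrative): only exported fields participate in the comparison, so differing unexported fields do not fail it:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// user is an illustrative struct with one unexported field.
type user struct {
	Name string
	id   int // ignored by EqualExportedValues
}

func TestEqualExportedValues(t *testing.T) {
	a := assert.New(t)

	// Passes: the exported Name fields match even though the ids differ.
	a.EqualExportedValues(user{Name: "ana", id: 1}, user{Name: "ana", id: 2})
}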
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
+//
+//	a.EqualValues(uint32(123), int32(123))
 func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -166,10 +215,10 @@
 	return EqualValues(a.t, expected, actual, msgAndArgs...)
 }
 
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
 //
-//    a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//	a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
 func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -179,7 +228,7 @@
 
 // Equalf asserts that two objects are equal.
 //
-//    a.Equalf(123, 123, "error message %s", "formatted")
+//	a.Equalf(123, 123, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -193,10 +242,8 @@
 
 // Error asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.Error(err) {
-// 	   assert.Equal(t, expectedError, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	a.Error(err)
 func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -225,8 +272,8 @@
 // ErrorContains asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   a.ErrorContains(err,  expectedErrorSubString)
+//	actualObj, err := SomeFunction()
+//	a.ErrorContains(err,  expectedErrorSubString)
 func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -237,8 +284,8 @@
 // ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   a.ErrorContainsf(err,  expectedErrorSubString, "error message %s", "formatted")
+//	actualObj, err := SomeFunction()
+//	a.ErrorContainsf(err,  expectedErrorSubString, "error message %s", "formatted")
 func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -266,10 +313,8 @@
 
 // Errorf asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.Errorf(err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedErrorf, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	a.Errorf(err, "error message %s", "formatted")
 func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -280,7 +325,7 @@
 // Eventually asserts that the given condition will be met within waitFor time,
 // periodically checking the target function each tick.
 //
-//    a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+//	a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
 func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -288,10 +333,60 @@
 	return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
 }
 
+// EventuallyWithT asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	a.EventuallyWithT(func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// EventuallyWithTf asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	a.EventuallyWithTf(func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still %s", "false")
+func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...)
+}
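The forwarded variant behaves identically but is bound to the Assertions receiver; a brief hypothetical sketch:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestForwardEventuallyWithT(t *testing.T) {
	a := assert.New(t)

	var done atomic.Bool
	go func() {
		time.Sleep(20 * time.Millisecond)
		done.Store(true)
	}()

	// Same polling behavior as the package-level helper, bound to this t.
	a.EventuallyWithT(func(c *assert.CollectT) {
		assert.True(c, done.Load(), "not done yet")
	}, time.Second, 5*time.Millisecond, "background work never finished")
}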
+
 // Eventuallyf asserts that the given condition will be met within waitFor time,
 // periodically checking the target function each tick.
 //
-//    a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -301,7 +396,7 @@
 
 // Exactly asserts that two objects are equal in value and type.
 //
-//    a.Exactly(int32(123), int64(123))
+//	a.Exactly(int32(123), int64(123))
 func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -311,7 +406,7 @@
 
 // Exactlyf asserts that two objects are equal in value and type.
 //
-//    a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//	a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
 func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -353,7 +448,7 @@
 
 // False asserts that the specified value is false.
 //
-//    a.False(myBool)
+//	a.False(myBool)
 func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -363,7 +458,7 @@
 
 // Falsef asserts that the specified value is false.
 //
-//    a.Falsef(myBool, "error message %s", "formatted")
+//	a.Falsef(myBool, "error message %s", "formatted")
 func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -391,9 +486,9 @@
 
 // Greater asserts that the first element is greater than the second
 //
-//    a.Greater(2, 1)
-//    a.Greater(float64(2), float64(1))
-//    a.Greater("b", "a")
+//	a.Greater(2, 1)
+//	a.Greater(float64(2), float64(1))
+//	a.Greater("b", "a")
 func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -403,10 +498,10 @@
 
 // GreaterOrEqual asserts that the first element is greater than or equal to the second
 //
-//    a.GreaterOrEqual(2, 1)
-//    a.GreaterOrEqual(2, 2)
-//    a.GreaterOrEqual("b", "a")
-//    a.GreaterOrEqual("b", "b")
+//	a.GreaterOrEqual(2, 1)
+//	a.GreaterOrEqual(2, 2)
+//	a.GreaterOrEqual("b", "a")
+//	a.GreaterOrEqual("b", "b")
 func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -416,10 +511,10 @@
 
 // GreaterOrEqualf asserts that the first element is greater than or equal to the second
 //
-//    a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
-//    a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
-//    a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
-//    a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+//	a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+//	a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+//	a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+//	a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
 func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -429,9 +524,9 @@
 
 // Greaterf asserts that the first element is greater than the second
 //
-//    a.Greaterf(2, 1, "error message %s", "formatted")
-//    a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
-//    a.Greaterf("b", "a", "error message %s", "formatted")
+//	a.Greaterf(2, 1, "error message %s", "formatted")
+//	a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+//	a.Greaterf("b", "a", "error message %s", "formatted")
 func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -442,7 +537,7 @@
 // HTTPBodyContains asserts that a specified handler returns a
 // body that contains a string.
 //
-//  a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -455,7 +550,7 @@
 // HTTPBodyContainsf asserts that a specified handler returns a
 // body that contains a string.
 //
-//  a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -468,7 +563,7 @@
 // HTTPBodyNotContains asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -481,7 +576,7 @@
 // HTTPBodyNotContainsf asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//	a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -493,7 +588,7 @@
 
 // HTTPError asserts that a specified handler returns an error status code.
 //
-//  a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -505,7 +600,7 @@
 
 // HTTPErrorf asserts that a specified handler returns an error status code.
 //
-//  a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -517,7 +612,7 @@
 
 // HTTPRedirect asserts that a specified handler returns a redirect status code.
 //
-//  a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -529,7 +624,7 @@
 
 // HTTPRedirectf asserts that a specified handler returns a redirect status code.
 //
-//  a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -541,7 +636,7 @@
 
 // HTTPStatusCode asserts that a specified handler returns a specified status code.
 //
-//  a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//	a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@@ -553,7 +648,7 @@
 
 // HTTPStatusCodef asserts that a specified handler returns a specified status code.
 //
-//  a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//	a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@@ -565,7 +660,7 @@
 
 // HTTPSuccess asserts that a specified handler returns a success status code.
 //
-//  a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//	a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -577,7 +672,7 @@
 
 // HTTPSuccessf asserts that a specified handler returns a success status code.
 //
-//  a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//	a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -589,7 +684,7 @@
 
 // Implements asserts that an object implements the specified interface.
 //
-//    a.Implements((*MyInterface)(nil), new(MyObject))
+//	a.Implements((*MyInterface)(nil), new(MyObject))
 func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -599,7 +694,7 @@
 
 // Implementsf asserts that an object implements the specified interface.
 //
-//    a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+//	a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
 func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -609,7 +704,7 @@
 
 // InDelta asserts that the two numerals are within delta of each other.
 //
-// 	 a.InDelta(math.Pi, 22/7.0, 0.01)
+//	a.InDelta(math.Pi, 22/7.0, 0.01)
 func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -651,7 +746,7 @@
 
 // InDeltaf asserts that the two numerals are within delta of each other.
 //
-// 	 a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+//	a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
 func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -693,9 +788,9 @@
 
 // IsDecreasing asserts that the collection is decreasing
 //
-//    a.IsDecreasing([]int{2, 1, 0})
-//    a.IsDecreasing([]float{2, 1})
-//    a.IsDecreasing([]string{"b", "a"})
+//	a.IsDecreasing([]int{2, 1, 0})
+//	a.IsDecreasing([]float64{2, 1})
+//	a.IsDecreasing([]string{"b", "a"})
 func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -705,9 +800,9 @@
 
 // IsDecreasingf asserts that the collection is decreasing
 //
-//    a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
-//    a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
-//    a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+//	a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+//	a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+//	a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
 func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -717,9 +812,9 @@
 
 // IsIncreasing asserts that the collection is increasing
 //
-//    a.IsIncreasing([]int{1, 2, 3})
-//    a.IsIncreasing([]float{1, 2})
-//    a.IsIncreasing([]string{"a", "b"})
+//	a.IsIncreasing([]int{1, 2, 3})
+//	a.IsIncreasing([]float64{1, 2})
+//	a.IsIncreasing([]string{"a", "b"})
 func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -729,9 +824,9 @@
 
 // IsIncreasingf asserts that the collection is increasing
 //
-//    a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
-//    a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
-//    a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+//	a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+//	a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+//	a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
 func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -741,9 +836,9 @@
 
 // IsNonDecreasing asserts that the collection is not decreasing
 //
-//    a.IsNonDecreasing([]int{1, 1, 2})
-//    a.IsNonDecreasing([]float{1, 2})
-//    a.IsNonDecreasing([]string{"a", "b"})
+//	a.IsNonDecreasing([]int{1, 1, 2})
+//	a.IsNonDecreasing([]float64{1, 2})
+//	a.IsNonDecreasing([]string{"a", "b"})
 func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -753,9 +848,9 @@
 
 // IsNonDecreasingf asserts that the collection is not decreasing
 //
-//    a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
-//    a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
-//    a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+//	a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+//	a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+//	a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
 func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -765,9 +860,9 @@
 
 // IsNonIncreasing asserts that the collection is not increasing
 //
-//    a.IsNonIncreasing([]int{2, 1, 1})
-//    a.IsNonIncreasing([]float{2, 1})
-//    a.IsNonIncreasing([]string{"b", "a"})
+//	a.IsNonIncreasing([]int{2, 1, 1})
+//	a.IsNonIncreasing([]float64{2, 1})
+//	a.IsNonIncreasing([]string{"b", "a"})
 func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -777,9 +872,9 @@
 
 // IsNonIncreasingf asserts that the collection is not increasing
 //
-//    a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
-//    a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
-//    a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+//	a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+//	a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+//	a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
 func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -787,7 +882,29 @@
 	return IsNonIncreasingf(a.t, object, msg, args...)
 }
 
+// IsNotType asserts that the specified objects are not of the same type.
+//
+//	a.IsNotType(&NotMyStruct{}, &MyStruct{})
+func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsNotType(a.t, theType, object, msgAndArgs...)
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+//	a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return IsNotTypef(a.t, theType, object, msg, args...)
+}
+
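The IsNotType/IsNotTypef pair is new in this version of the vendored assert package. A minimal usage sketch; the Wallet and Ledger types are hypothetical, invented only for illustration:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Wallet and Ledger are hypothetical types, defined only for this sketch.
type Wallet struct{ Balance int }
type Ledger struct{ Entries []int }

func TestIsNotType(t *testing.T) {
	a := assert.New(t)

	// Passes: a *Ledger and a *Wallet are different types.
	a.IsNotType(&Ledger{}, &Wallet{})

	// Would fail: both arguments are of type *Wallet.
	// a.IsNotType(&Wallet{}, &Wallet{})
}
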
 // IsType asserts that the specified objects are of the same type.
+//
+//	a.IsType(&MyStruct{}, &MyStruct{})
 func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -796,6 +913,8 @@
 }
 
 // IsTypef asserts that the specified objects are of the same type.
+//
+//	a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
 func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -805,7 +924,7 @@
 
 // JSONEq asserts that two JSON strings are equivalent.
 //
-//  a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//	a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
 func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -815,7 +934,7 @@
 
 // JSONEqf asserts that two JSON strings are equivalent.
 //
-//  a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//	a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
 func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -826,7 +945,7 @@
 // Len asserts that the specified object has the given length.
 // Len also fails if the object has a type that len() does not accept.
 //
-//    a.Len(mySlice, 3)
+//	a.Len(mySlice, 3)
 func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -837,7 +956,7 @@
 // Lenf asserts that the specified object has the given length.
 // Lenf also fails if the object has a type that len() does not accept.
 //
-//    a.Lenf(mySlice, 3, "error message %s", "formatted")
+//	a.Lenf(mySlice, 3, "error message %s", "formatted")
 func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -847,9 +966,9 @@
 
 // Less asserts that the first element is less than the second
 //
-//    a.Less(1, 2)
-//    a.Less(float64(1), float64(2))
-//    a.Less("a", "b")
+//	a.Less(1, 2)
+//	a.Less(float64(1), float64(2))
+//	a.Less("a", "b")
 func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -859,10 +978,10 @@
 
 // LessOrEqual asserts that the first element is less than or equal to the second
 //
-//    a.LessOrEqual(1, 2)
-//    a.LessOrEqual(2, 2)
-//    a.LessOrEqual("a", "b")
-//    a.LessOrEqual("b", "b")
+//	a.LessOrEqual(1, 2)
+//	a.LessOrEqual(2, 2)
+//	a.LessOrEqual("a", "b")
+//	a.LessOrEqual("b", "b")
 func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -872,10 +991,10 @@
 
 // LessOrEqualf asserts that the first element is less than or equal to the second
 //
-//    a.LessOrEqualf(1, 2, "error message %s", "formatted")
-//    a.LessOrEqualf(2, 2, "error message %s", "formatted")
-//    a.LessOrEqualf("a", "b", "error message %s", "formatted")
-//    a.LessOrEqualf("b", "b", "error message %s", "formatted")
+//	a.LessOrEqualf(1, 2, "error message %s", "formatted")
+//	a.LessOrEqualf(2, 2, "error message %s", "formatted")
+//	a.LessOrEqualf("a", "b", "error message %s", "formatted")
+//	a.LessOrEqualf("b", "b", "error message %s", "formatted")
 func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -885,9 +1004,9 @@
 
 // Lessf asserts that the first element is less than the second
 //
-//    a.Lessf(1, 2, "error message %s", "formatted")
-//    a.Lessf(float64(1), float64(2), "error message %s", "formatted")
-//    a.Lessf("a", "b", "error message %s", "formatted")
+//	a.Lessf(1, 2, "error message %s", "formatted")
+//	a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+//	a.Lessf("a", "b", "error message %s", "formatted")
 func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -897,8 +1016,8 @@
 
 // Negative asserts that the specified element is negative
 //
-//    a.Negative(-1)
-//    a.Negative(-1.23)
+//	a.Negative(-1)
+//	a.Negative(-1.23)
 func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -908,8 +1027,8 @@
 
 // Negativef asserts that the specified element is negative
 //
-//    a.Negativef(-1, "error message %s", "formatted")
-//    a.Negativef(-1.23, "error message %s", "formatted")
+//	a.Negativef(-1, "error message %s", "formatted")
+//	a.Negativef(-1.23, "error message %s", "formatted")
 func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -920,7 +1039,7 @@
 // Never asserts that the given condition is never satisfied within waitFor time,
 // periodically checking the target function each tick.
 //
-//    a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+//	a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
 func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -931,7 +1050,7 @@
 // Neverf asserts that the given condition is never satisfied within waitFor time,
 // periodically checking the target function each tick.
 //
-//    a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+//	a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
 func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -941,7 +1060,7 @@
 
 // Nil asserts that the specified object is nil.
 //
-//    a.Nil(err)
+//	a.Nil(err)
 func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -951,7 +1070,7 @@
 
 // Nilf asserts that the specified object is nil.
 //
-//    a.Nilf(err, "error message %s", "formatted")
+//	a.Nilf(err, "error message %s", "formatted")
 func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -979,10 +1098,10 @@
 
 // NoError asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.NoError(err) {
-// 	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if a.NoError(err) {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -992,10 +1111,10 @@
 
 // NoErrorf asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if a.NoErrorf(err, "error message %s", "formatted") {
-// 	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if a.NoErrorf(err, "error message %s", "formatted") {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1024,9 +1143,9 @@
 // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    a.NotContains("Hello World", "Earth")
-//    a.NotContains(["Hello", "World"], "Earth")
-//    a.NotContains({"Hello": "World"}, "Earth")
+//	a.NotContains("Hello World", "Earth")
+//	a.NotContains(["Hello", "World"], "Earth")
+//	a.NotContains({"Hello": "World"}, "Earth")
 func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1037,9 +1156,9 @@
 // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
-//    a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
-//    a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//	a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+//	a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+//	a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
 func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1047,12 +1166,45 @@
 	return NotContainsf(a.t, s, contains, msg, args...)
 }
 
-// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
 //
-//  if a.NotEmpty(obj) {
-//    assert.Equal(t, "two", obj[1])
-//  }
+//	a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+//	a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+//	a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+//	a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+//	a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+//	a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
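Because the duplicate-count rule above is easy to misread, here is a minimal sketch of when NotElementsMatch passes; the slices are illustrative only:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotElementsMatch(t *testing.T) {
	a := assert.New(t)

	// Passes: the element 1 appears twice on the left but once on the right.
	a.NotElementsMatch([]int{1, 1, 2, 3}, []int{1, 2, 3})

	// Passes: 4 appears only in the second list.
	a.NotElementsMatch([]int{1, 2, 3}, []int{1, 2, 4})

	// Would fail: same elements with the same multiplicity, order ignored.
	// a.NotElementsMatch([]int{1, 1, 2, 3}, []int{3, 1, 2, 1})
}
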
+// NotEmpty asserts that the specified object is NOT [Empty].
+//
+//	if a.NotEmpty(obj) {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1060,12 +1212,11 @@
 	return NotEmpty(a.t, object, msgAndArgs...)
 }
 
-// NotEmptyf asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
 //
-//  if a.NotEmptyf(obj, "error message %s", "formatted") {
-//    assert.Equal(t, "two", obj[1])
-//  }
+//	if a.NotEmptyf(obj, "error message %s", "formatted") {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1075,7 +1226,7 @@
 
 // NotEqual asserts that the specified values are NOT equal.
 //
-//    a.NotEqual(obj1, obj2)
+//	a.NotEqual(obj1, obj2)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -1088,7 +1239,7 @@
 
 // NotEqualValues asserts that two objects are not equal even when converted to the same type
 //
-//    a.NotEqualValues(obj1, obj2)
+//	a.NotEqualValues(obj1, obj2)
 func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1098,7 +1249,7 @@
 
 // NotEqualValuesf asserts that two objects are not equal even when converted to the same type
 //
-//    a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+//	a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
 func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1108,7 +1259,7 @@
 
 // NotEqualf asserts that the specified values are NOT equal.
 //
-//    a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//	a.NotEqualf(obj1, obj2, "error message %s", "formatted")
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -1119,7 +1270,25 @@
 	return NotEqualf(a.t, expected, actual, msg, args...)
 }
 
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If a matching error is found, target is set to that error value and the assertion fails.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If a matching error is found, target is set to that error value and the assertion fails.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotErrorAsf(a.t, err, target, msg, args...)
+}
+
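A short sketch of the errors.As-based semantics; timeoutError is a hypothetical error type defined only for this example:

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// timeoutError is a hypothetical error type used only for illustration.
type timeoutError struct{ op string }

func (e *timeoutError) Error() string { return e.op + ": timeout" }

func TestNotErrorAs(t *testing.T) {
	a := assert.New(t)

	plain := errors.New("permission denied")
	wrapped := fmt.Errorf("read config: %w", &timeoutError{op: "open"})

	var te *timeoutError

	// Passes: nothing in plain's chain is a *timeoutError.
	a.NotErrorAs(plain, &te)

	// Would fail, setting te to the wrapped *timeoutError.
	// a.NotErrorAs(wrapped, &te)
	_ = wrapped
}
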
+// NotErrorIs asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -1128,7 +1297,7 @@
 	return NotErrorIs(a.t, err, target, msgAndArgs...)
 }
 
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
@@ -1137,9 +1306,29 @@
 	return NotErrorIsf(a.t, err, target, msg, args...)
 }
 
+// NotImplements asserts that an object does not implement the specified interface.
+//
+//	a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+//	a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
+
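A minimal sketch of NotImplements using only standard-library types:

package example_test

import (
	"io"
	"strings"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotImplements(t *testing.T) {
	a := assert.New(t)

	// Passes: strings.Builder implements io.Writer but not io.Reader.
	a.NotImplements((*io.Reader)(nil), &strings.Builder{})

	// Would fail: *strings.Reader does implement io.Reader.
	// a.NotImplements((*io.Reader)(nil), strings.NewReader("x"))
}
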
 // NotNil asserts that the specified object is not nil.
 //
-//    a.NotNil(err)
+//	a.NotNil(err)
 func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1149,7 +1338,7 @@
 
 // NotNilf asserts that the specified object is not nil.
 //
-//    a.NotNilf(err, "error message %s", "formatted")
+//	a.NotNilf(err, "error message %s", "formatted")
 func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1159,7 +1348,7 @@
 
 // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   a.NotPanics(func(){ RemainCalm() })
+//	a.NotPanics(func(){ RemainCalm() })
 func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1169,7 +1358,7 @@
 
 // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//	a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
 func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1179,8 +1368,8 @@
 
 // NotRegexp asserts that a specified regexp does not match a string.
 //
-//  a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
-//  a.NotRegexp("^start", "it's not starting")
+//	a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+//	a.NotRegexp("^start", "it's not starting")
 func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1190,8 +1379,8 @@
 
 // NotRegexpf asserts that a specified regexp does not match a string.
 //
-//  a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-//  a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//	a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+//	a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
 func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1201,7 +1390,7 @@
 
 // NotSame asserts that two pointers do not reference the same object.
 //
-//    a.NotSame(ptr1, ptr2)
+//	a.NotSame(ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1214,7 +1403,7 @@
 
 // NotSamef asserts that two pointers do not reference the same object.
 //
-//    a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//	a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1225,10 +1414,15 @@
 	return NotSamef(a.t, expected, actual, msg, args...)
 }
 
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice, in
+// which case only the map keys are evaluated.
 //
-//    a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//	a.NotSubset([1, 3, 4], [1, 2])
+//	a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+//	a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
+//	a.NotSubset({"x": 1, "y": 2}, ["z"])
 func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1236,10 +1430,15 @@
 	return NotSubset(a.t, list, subset, msgAndArgs...)
 }
 
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice, in
+// which case only the map keys are evaluated.
 //
-//    a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+//	a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+//	a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+//	a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
 func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1265,7 +1464,7 @@
 
 // Panics asserts that the code inside the specified PanicTestFunc panics.
 //
-//   a.Panics(func(){ GoCrazy() })
+//	a.Panics(func(){ GoCrazy() })
 func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1277,7 +1476,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   a.PanicsWithError("crazy error", func(){ GoCrazy() })
+//	a.PanicsWithError("crazy error", func(){ GoCrazy() })
 func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1289,7 +1488,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1300,7 +1499,7 @@
 // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//	a.PanicsWithValue("crazy error", func(){ GoCrazy() })
 func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1311,7 +1510,7 @@
 // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//	a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
 func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1321,7 +1520,7 @@
 
 // Panicsf asserts that the code inside the specified PanicTestFunc panics.
 //
-//   a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//	a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
 func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1331,8 +1530,8 @@
 
 // Positive asserts that the specified element is positive
 //
-//    a.Positive(1)
-//    a.Positive(1.23)
+//	a.Positive(1)
+//	a.Positive(1.23)
 func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1342,8 +1541,8 @@
 
 // Positivef asserts that the specified element is positive
 //
-//    a.Positivef(1, "error message %s", "formatted")
-//    a.Positivef(1.23, "error message %s", "formatted")
+//	a.Positivef(1, "error message %s", "formatted")
+//	a.Positivef(1.23, "error message %s", "formatted")
 func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1353,8 +1552,8 @@
 
 // Regexp asserts that a specified regexp matches a string.
 //
-//  a.Regexp(regexp.MustCompile("start"), "it's starting")
-//  a.Regexp("start...$", "it's not starting")
+//	a.Regexp(regexp.MustCompile("start"), "it's starting")
+//	a.Regexp("start...$", "it's not starting")
 func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1364,8 +1563,8 @@
 
 // Regexpf asserts that a specified regexp matches a string.
 //
-//  a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-//  a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//	a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+//	a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
 func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1375,7 +1574,7 @@
 
 // Same asserts that two pointers reference the same object.
 //
-//    a.Same(ptr1, ptr2)
+//	a.Same(ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1388,7 +1587,7 @@
 
 // Samef asserts that two pointers reference the same object.
 //
-//    a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//	a.Samef(ptr1, ptr2, "error message %s", "formatted")
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -1399,10 +1598,15 @@
 	return Samef(a.t, expected, actual, msg, args...)
 }
 
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice, in
+// which case only the map keys are evaluated.
 //
-//    a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//	a.Subset([1, 2, 3], [1, 2])
+//	a.Subset({"x": 1, "y": 2}, {"x": 1})
+//	a.Subset([1, 2, 3], {1: "one", 2: "two"})
+//	a.Subset({"x": 1, "y": 2}, ["x"])
 func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1410,10 +1614,15 @@
 	return Subset(a.t, list, subset, msgAndArgs...)
 }
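
The map handling described in the doc comments above is subtle enough to deserve a worked sketch; the values are illustrative only:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetWithMaps(t *testing.T) {
	a := assert.New(t)

	m := map[string]int{"x": 1, "y": 2}

	// Map against map: key-value pairs must match.
	a.Subset(m, map[string]int{"x": 1})

	// Map against slice: only the keys are compared.
	a.Subset(m, []string{"x"})

	// Slice against map: the map's keys must all appear in the slice.
	a.Subset([]int{1, 2, 3}, map[int]string{1: "one", 2: "two"})
}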
 
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice, in
+// which case only the map keys are evaluated.
 //
-//    a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+//	a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+//	a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+//	a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+//	a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
 func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1423,7 +1632,7 @@
 
 // True asserts that the specified value is true.
 //
-//    a.True(myBool)
+//	a.True(myBool)
 func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1433,7 +1642,7 @@
 
 // Truef asserts that the specified value is true.
 //
-//    a.Truef(myBool, "error message %s", "formatted")
+//	a.Truef(myBool, "error message %s", "formatted")
 func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1443,7 +1652,7 @@
 
 // WithinDuration asserts that the two times are within duration delta of each other.
 //
-//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+//	a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
 func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1453,7 +1662,7 @@
 
 // WithinDurationf asserts that the two times are within duration delta of each other.
 //
-//   a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//	a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
 func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1463,7 +1672,7 @@
 
 // WithinRange asserts that a time is within a time range (inclusive).
 //
-//   a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+//	a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
 func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
@@ -1473,7 +1682,7 @@
 
 // WithinRangef asserts that a time is within a time range (inclusive).
 //
-//   a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+//	a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
 func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
 	if h, ok := a.t.(tHelper); ok {
 		h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 7594487..2fdf80f 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -6,7 +6,7 @@
 )
 
 // isOrdered checks that the collection contains orderable elements.
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
 	objKind := reflect.TypeOf(object).Kind()
 	if objKind != reflect.Slice && objKind != reflect.Array {
 		return false
@@ -33,7 +33,7 @@
 		compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
 
 		if !isComparable {
-			return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+			return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
 		}
 
 		if !containsValue(allowedComparesResults, compareResult) {
@@ -46,36 +46,36 @@
 
 // IsIncreasing asserts that the collection is increasing
 //
-//    assert.IsIncreasing(t, []int{1, 2, 3})
-//    assert.IsIncreasing(t, []float{1, 2})
-//    assert.IsIncreasing(t, []string{"a", "b"})
+//	assert.IsIncreasing(t, []int{1, 2, 3})
+//	assert.IsIncreasing(t, []float64{1, 2})
+//	assert.IsIncreasing(t, []string{"a", "b"})
 func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }
 
 // IsNonIncreasing asserts that the collection is not increasing
 //
-//    assert.IsNonIncreasing(t, []int{2, 1, 1})
-//    assert.IsNonIncreasing(t, []float{2, 1})
-//    assert.IsNonIncreasing(t, []string{"b", "a"})
+//	assert.IsNonIncreasing(t, []int{2, 1, 1})
+//	assert.IsNonIncreasing(t, []float64{2, 1})
+//	assert.IsNonIncreasing(t, []string{"b", "a"})
 func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }
 
 // IsDecreasing asserts that the collection is decreasing
 //
-//    assert.IsDecreasing(t, []int{2, 1, 0})
-//    assert.IsDecreasing(t, []float{2, 1})
-//    assert.IsDecreasing(t, []string{"b", "a"})
+//	assert.IsDecreasing(t, []int{2, 1, 0})
+//	assert.IsDecreasing(t, []float64{2, 1})
+//	assert.IsDecreasing(t, []string{"b", "a"})
 func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }
 
 // IsNonDecreasing asserts that the collection is not decreasing
 //
-//    assert.IsNonDecreasing(t, []int{1, 1, 2})
-//    assert.IsNonDecreasing(t, []float{1, 2})
-//    assert.IsNonDecreasing(t, []string{"a", "b"})
+//	assert.IsNonDecreasing(t, []int{1, 1, 2})
+//	assert.IsNonDecreasing(t, []float64{1, 2})
+//	assert.IsNonDecreasing(t, []string{"a", "b"})
 func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+	return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index fa1245b..de8de0c 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,7 +8,6 @@
 	"fmt"
 	"math"
 	"os"
-	"path/filepath"
 	"reflect"
 	"regexp"
 	"runtime"
@@ -20,7 +19,9 @@
 
 	"github.com/davecgh/go-spew/spew"
 	"github.com/pmezard/go-difflib/difflib"
-	yaml "gopkg.in/yaml.v3"
+
+	// Wrapper around gopkg.in/yaml.v3
+	"github.com/stretchr/testify/assert/yaml"
 )
 
 //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -46,6 +47,10 @@
 // for table driven tests.
 type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
 
+// PanicAssertionFunc is a common function prototype when validating a panic value.  Can be useful
+// for table driven tests.
+type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
+
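A minimal table-driven sketch of the new alias in use; divide is a hypothetical helper:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// divide is a hypothetical helper that panics on a zero divisor.
func divide(a, b int) int { return a / b }

func TestDivideTableDriven(t *testing.T) {
	tests := []struct {
		name      string
		divisor   int
		assertion assert.PanicAssertionFunc
	}{
		{"nonzero divisor", 2, assert.NotPanics},
		{"zero divisor", 0, assert.Panics},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			tt.assertion(t, func() { divide(10, tt.divisor) })
		})
	}
}
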
 // Comparison is a custom function that returns true on success and false on failure
 type Comparison func() (success bool)
 
@@ -76,6 +81,84 @@
 	return bytes.Equal(exp, act)
 }
 
+// copyExportedFields iterates downward through nested data structures and creates a copy
+// that only contains the exported struct fields.
+func copyExportedFields(expected interface{}) interface{} {
+	if isNil(expected) {
+		return expected
+	}
+
+	expectedType := reflect.TypeOf(expected)
+	expectedKind := expectedType.Kind()
+	expectedValue := reflect.ValueOf(expected)
+
+	switch expectedKind {
+	case reflect.Struct:
+		result := reflect.New(expectedType).Elem()
+		for i := 0; i < expectedType.NumField(); i++ {
+			field := expectedType.Field(i)
+			isExported := field.IsExported()
+			if isExported {
+				fieldValue := expectedValue.Field(i)
+				if isNil(fieldValue) || isNil(fieldValue.Interface()) {
+					continue
+				}
+				newValue := copyExportedFields(fieldValue.Interface())
+				result.Field(i).Set(reflect.ValueOf(newValue))
+			}
+		}
+		return result.Interface()
+
+	case reflect.Ptr:
+		result := reflect.New(expectedType.Elem())
+		unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface())
+		result.Elem().Set(reflect.ValueOf(unexportedRemoved))
+		return result.Interface()
+
+	case reflect.Array, reflect.Slice:
+		var result reflect.Value
+		if expectedKind == reflect.Array {
+			result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
+		} else {
+			result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
+		}
+		for i := 0; i < expectedValue.Len(); i++ {
+			index := expectedValue.Index(i)
+			if isNil(index) {
+				continue
+			}
+			unexportedRemoved := copyExportedFields(index.Interface())
+			result.Index(i).Set(reflect.ValueOf(unexportedRemoved))
+		}
+		return result.Interface()
+
+	case reflect.Map:
+		result := reflect.MakeMap(expectedType)
+		for _, k := range expectedValue.MapKeys() {
+			index := expectedValue.MapIndex(k)
+			unexportedRemoved := copyExportedFields(index.Interface())
+			result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved))
+		}
+		return result.Interface()
+
+	default:
+		return expected
+	}
+}
+
+// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are
+// considered equal. This comparison of only exported fields is applied recursively to nested data
+// structures.
+//
+// This function does no assertion of any kind.
+//
+// Deprecated: Use [EqualExportedValues] instead.
+func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
+	expectedCleaned := copyExportedFields(expected)
+	actualCleaned := copyExportedFields(actual)
+	return ObjectsAreEqualValues(expectedCleaned, actualCleaned)
+}
+
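A minimal sketch of the exported-fields comparison; the user struct is hypothetical, and note that EqualExportedValues is the recommended replacement:

package main

import (
	"fmt"

	"github.com/stretchr/testify/assert"
)

// user is a hypothetical struct mixing exported and unexported fields.
type user struct {
	Name string
	age  int // unexported: ignored by the comparison
}

func main() {
	a := user{Name: "ada", age: 36}
	b := user{Name: "ada", age: 99}

	// true: only Name is compared; the differing age field is dropped.
	fmt.Println(assert.ObjectsExportedFieldsAreEqual(a, b))
}
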
 // ObjectsAreEqualValues gets whether two objects are equal, or if their
 // values are equal.
 func ObjectsAreEqualValues(expected, actual interface{}) bool {
@@ -83,17 +166,40 @@
 		return true
 	}
 
-	actualType := reflect.TypeOf(actual)
-	if actualType == nil {
+	expectedValue := reflect.ValueOf(expected)
+	actualValue := reflect.ValueOf(actual)
+	if !expectedValue.IsValid() || !actualValue.IsValid() {
 		return false
 	}
-	expectedValue := reflect.ValueOf(expected)
-	if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
-		// Attempt comparison after type conversion
-		return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+
+	expectedType := expectedValue.Type()
+	actualType := actualValue.Type()
+	if !expectedType.ConvertibleTo(actualType) {
+		return false
 	}
 
-	return false
+	if !isNumericType(expectedType) || !isNumericType(actualType) {
+		// Attempt comparison after type conversion
+		return reflect.DeepEqual(
+			expectedValue.Convert(actualType).Interface(), actual,
+		)
+	}
+
+	// If BOTH values are numeric, there are chances of false positives due
+	// to overflow or underflow. So, we need to make sure to always convert
+	// the smaller type to a larger type before comparing.
+	if expectedType.Size() >= actualType.Size() {
+		return actualValue.Convert(expectedType).Interface() == expected
+	}
+
+	return expectedValue.Convert(actualType).Interface() == actual
+}
+
+// isNumericType returns true if the type is one of:
+// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
+// uintptr, float32, float64, complex64, complex128
+func isNumericType(t reflect.Type) bool {
+	return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
 }
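
A standalone illustration of the overflow false positive that the convert-to-the-larger-type rule above avoids:

package main

import "fmt"

func main() {
	big := int64(1) << 40 // 1099511627776, far outside int8's range
	small := int8(0)

	// Converting the larger value down truncates: int8(big) wraps to 0,
	// so a naive conversion would report the two values as equal.
	fmt.Println(int8(big) == small) // true (the false positive)

	// Converting the smaller value up is lossless, so the comparison
	// is faithful.
	fmt.Println(int64(small) == big) // false (the correct answer)
}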
 
 /* CallerInfo is necessary because the assert functions use the testing object
@@ -104,60 +210,77 @@
 // of each stack frame leading from the current test to the assert call that
 // failed.
 func CallerInfo() []string {
-
 	var pc uintptr
-	var ok bool
 	var file string
 	var line int
 	var name string
 
+	const stackFrameBufferSize = 10
+	pcs := make([]uintptr, stackFrameBufferSize)
+
 	callers := []string{}
-	for i := 0; ; i++ {
-		pc, file, line, ok = runtime.Caller(i)
-		if !ok {
-			// The breaks below failed to terminate the loop, and we ran off the
-			// end of the call stack.
+	offset := 1
+
+	for {
+		n := runtime.Callers(offset, pcs)
+
+		if n == 0 {
 			break
 		}
 
-		// This is a huge edge case, but it will panic if this is the case, see #180
-		if file == "<autogenerated>" {
-			break
-		}
+		frames := runtime.CallersFrames(pcs[:n])
 
-		f := runtime.FuncForPC(pc)
-		if f == nil {
-			break
-		}
-		name = f.Name()
+		for {
+			frame, more := frames.Next()
+			pc = frame.PC
+			file = frame.File
+			line = frame.Line
 
-		// testing.tRunner is the standard library function that calls
-		// tests. Subtests are called directly by tRunner, without going through
-		// the Test/Benchmark/Example function that contains the t.Run calls, so
-		// with subtests we should break when we hit tRunner, without adding it
-		// to the list of callers.
-		if name == "testing.tRunner" {
-			break
-		}
+			// This is a huge edge case, but it will panic if this is the case, see #180
+			if file == "<autogenerated>" {
+				break
+			}
 
-		parts := strings.Split(file, "/")
-		file = parts[len(parts)-1]
-		if len(parts) > 1 {
-			dir := parts[len(parts)-2]
-			if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
-				path, _ := filepath.Abs(file)
-				callers = append(callers, fmt.Sprintf("%s:%d", path, line))
+			f := runtime.FuncForPC(pc)
+			if f == nil {
+				break
+			}
+			name = f.Name()
+
+			// testing.tRunner is the standard library function that calls
+			// tests. Subtests are called directly by tRunner, without going through
+			// the Test/Benchmark/Example function that contains the t.Run calls, so
+			// with subtests we should break when we hit tRunner, without adding it
+			// to the list of callers.
+			if name == "testing.tRunner" {
+				break
+			}
+
+			parts := strings.Split(file, "/")
+			if len(parts) > 1 {
+				filename := parts[len(parts)-1]
+				dir := parts[len(parts)-2]
+				if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+					callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+				}
+			}
+
+			// Drop the package
+			dotPos := strings.LastIndexByte(name, '.')
+			name = name[dotPos+1:]
+			if isTest(name, "Test") ||
+				isTest(name, "Benchmark") ||
+				isTest(name, "Example") {
+				break
+			}
+
+			if !more {
+				break
 			}
 		}
 
-		// Drop the package
-		segments := strings.Split(name, ".")
-		name = segments[len(segments)-1]
-		if isTest(name, "Test") ||
-			isTest(name, "Benchmark") ||
-			isTest(name, "Example") {
-			break
-		}
+		// Next batch
+		offset += cap(pcs)
 	}
 
 	return callers
@@ -197,7 +320,7 @@
 
 // Aligns the provided message so that all lines after the first line start at the same location as the first line.
 // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
-// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
 // basis on which the alignment occurs).
 func indentMessageLines(message string, longestLabelLen int) string {
 	outBuf := new(bytes.Buffer)
@@ -273,7 +396,7 @@
 
 // labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
 //
-//   \t{{label}}:{{align_spaces}}\t{{content}}\n
+//	\t{{label}}:{{align_spaces}}\t{{content}}\n
 //
 // The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
 // If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
@@ -296,7 +419,7 @@
 
 // Implements asserts that an object implements the specified interface.
 //
-//    assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+//	assert.Implements(t, (*MyInterface)(nil), new(MyObject))
 func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -313,22 +436,58 @@
 	return true
 }
 
-// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+// NotImplements asserts that an object does not implement the specified interface.
+//
+//	assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
+	interfaceType := reflect.TypeOf(interfaceObject).Elem()
 
-	if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
-		return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+	if object == nil {
+		return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
+	}
+	if reflect.TypeOf(object).Implements(interfaceType) {
+		return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
 	}
 
 	return true
 }
 
+func isType(expectedType, object interface{}) bool {
+	return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
+}
+
+// IsType asserts that the specified objects are of the same type.
+//
+//	assert.IsType(t, &MyStruct{}, &MyStruct{})
+func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
+	if isType(expectedType, object) {
+		return true
+	}
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
+}
+
+// IsNotType asserts that the specified objects are not of the same type.
+//
+//	assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
+func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
+	if !isType(theType, object) {
+		return true
+	}
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
+}
+
 // Equal asserts that two objects are equal.
 //
-//    assert.Equal(t, 123, 123)
+//	assert.Equal(t, 123, 123)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses). Function equality
@@ -351,7 +510,6 @@
 	}
 
 	return true
-
 }
 
 // validateEqualArgs checks whether provided arguments can be safely used in the
@@ -369,7 +527,7 @@
 
 // Same asserts that two pointers reference the same object.
 //
-//    assert.Same(t, ptr1, ptr2)
+//	assert.Same(t, ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -378,10 +536,17 @@
 		h.Helper()
 	}
 
-	if !samePointers(expected, actual) {
+	same, ok := samePointers(expected, actual)
+	if !ok {
+		return Fail(t, "Both arguments must be pointers", msgAndArgs...)
+	}
+
+	if !same {
+		// both are pointers, but they differ in type or do not point to the same address
 		return Fail(t, fmt.Sprintf("Not same: \n"+
-			"expected: %p %#v\n"+
-			"actual  : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+			"expected: %p %#[1]v\n"+
+			"actual  : %p %#[2]v",
+			expected, actual), msgAndArgs...)
 	}
 
 	return true
@@ -389,7 +554,7 @@
 
 // NotSame asserts that two pointers do not reference the same object.
 //
-//    assert.NotSame(t, ptr1, ptr2)
+//	assert.NotSame(t, ptr1, ptr2)
 //
 // Both arguments must be pointer variables. Pointer variable sameness is
 // determined based on the equality of both type and value.
@@ -398,36 +563,44 @@
 		h.Helper()
 	}
 
-	if samePointers(expected, actual) {
+	same, ok := samePointers(expected, actual)
+	if !ok {
+		// fails when the arguments are not pointers
+		return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
+	}
+
+	if same {
 		return Fail(t, fmt.Sprintf(
-			"Expected and actual point to the same object: %p %#v",
-			expected, expected), msgAndArgs...)
+			"Expected and actual point to the same object: %p %#[1]v",
+			expected), msgAndArgs...)
 	}
 	return true
 }
 
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
+// samePointers checks if two generic interface objects are pointers of the same
+// type pointing to the same object. It returns two values: same indicating if
+// they are the same type and point to the same object, and ok indicating that
+// both inputs are pointers.
+func samePointers(first, second interface{}) (same bool, ok bool) {
 	firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
 	if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
-		return false
+		return false, false // not both are pointers
 	}
 
 	firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
 	if firstType != secondType {
-		return false
+		return false, true // both are pointers, but of different types
 	}
 
 	// compare pointer addresses
-	return first == second
+	return first == second, true
 }
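
With the two-value contract above, Same and NotSame can reject non-pointer arguments explicitly instead of conflating them with an address mismatch; a brief sketch:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestPointerSameness(t *testing.T) {
	a := assert.New(t)

	x, y := 1, 1
	p, q := &x, &x

	// Passes: same type, same address.
	a.Same(p, q)

	// Passes: distinct allocations, even though the values are equal.
	a.NotSame(&x, &y)

	// Would now fail with "Both arguments must be pointers" rather than
	// being reported as a plain mismatch:
	// a.Same(1, 1)
}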
 
 // formatUnequalValues takes two values of arbitrary types and returns string
 // representations appropriate to be presented to the user.
 //
 // If the values are not of like type, the returned strings will be prefixed
-// with the type name, and the value will be enclosed in parenthesis similar
+// with the type name, and the value will be enclosed in parentheses similar
 // to a type conversion in the Go grammar.
 func formatUnequalValues(expected, actual interface{}) (e string, a string) {
 	if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
@@ -454,10 +627,10 @@
 	return value
 }
 
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualValues asserts that two objects are equal, or are convertible to the
+// larger of the two types and equal after conversion.
 //
-//    assert.EqualValues(t, uint32(123), int32(123))
+//	assert.EqualValues(t, uint32(123), int32(123))
 func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -472,12 +645,47 @@
 	}
 
 	return true
+}
 
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+//	type S struct {
+//		Exported    int
+//		notExported int
+//	}
+//	assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
+//	assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
+func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	aType := reflect.TypeOf(expected)
+	bType := reflect.TypeOf(actual)
+
+	if aType != bType {
+		return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+	}
+
+	expected = copyExportedFields(expected)
+	actual = copyExportedFields(actual)
+
+	if !ObjectsAreEqualValues(expected, actual) {
+		diff := diff(expected, actual)
+		expected, actual = formatUnequalValues(expected, actual)
+		return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+
+			"expected: %s\n"+
+			"actual  : %s%s", expected, actual, diff), msgAndArgs...)
+	}
+
+	return true
 }
 
 // Exactly asserts that two objects are equal in value and type.
 //
-//    assert.Exactly(t, int32(123), int64(123))
+//	assert.Exactly(t, int32(123), int64(123))
 func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -491,12 +699,11 @@
 	}
 
 	return Equal(t, expected, actual, msgAndArgs...)
-
 }
 
 // NotNil asserts that the specified object is not nil.
 //
-//    assert.NotNil(t, err)
+//	assert.NotNil(t, err)
 func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	if !isNil(object) {
 		return true
@@ -507,17 +714,6 @@
 	return Fail(t, "Expected value not to be nil.", msgAndArgs...)
 }
 
-// containsKind checks if a specified kind in the slice of kinds.
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
-	for i := 0; i < len(kinds); i++ {
-		if kind == kinds[i] {
-			return true
-		}
-	}
-
-	return false
-}
-
 // isNil checks if a specified object is nil or not, without Failing.
 func isNil(object interface{}) bool {
 	if object == nil {
@@ -525,16 +721,13 @@
 	}
 
 	value := reflect.ValueOf(object)
-	kind := value.Kind()
-	isNilableKind := containsKind(
-		[]reflect.Kind{
-			reflect.Chan, reflect.Func,
-			reflect.Interface, reflect.Map,
-			reflect.Ptr, reflect.Slice},
-		kind)
+	switch value.Kind() {
+	case
+		reflect.Chan, reflect.Func,
+		reflect.Interface, reflect.Map,
+		reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
 
-	if isNilableKind && value.IsNil() {
-		return true
+		return value.IsNil()
 	}
 
 	return false
@@ -542,7 +735,7 @@
 
 // Nil asserts that the specified object is nil.
 //
-//    assert.Nil(t, err)
+//	assert.Nil(t, err)
 func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	if isNil(object) {
 		return true
@@ -555,37 +748,45 @@
 
 // isEmpty gets whether the specified object is considered empty or not.
 func isEmpty(object interface{}) bool {
-
 	// get nil case out of the way
 	if object == nil {
 		return true
 	}
 
-	objValue := reflect.ValueOf(object)
-
-	switch objValue.Kind() {
-	// collection types are empty when they have no element
-	case reflect.Chan, reflect.Map, reflect.Slice:
-		return objValue.Len() == 0
-	// pointers are empty if nil or if the value they point to is empty
-	case reflect.Ptr:
-		if objValue.IsNil() {
-			return true
-		}
-		deref := objValue.Elem().Interface()
-		return isEmpty(deref)
-	// for all other types, compare against the zero value
-	// array types are empty when they match their zero-initialized state
-	default:
-		zero := reflect.Zero(objValue.Type())
-		return reflect.DeepEqual(object, zero.Interface())
-	}
+	return isEmptyValue(reflect.ValueOf(object))
 }
 
-// Empty asserts that the specified object is empty.  I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// isEmptyValue gets whether the specified reflect.Value is considered empty or not.
+func isEmptyValue(objValue reflect.Value) bool {
+	if objValue.IsZero() {
+		return true
+	}
+	// Special cases of non-zero values that we consider empty
+	switch objValue.Kind() {
+	// collection types are empty when they have no element
+	// Note: array types are empty when they match their zero-initialized state.
+	case reflect.Chan, reflect.Map, reflect.Slice:
+		return objValue.Len() == 0
+	// non-nil pointers are empty if the value they point to is empty
+	case reflect.Ptr:
+		return isEmptyValue(objValue.Elem())
+	}
+	return false
+}
+
+// Empty asserts that the given value is "empty".
 //
-//  assert.Empty(t, obj)
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than every element merely being "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the value it points to is "empty".
+//
+//	assert.Empty(t, obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
 func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	pass := isEmpty(object)
 	if !pass {
@@ -596,15 +797,13 @@
 	}
 
 	return pass
-
 }
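
A compact sketch of the emptiness rules listed in the Empty doc comment above:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmptyRules(t *testing.T) {
	// Zero values are empty.
	assert.Empty(t, 0)
	assert.Empty(t, "")

	// Zero-length collections are empty even when non-nil.
	assert.Empty(t, map[string]int{})
	assert.Empty(t, make(chan int))

	// Arrays are empty only when every element is the zero value.
	assert.Empty(t, [2]int{0, 0})
	assert.NotEmpty(t, [2]int{0, 1})

	// A non-nil pointer is empty when the pointed-to value is empty.
	s := ""
	assert.Empty(t, &s)
}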
 
-// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
 //
-//  if assert.NotEmpty(t, obj) {
-//    assert.Equal(t, "two", obj[1])
-//  }
+//	if assert.NotEmpty(t, obj) {
+//	  assert.Equal(t, "two", obj[1])
+//	}
 func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 	pass := !isEmpty(object)
 	if !pass {
@@ -615,43 +814,40 @@
 	}
 
 	return pass
-
 }
 
-// getLen try to get length of object.
-// return (false, 0) if impossible.
-func getLen(x interface{}) (ok bool, length int) {
+// getLen tries to get the length of an object.
+// It returns (0, false) if impossible.
+func getLen(x interface{}) (length int, ok bool) {
 	v := reflect.ValueOf(x)
 	defer func() {
-		if e := recover(); e != nil {
-			ok = false
-		}
+		ok = recover() == nil
 	}()
-	return true, v.Len()
+	return v.Len(), true
 }
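
`getLen` leans on the fact that `reflect.Value.Len` panics for kinds without a length; the deferred `recover() == nil` check converts that panic into `ok == false` through the named return values. A standalone sketch of the same pattern (the helper is unexported in the package, so it is re-declared here for illustration):

```go
package main

import (
	"fmt"
	"reflect"
)

// getLen probes for a length; the deferred recover turns the
// reflect panic on non-measurable kinds into ok == false.
func getLen(x interface{}) (length int, ok bool) {
	v := reflect.ValueOf(x)
	defer func() {
		ok = recover() == nil
	}()
	return v.Len(), true
}

func main() {
	fmt.Println(getLen([]int{1, 2, 3})) // 3 true
	fmt.Println(getLen("hello"))        // 5 true
	fmt.Println(getLen(42))             // 0 false: ints have no length
}
```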
 
 // Len asserts that the specified object has the specified length.
 // Len also fails if the object has a type that the builtin len() does not accept.
 //
-//    assert.Len(t, mySlice, 3)
+//	assert.Len(t, mySlice, 3)
 func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	ok, l := getLen(object)
+	l, ok := getLen(object)
 	if !ok {
-		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
 	}
 
 	if l != length {
-		return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
 	}
 	return true
 }
 
 // True asserts that the specified value is true.
 //
-//    assert.True(t, myBool)
+//	assert.True(t, myBool)
 func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
 	if !value {
 		if h, ok := t.(tHelper); ok {
@@ -661,12 +857,11 @@
 	}
 
 	return true
-
 }
 
 // False asserts that the specified value is false.
 //
-//    assert.False(t, myBool)
+//	assert.False(t, myBool)
 func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
 	if value {
 		if h, ok := t.(tHelper); ok {
@@ -676,12 +871,11 @@
 	}
 
 	return true
-
 }
 
 // NotEqual asserts that the specified values are NOT equal.
 //
-//    assert.NotEqual(t, obj1, obj2)
+//	assert.NotEqual(t, obj1, obj2)
 //
 // Pointer variable equality is determined based on the equality of the
 // referenced values (as opposed to the memory addresses).
@@ -699,12 +893,11 @@
 	}
 
 	return true
-
 }
 
 // NotEqualValues asserts that two objects are not equal even when converted to the same type
 //
-//    assert.NotEqualValues(t, obj1, obj2)
+//	assert.NotEqualValues(t, obj1, obj2)
 func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -722,7 +915,6 @@
 // return (true, false) if element was not found.
 // return (true, true) if element was found.
 func containsElement(list interface{}, element interface{}) (ok, found bool) {
-
 	listValue := reflect.ValueOf(list)
 	listType := reflect.TypeOf(list)
 	if listType == nil {
@@ -757,15 +949,14 @@
 		}
 	}
 	return true, false
-
 }
 
 // Contains asserts that the specified string, list(array, slice...) or map contains the
 // specified substring or element.
 //
-//    assert.Contains(t, "Hello World", "World")
-//    assert.Contains(t, ["Hello", "World"], "World")
-//    assert.Contains(t, {"Hello": "World"}, "Hello")
+//	assert.Contains(t, "Hello World", "World")
+//	assert.Contains(t, ["Hello", "World"], "World")
+//	assert.Contains(t, {"Hello": "World"}, "Hello")
 func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -780,15 +971,14 @@
 	}
 
 	return true
-
 }
 
 // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
 // specified substring or element.
 //
-//    assert.NotContains(t, "Hello World", "Earth")
-//    assert.NotContains(t, ["Hello", "World"], "Earth")
-//    assert.NotContains(t, {"Hello": "World"}, "Earth")
+//	assert.NotContains(t, "Hello World", "Earth")
+//	assert.NotContains(t, ["Hello", "World"], "Earth")
+//	assert.NotContains(t, {"Hello": "World"}, "Earth")
 func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -796,20 +986,24 @@
 
 	ok, found := containsElement(s, contains)
 	if !ok {
-		return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
 	}
 	if found {
-		return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+		return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...)
 	}
 
 	return true
-
 }
 
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//	assert.Subset(t, [1, 2, 3], [1, 2])
+//	assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+//	assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
+//	assert.Subset(t, {"x": 1, "y": 2}, ["x"])
 func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -818,59 +1012,66 @@
 		return true // we consider nil to be equal to the nil set
 	}
 
-	defer func() {
-		if e := recover(); e != nil {
-			ok = false
-		}
-	}()
-
 	listKind := reflect.TypeOf(list).Kind()
-	subsetKind := reflect.TypeOf(subset).Kind()
-
 	if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
 	}
 
-	if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+	subsetKind := reflect.TypeOf(subset).Kind()
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
 	}
 
-	subsetValue := reflect.ValueOf(subset)
 	if subsetKind == reflect.Map && listKind == reflect.Map {
-		listValue := reflect.ValueOf(list)
-		subsetKeys := subsetValue.MapKeys()
+		subsetMap := reflect.ValueOf(subset)
+		actualMap := reflect.ValueOf(list)
 
-		for i := 0; i < len(subsetKeys); i++ {
-			subsetKey := subsetKeys[i]
-			subsetElement := subsetValue.MapIndex(subsetKey).Interface()
-			listElement := listValue.MapIndex(subsetKey).Interface()
+		for _, k := range subsetMap.MapKeys() {
+			ev := subsetMap.MapIndex(k)
+			av := actualMap.MapIndex(k)
 
-			if !ObjectsAreEqual(subsetElement, listElement) {
-				return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+			if !av.IsValid() {
+				return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+			}
+			if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+				return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
 			}
 		}
 
 		return true
 	}
 
-	for i := 0; i < subsetValue.Len(); i++ {
-		element := subsetValue.Index(i).Interface()
+	subsetList := reflect.ValueOf(subset)
+	if subsetKind == reflect.Map {
+		keys := make([]interface{}, subsetList.Len())
+		for idx, key := range subsetList.MapKeys() {
+			keys[idx] = key.Interface()
+		}
+		subsetList = reflect.ValueOf(keys)
+	}
+	for i := 0; i < subsetList.Len(); i++ {
+		element := subsetList.Index(i).Interface()
 		ok, found := containsElement(list, element)
 		if !ok {
-			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+			return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
 		}
 		if !found {
-			return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+			return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
 		}
 	}
 
 	return true
 }
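
With this hunk, `Subset` handles three shapes of comparison rather than two; in particular, a map subset checked against a slice list only consults the map's keys. An illustrative test, assuming the semantics described in the doc comment above:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetShapes(t *testing.T) {
	// map vs map: key-value pairs must match
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})

	// map subset vs slice list: only the map keys are looked up
	assert.Subset(t, []int{1, 2, 3}, map[int]string{1: "one", 2: "two"})

	// slice vs slice: plain element containment
	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
}
```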
 
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
 //
-//    assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//	assert.NotSubset(t, [1, 3, 4], [1, 2])
+//	assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+//	assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
+//	assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
 func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -879,34 +1080,28 @@
 		return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
 	}
 
-	defer func() {
-		if e := recover(); e != nil {
-			ok = false
-		}
-	}()
-
 	listKind := reflect.TypeOf(list).Kind()
-	subsetKind := reflect.TypeOf(subset).Kind()
-
 	if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
 	}
 
-	if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+	subsetKind := reflect.TypeOf(subset).Kind()
+	if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
 		return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
 	}
 
-	subsetValue := reflect.ValueOf(subset)
 	if subsetKind == reflect.Map && listKind == reflect.Map {
-		listValue := reflect.ValueOf(list)
-		subsetKeys := subsetValue.MapKeys()
+		subsetMap := reflect.ValueOf(subset)
+		actualMap := reflect.ValueOf(list)
 
-		for i := 0; i < len(subsetKeys); i++ {
-			subsetKey := subsetKeys[i]
-			subsetElement := subsetValue.MapIndex(subsetKey).Interface()
-			listElement := listValue.MapIndex(subsetKey).Interface()
+		for _, k := range subsetMap.MapKeys() {
+			ev := subsetMap.MapIndex(k)
+			av := actualMap.MapIndex(k)
 
-			if !ObjectsAreEqual(subsetElement, listElement) {
+			if !av.IsValid() {
+				return true
+			}
+			if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
 				return true
 			}
 		}
@@ -914,11 +1109,19 @@
 		return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
 	}
 
-	for i := 0; i < subsetValue.Len(); i++ {
-		element := subsetValue.Index(i).Interface()
+	subsetList := reflect.ValueOf(subset)
+	if subsetKind == reflect.Map {
+		keys := make([]interface{}, subsetList.Len())
+		for idx, key := range subsetList.MapKeys() {
+			keys[idx] = key.Interface()
+		}
+		subsetList = reflect.ValueOf(keys)
+	}
+	for i := 0; i < subsetList.Len(); i++ {
+		element := subsetList.Index(i).Interface()
 		ok, found := containsElement(list, element)
 		if !ok {
-			return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+			return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
 		}
 		if !found {
 			return true
@@ -1024,6 +1227,39 @@
 	return msg.String()
 }
 
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...), ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
+func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if isEmpty(listA) && isEmpty(listB) {
+		return Fail(t, "listA and listB contain the same elements", msgAndArgs...)
+	}
+
+	if !isList(t, listA, msgAndArgs...) {
+		return Fail(t, "listA is not a list type", msgAndArgs...)
+	}
+	if !isList(t, listB, msgAndArgs...) {
+		return Fail(t, "listB is not a list type", msgAndArgs...)
+	}
+
+	extraA, extraB := diffLists(listA, listB)
+	if len(extraA) == 0 && len(extraB) == 0 {
+		return Fail(t, "listA and listB contain the same elements", msgAndArgs...)
+	}
+
+	return true
+}
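
Illustrative usage of the new assertion; note that equal multisets fail regardless of element order:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotElementsMatch(t *testing.T) {
	// Different multiplicities of 1: passes.
	assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})

	// Different elements: passes.
	assert.NotElementsMatch(t, []int{1, 2, 3}, []int{1, 2, 4})

	// Same elements in another order would fail:
	// assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{3, 1, 2, 1})
}
```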
+
 // Condition uses a Comparison to assert a complex condition.
 func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -1060,7 +1296,7 @@
 
 // Panics asserts that the code inside the specified PanicTestFunc panics.
 //
-//   assert.Panics(t, func(){ GoCrazy() })
+//	assert.Panics(t, func(){ GoCrazy() })
 func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1076,7 +1312,7 @@
 // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
 // the recovered panic value equals the expected panic value.
 //
-//   assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//	assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
 func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1097,7 +1333,7 @@
 // panics, and that the recovered panic value is an error that satisfies the
 // EqualError comparison.
 //
-//   assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+//	assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
 func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1117,7 +1353,7 @@
 
 // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
 //
-//   assert.NotPanics(t, func(){ RemainCalm() })
+//	assert.NotPanics(t, func(){ RemainCalm() })
 func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1132,7 +1368,7 @@
 
 // WithinDuration asserts that the two times are within duration delta of each other.
 //
-//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+//	assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
 func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1148,7 +1384,7 @@
 
 // WithinRange asserts that a time is within a time range (inclusive).
 //
-//   assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+//	assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
 func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1207,7 +1443,7 @@
 
 // InDelta asserts that the two numerals are within delta of each other.
 //
-// 	 assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+//	assert.InDelta(t, math.Pi, 22/7.0, 0.01)
 func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1336,12 +1572,15 @@
 		h.Helper()
 	}
 	if math.IsNaN(epsilon) {
-		return Fail(t, "epsilon must not be NaN")
+		return Fail(t, "epsilon must not be NaN", msgAndArgs...)
 	}
 	actualEpsilon, err := calcRelativeError(expected, actual)
 	if err != nil {
 		return Fail(t, err.Error(), msgAndArgs...)
 	}
+	if math.IsNaN(actualEpsilon) {
+		return Fail(t, "relative error is NaN", msgAndArgs...)
+	}
 	if actualEpsilon > epsilon {
 		return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
 			"        < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
@@ -1355,19 +1594,26 @@
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
-	if expected == nil || actual == nil ||
-		reflect.TypeOf(actual).Kind() != reflect.Slice ||
-		reflect.TypeOf(expected).Kind() != reflect.Slice {
+
+	if expected == nil || actual == nil {
 		return Fail(t, "Parameters must be slice", msgAndArgs...)
 	}
 
-	actualSlice := reflect.ValueOf(actual)
 	expectedSlice := reflect.ValueOf(expected)
+	actualSlice := reflect.ValueOf(actual)
 
-	for i := 0; i < actualSlice.Len(); i++ {
-		result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
-		if !result {
-			return result
+	if expectedSlice.Type().Kind() != reflect.Slice {
+		return Fail(t, "Expected value must be slice", msgAndArgs...)
+	}
+
+	expectedLen := expectedSlice.Len()
+	if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
+		return false
+	}
+
+	for i := 0; i < expectedLen; i++ {
+		if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
+			return false
 		}
 	}
 
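
The rewrite validates types and lengths up front and attributes failures to a specific index. A quick numeric example of the relative-error rule, |expected - actual| / |expected| <= epsilon:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestRelativeError(t *testing.T) {
	// |100 - 101| / |100| = 0.01, within the allowed 0.02.
	assert.InEpsilon(t, 100.0, 101.0, 0.02)

	// Element-wise check; a failure would be reported "at index i".
	assert.InEpsilonSlice(t, []float64{1, 2}, []float64{1.01, 1.98}, 0.02)
}
```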
@@ -1380,10 +1626,10 @@
 
 // NoError asserts that a function returned no error (i.e. `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.NoError(t, err) {
-//	   assert.Equal(t, expectedObj, actualObj)
-//   }
+//	  actualObj, err := SomeFunction()
+//	  if assert.NoError(t, err) {
+//		   assert.Equal(t, expectedObj, actualObj)
+//	  }
 func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
 	if err != nil {
 		if h, ok := t.(tHelper); ok {
@@ -1397,10 +1643,8 @@
 
 // Error asserts that a function returned an error (i.e. not `nil`).
 //
-//   actualObj, err := SomeFunction()
-//   if assert.Error(t, err) {
-//	   assert.Equal(t, expectedError, err)
-//   }
+//	actualObj, err := SomeFunction()
+//	assert.Error(t, err)
 func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
 	if err == nil {
 		if h, ok := t.(tHelper); ok {
@@ -1415,8 +1659,8 @@
 // EqualError asserts that a function returned an error (i.e. not `nil`)
 // and that it is equal to the provided error.
 //
-//   actualObj, err := SomeFunction()
-//   assert.EqualError(t, err,  expectedErrorString)
+//	actualObj, err := SomeFunction()
+//	assert.EqualError(t, err,  expectedErrorString)
 func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1438,8 +1682,8 @@
 // ErrorContains asserts that a function returned an error (i.e. not `nil`)
 // and that the error contains the specified substring.
 //
-//   actualObj, err := SomeFunction()
-//   assert.ErrorContains(t, err,  expectedErrorSubString)
+//	actualObj, err := SomeFunction()
+//	assert.ErrorContains(t, err,  expectedErrorSubString)
 func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1458,7 +1702,6 @@
 
 // matchRegexp return true if a specified regexp matches a string.
 func matchRegexp(rx interface{}, str interface{}) bool {
-
 	var r *regexp.Regexp
 	if rr, ok := rx.(*regexp.Regexp); ok {
 		r = rr
@@ -1466,14 +1709,20 @@
 		r = regexp.MustCompile(fmt.Sprint(rx))
 	}
 
-	return (r.FindStringIndex(fmt.Sprint(str)) != nil)
-
+	switch v := str.(type) {
+	case []byte:
+		return r.Match(v)
+	case string:
+		return r.MatchString(v)
+	default:
+		return r.MatchString(fmt.Sprint(v))
+	}
 }
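
The type switch avoids round-tripping `[]byte` through `fmt.Sprint`, so byte slices are now matched directly. Illustrative usage:

```go
package example

import (
	"regexp"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestRegexpOnBytes(t *testing.T) {
	body := []byte("status=ok")
	assert.Regexp(t, `^status=`, body) // []byte is matched without string conversion
	assert.NotRegexp(t, regexp.MustCompile(`error`), body)
}
```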
 
 // Regexp asserts that a specified regexp matches a string.
 //
-//  assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-//  assert.Regexp(t, "start...$", "it's not starting")
+//	assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+//	assert.Regexp(t, "start...$", "it's not starting")
 func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1490,8 +1739,8 @@
 
 // NotRegexp asserts that a specified regexp does not match a string.
 //
-//  assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-//  assert.NotRegexp(t, "^start", "it's not starting")
+//	assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+//	assert.NotRegexp(t, "^start", "it's not starting")
 func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1503,7 +1752,6 @@
 	}
 
 	return !match
-
 }
 
 // Zero asserts that i is the zero value for its type.
@@ -1603,7 +1851,7 @@
 
 // JSONEq asserts that two JSON strings are equivalent.
 //
-//  assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+//	assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
 func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
@@ -1614,6 +1862,11 @@
 		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
 	}
 
+	// Shortcut if same bytes
+	if actual == expected {
+		return true
+	}
+
 	if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
 		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
 	}
@@ -1632,6 +1885,11 @@
 		return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
 	}
 
+	// Shortcut if same bytes
+	if actual == expected {
+		return true
+	}
+
 	if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
 		return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
 	}
@@ -1719,20 +1977,21 @@
 	MaxDepth:                10,
 }
 
-type tHelper interface {
+type tHelper = interface {
 	Helper()
 }
 
 // Eventually asserts that given condition will be met in waitFor time,
 // periodically checking target function each tick.
 //
-//    assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+//	assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
 func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 
 	ch := make(chan bool, 1)
+	checkCond := func() { ch <- condition() }
 
 	timer := time.NewTimer(waitFor)
 	defer timer.Stop()
@@ -1740,18 +1999,131 @@
 	ticker := time.NewTicker(tick)
 	defer ticker.Stop()
 
-	for tick := ticker.C; ; {
+	var tickC <-chan time.Time
+
+	// Check the condition once immediately, before the first tick.
+	go checkCond()
+
+	for {
 		select {
 		case <-timer.C:
 			return Fail(t, "Condition never satisfied", msgAndArgs...)
-		case <-tick:
-			tick = nil
-			go func() { ch <- condition() }()
+		case <-tickC:
+			tickC = nil
+			go checkCond()
 		case v := <-ch:
 			if v {
 				return true
 			}
-			tick = ticker.C
+			tickC = ticker.C
+		}
+	}
+}
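
A behavior change worth noting: the condition now runs once immediately rather than waiting for the first tick. A sketch of typical usage:

```go
package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyPolling(t *testing.T) {
	done := make(chan struct{})
	go func() {
		time.Sleep(50 * time.Millisecond)
		close(done)
	}()
	// Checked immediately, then every 10ms, for up to one second.
	assert.Eventually(t, func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}, time.Second, 10*time.Millisecond)
}
```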
+
+// CollectT implements the TestingT interface and collects all errors.
+type CollectT struct {
+	// A slice of errors. A non-nil slice denotes a failure.
+	// If it is non-nil but len(c.errors) == 0, the failure was
+	// triggered by a direct c.FailNow() call.
+	errors []error
+}
+
+// Helper is like [testing.T.Helper] but does nothing.
+func (CollectT) Helper() {}
+
+// Errorf collects the error.
+func (c *CollectT) Errorf(format string, args ...interface{}) {
+	c.errors = append(c.errors, fmt.Errorf(format, args...))
+}
+
+// FailNow stops execution by calling runtime.Goexit.
+func (c *CollectT) FailNow() {
+	c.fail()
+	runtime.Goexit()
+}
+
+// Deprecated: This method was intended for internal use and should not have been published. It now just panics.
+func (*CollectT) Reset() {
+	panic("Reset() is deprecated")
+}
+
+// Deprecated: This method was intended for internal use and should not have been published. It now just panics.
+func (*CollectT) Copy(TestingT) {
+	panic("Copy() is deprecated")
+}
+
+func (c *CollectT) fail() {
+	if !c.failed() {
+		c.errors = []error{} // Make it non-nil to mark a failure.
+	}
+}
+
+func (c *CollectT) failed() bool {
+	return c.errors != nil
+}
+
+// EventuallyWithT asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+//	externalValue := false
+//	go func() {
+//		time.Sleep(8*time.Second)
+//		externalValue = true
+//	}()
+//	assert.EventuallyWithT(t, func(c *assert.CollectT) {
+//		// add assertions as needed; any assertion failure will fail the current tick
+//		assert.True(c, externalValue, "expected 'externalValue' to be true")
+//	}, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+
+	var lastFinishedTickErrs []error
+	ch := make(chan *CollectT, 1)
+
+	checkCond := func() {
+		collect := new(CollectT)
+		defer func() {
+			ch <- collect
+		}()
+		condition(collect)
+	}
+
+	timer := time.NewTimer(waitFor)
+	defer timer.Stop()
+
+	ticker := time.NewTicker(tick)
+	defer ticker.Stop()
+
+	var tickC <-chan time.Time
+
+	// Check the condition once immediately, before the first tick.
+	go checkCond()
+
+	for {
+		select {
+		case <-timer.C:
+			for _, err := range lastFinishedTickErrs {
+				t.Errorf("%v", err)
+			}
+			return Fail(t, "Condition never satisfied", msgAndArgs...)
+		case <-tickC:
+			tickC = nil
+			go checkCond()
+		case collect := <-ch:
+			if !collect.failed() {
+				return true
+			}
+			// Keep the errors from the last completed condition check, so that they can be copied to t if the timeout is reached.
+			lastFinishedTickErrs = collect.errors
+			tickC = ticker.C
 		}
 	}
 }
@@ -1759,13 +2131,14 @@
 // Never asserts that the given condition doesn't satisfy in waitFor time,
 // periodically checking the target function each tick.
 //
-//    assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+//	assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
 func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
 		h.Helper()
 	}
 
 	ch := make(chan bool, 1)
+	checkCond := func() { ch <- condition() }
 
 	timer := time.NewTimer(waitFor)
 	defer timer.Stop()
@@ -1773,18 +2146,23 @@
 	ticker := time.NewTicker(tick)
 	defer ticker.Stop()
 
-	for tick := ticker.C; ; {
+	var tickC <-chan time.Time
+
+	// Check the condition once immediately, before the first tick.
+	go checkCond()
+
+	for {
 		select {
 		case <-timer.C:
 			return true
-		case <-tick:
-			tick = nil
-			go func() { ch <- condition() }()
+		case <-tickC:
+			tickC = nil
+			go checkCond()
 		case v := <-ch:
 			if v {
 				return Fail(t, "Condition satisfied", msgAndArgs...)
 			}
-			tick = ticker.C
+			tickC = ticker.C
 		}
 	}
 }
@@ -1802,9 +2180,12 @@
 	var expectedText string
 	if target != nil {
 		expectedText = target.Error()
+		if err == nil {
+			return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
+		}
 	}
 
-	chain := buildErrorChainString(err)
+	chain := buildErrorChainString(err, false)
 
 	return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
 		"expected: %q\n"+
@@ -1812,7 +2193,7 @@
 	), msgAndArgs...)
 }
 
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorIs asserts that none of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
 	if h, ok := t.(tHelper); ok {
@@ -1827,7 +2208,7 @@
 		expectedText = target.Error()
 	}
 
-	chain := buildErrorChainString(err)
+	chain := buildErrorChainString(err, false)
 
 	return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
 		"found: %q\n"+
@@ -1845,24 +2226,70 @@
 		return true
 	}
 
-	chain := buildErrorChainString(err)
+	expectedType := reflect.TypeOf(target).Elem().String()
+	if err == nil {
+		return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
+			"expected: %s", expectedType), msgAndArgs...)
+	}
+
+	chain := buildErrorChainString(err, true)
 
 	return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
-		"expected: %q\n"+
-		"in chain: %s", target, chain,
+		"expected: %s\n"+
+		"in chain: %s", expectedType, chain,
 	), msgAndArgs...)
 }
 
-func buildErrorChainString(err error) string {
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If a matching error is found, the assertion fails and target is set to that error value.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	if !errors.As(err, target) {
+		return true
+	}
+
+	chain := buildErrorChainString(err, true)
+
+	return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+		"found: %s\n"+
+		"in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
+	), msgAndArgs...)
+}
+
+func unwrapAll(err error) (errs []error) {
+	errs = append(errs, err)
+	switch x := err.(type) {
+	case interface{ Unwrap() error }:
+		err = x.Unwrap()
+		if err == nil {
+			return
+		}
+		errs = append(errs, unwrapAll(err)...)
+	case interface{ Unwrap() []error }:
+		for _, err := range x.Unwrap() {
+			errs = append(errs, unwrapAll(err)...)
+		}
+	}
+	return
+}
+
+func buildErrorChainString(err error, withType bool) string {
 	if err == nil {
 		return ""
 	}
 
-	e := errors.Unwrap(err)
-	chain := fmt.Sprintf("%q", err.Error())
-	for e != nil {
-		chain += fmt.Sprintf("\n\t%q", e.Error())
-		e = errors.Unwrap(e)
+	var chain string
+	errs := unwrapAll(err)
+	for i := range errs {
+		if i != 0 {
+			chain += "\n\t"
+		}
+		chain += fmt.Sprintf("%q", errs[i].Error())
+		if withType {
+			chain += fmt.Sprintf(" (%T)", errs[i])
+		}
 	}
 	return chain
 }
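
`unwrapAll` now follows both the single-error `Unwrap() error` interface and the Go 1.20 multi-error `Unwrap() []error` interface, so trees built with `errors.Join` are flattened into the failure message. A small demonstration of the chain shape involved (requires Go 1.20+):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	base := errors.New("base")
	joined := errors.Join(
		fmt.Errorf("wrap: %w", base), // single-unwrap branch
		errors.New("sibling"),        // second branch of the join
	)
	// errors.Is walks the same tree that unwrapAll flattens
	// when building the "in chain" failure message.
	fmt.Println(errors.Is(joined, base)) // true
}
```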
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
index c9dccc4..a0b953a 100644
--- a/vendor/github.com/stretchr/testify/assert/doc.go
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -1,39 +1,44 @@
 // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
 //
-// Example Usage
+// # Note
+//
+// All functions in this package return a bool value indicating whether the assertion has passed.
+//
+// # Example Usage
 //
 // The following is a complete example using assert in a standard test function:
-//    import (
-//      "testing"
-//      "github.com/stretchr/testify/assert"
-//    )
 //
-//    func TestSomething(t *testing.T) {
+//	import (
+//	  "testing"
+//	  "github.com/stretchr/testify/assert"
+//	)
 //
-//      var a string = "Hello"
-//      var b string = "Hello"
+//	func TestSomething(t *testing.T) {
 //
-//      assert.Equal(t, a, b, "The two words should be the same.")
+//	  var a string = "Hello"
+//	  var b string = "Hello"
 //
-//    }
+//	  assert.Equal(t, a, b, "The two words should be the same.")
+//
+//	}
 //
 // if you assert many times, use the format below:
 //
-//    import (
-//      "testing"
-//      "github.com/stretchr/testify/assert"
-//    )
+//	import (
+//	  "testing"
+//	  "github.com/stretchr/testify/assert"
+//	)
 //
-//    func TestSomething(t *testing.T) {
-//      assert := assert.New(t)
+//	func TestSomething(t *testing.T) {
+//	  assert := assert.New(t)
 //
-//      var a string = "Hello"
-//      var b string = "Hello"
+//	  var a string = "Hello"
+//	  var b string = "Hello"
 //
-//      assert.Equal(a, b, "The two words should be the same.")
-//    }
+//	  assert.Equal(a, b, "The two words should be the same.")
+//	}
 //
-// Assertions
+// # Assertions
 //
 // Assertions allow you to easily write test code, and are global funcs in the `assert` package.
 // All assertion functions take, as the first argument, the `*testing.T` object provided by the
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 4ed341d..5a6bb75 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -12,7 +12,7 @@
 // an error if building a new request fails.
 func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
 	w := httptest.NewRecorder()
-	req, err := http.NewRequest(method, url, nil)
+	req, err := http.NewRequest(method, url, http.NoBody)
 	if err != nil {
 		return -1, err
 	}
@@ -23,7 +23,7 @@
 
 // HTTPSuccess asserts that a specified handler returns a success status code.
 //
-//  assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//	assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -32,12 +32,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
 	if !isSuccessCode {
-		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return isSuccessCode
@@ -45,7 +45,7 @@
 
 // HTTPRedirect asserts that a specified handler returns a redirect status code.
 //
-//  assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -54,12 +54,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
 	if !isRedirectCode {
-		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return isRedirectCode
@@ -67,7 +67,7 @@
 
 // HTTPError asserts that a specified handler returns an error status code.
 //
-//  assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+//	assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -76,12 +76,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	isErrorCode := code >= http.StatusBadRequest
 	if !isErrorCode {
-		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return isErrorCode
@@ -89,7 +89,7 @@
 
 // HTTPStatusCode asserts that a specified handler returns a specified status code.
 //
-//  assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//	assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@@ -98,12 +98,12 @@
 	}
 	code, err := httpCode(handler, method, url, values)
 	if err != nil {
-		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+		Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
 	}
 
 	successful := code == statuscode
 	if !successful {
-		Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+		Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
 	}
 
 	return successful
@@ -113,7 +113,10 @@
 // empty string if building a new request fails.
 func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
 	w := httptest.NewRecorder()
-	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if len(values) > 0 {
+		url += "?" + values.Encode()
+	}
+	req, err := http.NewRequest(method, url, http.NoBody)
 	if err != nil {
 		return ""
 	}
@@ -124,7 +127,7 @@
 // HTTPBodyContains asserts that a specified handler returns a
 // body that contains a string.
 //
-//  assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -135,7 +138,7 @@
 
 	contains := strings.Contains(body, fmt.Sprint(str))
 	if !contains {
-		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+		Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
 	}
 
 	return contains
@@ -144,7 +147,7 @@
 // HTTPBodyNotContains asserts that a specified handler returns a
 // body that does not contain a string.
 //
-//  assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//	assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
 //
 // Returns whether the assertion was successful (true) or not (false).
 func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -155,7 +158,7 @@
 
 	contains := strings.Contains(body, fmt.Sprint(str))
 	if contains {
-		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+		Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
 	}
 
 	return !contains
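
Since `msgAndArgs` is now forwarded everywhere, custom failure messages finally surface from the HTTP helpers. Illustrative usage against an in-process handler (handler and paths are placeholders):

```go
package example

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestHTTPHelpers(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "pong from "+r.URL.Path)
	}
	// The trailing message is now passed through to Fail on error.
	assert.HTTPSuccess(t, handler, "GET", "/ping", nil, "expected /ping to be healthy")
	assert.HTTPBodyContains(t, handler, "GET", "/ping", nil, "pong")
}
```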
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
new file mode 100644
index 0000000..5a74c4f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -0,0 +1,24 @@
+//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
+
+// Package yaml provides YAML functions that delegate to a pluggable implementation.
+//
+// This implementation is selected with the testify_yaml_custom build tag.
+//
+//	go test -tags testify_yaml_custom
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3].
+//
+// In your test package:
+//
+//	import assertYaml "github.com/stretchr/testify/assert/yaml"
+//
+//	func init() {
+//		assertYaml.Unmarshal = func(in []byte, out interface{}) error {
+//			// ...
+//			return nil
+//		}
+//	}
+package yaml
+
+var Unmarshal func(in []byte, out interface{}) error
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
new file mode 100644
index 0000000..0bae80e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -0,0 +1,36 @@
+//go:build !testify_yaml_fail && !testify_yaml_custom
+
+// Package yaml is just an indirection to handle YAML deserialization.
+//
+// This package is just an indirection that allows the builder to substitute an
+// alternative implementation of YAML deserialization. This makes it possible
+// either to not use YAML deserialization at all, or to use an implementation
+// other than [gopkg.in/yaml.v3] (for example for license compatibility reasons,
+// see [PR #1120]).
+//
+// Alternative implementations are selected using build tags:
+//
+//   - testify_yaml_fail: [Unmarshal] always fails with an error
+//   - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
+//     before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
+//     [github.com/stretchr/testify/assert.YAMLEqf].
+//
+// Usage:
+//
+//	go test -tags testify_yaml_fail
+//
+// You can check with "go list" which implementation is linked:
+//
+//	go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//	go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//	go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//
+// [PR #1120]: https://github.com/stretchr/testify/pull/1120
+package yaml
+
+import goyaml "gopkg.in/yaml.v3"
+
+// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
+func Unmarshal(in []byte, out interface{}) error {
+	return goyaml.Unmarshal(in, out)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
new file mode 100644
index 0000000..8041803
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -0,0 +1,17 @@
+//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that always fail.
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3]:
+//
+//	go test -tags testify_yaml_fail
+package yaml
+
+import "errors"
+
+var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
+
+func Unmarshal([]byte, interface{}) error {
+	return errNotImplemented
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 956790e..964a404 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,10 +1,16 @@
 Changes by Version
 ==================
 
-2.29.2 (unreleased)
+2.30.0 (2021-12-07)
 -------------------
-- Nothing yet.
-
+- Add deprecation notice -- Yuri Shkuro
+- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro
+- Remove redundant newline in NewReporter init message (#603) -- wwade
+- [zipkin] Encode span IDs as full 16-hex strings (#601) -- Nathan
+- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng
+- Remove outdated reference to Zipkin model. -- Yuri Shkuro
+- Move thrift compilation to a script (#590) -- Aaron Jheng
+- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro
 
 2.29.1 (2021-05-24)
 -------------------
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index bb7463c..ee7b212 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -21,9 +21,7 @@
 
 THRIFT_VER=0.14
 THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
-THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
-THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
-THRIFT_GEN_DIR=thrift-gen
+THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift
 
 PASS=$(shell printf "\033[32mPASS\033[0m")
 FAIL=$(shell printf "\033[31mFAIL\033[0m")
@@ -105,19 +103,7 @@
 # TODO at the moment we're not generating tchan_*.go files
 .PHONY: thrift-compile
 thrift-compile: thrift-image
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
-	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
-	sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
-	sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
-	sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
-		$(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
-	rm -rf thrift-gen/*/*-remote
-	rm -rf crossdock/thrift/*/*-remote
-	rm -rf thrift-gen/jaeger/collector.go
+	docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh
 
 .PHONY: idl-submodule
 idl-submodule:
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 687f578..e23912b 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -1,5 +1,9 @@
 [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
 
+# 🛑 This library is being deprecated!
+
+We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details.
+
 # Jaeger Bindings for Go OpenTracing API
 
 Instrumentation library that implements an
@@ -15,6 +19,12 @@
 
 ## Installation
 
+### Preferred
+
+Add `github.com/uber/jaeger-client-go` to `go.mod`.
+
+### Old way
+
 We recommend using a dependency manager like [dep](https://golang.github.io/dep/)
 and [semantic versioning](http://semver.org/) when including this library into an application.
 For example, Jaeger backend imports this library like this:
@@ -39,15 +49,19 @@
 
 ## Initialization
 
-See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples)
 and [config/example_test.go](./config/example_test.go).
 
+There are two ways to create a tracer:
+  * Using the [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct, which allows declarative configuration. For example, you can populate the struct from a YAML/JSON config, or ask it to initialize itself from environment variables (see the next section).
+  * Using the [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function, which allows full programmatic control of configuring the tracer using TracerOptions.
+
 ### Environment variables
 
 The tracer can be initialized with values coming from environment variables, if it is
 [built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
 that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
-None of the env vars are required and all of them can be overridden via direct setting 
+None of the env vars are required and all of them can be overridden via direct setting
 of the property on the configuration object.
 
 Property| Description
@@ -70,6 +84,7 @@
 JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
 JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
 JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_TRACEID_128BIT | Whether to enable 128-bit trace ID generation, `true` or `false`. If not enabled, the SDK defaults to 64-bit trace IDs.
 JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
 JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
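
A minimal initialization sketch driven by the variables above (the fatal-on-error handling is an assumption; `FromEnv` and `Configuration.NewTracer` are the documented entry points):

```go
package main

import (
	"io"
	"log"

	"github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func initTracer() io.Closer {
	// Honors JAEGER_* variables, e.g. JAEGER_TRACEID_128BIT=true.
	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.Fatalf("cannot parse Jaeger env vars: %v", err)
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("cannot initialize Jaeger tracer: %v", err)
	}
	opentracing.SetGlobalTracer(tracer)
	return closer
}

func main() {
	closer := initTracer()
	defer closer.Close()
}
```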
 
@@ -194,7 +209,7 @@
 of the root span. It involves several features and architectural changes:
   * **Shared sampling state**: the sampling state is shared across all local
     (i.e. in-process) spans for a given trace.
-  * **New `SamplerV2` API** allows the sampler to be called at multiple points 
+  * **New `SamplerV2` API** allows the sampler to be called at multiple points
     in the life cycle of a span:
     * on span creation
     * on overwriting span operation name
@@ -311,8 +326,8 @@
 [Apache 2.0 License](LICENSE).
 
 
-[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
-[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg
+[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go
 [ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
 [ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
 [cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index c2222f1..0667635 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -420,7 +420,7 @@
 		jaeger.ReporterOptions.Logger(logger),
 		jaeger.ReporterOptions.Metrics(metrics))
 	if rc.LogSpans && logger != nil {
-		logger.Infof("Initializing logging reporter\n")
+		logger.Infof("Initializing logging reporter")
 		reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
 	}
 	return reporter, err
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index d8eb698..35710cf 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@
 
 const (
 	// JaegerClientVersion is the version of the client library reported as Span tag.
-	JaegerClientVersion = "Go-2.29.1"
+	JaegerClientVersion = "Go-2.30.0"
 
 	// JaegerClientVersionTagKey is the name of the tag used to report client version.
 	JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
index 4f55490..fac3c09 100644
--- a/vendor/github.com/uber/jaeger-client-go/doc.go
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -14,8 +14,6 @@
 
 /*
 Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
-It is currently using Zipkin-compatible data model and can be directly
-itegrated with Zipkin backend (http://zipkin.io).
 
 For integration instructions please refer to the README:
 
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
index 6fe0cd0..fba1e43 100644
--- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
@@ -16,7 +16,7 @@
 
 import "sync"
 
-// SpanAllocator abstraction of managign span allocations
+// SpanAllocator abstraction of managing span allocations
 type SpanAllocator interface {
 	Get() *Span
 	Put(*Span)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
index ac9bd48..ca25568 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -23,7 +23,7 @@
 	"context"
 )
 
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
+// See https://pkg.go.dev/context#WithValue for why we need the unexported typedefs.
 type (
 	headerKey     string
 	headerKeyList int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
index d884c6a..02f0613 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -23,7 +23,7 @@
 	"context"
 )
 
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
+// See https://pkg.go.dev/context#WithValue for why we need the unexported typedefs.
 type responseHelperKey struct{}
 
 // TResponseHelper defines a object with a set of helper functions that can be
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
index f0734b7..16b4606 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -27,27 +27,30 @@
 // TracerOption is a function that sets some option on the tracer
 type TracerOption func(tracer *Tracer)
 
-// TracerOptions is a factory for all available TracerOption's
-var TracerOptions tracerOptions
+// TracerOptions is a factory for all available TracerOption's.
+var TracerOptions TracerOptionsFactory
 
-type tracerOptions struct{}
+// TracerOptionsFactory is a struct that defines functions for all available TracerOption's.
+type TracerOptionsFactory struct{}
 
 // Metrics creates a TracerOption that initializes Metrics on the tracer,
 // which is used to emit statistics.
-func (tracerOptions) Metrics(m *Metrics) TracerOption {
+func (TracerOptionsFactory) Metrics(m *Metrics) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.metrics = *m
 	}
 }
 
 // Logger creates a TracerOption that gives the tracer a Logger.
-func (tracerOptions) Logger(logger Logger) TracerOption {
+func (TracerOptionsFactory) Logger(logger Logger) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.logger = log.DebugLogAdapter(logger)
 	}
 }
 
-func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+// CustomHeaderKeys allows overriding the default HTTP header keys used to
+// propagate the tracing context.
+func (TracerOptionsFactory) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
 	return func(tracer *Tracer) {
 		if headerKeys == nil {
 			return
@@ -62,7 +65,7 @@
 
 // TimeNow creates a TracerOption that gives the tracer a function
 // used to generate timestamps for spans.
-func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+func (TracerOptionsFactory) TimeNow(timeNow func() time.Time) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.timeNow = timeNow
 	}
@@ -70,7 +73,7 @@
 
 // RandomNumber creates a TracerOption that gives the tracer
 // a thread-safe random number generator function for generating trace IDs.
-func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+func (TracerOptionsFactory) RandomNumber(randomNumber func() uint64) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.randomNumber = randomNumber
 	}
@@ -80,7 +83,7 @@
 // an object pool to minimize span allocations.
 // This should be used with care, only if the service is not running any async tasks
 // that can access parent spans after those spans have been finished.
-func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+func (TracerOptionsFactory) PoolSpans(poolSpans bool) TracerOption {
 	return func(tracer *Tracer) {
 		if poolSpans {
 			tracer.spanAllocator = newSyncPollSpanAllocator()
@@ -90,56 +93,67 @@
 	}
 }
 
-// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// HostIPv4 creates a TracerOption that identifies the current service/process.
 // If not set, the factory method will obtain the current IP address.
 // The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
-func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+//
+// Deprecated: the tracer will attempt to automatically detect the IP address.
+func (TracerOptionsFactory) HostIPv4(hostIPv4 uint32) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.hostIPv4 = hostIPv4
 	}
 }
 
-func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+// Injector registers an Injector for the given format.
+func (TracerOptionsFactory) Injector(format interface{}, injector Injector) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.injectors[format] = injector
 	}
 }
 
-func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+// Extractor registers an Extractor for the given format.
+func (TracerOptionsFactory) Extractor(format interface{}, extractor Extractor) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.extractors[format] = extractor
 	}
 }
 
-func (t tracerOptions) Observer(observer Observer) TracerOption {
+// Observer registers an Observer.
+func (t TracerOptionsFactory) Observer(observer Observer) TracerOption {
 	return t.ContribObserver(&oldObserver{obs: observer})
 }
 
-func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+// ContribObserver registers a ContribObserver.
+func (TracerOptionsFactory) ContribObserver(observer ContribObserver) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.observer.append(observer)
 	}
 }
 
-func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+// Gen128Bit enables generation of 128-bit trace IDs.
+func (TracerOptionsFactory) Gen128Bit(gen128Bit bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.gen128Bit = gen128Bit
 	}
 }
 
-func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+// NoDebugFlagOnForcedSampling turns off setting the debug flag in the trace context
+// when the trace is force-started via the sampling=1 span tag.
+func (TracerOptionsFactory) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
 	}
 }
 
-func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+// HighTraceIDGenerator allows overriding the generator used for the high 64 bits of trace IDs.
+func (TracerOptionsFactory) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.highTraceIDGenerator = highTraceIDGenerator
 	}
 }
 
-func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+// MaxTagValueLength sets the maximum allowed length of tag values.
+func (TracerOptionsFactory) MaxTagValueLength(maxTagValueLength int) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.maxTagValueLength = maxTagValueLength
 	}
@@ -151,31 +165,37 @@
 //
 // About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
 // half are the newest logs.
-func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+func (TracerOptionsFactory) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.maxLogsPerSpan = maxLogsPerSpan
 	}
 }
 
-func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+// ZipkinSharedRPCSpan enables a mode in which the server-side span shares the span ID
+// of the client span from the incoming request, for compatibility with Zipkin's
+// "one span per RPC" model.
+func (TracerOptionsFactory) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
 	}
 }
 
-func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+// Tag adds a tracer-level tag that will be added to all spans.
+func (TracerOptionsFactory) Tag(key string, value interface{}) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.tags = append(tracer.tags, Tag{key: key, value: value})
 	}
 }
 
-func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+// BaggageRestrictionManager registers a baggage.RestrictionManager.
+func (TracerOptionsFactory) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.baggageRestrictionManager = mgr
 	}
 }
 
-func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+// DebugThrottler registers a Throttler for debug spans.
+func (TracerOptionsFactory) DebugThrottler(throttler throttler.Throttler) TracerOption {
 	return func(tracer *Tracer) {
 		tracer.debugThrottler = throttler
 	}
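
Taken together, exporting TracerOptionsFactory keeps existing call sites working (TracerOptions remains the package-level factory instance) while letting the option set appear in generated docs. A hedged usage sketch; the service name, sampler, reporter, and option choices below are illustrative:

    package main

    import (
        opentracing "github.com/opentracing/opentracing-go"
        jaeger "github.com/uber/jaeger-client-go"
        jaegerlog "github.com/uber/jaeger-client-go/log"
    )

    func main() {
        // Every option below is produced through the exported factory.
        tracer, closer := jaeger.NewTracer(
            "example-service",
            jaeger.NewConstSampler(true),
            jaeger.NewNullReporter(),
            jaeger.TracerOptions.Logger(jaegerlog.StdLogger),
            jaeger.TracerOptions.Tag("env", "dev"),
            jaeger.TracerOptions.Gen128Bit(true),
        )
        defer closer.Close()

        opentracing.SetGlobalTracer(tracer)
        span := tracer.StartSpan("startup")
        span.Finish()
    }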
diff --git a/vendor/go.etcd.io/etcd/NOTICE b/vendor/go.etcd.io/etcd/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/go.etcd.io/etcd/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/api/v3/LICENSE
similarity index 100%
rename from vendor/go.etcd.io/etcd/LICENSE
rename to vendor/go.etcd.io/etcd/api/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go
new file mode 100644
index 0000000..37374c5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go
@@ -0,0 +1,1159 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+package authpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Permission_Type int32
+
+const (
+	READ      Permission_Type = 0
+	WRITE     Permission_Type = 1
+	READWRITE Permission_Type = 2
+)
+
+var Permission_Type_name = map[int32]string{
+	0: "READ",
+	1: "WRITE",
+	2: "READWRITE",
+}
+
+var Permission_Type_value = map[string]int32{
+	"READ":      0,
+	"WRITE":     1,
+	"READWRITE": 2,
+}
+
+func (x Permission_Type) String() string {
+	return proto.EnumName(Permission_Type_name, int32(x))
+}
+
+func (Permission_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{2, 0}
+}
+
+type UserAddOptions struct {
+	NoPassword           bool     `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UserAddOptions) Reset()         { *m = UserAddOptions{} }
+func (m *UserAddOptions) String() string { return proto.CompactTextString(m) }
+func (*UserAddOptions) ProtoMessage()    {}
+func (*UserAddOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{0}
+}
+func (m *UserAddOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UserAddOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UserAddOptions.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UserAddOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UserAddOptions.Merge(m, src)
+}
+func (m *UserAddOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *UserAddOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_UserAddOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAddOptions proto.InternalMessageInfo
+
+// User is a single entry in the bucket authUsers
+type User struct {
+	Name                 []byte          `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             []byte          `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	Roles                []string        `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
+	Options              *UserAddOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *User) Reset()         { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage()    {}
+func (*User) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{1}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_User.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *User) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_User.Merge(m, src)
+}
+func (m *User) XXX_Size() int {
+	return m.Size()
+}
+func (m *User) XXX_DiscardUnknown() {
+	xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+// Permission is a single entity
+type Permission struct {
+	PermType             Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
+	Key                  []byte          `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd             []byte          `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *Permission) Reset()         { *m = Permission{} }
+func (m *Permission) String() string { return proto.CompactTextString(m) }
+func (*Permission) ProtoMessage()    {}
+func (*Permission) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{2}
+}
+func (m *Permission) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Permission) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Permission.Merge(m, src)
+}
+func (m *Permission) XXX_Size() int {
+	return m.Size()
+}
+func (m *Permission) XXX_DiscardUnknown() {
+	xxx_messageInfo_Permission.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Permission proto.InternalMessageInfo
+
+// Role is a single entry in the bucket authRoles
+type Role struct {
+	Name                 []byte        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	KeyPermission        []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *Role) Reset()         { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage()    {}
+func (*Role) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{3}
+}
+func (m *Role) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Role.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Role) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Role.Merge(m, src)
+}
+func (m *Role) XXX_Size() int {
+	return m.Size()
+}
+func (m *Role) XXX_DiscardUnknown() {
+	xxx_messageInfo_Role.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Role proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
+	proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions")
+	proto.RegisterType((*User)(nil), "authpb.User")
+	proto.RegisterType((*Permission)(nil), "authpb.Permission")
+	proto.RegisterType((*Role)(nil), "authpb.Role")
+}
+
+func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
+
+var fileDescriptor_8bbd6f3875b0e874 = []byte{
+	// 359 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xc2, 0x40,
+	0x10, 0xc6, 0xbb, 0xb4, 0x60, 0x3b, 0x08, 0x21, 0x1b, 0xa2, 0x0d, 0xc6, 0xda, 0xf4, 0xd4, 0x78,
+	0x68, 0x15, 0x0e, 0x7a, 0xc5, 0xc8, 0xc1, 0x93, 0x64, 0x83, 0x31, 0xf1, 0x42, 0x8a, 0xdd, 0xd4,
+	0x06, 0xd8, 0x6d, 0xda, 0xaa, 0xe1, 0xe2, 0x73, 0x78, 0xf0, 0x81, 0x38, 0xf2, 0x08, 0x82, 0x2f,
+	0x62, 0xba, 0xcb, 0x9f, 0x10, 0x3d, 0xed, 0x37, 0xdf, 0x7c, 0x33, 0xfb, 0xcb, 0x2e, 0x40, 0xf0,
+	0x9a, 0xbf, 0x78, 0x49, 0xca, 0x73, 0x8e, 0x2b, 0x85, 0x4e, 0x46, 0xad, 0x66, 0xc4, 0x23, 0x2e,
+	0x2c, 0xbf, 0x50, 0xb2, 0xeb, 0x5c, 0x42, 0xfd, 0x21, 0xa3, 0x69, 0x37, 0x0c, 0xef, 0x93, 0x3c,
+	0xe6, 0x2c, 0xc3, 0x67, 0x50, 0x65, 0x7c, 0x98, 0x04, 0x59, 0xf6, 0xce, 0xd3, 0xd0, 0x44, 0x36,
+	0x72, 0x75, 0x02, 0x8c, 0xf7, 0xd7, 0x8e, 0xf3, 0x01, 0x5a, 0x31, 0x82, 0x31, 0x68, 0x2c, 0x98,
+	0x52, 0x91, 0x38, 0x24, 0x42, 0xe3, 0x16, 0xe8, 0xdb, 0xc9, 0x92, 0xf0, 0xb7, 0x35, 0x6e, 0x42,
+	0x39, 0xe5, 0x13, 0x9a, 0x99, 0xaa, 0xad, 0xba, 0x06, 0x91, 0x05, 0xbe, 0x80, 0x03, 0x2e, 0x6f,
+	0x36, 0x35, 0x1b, 0xb9, 0xd5, 0xf6, 0x91, 0x27, 0x81, 0xbd, 0x7d, 0x2e, 0xb2, 0x89, 0x39, 0x5f,
+	0x08, 0xa0, 0x4f, 0xd3, 0x69, 0x9c, 0x65, 0x31, 0x67, 0xb8, 0x03, 0x7a, 0x42, 0xd3, 0xe9, 0x60,
+	0x96, 0x48, 0x94, 0x7a, 0xfb, 0x78, 0xb3, 0x61, 0x97, 0xf2, 0x8a, 0x36, 0xd9, 0x06, 0x71, 0x03,
+	0xd4, 0x31, 0x9d, 0xad, 0x11, 0x0b, 0x89, 0x4f, 0xc0, 0x48, 0x03, 0x16, 0xd1, 0x21, 0x65, 0xa1,
+	0xa9, 0x4a, 0x74, 0x61, 0xf4, 0x58, 0xe8, 0x9c, 0x83, 0x26, 0xc6, 0x74, 0xd0, 0x48, 0xaf, 0x7b,
+	0xdb, 0x50, 0xb0, 0x01, 0xe5, 0x47, 0x72, 0x37, 0xe8, 0x35, 0x10, 0xae, 0x81, 0x51, 0x98, 0xb2,
+	0x2c, 0x39, 0x03, 0xd0, 0x08, 0x9f, 0xd0, 0x7f, 0x9f, 0xe7, 0x1a, 0x6a, 0x63, 0x3a, 0xdb, 0x61,
+	0x99, 0x25, 0x5b, 0x75, 0xab, 0x6d, 0xfc, 0x17, 0x98, 0xec, 0x07, 0x6f, 0xae, 0xe6, 0x4b, 0x4b,
+	0x59, 0x2c, 0x2d, 0x65, 0xbe, 0xb2, 0xd0, 0x62, 0x65, 0xa1, 0xef, 0x95, 0x85, 0x3e, 0x7f, 0x2c,
+	0xe5, 0xe9, 0x34, 0xe2, 0x1e, 0xcd, 0x9f, 0x43, 0x2f, 0xe6, 0x7e, 0x71, 0xfa, 0x41, 0x12, 0xfb,
+	0x6f, 0x1d, 0x5f, 0xae, 0x1c, 0x55, 0xc4, 0x3f, 0x77, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61,
+	0x5a, 0xfe, 0x48, 0x13, 0x02, 0x00, 0x00,
+}
+
+func (m *UserAddOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserAddOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.NoPassword {
+		i--
+		if m.NoPassword {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *User) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Options != nil {
+		{
+			size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintAuth(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Permission) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.PermType != 0 {
+		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.KeyPermission) > 0 {
+		for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintAuth(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
+	offset -= sovAuth(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
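
encodeVarintAuth writes a standard protobuf base-128 varint: seven payload bits per byte, least-significant group first, with the 0x80 continuation bit set on every byte except the last. A worked example (in-package, since the function is unexported; the value 300 is arbitrary):

    // 300 = 0b1_0010_1100
    //   low 7 bits  -> 0101100 (0x2C), continuation set -> 0xAC
    //   next 7 bits -> 0000010 (0x02), final byte, no continuation
    buf := make([]byte, 2)
    _ = encodeVarintAuth(buf, 2, 300) // buf is now [0xAC, 0x02]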
+func (m *UserAddOptions) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.NoPassword {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *User) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovAuth(uint64(l))
+		}
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Permission) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.PermType != 0 {
+		n += 1 + sovAuth(uint64(m.PermType))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Role) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if len(m.KeyPermission) > 0 {
+		for _, e := range m.KeyPermission {
+			l = e.Size()
+			n += 1 + l + sovAuth(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovAuth(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozAuth(x uint64) (n int) {
+	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
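
sovAuth is the size counterpart of the encoder above: bits.Len64(x|1) counts the significant bits (the |1 forces at least one byte for zero), and (bits+6)/7 rounds up to whole seven-bit groups. sozAuth first applies ZigZag encoding, (x<<1)^(x>>63), so that integers of small magnitude, positive or negative, stay small on the wire:

    // sovAuth(0)   == 1 // x|1 keeps zero at one byte
    // sovAuth(300) == 2 // bits.Len64(301) == 9; (9+6)/7 == 2
    // ZigZag mapping applied by sozAuth:
    //   0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4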
+func (m *UserAddOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoPassword = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: User: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
+			if m.Password == nil {
+				m.Password = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &UserAddOptions{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Permission) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Permission: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
+			}
+			m.PermType = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PermType |= Permission_Type(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Role: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KeyPermission = append(m.KeyPermission, &Permission{})
+			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipAuth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthAuth
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupAuth
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthAuth
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthAuth        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowAuth          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto
new file mode 100644
index 0000000..5a7856b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto
@@ -0,0 +1,44 @@
+syntax = "proto3";
+package authpb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/authpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message UserAddOptions {
+  bool no_password = 1;
+};
+
+// User is a single entry in the bucket authUsers
+message User {
+  bytes name = 1;
+  bytes password = 2;
+  repeated string roles = 3;
+  UserAddOptions options = 4;
+}
+
+// Permission is a single entity
+message Permission {
+  enum Type {
+    READ = 0;
+    WRITE = 1;
+    READWRITE = 2;
+  }
+  Type permType = 1;
+
+  bytes key = 2;
+  bytes range_end = 3;
+}
+
+// Role is a single entry in the bucket authRoles
+message Role {
+  bytes name = 1;
+
+  repeated Permission keyPermission = 2;
+}
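
A round trip through the generated API ties these .proto definitions to the methods in auth.pb.go above; the field values here are illustrative:

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/api/v3/authpb"
    )

    func main() {
        u := &authpb.User{
            Name:    []byte("alice"),
            Roles:   []string{"root"},
            Options: &authpb.UserAddOptions{NoPassword: true},
        }
        data, err := u.Marshal() // gogo-generated marshaler
        if err != nil {
            panic(err)
        }

        var decoded authpb.User
        if err := decoded.Unmarshal(data); err != nil {
            panic(err)
        }
        fmt.Printf("%s has roles %v\n", decoded.Name, decoded.Roles)
    }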
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go
new file mode 100644
index 0000000..eaefa2d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go
@@ -0,0 +1,1004 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: etcdserver.proto
+
+package etcdserverpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Request struct {
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Method               string   `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+	Path                 string   `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+	Val                  string   `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+	Dir                  bool     `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+	PrevValue            string   `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+	PrevIndex            uint64   `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+	PrevExist            *bool    `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+	Expiration           int64    `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+	Wait                 bool     `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+	Since                uint64   `protobuf:"varint,11,opt,name=Since" json:"Since"`
+	Recursive            bool     `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+	Sorted               bool     `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+	Quorum               bool     `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+	Time                 int64    `protobuf:"varint,15,opt,name=Time" json:"Time"`
+	Stream               bool     `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
+	Refresh              *bool    `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Request) Reset()         { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()    {}
+func (*Request) Descriptor() ([]byte, []int) {
+	return fileDescriptor_09ffbeb3bebbce7e, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+	return m.Size()
+}
+func (m *Request) XXX_DiscardUnknown() {
+	xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+type Metadata struct {
+	NodeID               uint64   `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+	ClusterID            uint64   `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Metadata) Reset()         { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage()    {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_09ffbeb3bebbce7e, []int{1}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
+	proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
+}
+
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
+
+var fileDescriptor_09ffbeb3bebbce7e = []byte{
+	// 402 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0x41, 0xef, 0xd2, 0x30,
+	0x14, 0x00, 0x70, 0x0a, 0xfb, 0xff, 0x81, 0x8a, 0x8a, 0x0d, 0x31, 0x2f, 0xc4, 0xcc, 0x05, 0x3d,
+	0xec, 0xc4, 0x0e, 0x9e, 0xbc, 0xe2, 0x38, 0x2c, 0x11, 0x83, 0xc3, 0x60, 0xe2, 0xad, 0xb2, 0x27,
+	0x34, 0x01, 0x3a, 0xbb, 0x6e, 0xe1, 0x1b, 0xf8, 0x15, 0xfc, 0x48, 0x1c, 0xfd, 0x04, 0x46, 0xf1,
+	0x8b, 0x98, 0x8e, 0x8d, 0x55, 0x4f, 0x5b, 0x7e, 0xef, 0xf5, 0xf5, 0xb5, 0x7d, 0x74, 0x88, 0x7a,
+	0x93, 0x64, 0xa8, 0x0a, 0x54, 0xd3, 0x54, 0x49, 0x2d, 0xd9, 0xa0, 0x91, 0xf4, 0xf3, 0x78, 0xb4,
+	0x95, 0x5b, 0x59, 0x06, 0x02, 0xf3, 0x77, 0xcd, 0x99, 0x7c, 0x73, 0x68, 0x37, 0xc6, 0xaf, 0x39,
+	0x66, 0x9a, 0x8d, 0x68, 0x3b, 0x0a, 0x81, 0x78, 0xc4, 0x77, 0x66, 0xce, 0xf9, 0xe7, 0xf3, 0x56,
+	0xdc, 0x8e, 0x42, 0xf6, 0x8c, 0xde, 0x2f, 0x50, 0xef, 0x64, 0x02, 0x6d, 0x8f, 0xf8, 0xfd, 0x2a,
+	0x52, 0x19, 0x03, 0xea, 0x2c, 0xb9, 0xde, 0x41, 0xc7, 0x8a, 0x95, 0xc2, 0x9e, 0xd2, 0xce, 0x9a,
+	0xef, 0xc1, 0xb1, 0x02, 0x06, 0x8c, 0x87, 0x42, 0xc1, 0x9d, 0x47, 0xfc, 0x5e, 0xed, 0xa1, 0x50,
+	0x6c, 0x42, 0xfb, 0x4b, 0x85, 0xc5, 0x9a, 0xef, 0x73, 0x84, 0x7b, 0x6b, 0x55, 0xc3, 0x75, 0x4e,
+	0x74, 0x4c, 0xf0, 0x04, 0x5d, 0xab, 0xd1, 0x86, 0xeb, 0x9c, 0xf9, 0x49, 0x64, 0x1a, 0x7a, 0xb7,
+	0x5d, 0x48, 0xdc, 0x30, 0x7b, 0x49, 0xe9, 0xfc, 0x94, 0x0a, 0xc5, 0xb5, 0x90, 0x47, 0xe8, 0x7b,
+	0xc4, 0xef, 0x54, 0x85, 0x2c, 0x37, 0x67, 0xfb, 0xc8, 0x85, 0x06, 0x6a, 0xb5, 0x5a, 0x0a, 0x1b,
+	0xd3, 0xbb, 0x95, 0x38, 0x6e, 0x10, 0x1e, 0x58, 0x3d, 0x5c, 0xc9, 0xec, 0x1f, 0xe3, 0x26, 0x57,
+	0x99, 0x28, 0x10, 0x06, 0xd6, 0xd2, 0x86, 0xcd, 0x9d, 0xae, 0xa4, 0xd2, 0x98, 0xc0, 0x43, 0x2b,
+	0xa1, 0x32, 0x13, 0x7d, 0x9f, 0x4b, 0x95, 0x1f, 0xe0, 0x91, 0x1d, 0xbd, 0x9a, 0xe9, 0xea, 0x83,
+	0x38, 0x20, 0x3c, 0xb6, 0xba, 0x2e, 0xa5, 0xac, 0xaa, 0x15, 0xf2, 0x03, 0x0c, 0xff, 0xa9, 0x5a,
+	0x1a, 0x73, 0xcd, 0x43, 0x7f, 0x51, 0x98, 0xed, 0xe0, 0x89, 0x75, 0x2b, 0x35, 0x4e, 0xde, 0xd2,
+	0xde, 0x02, 0x35, 0x4f, 0xb8, 0xe6, 0xa6, 0xd2, 0x3b, 0x99, 0xe0, 0x7f, 0xd3, 0x50, 0x99, 0x39,
+	0xe1, 0x9b, 0x7d, 0x9e, 0x69, 0x54, 0x51, 0x58, 0x0e, 0xc5, 0xed, 0x15, 0x6e, 0x3c, 0x7b, 0x7d,
+	0xfe, 0xed, 0xb6, 0xce, 0x17, 0x97, 0xfc, 0xb8, 0xb8, 0xe4, 0xd7, 0xc5, 0x25, 0xdf, 0xff, 0xb8,
+	0xad, 0x4f, 0x2f, 0xb6, 0x72, 0x6a, 0x86, 0x72, 0x2a, 0x64, 0x60, 0xbe, 0x01, 0x4f, 0x45, 0x50,
+	0xbc, 0x0a, 0xec, 0x41, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x79, 0xf9, 0xf5, 0xc9, 0x02,
+	0x00, 0x00,
+}
+
+func (m *Request) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Refresh != nil {
+		i--
+		if *m.Refresh {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x88
+	}
+	i--
+	if m.Stream {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x80
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
+	i--
+	dAtA[i] = 0x78
+	i--
+	if m.Quorum {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x70
+	i--
+	if m.Sorted {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x68
+	i--
+	if m.Recursive {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x60
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
+	i--
+	dAtA[i] = 0x58
+	i--
+	if m.Wait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
+	i--
+	dAtA[i] = 0x48
+	if m.PrevExist != nil {
+		i--
+		if *m.PrevExist {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
+	i--
+	dAtA[i] = 0x38
+	i -= len(m.PrevValue)
+	copy(dAtA[i:], m.PrevValue)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
+	i--
+	dAtA[i] = 0x32
+	i--
+	if m.Dir {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x28
+	i -= len(m.Val)
+	copy(dAtA[i:], m.Val)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Method)
+	copy(dAtA[i:], m.Method)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+	offset -= sovEtcdserver(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Request) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovEtcdserver(uint64(m.ID))
+	l = len(m.Method)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	l = len(m.Val)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	n += 2
+	l = len(m.PrevValue)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	n += 1 + sovEtcdserver(uint64(m.PrevIndex))
+	if m.PrevExist != nil {
+		n += 2
+	}
+	n += 1 + sovEtcdserver(uint64(m.Expiration))
+	n += 2
+	n += 1 + sovEtcdserver(uint64(m.Since))
+	n += 2
+	n += 2
+	n += 2
+	n += 1 + sovEtcdserver(uint64(m.Time))
+	n += 3
+	if m.Refresh != nil {
+		n += 3
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
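
The bare n += 2 terms above are the non-optional bools (Dir, Wait, Recursive, Sorted, Quorum): one tag byte plus one data byte each. Stream and Refresh cost n += 3 because they are fields 16 and 17, whose tags no longer fit in a single varint byte:

    // field 16, wire type 0: key = 16<<3 | 0 = 128
    // 128 as a varint is 0x80 0x01 -> two tag bytes + one bool byte = 3
    // (matches the 0x80, 0x1 pair written for Stream in MarshalToSizedBuffer)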
+
+func (m *Metadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovEtcdserver(uint64(m.NodeID))
+	n += 1 + sovEtcdserver(uint64(m.ClusterID))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovEtcdserver(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozEtcdserver(x uint64) (n int) {
+	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Request: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Method = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Val = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Dir = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PrevValue = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
+			}
+			m.PrevIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PrevIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.PrevExist = &b
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+			}
+			m.Expiration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Expiration |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Wait = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
+			}
+			m.Since = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Since |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Recursive = bool(v != 0)
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Sorted = bool(v != 0)
+		case 14:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Quorum = bool(v != 0)
+		case 15:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+			}
+			m.Time = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Time |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 16:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stream = bool(v != 0)
+		case 17:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Refresh = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+			}
+			m.ClusterID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ClusterID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
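+// Editorial note (not part of the generated code): skipEtcdserver advances
+// past one complete field with an unknown tag, using depth to skip
+// start-group/end-group pairs as a unit, and returns how many bytes of dAtA
+// were consumed.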
+func skipEtcdserver(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthEtcdserver
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupEtcdserver
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthEtcdserver
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthEtcdserver        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowEtcdserver          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupEtcdserver = fmt.Errorf("proto: unexpected end of group")
+)
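+
+// Editorial note (not part of the generated code): a minimal round-trip of
+// the hand-rolled wire format above, assuming the matching generated Marshal
+// (the proto sets gogoproto marshaler_all = true); the function name is
+// illustrative only:
+//
+//	func roundTripMetadata() error {
+//		in := &Metadata{NodeID: 1, ClusterID: 2}
+//		raw, err := in.Marshal() // fields 1 and 2 encoded as varints
+//		if err != nil {
+//			return err
+//		}
+//		out := &Metadata{}
+//		return out.Unmarshal(raw) // restores NodeID and ClusterID
+//	}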
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto
new file mode 100644
index 0000000..ff639b9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto
@@ -0,0 +1,36 @@
+syntax = "proto2";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Request {
+	optional uint64 ID         =  1 [(gogoproto.nullable) = false];
+	optional string Method     =  2 [(gogoproto.nullable) = false];
+	optional string Path       =  3 [(gogoproto.nullable) = false];
+	optional string Val        =  4 [(gogoproto.nullable) = false];
+	optional bool   Dir        =  5 [(gogoproto.nullable) = false];
+	optional string PrevValue  =  6 [(gogoproto.nullable) = false];
+	optional uint64 PrevIndex  =  7 [(gogoproto.nullable) = false];
+	optional bool   PrevExist  =  8 [(gogoproto.nullable) = true];
+	optional int64  Expiration =  9 [(gogoproto.nullable) = false];
+	optional bool   Wait       = 10 [(gogoproto.nullable) = false];
+	optional uint64 Since      = 11 [(gogoproto.nullable) = false];
+	optional bool   Recursive  = 12 [(gogoproto.nullable) = false];
+	optional bool   Sorted     = 13 [(gogoproto.nullable) = false];
+	optional bool   Quorum     = 14 [(gogoproto.nullable) = false];
+	optional int64  Time       = 15 [(gogoproto.nullable) = false];
+	optional bool   Stream     = 16 [(gogoproto.nullable) = false];
+	optional bool   Refresh    = 17 [(gogoproto.nullable) = true];
+}
+
+message Metadata {
+	optional uint64 NodeID    = 1 [(gogoproto.nullable) = false];
+	optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
+}
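+
+// Editorial note (not part of the upstream file): with gogoproto,
+// nullable = false fields map to Go value types, while the nullable = true
+// fields (PrevExist, Refresh) become pointers so "unset" stays distinct from
+// false. An illustrative Go construction:
+//
+//   prevExist := true
+//   req := &Request{Method: "PUT", Path: "/foo", PrevExist: &prevExist}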
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
new file mode 100644
index 0000000..e1cd9eb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
@@ -0,0 +1,2737 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft_internal.proto
+
+package etcdserverpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+	membershippb "go.etcd.io/etcd/api/v3/membershippb"
+	_ "go.etcd.io/etcd/api/v3/versionpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type RequestHeader struct {
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// username is the username associated with the auth token of the gRPC connection
+	Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
+	// auth_revision is the revision number of auth.authStore. It is not related to mvcc revisions
+	AuthRevision         uint64   `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RequestHeader) Reset()         { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage()    {}
+func (*RequestHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{0}
+}
+func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
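+// Editorial note (not generated): when deterministic marshaling is requested,
+// the reflection-based proto path is used; otherwise the buffer is reused and
+// filled back to front by the faster generated MarshalToSizedBuffer.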
+func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestHeader.Merge(m, src)
+}
+func (m *RequestHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
+
+// An InternalRaftRequest is the union of all requests that can be
+// sent via raft.
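+//
+// Editorial note (not generated): typically exactly one request member is
+// non-nil per entry. An illustrative construction (PutRequest's Key/Value
+// field names are assumed from rpc.proto):
+//
+//	r := &InternalRaftRequest{
+//		Header: &RequestHeader{ID: 1},
+//		Put:    &PutRequest{Key: []byte("k"), Value: []byte("v")},
+//	}
+//	data, err := r.Marshal() // encodes only the non-nil members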
+type InternalRaftRequest struct {
+	Header                   *RequestHeader                            `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
+	ID                       uint64                                    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	V2                       *Request                                  `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
+	Range                    *RangeRequest                             `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+	Put                      *PutRequest                               `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
+	DeleteRange              *DeleteRangeRequest                       `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
+	Txn                      *TxnRequest                               `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
+	Compaction               *CompactionRequest                        `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
+	LeaseGrant               *LeaseGrantRequest                        `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
+	LeaseRevoke              *LeaseRevokeRequest                       `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
+	Alarm                    *AlarmRequest                             `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
+	LeaseCheckpoint          *LeaseCheckpointRequest                   `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"`
+	AuthEnable               *AuthEnableRequest                        `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
+	AuthDisable              *AuthDisableRequest                       `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
+	AuthStatus               *AuthStatusRequest                        `protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"`
+	Authenticate             *InternalAuthenticateRequest              `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
+	AuthUserAdd              *AuthUserAddRequest                       `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
+	AuthUserDelete           *AuthUserDeleteRequest                    `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
+	AuthUserGet              *AuthUserGetRequest                       `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
+	AuthUserChangePassword   *AuthUserChangePasswordRequest            `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
+	AuthUserGrantRole        *AuthUserGrantRoleRequest                 `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
+	AuthUserRevokeRole       *AuthUserRevokeRoleRequest                `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
+	AuthUserList             *AuthUserListRequest                      `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
+	AuthRoleList             *AuthRoleListRequest                      `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
+	AuthRoleAdd              *AuthRoleAddRequest                       `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
+	AuthRoleDelete           *AuthRoleDeleteRequest                    `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
+	AuthRoleGet              *AuthRoleGetRequest                       `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
+	AuthRoleGrantPermission  *AuthRoleGrantPermissionRequest           `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
+	AuthRoleRevokePermission *AuthRoleRevokePermissionRequest          `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
+	ClusterVersionSet        *membershippb.ClusterVersionSetRequest    `protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"`
+	ClusterMemberAttrSet     *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"`
+	DowngradeInfoSet         *membershippb.DowngradeInfoSetRequest     `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"`
+	DowngradeVersionTest     *DowngradeVersionTestRequest              `protobuf:"bytes,9900,opt,name=downgrade_version_test,json=downgradeVersionTest,proto3" json:"downgrade_version_test,omitempty"`
+	XXX_NoUnkeyedLiteral     struct{}                                  `json:"-"`
+	XXX_unrecognized         []byte                                    `json:"-"`
+	XXX_sizecache            int32                                     `json:"-"`
+}
+
+func (m *InternalRaftRequest) Reset()         { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage()    {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{1}
+}
+func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InternalRaftRequest.Merge(m, src)
+}
+func (m *InternalRaftRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InternalRaftRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
+
+type EmptyResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EmptyResponse) Reset()         { *m = EmptyResponse{} }
+func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
+func (*EmptyResponse) ProtoMessage()    {}
+func (*EmptyResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{2}
+}
+func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *EmptyResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EmptyResponse.Merge(m, src)
+}
+func (m *EmptyResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *EmptyResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled in by etcdserver and should not be user-facing.
+// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
+type InternalAuthenticateRequest struct {
+	Name     string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	// simple_token is generated in the API layer (etcdserver/v3_server.go)
+	SimpleToken          string   `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InternalAuthenticateRequest) Reset()         { *m = InternalAuthenticateRequest{} }
+func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalAuthenticateRequest) ProtoMessage()    {}
+func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{3}
+}
+func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
+}
+func (m *InternalAuthenticateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
+	proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
+	proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
+	proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
+}
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
+
+var fileDescriptor_b4c9a9be0cfca103 = []byte{
+	// 1101 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0xcb, 0x72, 0x1b, 0x45,
+	0x14, 0x8d, 0x6c, 0xc7, 0xb6, 0x5a, 0xb6, 0xe3, 0xb4, 0x9d, 0xa4, 0xb1, 0xab, 0x8c, 0xe3, 0x90,
+	0x60, 0x20, 0xc8, 0xc1, 0x06, 0xaa, 0x60, 0x03, 0x8a, 0xe5, 0x72, 0x4c, 0x25, 0x29, 0xd7, 0xc4,
+	0x50, 0x29, 0x28, 0x6a, 0x68, 0xcd, 0x5c, 0x4b, 0x13, 0x8f, 0x66, 0x86, 0xee, 0x96, 0xe2, 0x6c,
+	0x59, 0xb2, 0x06, 0x8a, 0x8f, 0x60, 0xc1, 0x2b, 0xff, 0x90, 0x05, 0x8f, 0x00, 0x3f, 0x00, 0x66,
+	0xc3, 0x1e, 0xd8, 0xa7, 0xfa, 0x31, 0x2f, 0xa9, 0xe5, 0xdd, 0xe8, 0xde, 0x73, 0xcf, 0x39, 0xdd,
+	0x7d, 0xbb, 0x75, 0xd1, 0x02, 0xa3, 0x87, 0xc2, 0x0d, 0x22, 0x01, 0x2c, 0xa2, 0x61, 0x3d, 0x61,
+	0xb1, 0x88, 0xf1, 0x0c, 0x08, 0xcf, 0xe7, 0xc0, 0xfa, 0xc0, 0x92, 0xd6, 0xd2, 0x62, 0x3b, 0x6e,
+	0xc7, 0x2a, 0xb1, 0x21, 0xbf, 0x34, 0x66, 0x69, 0x3e, 0xc7, 0x98, 0x48, 0x95, 0x25, 0x9e, 0xf9,
+	0x5c, 0x95, 0xc9, 0x0d, 0x9a, 0x04, 0x1b, 0x7d, 0x60, 0x3c, 0x88, 0xa3, 0xa4, 0x95, 0x7e, 0x19,
+	0xc4, 0xb5, 0x0c, 0xd1, 0x85, 0x6e, 0x0b, 0x18, 0xef, 0x04, 0x49, 0xd2, 0x2a, 0xfc, 0xd0, 0xb8,
+	0x35, 0x86, 0x66, 0x1d, 0xf8, 0xb4, 0x07, 0x5c, 0xdc, 0x02, 0xea, 0x03, 0xc3, 0x73, 0x68, 0x6c,
+	0xaf, 0x49, 0x2a, 0xab, 0x95, 0xf5, 0x09, 0x67, 0x6c, 0xaf, 0x89, 0x97, 0xd0, 0x74, 0x8f, 0x4b,
+	0xf3, 0x5d, 0x20, 0x63, 0xab, 0x95, 0xf5, 0xaa, 0x93, 0xfd, 0xc6, 0xd7, 0xd1, 0x2c, 0xed, 0x89,
+	0x8e, 0xcb, 0xa0, 0x1f, 0x48, 0x6d, 0x32, 0x2e, 0xcb, 0x6e, 0x4e, 0x7d, 0xfe, 0x98, 0x8c, 0x6f,
+	0xd5, 0x5f, 0x73, 0x66, 0x64, 0xd6, 0x31, 0xc9, 0xb7, 0xa7, 0x3e, 0x53, 0xe1, 0x1b, 0x6b, 0x8f,
+	0x17, 0xd0, 0xc2, 0x9e, 0xd9, 0x11, 0x87, 0x1e, 0x0a, 0x63, 0x00, 0x6f, 0xa1, 0xc9, 0x8e, 0x32,
+	0x41, 0xfc, 0xd5, 0xca, 0x7a, 0x6d, 0x73, 0xb9, 0x5e, 0xdc, 0xa7, 0x7a, 0xc9, 0xa7, 0x63, 0xa0,
+	0x43, 0x7e, 0xaf, 0xa2, 0xb1, 0xfe, 0xa6, 0x72, 0x5a, 0xdb, 0xbc, 0x60, 0x25, 0x70, 0xc6, 0xfa,
+	0x9b, 0xf8, 0x06, 0x3a, 0xcb, 0x68, 0xd4, 0x06, 0x65, 0xb9, 0xb6, 0xb9, 0x34, 0x80, 0x94, 0xa9,
+	0x14, 0xae, 0x81, 0xf8, 0x65, 0x34, 0x9e, 0xf4, 0x04, 0x99, 0x50, 0x78, 0x52, 0xc6, 0xef, 0xf7,
+	0xd2, 0x45, 0x38, 0x12, 0x84, 0xb7, 0xd1, 0x8c, 0x0f, 0x21, 0x08, 0x70, 0xb5, 0xc8, 0x59, 0x55,
+	0xb4, 0x5a, 0x2e, 0x6a, 0x2a, 0x44, 0x49, 0xaa, 0xe6, 0xe7, 0x31, 0x29, 0x28, 0x8e, 0x23, 0x32,
+	0x69, 0x13, 0x3c, 0x38, 0x8e, 0x32, 0x41, 0x71, 0x1c, 0xe1, 0x77, 0x10, 0xf2, 0xe2, 0x6e, 0x42,
+	0x3d, 0x21, 0x8f, 0x61, 0x4a, 0x95, 0x3c, 0x5f, 0x2e, 0xd9, 0xce, 0xf2, 0x69, 0x65, 0xa1, 0x04,
+	0xbf, 0x8b, 0x6a, 0x21, 0x50, 0x0e, 0x6e, 0x9b, 0xd1, 0x48, 0x90, 0x69, 0x1b, 0xc3, 0x6d, 0x09,
+	0xd8, 0x95, 0xf9, 0x8c, 0x21, 0xcc, 0x42, 0x72, 0xcd, 0x9a, 0x81, 0x41, 0x3f, 0x3e, 0x02, 0x52,
+	0xb5, 0xad, 0x59, 0x51, 0x38, 0x0a, 0x90, 0xad, 0x39, 0xcc, 0x63, 0xf2, 0x58, 0x68, 0x48, 0x59,
+	0x97, 0x20, 0xdb, 0xb1, 0x34, 0x64, 0x2a, 0x3b, 0x16, 0x05, 0xc4, 0xf7, 0xd1, 0xbc, 0x96, 0xf5,
+	0x3a, 0xe0, 0x1d, 0x25, 0x71, 0x10, 0x09, 0x52, 0x53, 0xc5, 0x2f, 0x58, 0xa4, 0xb7, 0x33, 0x90,
+	0xa1, 0x49, 0x9b, 0xf5, 0x75, 0xe7, 0x5c, 0x58, 0x06, 0xe0, 0x06, 0xaa, 0xa9, 0xee, 0x86, 0x88,
+	0xb6, 0x42, 0x20, 0xff, 0x58, 0x77, 0xb5, 0xd1, 0x13, 0x9d, 0x1d, 0x05, 0xc8, 0xf6, 0x84, 0x66,
+	0x21, 0xdc, 0x44, 0xea, 0x0a, 0xb8, 0x7e, 0xc0, 0x15, 0xc7, 0xbf, 0x53, 0xb6, 0x4d, 0x91, 0x1c,
+	0x4d, 0x8d, 0xc8, 0x36, 0x85, 0xe6, 0x31, 0xfc, 0x9e, 0x31, 0xc2, 0x05, 0x15, 0x3d, 0x4e, 0xfe,
+	0x1f, 0x69, 0xe4, 0x9e, 0x02, 0x0c, 0xac, 0xec, 0x0d, 0xed, 0x48, 0xe7, 0xf0, 0x5d, 0xed, 0x08,
+	0x22, 0x11, 0x78, 0x54, 0x00, 0xf9, 0x4f, 0x93, 0xbd, 0x54, 0x26, 0x4b, 0x6f, 0x67, 0xa3, 0x00,
+	0x4d, 0xad, 0x95, 0xea, 0xf1, 0x8e, 0x79, 0x02, 0xe4, 0x9b, 0xe0, 0x52, 0xdf, 0x27, 0x3f, 0x4d,
+	0x8f, 0x5a, 0xe2, 0xfb, 0x1c, 0x58, 0xc3, 0xf7, 0x4b, 0x4b, 0x34, 0x31, 0x7c, 0x17, 0xcd, 0xe7,
+	0x34, 0xfa, 0x12, 0x90, 0x9f, 0x35, 0xd3, 0x15, 0x3b, 0x93, 0xb9, 0x3d, 0x86, 0x6c, 0x8e, 0x96,
+	0xc2, 0x65, 0x5b, 0x6d, 0x10, 0xe4, 0x97, 0x53, 0x6d, 0xed, 0x82, 0x18, 0xb2, 0xb5, 0x0b, 0x02,
+	0xb7, 0xd1, 0x73, 0x39, 0x8d, 0xd7, 0x91, 0xd7, 0xd2, 0x4d, 0x28, 0xe7, 0x0f, 0x63, 0xe6, 0x93,
+	0x5f, 0x35, 0xe5, 0x2b, 0x76, 0xca, 0x6d, 0x85, 0xde, 0x37, 0xe0, 0x94, 0xfd, 0x22, 0xb5, 0xa6,
+	0xf1, 0x7d, 0xb4, 0x58, 0xf0, 0x2b, 0xef, 0x93, 0xcb, 0xe2, 0x10, 0xc8, 0x53, 0xad, 0x71, 0x6d,
+	0x84, 0x6d, 0x75, 0x17, 0xe3, 0xbc, 0x6d, 0xce, 0xd3, 0xc1, 0x0c, 0xfe, 0x08, 0x5d, 0xc8, 0x99,
+	0xf5, 0xd5, 0xd4, 0xd4, 0xbf, 0x69, 0xea, 0x17, 0xed, 0xd4, 0xe6, 0x8e, 0x16, 0xb8, 0x31, 0x1d,
+	0x4a, 0xe1, 0x5b, 0x68, 0x2e, 0x27, 0x0f, 0x03, 0x2e, 0xc8, 0xef, 0x9a, 0xf5, 0xb2, 0x9d, 0xf5,
+	0x76, 0xc0, 0x45, 0xa9, 0x8f, 0xd2, 0x60, 0xc6, 0x24, 0xad, 0x69, 0xa6, 0x3f, 0x46, 0x32, 0x49,
+	0xe9, 0x21, 0xa6, 0x34, 0x98, 0x1d, 0xbd, 0x62, 0x92, 0x1d, 0xf9, 0x6d, 0x75, 0xd4, 0xd1, 0xcb,
+	0x9a, 0xc1, 0x8e, 0x34, 0xb1, 0xac, 0x23, 0x15, 0x8d, 0xe9, 0xc8, 0xef, 0xaa, 0xa3, 0x3a, 0x52,
+	0x56, 0x59, 0x3a, 0x32, 0x0f, 0x97, 0x6d, 0xc9, 0x8e, 0xfc, 0xfe, 0x54, 0x5b, 0x83, 0x1d, 0x69,
+	0x62, 0xf8, 0x01, 0x5a, 0x2a, 0xd0, 0xa8, 0x46, 0x49, 0x80, 0x75, 0x03, 0xae, 0xfe, 0x7f, 0x7f,
+	0xd0, 0x9c, 0xd7, 0x47, 0x70, 0x4a, 0xf8, 0x7e, 0x86, 0x4e, 0xf9, 0x2f, 0x51, 0x7b, 0x1e, 0x77,
+	0xd1, 0x72, 0xae, 0x65, 0x5a, 0xa7, 0x20, 0xf6, 0xa3, 0x16, 0x7b, 0xd5, 0x2e, 0xa6, 0xbb, 0x64,
+	0x58, 0x8d, 0xd0, 0x11, 0x00, 0xfc, 0x09, 0x5a, 0xf0, 0xc2, 0x1e, 0x17, 0xc0, 0x5c, 0x33, 0xcb,
+	0xb8, 0x1c, 0x04, 0xf9, 0x02, 0x99, 0x2b, 0x50, 0x1c, 0x64, 0xea, 0xdb, 0x1a, 0xf9, 0x81, 0x06,
+	0xde, 0x03, 0x31, 0xf4, 0xea, 0x9d, 0xf7, 0x06, 0x21, 0xf8, 0x01, 0xba, 0x94, 0x2a, 0x68, 0x32,
+	0x97, 0x0a, 0xc1, 0x94, 0xca, 0x97, 0xc8, 0xbc, 0x83, 0x36, 0x95, 0x3b, 0x2a, 0xd6, 0x10, 0x82,
+	0xd9, 0x84, 0x16, 0x3d, 0x0b, 0x0a, 0x7f, 0x8c, 0xb0, 0x1f, 0x3f, 0x8c, 0xda, 0x8c, 0xfa, 0xe0,
+	0x06, 0xd1, 0x61, 0xac, 0x64, 0xbe, 0xd2, 0x32, 0x57, 0xcb, 0x32, 0xcd, 0x14, 0xb8, 0x17, 0x1d,
+	0xc6, 0x36, 0x89, 0x79, 0x7f, 0x00, 0x81, 0x03, 0x74, 0x31, 0xa7, 0x4f, 0xb7, 0x4b, 0x00, 0x17,
+	0xe4, 0x9b, 0x3b, 0xb6, 0x17, 0x3d, 0x93, 0x30, 0xdb, 0x71, 0x00, 0x7c, 0x50, 0xe6, 0x4d, 0x67,
+	0xd1, 0xb7, 0xa0, 0xf2, 0xb9, 0xed, 0x1c, 0x9a, 0xdd, 0xe9, 0x26, 0xe2, 0x91, 0x03, 0x3c, 0x89,
+	0x23, 0x0e, 0x6b, 0x8f, 0xd0, 0xf2, 0x29, 0xff, 0x14, 0x18, 0xa3, 0x09, 0x35, 0x36, 0x56, 0xd4,
+	0xd8, 0xa8, 0xbe, 0xe5, 0x38, 0x99, 0x3d, 0xa0, 0x66, 0x9c, 0x4c, 0x7f, 0xe3, 0xcb, 0x68, 0x86,
+	0x07, 0xdd, 0x24, 0x04, 0x57, 0xc4, 0x47, 0xa0, 0xa7, 0xc9, 0xaa, 0x53, 0xd3, 0xb1, 0x03, 0x19,
+	0xca, 0xbc, 0xdc, 0x7c, 0xeb, 0xc9, 0x5f, 0x2b, 0x67, 0x9e, 0x9c, 0xac, 0x54, 0x9e, 0x9e, 0xac,
+	0x54, 0xfe, 0x3c, 0x59, 0xa9, 0x7c, 0xfd, 0xf7, 0xca, 0x99, 0x0f, 0xaf, 0xb4, 0x63, 0xb5, 0xec,
+	0x7a, 0x10, 0x6f, 0xe4, 0x23, 0xf2, 0xd6, 0x46, 0x71, 0x2b, 0x5a, 0x93, 0x6a, 0xf2, 0xdd, 0x7a,
+	0x16, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xa0, 0x15, 0x1f, 0x9b, 0x0b, 0x00, 0x00,
+}
+
+func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.AuthRevision != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Username) > 0 {
+		i -= len(m.Username)
+		copy(dAtA[i:], m.Username)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.DowngradeVersionTest != nil {
+		{
+			size, err := m.DowngradeVersionTest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4
+		i--
+		dAtA[i] = 0xea
+		i--
+		dAtA[i] = 0xe2
+	}
+	if m.DowngradeInfoSet != nil {
+		{
+			size, err := m.DowngradeInfoSet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x51
+		i--
+		dAtA[i] = 0xb2
+	}
+	if m.ClusterMemberAttrSet != nil {
+		{
+			size, err := m.ClusterMemberAttrSet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x51
+		i--
+		dAtA[i] = 0xaa
+	}
+	if m.ClusterVersionSet != nil {
+		{
+			size, err := m.ClusterVersionSet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x51
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthRoleRevokePermission != nil {
+		{
+			size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthRoleGrantPermission != nil {
+		{
+			size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthRoleGet != nil {
+		{
+			size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.AuthRoleDelete != nil {
+		{
+			size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AuthRoleAdd != nil {
+		{
+			size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.AuthRoleList != nil {
+		{
+			size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthUserList != nil {
+		{
+			size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.AuthUserRevokeRole != nil {
+		{
+			size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AuthUserGrantRole != nil {
+		{
+			size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.AuthUserChangePassword != nil {
+		{
+			size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xfa
+	}
+	if m.AuthUserGet != nil {
+		{
+			size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xf2
+	}
+	if m.AuthUserDelete != nil {
+		{
+			size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xea
+	}
+	if m.AuthUserAdd != nil {
+		{
+			size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xe2
+	}
+	if m.AuthStatus != nil {
+		{
+			size, err := m.AuthStatus.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0xaa
+	}
+	if m.Authenticate != nil {
+		{
+			size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthDisable != nil {
+		{
+			size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthEnable != nil {
+		{
+			size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3e
+		i--
+		dAtA[i] = 0xc2
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.LeaseCheckpoint != nil {
+		{
+			size, err := m.LeaseCheckpoint.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.Alarm != nil {
+		{
+			size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.LeaseRevoke != nil {
+		{
+			size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.LeaseGrant != nil {
+		{
+			size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.Compaction != nil {
+		{
+			size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.Txn != nil {
+		{
+			size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.DeleteRange != nil {
+		{
+			size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Put != nil {
+		{
+			size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Range != nil {
+		{
+			size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.V2 != nil {
+		{
+			size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.SimpleToken) > 0 {
+		i -= len(m.SimpleToken)
+		copy(dAtA[i:], m.SimpleToken)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
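+// Editorial note (not generated): encodeVarintRaftInternal writes v as a
+// base-128 varint into the sovRaftInternal(v) bytes immediately before
+// offset and returns the varint's start index; the MarshalToSizedBuffer
+// methods fill dAtA back to front, so the cursor only moves toward zero.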
+func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRaftInternal(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *RequestHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRaftInternal(uint64(m.ID))
+	}
+	l = len(m.Username)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRevision != 0 {
+		n += 1 + sovRaftInternal(uint64(m.AuthRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRaftInternal(uint64(m.ID))
+	}
+	if m.V2 != nil {
+		l = m.V2.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Range != nil {
+		l = m.Range.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Put != nil {
+		l = m.Put.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.DeleteRange != nil {
+		l = m.DeleteRange.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Txn != nil {
+		l = m.Txn.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Compaction != nil {
+		l = m.Compaction.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseGrant != nil {
+		l = m.LeaseGrant.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseRevoke != nil {
+		l = m.LeaseRevoke.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Alarm != nil {
+		l = m.Alarm.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseCheckpoint != nil {
+		l = m.LeaseCheckpoint.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthEnable != nil {
+		l = m.AuthEnable.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthDisable != nil {
+		l = m.AuthDisable.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Authenticate != nil {
+		l = m.Authenticate.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthStatus != nil {
+		l = m.AuthStatus.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserAdd != nil {
+		l = m.AuthUserAdd.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserDelete != nil {
+		l = m.AuthUserDelete.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserGet != nil {
+		l = m.AuthUserGet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserChangePassword != nil {
+		l = m.AuthUserChangePassword.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserGrantRole != nil {
+		l = m.AuthUserGrantRole.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserRevokeRole != nil {
+		l = m.AuthUserRevokeRole.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserList != nil {
+		l = m.AuthUserList.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleList != nil {
+		l = m.AuthRoleList.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleAdd != nil {
+		l = m.AuthRoleAdd.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleDelete != nil {
+		l = m.AuthRoleDelete.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleGet != nil {
+		l = m.AuthRoleGet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleGrantPermission != nil {
+		l = m.AuthRoleGrantPermission.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleRevokePermission != nil {
+		l = m.AuthRoleRevokePermission.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.ClusterVersionSet != nil {
+		l = m.ClusterVersionSet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.ClusterMemberAttrSet != nil {
+		l = m.ClusterMemberAttrSet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.DowngradeInfoSet != nil {
+		l = m.DowngradeInfoSet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.DowngradeVersionTest != nil {
+		l = m.DowngradeVersionTest.Size()
+		n += 3 + l + sovRaftInternal(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *EmptyResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *InternalAuthenticateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	l = len(m.SimpleToken)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
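+// Editorial note (not generated): sovRaftInternal is the varint-encoded size
+// of x, i.e. ceil(bitlen(x)/7); e.g. 300 needs 9 bits, so (9+6)/7 = 2 bytes.
+// sozRaftInternal zigzag-encodes x first, so small negative values stay small
+// (e.g. -1 -> 1, one byte) before measuring.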
+func sovRaftInternal(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaftInternal(x uint64) (n int) {
+	return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RequestHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Username = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+			}
+			m.AuthRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AuthRevision |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.V2 == nil {
+				m.V2 = &Request{}
+			}
+			if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Range == nil {
+				m.Range = &RangeRequest{}
+			}
+			if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Put == nil {
+				m.Put = &PutRequest{}
+			}
+			if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DeleteRange == nil {
+				m.DeleteRange = &DeleteRangeRequest{}
+			}
+			if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Txn == nil {
+				m.Txn = &TxnRequest{}
+			}
+			if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Compaction == nil {
+				m.Compaction = &CompactionRequest{}
+			}
+			if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseGrant == nil {
+				m.LeaseGrant = &LeaseGrantRequest{}
+			}
+			if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseRevoke == nil {
+				m.LeaseRevoke = &LeaseRevokeRequest{}
+			}
+			if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Alarm == nil {
+				m.Alarm = &AlarmRequest{}
+			}
+			if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseCheckpoint == nil {
+				m.LeaseCheckpoint = &LeaseCheckpointRequest{}
+			}
+			if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 100:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &RequestHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1000:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthEnable == nil {
+				m.AuthEnable = &AuthEnableRequest{}
+			}
+			if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1011:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthDisable == nil {
+				m.AuthDisable = &AuthDisableRequest{}
+			}
+			if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1012:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Authenticate == nil {
+				m.Authenticate = &InternalAuthenticateRequest{}
+			}
+			if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1013:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthStatus", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthStatus == nil {
+				m.AuthStatus = &AuthStatusRequest{}
+			}
+			if err := m.AuthStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1100:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserAdd == nil {
+				m.AuthUserAdd = &AuthUserAddRequest{}
+			}
+			if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1101:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserDelete == nil {
+				m.AuthUserDelete = &AuthUserDeleteRequest{}
+			}
+			if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1102:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserGet == nil {
+				m.AuthUserGet = &AuthUserGetRequest{}
+			}
+			if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1103:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserChangePassword == nil {
+				m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
+			}
+			if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1104:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserGrantRole == nil {
+				m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
+			}
+			if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1105:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserRevokeRole == nil {
+				m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
+			}
+			if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1106:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserList == nil {
+				m.AuthUserList = &AuthUserListRequest{}
+			}
+			if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1107:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleList == nil {
+				m.AuthRoleList = &AuthRoleListRequest{}
+			}
+			if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1200:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleAdd == nil {
+				m.AuthRoleAdd = &AuthRoleAddRequest{}
+			}
+			if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1201:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleDelete == nil {
+				m.AuthRoleDelete = &AuthRoleDeleteRequest{}
+			}
+			if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1202:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleGet == nil {
+				m.AuthRoleGet = &AuthRoleGetRequest{}
+			}
+			if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1203:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleGrantPermission == nil {
+				m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
+			}
+			if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1204:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleRevokePermission == nil {
+				m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
+			}
+			if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1300:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersionSet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ClusterVersionSet == nil {
+				m.ClusterVersionSet = &membershippb.ClusterVersionSetRequest{}
+			}
+			if err := m.ClusterVersionSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1301:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterMemberAttrSet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ClusterMemberAttrSet == nil {
+				m.ClusterMemberAttrSet = &membershippb.ClusterMemberAttrSetRequest{}
+			}
+			if err := m.ClusterMemberAttrSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1302:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfoSet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DowngradeInfoSet == nil {
+				m.DowngradeInfoSet = &membershippb.DowngradeInfoSetRequest{}
+			}
+			if err := m.DowngradeInfoSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9900:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DowngradeVersionTest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DowngradeVersionTest == nil {
+				m.DowngradeVersionTest = &DowngradeVersionTestRequest{}
+			}
+			if err := m.DowngradeVersionTest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SimpleToken = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRaftInternal(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRaftInternal
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupRaftInternal
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthRaftInternal
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthRaftInternal        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaftInternal          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupRaftInternal = fmt.Errorf("proto: unexpected end of group")
+)
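
Aside on the decoding pattern: every generated Unmarshal method above inlines the same base-128 varint loop — each byte contributes its low 7 bits, least-significant group first, and a clear high bit (b < 0x80) marks the final byte. A minimal standalone sketch of that loop (a hypothetical helper, not part of the vendored file):

	package main

	import (
		"errors"
		"fmt"
	)

	// decodeVarint mirrors the inlined loop in the generated code: accumulate
	// 7 bits per byte and stop when the continuation bit (0x80) is clear.
	func decodeVarint(dAtA []byte) (uint64, int, error) {
		var x uint64
		for shift, i := uint(0), 0; ; shift += 7 {
			if shift >= 64 {
				return 0, 0, errors.New("proto: integer overflow")
			}
			if i >= len(dAtA) {
				return 0, 0, errors.New("unexpected EOF")
			}
			b := dAtA[i]
			i++
			x |= uint64(b&0x7F) << shift
			if b < 0x80 {
				return x, i, nil
			}
		}
	}

	func main() {
		// 0xA2 0x06 decodes to 802 — the tag of InternalRaftRequest field 100
		// (header) with wire type 2, as handled by "case 100" above.
		v, n, err := decodeVarint([]byte{0xA2, 0x06})
		fmt.Println(v, n, err) // 802 2 <nil>
	}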
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
new file mode 100644
index 0000000..88b8ab5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
@@ -0,0 +1,91 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcdserver.proto";
+import "rpc.proto";
+import "etcd/api/versionpb/version.proto";
+import "etcd/api/membershippb/membership.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message RequestHeader {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  uint64 ID = 1;
+  // username is the name of the user associated with the auth token of the gRPC connection
+  string username = 2;
+  // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+  uint64 auth_revision = 3 [(versionpb.etcd_version_field) = "3.1"];
+}
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+message InternalRaftRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  RequestHeader header = 100;
+  uint64 ID = 1;
+
+  Request v2 = 2;
+
+  RangeRequest range = 3;
+  PutRequest put = 4;
+  DeleteRangeRequest delete_range = 5;
+  TxnRequest txn = 6;
+  CompactionRequest compaction = 7;
+
+  LeaseGrantRequest lease_grant = 8;
+  LeaseRevokeRequest lease_revoke = 9;
+
+  AlarmRequest alarm = 10;
+
+  LeaseCheckpointRequest lease_checkpoint = 11 [(versionpb.etcd_version_field) = "3.4"];
+
+  AuthEnableRequest auth_enable = 1000;
+  AuthDisableRequest auth_disable = 1011;
+  AuthStatusRequest auth_status = 1013 [(versionpb.etcd_version_field) = "3.5"];
+
+  InternalAuthenticateRequest authenticate = 1012;
+
+  AuthUserAddRequest auth_user_add = 1100;
+  AuthUserDeleteRequest auth_user_delete = 1101;
+  AuthUserGetRequest auth_user_get = 1102;
+  AuthUserChangePasswordRequest auth_user_change_password = 1103;
+  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
+  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
+  AuthUserListRequest auth_user_list = 1106;
+  AuthRoleListRequest auth_role_list = 1107;
+
+  AuthRoleAddRequest auth_role_add = 1200;
+  AuthRoleDeleteRequest auth_role_delete = 1201;
+  AuthRoleGetRequest auth_role_get = 1202;
+  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
+  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
+
+  membershippb.ClusterVersionSetRequest cluster_version_set = 1300 [(versionpb.etcd_version_field) = "3.5"];
+  membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301 [(versionpb.etcd_version_field) = "3.5"];
+  membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302 [(versionpb.etcd_version_field) = "3.5"];
+
+  DowngradeVersionTestRequest downgrade_version_test = 9900 [(versionpb.etcd_version_field) = "3.6"];
+}
+
+message EmptyResponse {
+}
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled in by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
+message InternalAuthenticateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+  string name = 1;
+  string password = 2;
+
+  // simple_token is generated in API layer (etcdserver/v3_server.go)
+  string simple_token = 3;
+}
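
Each case number in the Unmarshal switch of raft_internal.pb.go is exactly the field number declared here; on the wire, the tag is the varint (field_number << 3) | wire_type, with wire type 2 meaning length-delimited. A quick check of that arithmetic for three of the fields above (illustrative only):

	package main

	import "fmt"

	func main() {
		const lengthDelimited = 2
		fmt.Println(6<<3 | lengthDelimited)    // 50: txn, a single-byte tag (0x32)
		fmt.Println(100<<3 | lengthDelimited)  // 802: header, encoded as varint 0xA2 0x06
		fmt.Println(1300<<3 | lengthDelimited) // 10402: cluster_version_set
	}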
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go
new file mode 100644
index 0000000..a9431d5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+	"fmt"
+	"strings"
+
+	proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements a custom proto Stringer: it redacts
+// passwords and replaces value fields with value_size fields.
+type InternalRaftStringer struct {
+	Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+	switch {
+	case as.Request.LeaseGrant != nil:
+		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseGrant.TTL,
+			as.Request.LeaseGrant.ID,
+		)
+	case as.Request.LeaseRevoke != nil:
+		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseRevoke.ID,
+		)
+	case as.Request.Authenticate != nil:
+		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+			as.Request.Header.String(),
+			as.Request.Authenticate.Name,
+			as.Request.Authenticate.SimpleToken,
+		)
+	case as.Request.AuthUserAdd != nil:
+		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserAdd.Name,
+		)
+	case as.Request.AuthUserChangePassword != nil:
+		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserChangePassword.Name,
+		)
+	case as.Request.Put != nil:
+		return fmt.Sprintf("header:<%s> put:<%s>",
+			as.Request.Header.String(),
+			NewLoggablePutRequest(as.Request.Put).String(),
+		)
+	case as.Request.Txn != nil:
+		return fmt.Sprintf("header:<%s> txn:<%s>",
+			as.Request.Header.String(),
+			NewLoggableTxnRequest(as.Request.Txn).String(),
+		)
+	default:
+		// nothing to redact
+	}
+	return as.Request.String()
+}
+
+// txnRequestStringer implements fmt.Stringer, a custom proto String to replace value bytes
+// fields with value size fields in any nested txn and put operations.
+type txnRequestStringer struct {
+	Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) fmt.Stringer {
+	return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+	var compare []string
+	for _, c := range as.Request.Compare {
+		switch cv := c.TargetUnion.(type) {
+		case *Compare_Value:
+			compare = append(compare, newLoggableValueCompare(c, cv).String())
+		default:
+			// nothing to redact
+			compare = append(compare, c.String())
+		}
+	}
+	var success []string
+	for _, s := range as.Request.Success {
+		success = append(success, newLoggableRequestOp(s).String())
+	}
+	var failure []string
+	for _, f := range as.Request.Failure {
+		failure = append(failure, newLoggableRequestOp(f).String())
+	}
+	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+		strings.Join(compare, " "),
+		strings.Join(success, " "),
+		strings.Join(failure, " "),
+	)
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+	Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+	return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+	switch op := as.Op.Request.(type) {
+	case *RequestOp_RequestPut:
+		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+	case *RequestOp_RequestTxn:
+		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+	default:
+		// nothing to redact
+	}
+	return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
+	ValueSize int64                 `protobuf:"varint,7,opt,name=value_size,proto3"`
+	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+	return &loggableValueCompare{
+		c.Result,
+		c.Target,
+		c.Key,
+		int64(len(cv.Value)),
+		c.RangeEnd,
+	}
+}
+
+func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage()    {}
+
+// loggablePutRequest implements proto.Message, a custom proto String to replace value bytes
+// field with a value size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+	ValueSize   int64  `protobuf:"varint,2,opt,name=value_size,proto3"`
+	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
+	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
+func NewLoggablePutRequest(request *PutRequest) proto.Message {
+	return &loggablePutRequest{
+		request.Key,
+		int64(len(request.Value)),
+		request.Lease,
+		request.PrevKv,
+		request.IgnoreValue,
+		request.IgnoreLease,
+	}
+}
+
+func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage()    {}
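
A short usage sketch for the loggable wrappers above — the request contents are illustrative, but the types and the InternalRaftStringer field are as defined in this file:

	package main

	import (
		"fmt"

		pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	)

	func main() {
		// Wrapping the request makes String() print value_size instead of the
		// raw value bytes (see loggablePutRequest above).
		req := &pb.InternalRaftRequest{
			Header: &pb.RequestHeader{ID: 1},
			Put: &pb.PutRequest{
				Key:   []byte("k"),
				Value: []byte("a large value we do not want in logs"),
			},
		}
		fmt.Println((&pb.InternalRaftStringer{Request: req}).String())
	}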
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
new file mode 100644
index 0000000..42bf641
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
@@ -0,0 +1,26545 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package etcdserverpb
+
+import (
+	context "context"
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+	_ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
+	authpb "go.etcd.io/etcd/api/v3/authpb"
+	mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
+	_ "go.etcd.io/etcd/api/v3/versionpb"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AlarmType int32
+
+const (
+	AlarmType_NONE    AlarmType = 0
+	AlarmType_NOSPACE AlarmType = 1
+	AlarmType_CORRUPT AlarmType = 2
+)
+
+var AlarmType_name = map[int32]string{
+	0: "NONE",
+	1: "NOSPACE",
+	2: "CORRUPT",
+}
+
+var AlarmType_value = map[string]int32{
+	"NONE":    0,
+	"NOSPACE": 1,
+	"CORRUPT": 2,
+}
+
+func (x AlarmType) String() string {
+	return proto.EnumName(AlarmType_name, int32(x))
+}
+
+func (AlarmType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+
+type RangeRequest_SortOrder int32
+
+const (
+	RangeRequest_NONE    RangeRequest_SortOrder = 0
+	RangeRequest_ASCEND  RangeRequest_SortOrder = 1
+	RangeRequest_DESCEND RangeRequest_SortOrder = 2
+)
+
+var RangeRequest_SortOrder_name = map[int32]string{
+	0: "NONE",
+	1: "ASCEND",
+	2: "DESCEND",
+}
+
+var RangeRequest_SortOrder_value = map[string]int32{
+	"NONE":    0,
+	"ASCEND":  1,
+	"DESCEND": 2,
+}
+
+func (x RangeRequest_SortOrder) String() string {
+	return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
+}
+
+func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
+}
+
+type RangeRequest_SortTarget int32
+
+const (
+	RangeRequest_KEY     RangeRequest_SortTarget = 0
+	RangeRequest_VERSION RangeRequest_SortTarget = 1
+	RangeRequest_CREATE  RangeRequest_SortTarget = 2
+	RangeRequest_MOD     RangeRequest_SortTarget = 3
+	RangeRequest_VALUE   RangeRequest_SortTarget = 4
+)
+
+var RangeRequest_SortTarget_name = map[int32]string{
+	0: "KEY",
+	1: "VERSION",
+	2: "CREATE",
+	3: "MOD",
+	4: "VALUE",
+}
+
+var RangeRequest_SortTarget_value = map[string]int32{
+	"KEY":     0,
+	"VERSION": 1,
+	"CREATE":  2,
+	"MOD":     3,
+	"VALUE":   4,
+}
+
+func (x RangeRequest_SortTarget) String() string {
+	return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
+}
+
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
+}
+
+type Compare_CompareResult int32
+
+const (
+	Compare_EQUAL     Compare_CompareResult = 0
+	Compare_GREATER   Compare_CompareResult = 1
+	Compare_LESS      Compare_CompareResult = 2
+	Compare_NOT_EQUAL Compare_CompareResult = 3
+)
+
+var Compare_CompareResult_name = map[int32]string{
+	0: "EQUAL",
+	1: "GREATER",
+	2: "LESS",
+	3: "NOT_EQUAL",
+}
+
+var Compare_CompareResult_value = map[string]int32{
+	"EQUAL":     0,
+	"GREATER":   1,
+	"LESS":      2,
+	"NOT_EQUAL": 3,
+}
+
+func (x Compare_CompareResult) String() string {
+	return proto.EnumName(Compare_CompareResult_name, int32(x))
+}
+
+func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
+}
+
+type Compare_CompareTarget int32
+
+const (
+	Compare_VERSION Compare_CompareTarget = 0
+	Compare_CREATE  Compare_CompareTarget = 1
+	Compare_MOD     Compare_CompareTarget = 2
+	Compare_VALUE   Compare_CompareTarget = 3
+	Compare_LEASE   Compare_CompareTarget = 4
+)
+
+var Compare_CompareTarget_name = map[int32]string{
+	0: "VERSION",
+	1: "CREATE",
+	2: "MOD",
+	3: "VALUE",
+	4: "LEASE",
+}
+
+var Compare_CompareTarget_value = map[string]int32{
+	"VERSION": 0,
+	"CREATE":  1,
+	"MOD":     2,
+	"VALUE":   3,
+	"LEASE":   4,
+}
+
+func (x Compare_CompareTarget) String() string {
+	return proto.EnumName(Compare_CompareTarget_name, int32(x))
+}
+
+func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
+}
+
+type WatchCreateRequest_FilterType int32
+
+const (
+	// filter out put event.
+	WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+	// filter out delete event.
+	WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+	0: "NOPUT",
+	1: "NODELETE",
+}
+
+var WatchCreateRequest_FilterType_value = map[string]int32{
+	"NOPUT":    0,
+	"NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+	return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+	AlarmRequest_GET        AlarmRequest_AlarmAction = 0
+	AlarmRequest_ACTIVATE   AlarmRequest_AlarmAction = 1
+	AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+	0: "GET",
+	1: "ACTIVATE",
+	2: "DEACTIVATE",
+}
+
+var AlarmRequest_AlarmAction_value = map[string]int32{
+	"GET":        0,
+	"ACTIVATE":   1,
+	"DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+	return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{54, 0}
+}
+
+type DowngradeRequest_DowngradeAction int32
+
+const (
+	DowngradeRequest_VALIDATE DowngradeRequest_DowngradeAction = 0
+	DowngradeRequest_ENABLE   DowngradeRequest_DowngradeAction = 1
+	DowngradeRequest_CANCEL   DowngradeRequest_DowngradeAction = 2
+)
+
+var DowngradeRequest_DowngradeAction_name = map[int32]string{
+	0: "VALIDATE",
+	1: "ENABLE",
+	2: "CANCEL",
+}
+
+var DowngradeRequest_DowngradeAction_value = map[string]int32{
+	"VALIDATE": 0,
+	"ENABLE":   1,
+	"CANCEL":   2,
+}
+
+func (x DowngradeRequest_DowngradeAction) String() string {
+	return proto.EnumName(DowngradeRequest_DowngradeAction_name, int32(x))
+}
+
+func (DowngradeRequest_DowngradeAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{57, 0}
+}
+
+type ResponseHeader struct {
+	// cluster_id is the ID of the cluster which sent the response.
+	ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+	// member_id is the ID of the member which sent the response.
+	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+	// revision is the key-value store revision when the request was applied, and it's
+	// unset (so 0) for calls that do not interact with the key-value store.
+	// For watch progress responses, the header.revision indicates progress. All future events
+	// received in this stream are guaranteed to have a higher revision number than the
+	// header.revision number.
+	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+	// raft_term is the raft term when the request was applied.
+	RaftTerm             uint64   `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ResponseHeader) Reset()         { *m = ResponseHeader{} }
+func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
+func (*ResponseHeader) ProtoMessage()    {}
+func (*ResponseHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseHeader.Merge(m, src)
+}
+func (m *ResponseHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
+
+func (m *ResponseHeader) GetClusterId() uint64 {
+	if m != nil {
+		return m.ClusterId
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetMemberId() uint64 {
+	if m != nil {
+		return m.MemberId
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
+
+type RangeRequest struct {
+	// key is the first key for the range. If range_end is not given, the request only looks up key.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the upper bound on the requested range [key, range_end).
+	// If range_end is '\0', the range is all keys >= key.
+	// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+	// then the range request gets all keys prefixed with key.
+	// If both key and range_end are '\0', then the range request returns all keys.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// limit is a limit on the number of keys returned for the request. When limit is set to 0,
+	// it is treated as no limit.
+	Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+	// revision is the point-in-time of the key-value store to use for the range.
+	// If revision is less than or equal to zero, the range is over the newest key-value store.
+	// If the revision has been compacted, ErrCompacted is returned as a response.
+	Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+	// sort_order is the order for returned sorted results.
+	SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+	// sort_target is the key-value field to use for sorting.
+	SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+	// serializable sets the range request to use serializable member-local reads.
+	// Range requests are linearizable by default; linearizable requests have higher
+	// latency and lower throughput than serializable requests but reflect the current
+	// consensus of the cluster. For better performance, in exchange for possible stale reads,
+	// a serializable range request is served locally without needing to reach consensus
+	// with other nodes in the cluster.
+	Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+	// keys_only when set returns only the keys and not the values.
+	KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+	// count_only when set returns only the count of the keys in the range.
+	CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+	// min_mod_revision is the lower bound for returned key mod revisions; all keys with
+	// lesser mod revisions will be filtered away.
+	MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+	// max_mod_revision is the upper bound for returned key mod revisions; all keys with
+	// greater mod revisions will be filtered away.
+	MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+	// min_create_revision is the lower bound for returned key create revisions; all keys with
+	// lesser create revisions will be filtered away.
+	MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+	// max_create_revision is the upper bound for returned key create revisions; all keys with
+	// greater create revisions will be filtered away.
+	MaxCreateRevision    int64    `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
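+// Illustration of the range_end prefix rule described above: for a prefix
+// whose final byte is below 0xff, the end of the prefix range is the prefix
+// with that last byte incremented, e.g.:
+//
+//	end := []byte("foo")
+//	end[len(end)-1]++ // "fop": Range(key="foo", range_end="fop") matches every key prefixed with "foo"
+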
+func (m *RangeRequest) Reset()         { *m = RangeRequest{} }
+func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
+func (*RangeRequest) ProtoMessage()    {}
+func (*RangeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1}
+}
+func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RangeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeRequest.Merge(m, src)
+}
+func (m *RangeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
+
+func (m *RangeRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *RangeRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *RangeRequest) GetLimit() int64 {
+	if m != nil {
+		return m.Limit
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
+	if m != nil {
+		return m.SortOrder
+	}
+	return RangeRequest_NONE
+}
+
+func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
+	if m != nil {
+		return m.SortTarget
+	}
+	return RangeRequest_KEY
+}
+
+func (m *RangeRequest) GetSerializable() bool {
+	if m != nil {
+		return m.Serializable
+	}
+	return false
+}
+
+func (m *RangeRequest) GetKeysOnly() bool {
+	if m != nil {
+		return m.KeysOnly
+	}
+	return false
+}
+
+func (m *RangeRequest) GetCountOnly() bool {
+	if m != nil {
+		return m.CountOnly
+	}
+	return false
+}
+
+func (m *RangeRequest) GetMinModRevision() int64 {
+	if m != nil {
+		return m.MinModRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMaxModRevision() int64 {
+	if m != nil {
+		return m.MaxModRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMinCreateRevision() int64 {
+	if m != nil {
+		return m.MinCreateRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMaxCreateRevision() int64 {
+	if m != nil {
+		return m.MaxCreateRevision
+	}
+	return 0
+}
+
+type RangeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// kvs is the list of key-value pairs matched by the range request.
+	// kvs is empty when count is requested.
+	Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
+	// more indicates if there are more keys to return in the requested range.
+	More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
+	// count is set to the actual number of keys within the range when requested.
+	// Unlike Kvs, it is unaffected by limits and filters (e.g., Min/Max, Create/Modify, Revisions)
+	// and reflects the full count within the specified range.
+	Count                int64    `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RangeResponse) Reset()         { *m = RangeResponse{} }
+func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
+func (*RangeResponse) ProtoMessage()    {}
+func (*RangeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{2}
+}
+func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RangeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeResponse.Merge(m, src)
+}
+func (m *RangeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
+
+func (m *RangeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
+	if m != nil {
+		return m.Kvs
+	}
+	return nil
+}
+
+func (m *RangeResponse) GetMore() bool {
+	if m != nil {
+		return m.More
+	}
+	return false
+}
+
+func (m *RangeResponse) GetCount() int64 {
+	if m != nil {
+		return m.Count
+	}
+	return 0
+}
+
+type PutRequest struct {
+	// key is the key, in bytes, to put into the key-value store.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// value is the value, in bytes, to associate with the key in the key-value store.
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	// lease is the lease ID to associate with the key in the key-value store. A lease
+	// value of 0 indicates no lease.
+	Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
+	// If prev_kv is set, etcd gets the previous key-value pair before changing it.
+	// The previous key-value pair will be returned in the put response.
+	PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	// If ignore_value is set, etcd updates the key using its current value.
+	// Returns an error if the key does not exist.
+	IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
+	// If ignore_lease is set, etcd updates the key using its current lease.
+	// Returns an error if the key does not exist.
+	IgnoreLease          bool     `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PutRequest) Reset()         { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage()    {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{3}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PutRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PutRequest.Merge(m, src)
+}
+func (m *PutRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
+
+func (m *PutRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *PutRequest) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *PutRequest) GetLease() int64 {
+	if m != nil {
+		return m.Lease
+	}
+	return 0
+}
+
+func (m *PutRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+func (m *PutRequest) GetIgnoreValue() bool {
+	if m != nil {
+		return m.IgnoreValue
+	}
+	return false
+}
+
+func (m *PutRequest) GetIgnoreLease() bool {
+	if m != nil {
+		return m.IgnoreLease
+	}
+	return false
+}
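+
+// Illustrative sketch (not part of the generated API): re-attach a lease to an
+// existing key while keeping its current value, using the ignore_value
+// behavior documented above. etcd rejects this request if the key does not
+// exist.
+func examplePutKeepValue(key []byte, lease int64) *PutRequest {
+	return &PutRequest{
+		Key:         key,
+		Lease:       lease,
+		IgnoreValue: true,
+	}
+}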
+
+type PutResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// if prev_kv is set in the request, the previous key-value pair will be returned.
+	PrevKv               *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *PutResponse) Reset()         { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage()    {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{4}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PutResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PutResponse.Merge(m, src)
+}
+func (m *PutResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
+
+func (m *PutResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
+	if m != nil {
+		return m.PrevKv
+	}
+	return nil
+}
+
+type DeleteRangeRequest struct {
+	// key is the first key to delete in the range.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the key following the last key to delete for the range [key, range_end).
+	// If range_end is not given, the range is defined to contain only the key argument.
+	// If range_end is one bit larger than the given key, then the range is all the keys
+	// with the prefix (the given key).
+	// If range_end is '\0', the range is all keys greater than or equal to the key argument.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+	// The previous key-value pairs will be returned in the delete response.
+	PrevKv               bool     `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteRangeRequest) Reset()         { *m = DeleteRangeRequest{} }
+func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeRequest) ProtoMessage()    {}
+func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{5}
+}
+func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
+}
+func (m *DeleteRangeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
+
+func (m *DeleteRangeRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *DeleteRangeRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *DeleteRangeRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
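+
+// Illustrative sketch (not part of the generated API): build a
+// DeleteRangeRequest covering every key with the given prefix.
+// rangeEndForPrefix is a hypothetical helper mirroring the common etcd client
+// idiom: increment the last byte that is not 0xff and truncate; a prefix of
+// all 0xff bytes maps to "\0", i.e. everything from key onward.
+func exampleDeletePrefix(prefix []byte) *DeleteRangeRequest {
+	return &DeleteRangeRequest{Key: prefix, RangeEnd: rangeEndForPrefix(prefix)}
+}
+
+func rangeEndForPrefix(prefix []byte) []byte {
+	end := make([]byte, len(prefix))
+	copy(end, prefix)
+	for i := len(end) - 1; i >= 0; i-- {
+		if end[i] < 0xff {
+			end[i]++
+			return end[:i+1]
+		}
+	}
+	return []byte{0} // all 0xff: range extends to the end of the keyspace
+}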
+
+type DeleteRangeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// deleted is the number of keys deleted by the delete range request.
+	Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
+	// if prev_kv is set in the request, the previous key-value pairs will be returned.
+	PrevKvs              []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *DeleteRangeResponse) Reset()         { *m = DeleteRangeResponse{} }
+func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeResponse) ProtoMessage()    {}
+func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{6}
+}
+func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
+}
+func (m *DeleteRangeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
+
+func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *DeleteRangeResponse) GetDeleted() int64 {
+	if m != nil {
+		return m.Deleted
+	}
+	return 0
+}
+
+func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
+	if m != nil {
+		return m.PrevKvs
+	}
+	return nil
+}
+
+type RequestOp struct {
+	// request is a union of request types accepted by a transaction.
+	//
+	// Types that are valid to be assigned to Request:
+	//	*RequestOp_RequestRange
+	//	*RequestOp_RequestPut
+	//	*RequestOp_RequestDeleteRange
+	//	*RequestOp_RequestTxn
+	Request              isRequestOp_Request `protobuf_oneof:"request"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *RequestOp) Reset()         { *m = RequestOp{} }
+func (m *RequestOp) String() string { return proto.CompactTextString(m) }
+func (*RequestOp) ProtoMessage()    {}
+func (*RequestOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{7}
+}
+func (m *RequestOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestOp.Merge(m, src)
+}
+func (m *RequestOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestOp proto.InternalMessageInfo
+
+type isRequestOp_Request interface {
+	isRequestOp_Request()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type RequestOp_RequestRange struct {
+	RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof" json:"request_range,omitempty"`
+}
+type RequestOp_RequestPut struct {
+	RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof" json:"request_put,omitempty"`
+}
+type RequestOp_RequestDeleteRange struct {
+	RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof" json:"request_delete_range,omitempty"`
+}
+type RequestOp_RequestTxn struct {
+	RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof" json:"request_txn,omitempty"`
+}
+
+func (*RequestOp_RequestRange) isRequestOp_Request()       {}
+func (*RequestOp_RequestPut) isRequestOp_Request()         {}
+func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
+func (*RequestOp_RequestTxn) isRequestOp_Request()         {}
+
+func (m *RequestOp) GetRequest() isRequestOp_Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestRange() *RangeRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
+		return x.RequestRange
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestPut() *PutRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
+		return x.RequestPut
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
+		return x.RequestDeleteRange
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestTxn() *TxnRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
+		return x.RequestTxn
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*RequestOp) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*RequestOp_RequestRange)(nil),
+		(*RequestOp_RequestPut)(nil),
+		(*RequestOp_RequestDeleteRange)(nil),
+		(*RequestOp_RequestTxn)(nil),
+	}
+}
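+
+// Illustrative sketch (not part of the generated API): each RequestOp carries
+// exactly one of the oneof wrapper types listed above. For example, wrapping
+// a put operation for use inside a transaction:
+func exampleRequestOpPut(key, value []byte) *RequestOp {
+	return &RequestOp{
+		Request: &RequestOp_RequestPut{
+			RequestPut: &PutRequest{Key: key, Value: value},
+		},
+	}
+}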
+
+type ResponseOp struct {
+	// response is a union of response types returned by a transaction.
+	//
+	// Types that are valid to be assigned to Response:
+	//	*ResponseOp_ResponseRange
+	//	*ResponseOp_ResponsePut
+	//	*ResponseOp_ResponseDeleteRange
+	//	*ResponseOp_ResponseTxn
+	Response             isResponseOp_Response `protobuf_oneof:"response"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *ResponseOp) Reset()         { *m = ResponseOp{} }
+func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
+func (*ResponseOp) ProtoMessage()    {}
+func (*ResponseOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{8}
+}
+func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseOp.Merge(m, src)
+}
+func (m *ResponseOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
+
+type isResponseOp_Response interface {
+	isResponseOp_Response()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type ResponseOp_ResponseRange struct {
+	ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof" json:"response_range,omitempty"`
+}
+type ResponseOp_ResponsePut struct {
+	ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof" json:"response_put,omitempty"`
+}
+type ResponseOp_ResponseDeleteRange struct {
+	ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof" json:"response_delete_range,omitempty"`
+}
+type ResponseOp_ResponseTxn struct {
+	ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof" json:"response_txn,omitempty"`
+}
+
+func (*ResponseOp_ResponseRange) isResponseOp_Response()       {}
+func (*ResponseOp_ResponsePut) isResponseOp_Response()         {}
+func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponseTxn) isResponseOp_Response()         {}
+
+func (m *ResponseOp) GetResponse() isResponseOp_Response {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseRange() *RangeResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
+		return x.ResponseRange
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponsePut() *PutResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
+		return x.ResponsePut
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
+		return x.ResponseDeleteRange
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseTxn() *TxnResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
+		return x.ResponseTxn
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ResponseOp) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ResponseOp_ResponseRange)(nil),
+		(*ResponseOp_ResponsePut)(nil),
+		(*ResponseOp_ResponseDeleteRange)(nil),
+		(*ResponseOp_ResponseTxn)(nil),
+	}
+}
+
+type Compare struct {
+	// result is logical comparison operation for this comparison.
+	Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
+	// target is the key-value field to inspect for the comparison.
+	Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
+	// key is the subject key for the comparison operation.
+	Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+	// Types that are valid to be assigned to TargetUnion:
+	//	*Compare_Version
+	//	*Compare_CreateRevision
+	//	*Compare_ModRevision
+	//	*Compare_Value
+	//	*Compare_Lease
+	TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
+	// range_end compares the given target to all keys in the range [key, range_end).
+	// See RangeRequest for more details on key ranges.
+	RangeEnd             []byte   `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Compare) Reset()         { *m = Compare{} }
+func (m *Compare) String() string { return proto.CompactTextString(m) }
+func (*Compare) ProtoMessage()    {}
+func (*Compare) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9}
+}
+func (m *Compare) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Compare) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Compare.Merge(m, src)
+}
+func (m *Compare) XXX_Size() int {
+	return m.Size()
+}
+func (m *Compare) XXX_DiscardUnknown() {
+	xxx_messageInfo_Compare.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Compare proto.InternalMessageInfo
+
+type isCompare_TargetUnion interface {
+	isCompare_TargetUnion()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Compare_Version struct {
+	Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof" json:"version,omitempty"`
+}
+type Compare_CreateRevision struct {
+	CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof" json:"create_revision,omitempty"`
+}
+type Compare_ModRevision struct {
+	ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof" json:"mod_revision,omitempty"`
+}
+type Compare_Value struct {
+	Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof" json:"value,omitempty"`
+}
+type Compare_Lease struct {
+	Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof" json:"lease,omitempty"`
+}
+
+func (*Compare_Version) isCompare_TargetUnion()        {}
+func (*Compare_CreateRevision) isCompare_TargetUnion() {}
+func (*Compare_ModRevision) isCompare_TargetUnion()    {}
+func (*Compare_Value) isCompare_TargetUnion()          {}
+func (*Compare_Lease) isCompare_TargetUnion()          {}
+
+func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
+	if m != nil {
+		return m.TargetUnion
+	}
+	return nil
+}
+
+func (m *Compare) GetResult() Compare_CompareResult {
+	if m != nil {
+		return m.Result
+	}
+	return Compare_EQUAL
+}
+
+func (m *Compare) GetTarget() Compare_CompareTarget {
+	if m != nil {
+		return m.Target
+	}
+	return Compare_VERSION
+}
+
+func (m *Compare) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *Compare) GetVersion() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
+		return x.Version
+	}
+	return 0
+}
+
+func (m *Compare) GetCreateRevision() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
+		return x.CreateRevision
+	}
+	return 0
+}
+
+func (m *Compare) GetModRevision() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
+		return x.ModRevision
+	}
+	return 0
+}
+
+func (m *Compare) GetValue() []byte {
+	if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
+		return x.Value
+	}
+	return nil
+}
+
+func (m *Compare) GetLease() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
+		return x.Lease
+	}
+	return 0
+}
+
+func (m *Compare) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Compare) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Compare_Version)(nil),
+		(*Compare_CreateRevision)(nil),
+		(*Compare_ModRevision)(nil),
+		(*Compare_Value)(nil),
+		(*Compare_Lease)(nil),
+	}
+}
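+
+// Illustrative sketch (not part of the generated API): a guard asserting that
+// key has version 0, i.e. that it does not currently exist. Result and Target
+// use the enum values referenced by the getters above (Compare_EQUAL,
+// Compare_VERSION), and the compared version travels in the Compare_Version
+// oneof wrapper.
+func exampleKeyAbsentCompare(key []byte) *Compare {
+	return &Compare{
+		Result:      Compare_EQUAL,
+		Target:      Compare_VERSION,
+		Key:         key,
+		TargetUnion: &Compare_Version{Version: 0},
+	}
+}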
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard
+// evaluates to true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+type TxnRequest struct {
+	// compare is a list of predicates representing a conjunction of terms.
+	// If the comparisons succeed, then the success requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	// If the comparisons fail, then the failure requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
+	// success is a list of requests which will be applied when compare evaluates to true.
+	Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
+	// failure is a list of requests which will be applied when compare evaluates to false.
+	Failure              []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *TxnRequest) Reset()         { *m = TxnRequest{} }
+func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage()    {}
+func (*TxnRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{10}
+}
+func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TxnRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TxnRequest.Merge(m, src)
+}
+func (m *TxnRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *TxnRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_TxnRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
+
+func (m *TxnRequest) GetCompare() []*Compare {
+	if m != nil {
+		return m.Compare
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+	if m != nil {
+		return m.Success
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+	if m != nil {
+		return m.Failure
+	}
+	return nil
+}
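+
+// Illustrative sketch (not part of the generated API): a create-if-absent
+// transaction built from the hypothetical helpers sketched above. If the
+// guard holds, the put in Success is applied; otherwise the read in Failure
+// runs and returns the existing key-value pair.
+func exampleCreateIfAbsentTxn(key, value []byte) *TxnRequest {
+	return &TxnRequest{
+		Compare: []*Compare{exampleKeyAbsentCompare(key)},
+		Success: []*RequestOp{{
+			Request: &RequestOp_RequestPut{RequestPut: &PutRequest{Key: key, Value: value}},
+		}},
+		Failure: []*RequestOp{{
+			Request: &RequestOp_RequestRange{RequestRange: &RangeRequest{Key: key}},
+		}},
+	}
+}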
+
+type TxnResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// succeeded is set to true if the compare evaluated to true, and false otherwise.
+	Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+	// responses is a list of responses corresponding to the results from applying
+	// success if succeeded is true or failure if succeeded is false.
+	Responses            []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *TxnResponse) Reset()         { *m = TxnResponse{} }
+func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage()    {}
+func (*TxnResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{11}
+}
+func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TxnResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TxnResponse.Merge(m, src)
+}
+func (m *TxnResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *TxnResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_TxnResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+	if m != nil {
+		return m.Succeeded
+	}
+	return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+	// revision is the key-value store revision for the compaction operation.
+	Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+	// physical is set so the RPC will wait until the compaction is physically
+	// applied to the local database such that compacted entries are totally
+	// removed from the backend database.
+	Physical             bool     `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CompactionRequest) Reset()         { *m = CompactionRequest{} }
+func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
+func (*CompactionRequest) ProtoMessage()    {}
+func (*CompactionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{12}
+}
+func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CompactionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CompactionRequest.Merge(m, src)
+}
+func (m *CompactionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CompactionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
+
+func (m *CompactionRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *CompactionRequest) GetPhysical() bool {
+	if m != nil {
+		return m.Physical
+	}
+	return false
+}
+
+type CompactionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *CompactionResponse) Reset()         { *m = CompactionResponse{} }
+func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
+func (*CompactionResponse) ProtoMessage()    {}
+func (*CompactionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{13}
+}
+func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CompactionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CompactionResponse.Merge(m, src)
+}
+func (m *CompactionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CompactionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
+
+func (m *CompactionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type HashRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashRequest) Reset()         { *m = HashRequest{} }
+func (m *HashRequest) String() string { return proto.CompactTextString(m) }
+func (*HashRequest) ProtoMessage()    {}
+func (*HashRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{14}
+}
+func (m *HashRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashRequest.Merge(m, src)
+}
+func (m *HashRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashRequest proto.InternalMessageInfo
+
+type HashKVRequest struct {
+	// revision is the key-value store revision for the hash operation.
+	Revision             int64    `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashKVRequest) Reset()         { *m = HashKVRequest{} }
+func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
+func (*HashKVRequest) ProtoMessage()    {}
+func (*HashKVRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{15}
+}
+func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashKVRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashKVRequest.Merge(m, src)
+}
+func (m *HashKVRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashKVRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
+
+func (m *HashKVRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+type HashKVResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+	Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	// compact_revision is the compacted revision of key-value store when hash begins.
+	CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	// hash_revision is the revision up to which the hash is calculated.
+	HashRevision         int64    `protobuf:"varint,4,opt,name=hash_revision,json=hashRevision,proto3" json:"hash_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashKVResponse) Reset()         { *m = HashKVResponse{} }
+func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
+func (*HashKVResponse) ProtoMessage()    {}
+func (*HashKVResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{16}
+}
+func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashKVResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashKVResponse.Merge(m, src)
+}
+func (m *HashKVResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashKVResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
+
+func (m *HashKVResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *HashKVResponse) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+func (m *HashKVResponse) GetCompactRevision() int64 {
+	if m != nil {
+		return m.CompactRevision
+	}
+	return 0
+}
+
+func (m *HashKVResponse) GetHashRevision() int64 {
+	if m != nil {
+		return m.HashRevision
+	}
+	return 0
+}
+
+type HashResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// hash is the hash value computed from the responding member's key-value backend.
+	Hash                 uint32   `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashResponse) Reset()         { *m = HashResponse{} }
+func (m *HashResponse) String() string { return proto.CompactTextString(m) }
+func (*HashResponse) ProtoMessage()    {}
+func (*HashResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{17}
+}
+func (m *HashResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashResponse.Merge(m, src)
+}
+func (m *HashResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashResponse proto.InternalMessageInfo
+
+func (m *HashResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *HashResponse) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+type SnapshotRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SnapshotRequest) Reset()         { *m = SnapshotRequest{} }
+func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*SnapshotRequest) ProtoMessage()    {}
+func (*SnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{18}
+}
+func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotRequest.Merge(m, src)
+}
+func (m *SnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
+
+type SnapshotResponse struct {
+	// header has the current key-value store information. The first header in the snapshot
+	// stream indicates the point in time of the snapshot.
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// remaining_bytes is the number of blob bytes to be sent after this message.
+	RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
+	// blob contains the next chunk of the snapshot in the snapshot stream.
+	Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+	// version is the local version of the server that created the snapshot.
+	// In a cluster whose members run different binary versions, each member can return a different result.
+	// It informs which etcd server version should be used when restoring the snapshot.
+	Version              string   `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SnapshotResponse) Reset()         { *m = SnapshotResponse{} }
+func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*SnapshotResponse) ProtoMessage()    {}
+func (*SnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{19}
+}
+func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotResponse.Merge(m, src)
+}
+func (m *SnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
+
+func (m *SnapshotResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *SnapshotResponse) GetRemainingBytes() uint64 {
+	if m != nil {
+		return m.RemainingBytes
+	}
+	return 0
+}
+
+func (m *SnapshotResponse) GetBlob() []byte {
+	if m != nil {
+		return m.Blob
+	}
+	return nil
+}
+
+func (m *SnapshotResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
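+
+// Illustrative sketch (not part of the generated API): reassemble a snapshot
+// from the response stream. recv is a hypothetical stand-in for the gRPC
+// stream's Recv method; the stream is exhausted once RemainingBytes reports
+// zero bytes outstanding after the current chunk.
+func exampleReadSnapshot(recv func() (*SnapshotResponse, error)) ([]byte, error) {
+	var blob []byte
+	for {
+		resp, err := recv()
+		if err != nil {
+			return nil, err
+		}
+		blob = append(blob, resp.GetBlob()...)
+		if resp.GetRemainingBytes() == 0 {
+			return blob, nil
+		}
+	}
+}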
+
+type WatchRequest struct {
+	// request_union is a request to either create a new watcher or cancel an existing watcher.
+	//
+	// Types that are valid to be assigned to RequestUnion:
+	//	*WatchRequest_CreateRequest
+	//	*WatchRequest_CancelRequest
+	//	*WatchRequest_ProgressRequest
+	RequestUnion         isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *WatchRequest) Reset()         { *m = WatchRequest{} }
+func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchRequest) ProtoMessage()    {}
+func (*WatchRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{20}
+}
+func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchRequest.Merge(m, src)
+}
+func (m *WatchRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
+
+type isWatchRequest_RequestUnion interface {
+	isWatchRequest_RequestUnion()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type WatchRequest_CreateRequest struct {
+	CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof" json:"create_request,omitempty"`
+}
+type WatchRequest_CancelRequest struct {
+	CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof" json:"cancel_request,omitempty"`
+}
+type WatchRequest_ProgressRequest struct {
+	ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof" json:"progress_request,omitempty"`
+}
+
+func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion()   {}
+func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion()   {}
+func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
+
+func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
+	if m != nil {
+		return m.RequestUnion
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
+		return x.CreateRequest
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
+		return x.CancelRequest
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
+		return x.ProgressRequest
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*WatchRequest) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*WatchRequest_CreateRequest)(nil),
+		(*WatchRequest_CancelRequest)(nil),
+		(*WatchRequest_ProgressRequest)(nil),
+	}
+}
+
+type WatchCreateRequest struct {
+	// key is the key to register for watching.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+	// only the key argument is watched. If range_end is equal to '\0', all keys greater than
+	// or equal to the key argument are watched.
+	// If the range_end is one bit larger than the given key,
+	// then all keys with the prefix (the given key) will be watched.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// start_revision is an optional revision to watch from (inclusive). No start_revision means "now".
+	StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+	// progress_notify is set so that the etcd server will periodically send a WatchResponse with
+	// no events to the new watcher if there are no recent events. It is useful when clients
+	// wish to recover a disconnected watcher starting from a recent known revision.
+	// The etcd server may decide how often it will send notifications based on current load.
+	ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+	// filters filter the events on the server side before they are sent back to the watcher.
+	Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+	// If prev_kv is set, created watcher gets the previous KV before the event happens.
+	// If the previous KV is already compacted, nothing will be returned.
+	PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	// If watch_id is provided and non-zero, it will be assigned to this watcher.
+	// Since creating a watcher in etcd is not a synchronous operation,
+	// this can be used to ensure that ordering is correct when creating multiple
+	// watchers on the same stream. Creating a watcher with an ID already in
+	// use on the stream will cause an error to be returned.
+	WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// fragment enables splitting large revisions into multiple watch responses.
+	Fragment             bool     `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchCreateRequest) Reset()         { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage()    {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21}
+}
+func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchCreateRequest.Merge(m, src)
+}
+func (m *WatchCreateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchCreateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
+
+func (m *WatchCreateRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+	if m != nil {
+		return m.StartRevision
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+	if m != nil {
+		return m.ProgressNotify
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+	if m != nil {
+		return m.Filters
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetFragment() bool {
+	if m != nil {
+		return m.Fragment
+	}
+	return false
+}
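+
+// Illustrative sketch (not part of the generated API): open a watch over
+// every key with the given prefix, starting from a known revision and asking
+// for the previous KV on each event. rangeEndForPrefix is the hypothetical
+// helper sketched after DeleteRangeRequest above.
+func examplePrefixWatch(prefix []byte, fromRev int64) *WatchRequest {
+	return &WatchRequest{
+		RequestUnion: &WatchRequest_CreateRequest{
+			CreateRequest: &WatchCreateRequest{
+				Key:           prefix,
+				RangeEnd:      rangeEndForPrefix(prefix),
+				StartRevision: fromRev,
+				PrevKv:        true,
+			},
+		},
+	}
+}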
+
+type WatchCancelRequest struct {
+	// watch_id is the watcher id to cancel so that no more events are transmitted.
+	WatchId              int64    `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchCancelRequest) Reset()         { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage()    {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{22}
+}
+func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchCancelRequest.Merge(m, src)
+}
+func (m *WatchCancelRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchCancelRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+type WatchProgressRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchProgressRequest) Reset()         { *m = WatchProgressRequest{} }
+func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchProgressRequest) ProtoMessage()    {}
+func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{23}
+}
+func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchProgressRequest.Merge(m, src)
+}
+func (m *WatchProgressRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchProgressRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
+
+type WatchResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// watch_id is the ID of the watcher that corresponds to the response.
+	WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// created is set to true if the response is for a create watch request.
+	// The client should record the watch_id and expect to receive events for
+	// the created watcher from the same stream.
+	// All events sent to the created watcher will attach with the same watch_id.
+	Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+	// canceled is set to true if the response is for a cancel watch request
+	// or if the start_revision has already been compacted.
+	// No further events will be sent to the canceled watcher.
+	Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+	// compact_revision is set to the minimum index if a watcher tries to watch
+	// at a compacted index.
+	//
+	// This happens when creating a watcher at a compacted revision or when the watcher cannot
+	// catch up with the progress of the key-value store.
+	//
+	// The client should treat the watcher as canceled and should not try to create any
+	// watcher with the same start_revision again.
+	CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	// cancel_reason indicates the reason for canceling the watcher.
+	CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+	// fragment is true if a large watch response was split over multiple responses.
+	Fragment             bool            `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
+	Events               []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *WatchResponse) Reset()         { *m = WatchResponse{} }
+func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
+func (*WatchResponse) ProtoMessage()    {}
+func (*WatchResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{24}
+}
+func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchResponse.Merge(m, src)
+}
+func (m *WatchResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
+
+func (m *WatchResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *WatchResponse) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+func (m *WatchResponse) GetCreated() bool {
+	if m != nil {
+		return m.Created
+	}
+	return false
+}
+
+func (m *WatchResponse) GetCanceled() bool {
+	if m != nil {
+		return m.Canceled
+	}
+	return false
+}
+
+func (m *WatchResponse) GetCompactRevision() int64 {
+	if m != nil {
+		return m.CompactRevision
+	}
+	return 0
+}
+
+func (m *WatchResponse) GetCancelReason() string {
+	if m != nil {
+		return m.CancelReason
+	}
+	return ""
+}
+
+func (m *WatchResponse) GetFragment() bool {
+	if m != nil {
+		return m.Fragment
+	}
+	return false
+}
+
+func (m *WatchResponse) GetEvents() []*mvccpb.Event {
+	if m != nil {
+		return m.Events
+	}
+	return nil
+}
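+
+// Illustrative sketch (not part of the generated API): decide whether a
+// canceled watcher can be recreated. Per the comments above, a non-zero
+// CompactRevision means the requested start_revision was compacted, so the
+// client should resume no earlier than that revision rather than retrying the
+// same start_revision.
+func exampleRetryRevision(resp *WatchResponse) (rev int64, retriable bool) {
+	if !resp.GetCanceled() {
+		return 0, false // watcher is still live; nothing to retry
+	}
+	if compacted := resp.GetCompactRevision(); compacted != 0 {
+		return compacted, true // resume from the compact revision
+	}
+	return 0, false
+}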
+
+type LeaseGrantRequest struct {
+	// TTL is the advisory time-to-live in seconds. Expired lease will return -1.
+	TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+	ID                   int64    `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseGrantRequest) Reset()         { *m = LeaseGrantRequest{} }
+func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantRequest) ProtoMessage()    {}
+func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{25}
+}
+func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
+}
+func (m *LeaseGrantRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
+
+func (m *LeaseGrantRequest) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseGrantRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
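+
+// Example (illustrative sketch, not part of the generated code): building a
+// LeaseGrantRequest. ID 0 lets the lessor pick the lease ID, and the TTL is
+// advisory, so the server may grant a different value in the response.
+//
+//	req := &LeaseGrantRequest{TTL: 60, ID: 0}
+//	data, err := req.Marshal()
+//	if err != nil {
+//		// handle the marshalling error
+//	}
+//	_ = data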
+
+type LeaseGrantResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID for the granted lease.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the server chosen lease time-to-live in seconds.
+	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	Error                string   `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseGrantResponse) Reset()         { *m = LeaseGrantResponse{} }
+func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantResponse) ProtoMessage()    {}
+func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{26}
+}
+func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
+}
+func (m *LeaseGrantResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
+
+func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseGrantResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseGrantResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseGrantResponse) GetError() string {
+	if m != nil {
+		return m.Error
+	}
+	return ""
+}
+
+type LeaseRevokeRequest struct {
+	// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseRevokeRequest) Reset()         { *m = LeaseRevokeRequest{} }
+func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeRequest) ProtoMessage()    {}
+func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{27}
+}
+func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
+}
+func (m *LeaseRevokeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
+
+func (m *LeaseRevokeRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseRevokeResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseRevokeResponse) Reset()         { *m = LeaseRevokeResponse{} }
+func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeResponse) ProtoMessage()    {}
+func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{28}
+}
+func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
+}
+func (m *LeaseRevokeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
+
+func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type LeaseCheckpoint struct {
+	// ID is the lease ID to checkpoint.
+	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// Remaining_TTL is the remaining time until expiry of the lease.
+	Remaining_TTL        int64    `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseCheckpoint) Reset()         { *m = LeaseCheckpoint{} }
+func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpoint) ProtoMessage()    {}
+func (*LeaseCheckpoint) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{29}
+}
+func (m *LeaseCheckpoint) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseCheckpoint.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseCheckpoint) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseCheckpoint.Merge(m, src)
+}
+func (m *LeaseCheckpoint) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseCheckpoint) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseCheckpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCheckpoint proto.InternalMessageInfo
+
+func (m *LeaseCheckpoint) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseCheckpoint) GetRemaining_TTL() int64 {
+	if m != nil {
+		return m.Remaining_TTL
+	}
+	return 0
+}
+
+type LeaseCheckpointRequest struct {
+	Checkpoints          []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints,proto3" json:"checkpoints,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *LeaseCheckpointRequest) Reset()         { *m = LeaseCheckpointRequest{} }
+func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpointRequest) ProtoMessage()    {}
+func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{30}
+}
+func (m *LeaseCheckpointRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseCheckpointRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseCheckpointRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseCheckpointRequest.Merge(m, src)
+}
+func (m *LeaseCheckpointRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseCheckpointRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseCheckpointRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCheckpointRequest proto.InternalMessageInfo
+
+func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint {
+	if m != nil {
+		return m.Checkpoints
+	}
+	return nil
+}
+
+type LeaseCheckpointResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseCheckpointResponse) Reset()         { *m = LeaseCheckpointResponse{} }
+func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpointResponse) ProtoMessage()    {}
+func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{31}
+}
+func (m *LeaseCheckpointResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseCheckpointResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseCheckpointResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseCheckpointResponse.Merge(m, src)
+}
+func (m *LeaseCheckpointResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseCheckpointResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseCheckpointResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCheckpointResponse proto.InternalMessageInfo
+
+func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type LeaseKeepAliveRequest struct {
+	// ID is the lease ID for the lease to keep alive.
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseKeepAliveRequest) Reset()         { *m = LeaseKeepAliveRequest{} }
+func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveRequest) ProtoMessage()    {}
+func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{32}
+}
+func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
+}
+func (m *LeaseKeepAliveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseKeepAliveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID from the keep alive request.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the new time-to-live for the lease.
+	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseKeepAliveResponse) Reset()         { *m = LeaseKeepAliveResponse{} }
+func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveResponse) ProtoMessage()    {}
+func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{33}
+}
+func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
+}
+func (m *LeaseKeepAliveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseKeepAliveResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseKeepAliveResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
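+
+// Example (illustrative sketch, not part of the generated code): a keep-alive
+// response whose TTL is zero or negative means the lease could not be
+// renewed, typically because it already expired or was revoked.
+//
+//	func renewed(resp *LeaseKeepAliveResponse) bool {
+//		return resp.GetTTL() > 0
+//	}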
+
+type LeaseTimeToLiveRequest struct {
+	// ID is the lease ID for the lease.
+	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// keys is true to query all the keys attached to this lease.
+	Keys                 bool     `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseTimeToLiveRequest) Reset()         { *m = LeaseTimeToLiveRequest{} }
+func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveRequest) ProtoMessage()    {}
+func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{34}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveRequest) GetKeys() bool {
+	if m != nil {
+		return m.Keys
+	}
+	return false
+}
+
+type LeaseTimeToLiveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID from the time-to-live request.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
+	// Keys is the list of keys attached to this lease.
+	Keys                 [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseTimeToLiveResponse) Reset()         { *m = LeaseTimeToLiveResponse{} }
+func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveResponse) ProtoMessage()    {}
+func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{35}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseTimeToLiveResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
+	if m != nil {
+		return m.GrantedTTL
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
+	if m != nil {
+		return m.Keys
+	}
+	return nil
+}
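+
+// Example (illustrative sketch, not part of the generated code): a remaining
+// TTL of -1 conventionally indicates that the lease no longer exists, while
+// GrantedTTL preserves the TTL originally granted at creation or renewal.
+//
+//	func remaining(resp *LeaseTimeToLiveResponse) (int64, bool) {
+//		if resp.GetTTL() < 0 {
+//			return 0, false // lease expired or not found
+//		}
+//		return resp.GetTTL(), true
+//	}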
+
+type LeaseLeasesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseLeasesRequest) Reset()         { *m = LeaseLeasesRequest{} }
+func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesRequest) ProtoMessage()    {}
+func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{36}
+}
+func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
+}
+func (m *LeaseLeasesRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
+
+type LeaseStatus struct {
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseStatus) Reset()         { *m = LeaseStatus{} }
+func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
+func (*LeaseStatus) ProtoMessage()    {}
+func (*LeaseStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{37}
+}
+func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseStatus.Merge(m, src)
+}
+func (m *LeaseStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
+
+func (m *LeaseStatus) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseLeasesResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Leases               []*LeaseStatus  `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseLeasesResponse) Reset()         { *m = LeaseLeasesResponse{} }
+func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesResponse) ProtoMessage()    {}
+func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{38}
+}
+func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
+}
+func (m *LeaseLeasesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
+
+func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
+	if m != nil {
+		return m.Leases
+	}
+	return nil
+}
+
+type Member struct {
+	// ID is the member ID for this member.
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// peerURLs is the list of URLs the member exposes to the cluster for communication.
+	PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+	ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
+	// isLearner indicates if the member is a raft learner.
+	IsLearner            bool     `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Member) Reset()         { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage()    {}
+func (*Member) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{39}
+}
+func (m *Member) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Member.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Member) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Member.Merge(m, src)
+}
+func (m *Member) XXX_Size() int {
+	return m.Size()
+}
+func (m *Member) XXX_DiscardUnknown() {
+	xxx_messageInfo_Member.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Member proto.InternalMessageInfo
+
+func (m *Member) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *Member) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Member) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+func (m *Member) GetClientURLs() []string {
+	if m != nil {
+		return m.ClientURLs
+	}
+	return nil
+}
+
+func (m *Member) GetIsLearner() bool {
+	if m != nil {
+		return m.IsLearner
+	}
+	return false
+}
+
+type MemberAddRequest struct {
+	// peerURLs is the list of URLs the added member will use to communicate with the cluster.
+	PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	// isLearner indicates if the added member is a raft learner.
+	IsLearner            bool     `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberAddRequest) Reset()         { *m = MemberAddRequest{} }
+func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberAddRequest) ProtoMessage()    {}
+func (*MemberAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{40}
+}
+func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberAddRequest.Merge(m, src)
+}
+func (m *MemberAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
+
+func (m *MemberAddRequest) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+func (m *MemberAddRequest) GetIsLearner() bool {
+	if m != nil {
+		return m.IsLearner
+	}
+	return false
+}
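+
+// Example (illustrative sketch, not part of the generated code): adding a
+// member as a raft learner; the peer URL below is a placeholder.
+//
+//	req := &MemberAddRequest{
+//		PeerURLs:  []string{"http://10.0.0.5:2380"},
+//		IsLearner: true,
+//	}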
+
+type MemberAddResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// member is the member information for the added member.
+	Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
+	// members is a list of all members after adding the new member.
+	Members              []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberAddResponse) Reset()         { *m = MemberAddResponse{} }
+func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberAddResponse) ProtoMessage()    {}
+func (*MemberAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{41}
+}
+func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberAddResponse.Merge(m, src)
+}
+func (m *MemberAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
+
+func (m *MemberAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberAddResponse) GetMember() *Member {
+	if m != nil {
+		return m.Member
+	}
+	return nil
+}
+
+func (m *MemberAddResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberRemoveRequest struct {
+	// ID is the member ID of the member to remove.
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberRemoveRequest) Reset()         { *m = MemberRemoveRequest{} }
+func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveRequest) ProtoMessage()    {}
+func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{42}
+}
+func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
+}
+func (m *MemberRemoveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
+
+func (m *MemberRemoveRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type MemberRemoveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after removing the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberRemoveResponse) Reset()         { *m = MemberRemoveResponse{} }
+func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveResponse) ProtoMessage()    {}
+func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{43}
+}
+func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
+}
+func (m *MemberRemoveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
+
+func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberRemoveResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberUpdateRequest struct {
+	// ID is the member ID of the member to update.
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// peerURLs is the new list of URLs the member will use to communicate with the cluster.
+	PeerURLs             []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberUpdateRequest) Reset()         { *m = MemberUpdateRequest{} }
+func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateRequest) ProtoMessage()    {}
+func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{44}
+}
+func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
+}
+func (m *MemberUpdateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
+
+func (m *MemberUpdateRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *MemberUpdateRequest) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+type MemberUpdateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after updating the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberUpdateResponse) Reset()         { *m = MemberUpdateResponse{} }
+func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateResponse) ProtoMessage()    {}
+func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{45}
+}
+func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
+}
+func (m *MemberUpdateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
+
+func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberUpdateResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberListRequest struct {
+	Linearizable         bool     `protobuf:"varint,1,opt,name=linearizable,proto3" json:"linearizable,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberListRequest) Reset()         { *m = MemberListRequest{} }
+func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberListRequest) ProtoMessage()    {}
+func (*MemberListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{46}
+}
+func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberListRequest.Merge(m, src)
+}
+func (m *MemberListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
+
+func (m *MemberListRequest) GetLinearizable() bool {
+	if m != nil {
+		return m.Linearizable
+	}
+	return false
+}
+
+type MemberListResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members associated with the cluster.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberListResponse) Reset()         { *m = MemberListResponse{} }
+func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberListResponse) ProtoMessage()    {}
+func (*MemberListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{47}
+}
+func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberListResponse.Merge(m, src)
+}
+func (m *MemberListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
+
+func (m *MemberListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberListResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberPromoteRequest struct {
+	// ID is the member ID of the member to promote.
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberPromoteRequest) Reset()         { *m = MemberPromoteRequest{} }
+func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberPromoteRequest) ProtoMessage()    {}
+func (*MemberPromoteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{48}
+}
+func (m *MemberPromoteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberPromoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberPromoteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberPromoteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberPromoteRequest.Merge(m, src)
+}
+func (m *MemberPromoteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberPromoteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberPromoteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberPromoteRequest proto.InternalMessageInfo
+
+func (m *MemberPromoteRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type MemberPromoteResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after promoting the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberPromoteResponse) Reset()         { *m = MemberPromoteResponse{} }
+func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberPromoteResponse) ProtoMessage()    {}
+func (*MemberPromoteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{49}
+}
+func (m *MemberPromoteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberPromoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberPromoteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberPromoteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberPromoteResponse.Merge(m, src)
+}
+func (m *MemberPromoteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberPromoteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberPromoteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberPromoteResponse proto.InternalMessageInfo
+
+func (m *MemberPromoteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberPromoteResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type DefragmentRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DefragmentRequest) Reset()         { *m = DefragmentRequest{} }
+func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
+func (*DefragmentRequest) ProtoMessage()    {}
+func (*DefragmentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{50}
+}
+func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DefragmentRequest.Merge(m, src)
+}
+func (m *DefragmentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DefragmentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
+
+type DefragmentResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *DefragmentResponse) Reset()         { *m = DefragmentResponse{} }
+func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
+func (*DefragmentResponse) ProtoMessage()    {}
+func (*DefragmentResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{51}
+}
+func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DefragmentResponse.Merge(m, src)
+}
+func (m *DefragmentResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DefragmentResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
+
+func (m *DefragmentResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type MoveLeaderRequest struct {
+	// targetID is the node ID for the new leader.
+	TargetID             uint64   `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MoveLeaderRequest) Reset()         { *m = MoveLeaderRequest{} }
+func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderRequest) ProtoMessage()    {}
+func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{52}
+}
+func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
+}
+func (m *MoveLeaderRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
+
+func (m *MoveLeaderRequest) GetTargetID() uint64 {
+	if m != nil {
+		return m.TargetID
+	}
+	return 0
+}
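+
+// Example (illustrative sketch, not part of the generated code): requesting a
+// leadership transfer to a specific member; the target ID is a placeholder.
+//
+//	req := &MoveLeaderRequest{TargetID: 0x1234abcd}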
+
+type MoveLeaderResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *MoveLeaderResponse) Reset()         { *m = MoveLeaderResponse{} }
+func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderResponse) ProtoMessage()    {}
+func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{53}
+}
+func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
+}
+func (m *MoveLeaderResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
+
+func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AlarmRequest struct {
+	// action is the kind of alarm request to issue. The action
+	// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+	// raised alarm.
+	Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
+	// memberID is the ID of the member associated with the alarm. If memberID is 0, the
+	// alarm request covers all members.
+	MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
+	// alarm is the type of alarm to consider for this request.
+	Alarm                AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *AlarmRequest) Reset()         { *m = AlarmRequest{} }
+func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*AlarmRequest) ProtoMessage()    {}
+func (*AlarmRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{54}
+}
+func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmRequest.Merge(m, src)
+}
+func (m *AlarmRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
+
+func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
+	if m != nil {
+		return m.Action
+	}
+	return AlarmRequest_GET
+}
+
+func (m *AlarmRequest) GetMemberID() uint64 {
+	if m != nil {
+		return m.MemberID
+	}
+	return 0
+}
+
+func (m *AlarmRequest) GetAlarm() AlarmType {
+	if m != nil {
+		return m.Alarm
+	}
+	return AlarmType_NONE
+}
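+
+// Example (illustrative sketch, not part of the generated code): deactivating
+// a raised NOSPACE alarm, assuming the AlarmType_NOSPACE constant generated
+// for that alarm type. MemberID 0 addresses all members, per the field
+// comment above.
+//
+//	req := &AlarmRequest{
+//		Action:   AlarmRequest_DEACTIVATE,
+//		MemberID: 0,
+//		Alarm:    AlarmType_NOSPACE,
+//	}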
+
+type AlarmMember struct {
+	// memberID is the ID of the member associated with the raised alarm.
+	MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
+	// alarm is the type of alarm which has been raised.
+	Alarm                AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *AlarmMember) Reset()         { *m = AlarmMember{} }
+func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
+func (*AlarmMember) ProtoMessage()    {}
+func (*AlarmMember) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{55}
+}
+func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmMember) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmMember.Merge(m, src)
+}
+func (m *AlarmMember) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmMember) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
+
+func (m *AlarmMember) GetMemberID() uint64 {
+	if m != nil {
+		return m.MemberID
+	}
+	return 0
+}
+
+func (m *AlarmMember) GetAlarm() AlarmType {
+	if m != nil {
+		return m.Alarm
+	}
+	return AlarmType_NONE
+}
+
+type AlarmResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// alarms is a list of alarms associated with the alarm request.
+	Alarms               []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *AlarmResponse) Reset()         { *m = AlarmResponse{} }
+func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
+func (*AlarmResponse) ProtoMessage()    {}
+func (*AlarmResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{56}
+}
+func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmResponse.Merge(m, src)
+}
+func (m *AlarmResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
+
+func (m *AlarmResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AlarmResponse) GetAlarms() []*AlarmMember {
+	if m != nil {
+		return m.Alarms
+	}
+	return nil
+}
+
+type DowngradeRequest struct {
+	// action is the kind of downgrade request to issue. The action may
+	// VALIDATE the target version, DOWNGRADE the cluster version,
+	// or CANCEL the current downgrading job.
+	Action DowngradeRequest_DowngradeAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.DowngradeRequest_DowngradeAction" json:"action,omitempty"`
+	// version is the target version to downgrade.
+	Version              string   `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DowngradeRequest) Reset()         { *m = DowngradeRequest{} }
+func (m *DowngradeRequest) String() string { return proto.CompactTextString(m) }
+func (*DowngradeRequest) ProtoMessage()    {}
+func (*DowngradeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{57}
+}
+func (m *DowngradeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DowngradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DowngradeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DowngradeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DowngradeRequest.Merge(m, src)
+}
+func (m *DowngradeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DowngradeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DowngradeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeRequest proto.InternalMessageInfo
+
+func (m *DowngradeRequest) GetAction() DowngradeRequest_DowngradeAction {
+	if m != nil {
+		return m.Action
+	}
+	return DowngradeRequest_VALIDATE
+}
+
+func (m *DowngradeRequest) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
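+
+// Example (illustrative sketch, not part of the generated code): validating a
+// downgrade target before issuing the actual DOWNGRADE action; the version
+// string is a placeholder.
+//
+//	req := &DowngradeRequest{
+//		Action:  DowngradeRequest_VALIDATE,
+//		Version: "3.4",
+//	}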
+
+type DowngradeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// version is the current cluster version.
+	Version              string   `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DowngradeResponse) Reset()         { *m = DowngradeResponse{} }
+func (m *DowngradeResponse) String() string { return proto.CompactTextString(m) }
+func (*DowngradeResponse) ProtoMessage()    {}
+func (*DowngradeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{58}
+}
+func (m *DowngradeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DowngradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DowngradeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DowngradeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DowngradeResponse.Merge(m, src)
+}
+func (m *DowngradeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DowngradeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DowngradeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeResponse proto.InternalMessageInfo
+
+func (m *DowngradeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *DowngradeResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+// DowngradeVersionTestRequest is used for test only. The version in
+// this request will be read as the WAL record version. If the downgrade
+// target version is less than this version, then the downgrade (online)
+// or migration (offline) isn't safe, so it shouldn't be allowed.
+type DowngradeVersionTestRequest struct {
+	Ver                  string   `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DowngradeVersionTestRequest) Reset()         { *m = DowngradeVersionTestRequest{} }
+func (m *DowngradeVersionTestRequest) String() string { return proto.CompactTextString(m) }
+func (*DowngradeVersionTestRequest) ProtoMessage()    {}
+func (*DowngradeVersionTestRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{59}
+}
+func (m *DowngradeVersionTestRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DowngradeVersionTestRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DowngradeVersionTestRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DowngradeVersionTestRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DowngradeVersionTestRequest.Merge(m, src)
+}
+func (m *DowngradeVersionTestRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DowngradeVersionTestRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DowngradeVersionTestRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeVersionTestRequest proto.InternalMessageInfo
+
+func (m *DowngradeVersionTestRequest) GetVer() string {
+	if m != nil {
+		return m.Ver
+	}
+	return ""
+}
+
+type StatusRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage()    {}
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{60}
+}
+func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusRequest.Merge(m, src)
+}
+func (m *StatusRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
+
+type StatusResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// version is the cluster protocol version used by the responding member.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// dbSize is the size of the backend database physically allocated, in bytes, by the responding member.
+	DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+	// leader is the member ID which the responding member believes is the current leader.
+	Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+	// raftIndex is the current raft committed index of the responding member.
+	RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+	// raftTerm is the current raft term of the responding member.
+	RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+	// raftAppliedIndex is the current raft applied index of the responding member.
+	RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"`
+	// errors contains alarm/health information and status.
+	Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"`
+	// dbSizeInUse is the size of the backend database logically in use, in bytes, by the responding member.
+	DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"`
+	// isLearner indicates if the member is a raft learner.
+	IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+	// storageVersion is the version of the db file. It might be updated with a delay relative to the target cluster version.
+	StorageVersion string `protobuf:"bytes,11,opt,name=storageVersion,proto3" json:"storageVersion,omitempty"`
+	// dbSizeQuota is the configured etcd storage quota in bytes (the value passed to the etcd instance via the --quota-backend-bytes flag).
+	DbSizeQuota int64 `protobuf:"varint,12,opt,name=dbSizeQuota,proto3" json:"dbSizeQuota,omitempty"`
+	// downgradeInfo indicates whether a downgrade process is in progress.
+	DowngradeInfo        *DowngradeInfo `protobuf:"bytes,13,opt,name=downgradeInfo,proto3" json:"downgradeInfo,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage()    {}
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{61}
+}
+func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusResponse.Merge(m, src)
+}
+func (m *StatusResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+	if m != nil {
+		return m.DbSize
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+	if m != nil {
+		return m.Leader
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+	if m != nil {
+		return m.RaftIndex
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftAppliedIndex() uint64 {
+	if m != nil {
+		return m.RaftAppliedIndex
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetErrors() []string {
+	if m != nil {
+		return m.Errors
+	}
+	return nil
+}
+
+func (m *StatusResponse) GetDbSizeInUse() int64 {
+	if m != nil {
+		return m.DbSizeInUse
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetIsLearner() bool {
+	if m != nil {
+		return m.IsLearner
+	}
+	return false
+}
+
+func (m *StatusResponse) GetStorageVersion() string {
+	if m != nil {
+		return m.StorageVersion
+	}
+	return ""
+}
+
+func (m *StatusResponse) GetDbSizeQuota() int64 {
+	if m != nil {
+		return m.DbSizeQuota
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetDowngradeInfo() *DowngradeInfo {
+	if m != nil {
+		return m.DowngradeInfo
+	}
+	return nil
+}
+
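The size fields above combine into useful health metrics; two illustrative helpers (not part of this file, reusing the pb alias from the earlier sketch):

// reclaimable approximates the bytes a defragmentation could recover:
// physically allocated space minus logically live data.
func reclaimable(s *pb.StatusResponse) int64 {
	return s.GetDbSize() - s.GetDbSizeInUse()
}

// quotaUsage reports how full the backend is relative to --quota-backend-bytes,
// guarding against an unset (zero) quota.
func quotaUsage(s *pb.StatusResponse) float64 {
	if s.GetDbSizeQuota() <= 0 {
		return 0
	}
	return float64(s.GetDbSize()) / float64(s.GetDbSizeQuota())
}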
+type DowngradeInfo struct {
+	// enabled indicates whether downgrade is enabled for the cluster.
+	Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	// targetVersion is the target downgrade version.
+	TargetVersion        string   `protobuf:"bytes,2,opt,name=targetVersion,proto3" json:"targetVersion,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DowngradeInfo) Reset()         { *m = DowngradeInfo{} }
+func (m *DowngradeInfo) String() string { return proto.CompactTextString(m) }
+func (*DowngradeInfo) ProtoMessage()    {}
+func (*DowngradeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{62}
+}
+func (m *DowngradeInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DowngradeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DowngradeInfo.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DowngradeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DowngradeInfo.Merge(m, src)
+}
+func (m *DowngradeInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *DowngradeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_DowngradeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeInfo proto.InternalMessageInfo
+
+func (m *DowngradeInfo) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *DowngradeInfo) GetTargetVersion() string {
+	if m != nil {
+		return m.TargetVersion
+	}
+	return ""
+}
+
+type AuthEnableRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthEnableRequest) Reset()         { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage()    {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{63}
+}
+func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthEnableRequest.Merge(m, src)
+}
+func (m *AuthEnableRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthEnableRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
+
+type AuthDisableRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthDisableRequest) Reset()         { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage()    {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{64}
+}
+func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthDisableRequest.Merge(m, src)
+}
+func (m *AuthDisableRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthDisableRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
+
+type AuthStatusRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthStatusRequest) Reset()         { *m = AuthStatusRequest{} }
+func (m *AuthStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthStatusRequest) ProtoMessage()    {}
+func (*AuthStatusRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{65}
+}
+func (m *AuthStatusRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthStatusRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthStatusRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthStatusRequest.Merge(m, src)
+}
+func (m *AuthStatusRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthStatusRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthStatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthStatusRequest proto.InternalMessageInfo
+
+type AuthenticateRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthenticateRequest) Reset()         { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage()    {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{66}
+}
+func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthenticateRequest.Merge(m, src)
+}
+func (m *AuthenticateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthenticateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
+
+func (m *AuthenticateRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserAddRequest struct {
+	Name                 string                 `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             string                 `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	Options              *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+	HashedPassword       string                 `protobuf:"bytes,4,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *AuthUserAddRequest) Reset()         { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage()    {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{67}
+}
+func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
+}
+func (m *AuthUserAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
+
+func (m *AuthUserAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *AuthUserAddRequest) GetHashedPassword() string {
+	if m != nil {
+		return m.HashedPassword
+	}
+	return ""
+}
+
+type AuthUserGetRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserGetRequest) Reset()         { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage()    {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{68}
+}
+func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
+}
+func (m *AuthUserGetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
+
+func (m *AuthUserGetRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserDeleteRequest struct {
+	// name is the name of the user to delete.
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserDeleteRequest) Reset()         { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage()    {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{69}
+}
+func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
+}
+func (m *AuthUserDeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthUserDeleteRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+	// name is the name of the user whose password is being changed.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// password is the new password for the user. Note that this field will be removed in the API layer.
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	// hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
+	HashedPassword       string   `protobuf:"bytes,3,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset()         { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage()    {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{70}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetHashedPassword() string {
+	if m != nil {
+		return m.HashedPassword
+	}
+	return ""
+}
+
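The field comments above describe a hand-off at the API layer: the plaintext password is replaced by a hash before the request is applied. A hypothetical sketch of that convention, assuming bcrypt (which etcd's auth store uses) and a helper name invented here:

import "golang.org/x/crypto/bcrypt"

// hashChangePassword mirrors the field comments: hash the plaintext, then
// drop it, so only hashedPassword travels past the API layer.
func hashChangePassword(req *pb.AuthUserChangePasswordRequest) error {
	h, err := bcrypt.GenerateFromPassword([]byte(req.GetPassword()), bcrypt.DefaultCost)
	if err != nil {
		return err
	}
	req.HashedPassword = string(h)
	req.Password = ""
	return nil
}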
+type AuthUserGrantRoleRequest struct {
+	// user is the name of the user who should be granted a given role.
+	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+	// role is the name of the role to grant to the user.
+	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset()         { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage()    {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{71}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+	if m != nil {
+		return m.User
+	}
+	return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset()         { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage()    {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{72}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleAddRequest struct {
+	// name is the name of the role to add to the authentication system.
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleAddRequest) Reset()         { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage()    {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{73}
+}
+func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
+}
+func (m *AuthRoleAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
+
+func (m *AuthRoleAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthRoleGetRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleGetRequest) Reset()         { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage()    {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{74}
+}
+func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
+}
+func (m *AuthRoleGetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGetRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserListRequest) Reset()         { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage()    {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{75}
+}
+func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserListRequest.Merge(m, src)
+}
+func (m *AuthUserListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
+
+type AuthRoleListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleListRequest) Reset()         { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage()    {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{76}
+}
+func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
+}
+func (m *AuthRoleListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
+
+type AuthRoleDeleteRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset()         { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage()    {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{77}
+}
+func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
+}
+func (m *AuthRoleDeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+	// name is the name of the role which will be granted the permission.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// perm is the permission to grant to the role.
+	Perm                 *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset()         { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage()    {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{78}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	Key                  []byte   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd             []byte   `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset()         { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage()    {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{79}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
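key and range_end follow etcd's half-open [key, range_end) convention, so revoking a permission over a prefix means computing the exclusive upper bound of that prefix. A hedged helper (clientv3 ships an equivalent GetPrefixRangeEnd):

// prefixRangeEnd increments the last non-0xff byte of the prefix; a prefix of
// all 0xff bytes falls back to "\x00", etcd's marker for the keyspace end.
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

// revokePrefix builds a request that revokes role's permission on every key
// under the given prefix (names here are illustrative).
func revokePrefix(role, prefix string) *pb.AuthRoleRevokePermissionRequest {
	return &pb.AuthRoleRevokePermissionRequest{
		Role:     role,
		Key:      []byte(prefix),
		RangeEnd: prefixRangeEnd([]byte(prefix)),
	}
}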
+type AuthEnableResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthEnableResponse) Reset()         { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage()    {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{80}
+}
+func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthEnableResponse.Merge(m, src)
+}
+func (m *AuthEnableResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthEnableResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthDisableResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthDisableResponse) Reset()         { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage()    {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{81}
+}
+func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthDisableResponse.Merge(m, src)
+}
+func (m *AuthDisableResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthDisableResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthStatusResponse struct {
+	Header  *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Enabled bool            `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	// authRevision is the current revision of the auth store.
+	AuthRevision         uint64   `protobuf:"varint,3,opt,name=authRevision,proto3" json:"authRevision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthStatusResponse) Reset()         { *m = AuthStatusResponse{} }
+func (m *AuthStatusResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthStatusResponse) ProtoMessage()    {}
+func (*AuthStatusResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{82}
+}
+func (m *AuthStatusResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthStatusResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthStatusResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthStatusResponse.Merge(m, src)
+}
+func (m *AuthStatusResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthStatusResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthStatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthStatusResponse proto.InternalMessageInfo
+
+func (m *AuthStatusResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthStatusResponse) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *AuthStatusResponse) GetAuthRevision() uint64 {
+	if m != nil {
+		return m.AuthRevision
+	}
+	return 0
+}
+
+type AuthenticateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// token is an authorized token that can be used in succeeding RPCs.
+	Token                string   `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthenticateResponse) Reset()         { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage()    {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{83}
+}
+func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthenticateResponse.Merge(m, src)
+}
+func (m *AuthenticateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthenticateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
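The token above is meant to be presented on succeeding RPCs; a minimal sketch, assuming etcd's conventional "token" gRPC metadata key:

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// withAuthToken attaches the authenticate token to all RPCs issued with the
// returned context.
func withAuthToken(ctx context.Context, resp *pb.AuthenticateResponse) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "token", resp.GetToken())
}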
+type AuthUserAddResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserAddResponse) Reset()         { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage()    {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{84}
+}
+func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
+}
+func (m *AuthUserAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGetResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserGetResponse) Reset()         { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage()    {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{85}
+}
+func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
+}
+func (m *AuthUserGetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserDeleteResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserDeleteResponse) Reset()         { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage()    {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{86}
+}
+func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
+}
+func (m *AuthUserDeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset()         { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage()    {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{87}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset()         { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage()    {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{88}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset()         { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage()    {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{89}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleAddResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleAddResponse) Reset()         { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage()    {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{90}
+}
+func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
+}
+func (m *AuthRoleAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGetResponse struct {
+	Header               *ResponseHeader      `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Perm                 []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *AuthRoleGetResponse) Reset()         { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage()    {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{91}
+}
+func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
+}
+func (m *AuthRoleGetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleListResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleListResponse) Reset()         { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage()    {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{92}
+}
+func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
+}
+func (m *AuthRoleListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserListResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Users                []string        `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserListResponse) Reset()         { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage()    {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{93}
+}
+func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserListResponse.Merge(m, src)
+}
+func (m *AuthUserListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+	if m != nil {
+		return m.Users
+	}
+	return nil
+}
+
+type AuthRoleDeleteResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset()         { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage()    {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{94}
+}
+func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
+}
+func (m *AuthRoleDeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset()         { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage()    {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{95}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset()         { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage()    {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{96}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
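+// Illustrative note (not generated code): the XXX_* methods on the message
+// types above are not called directly; the gogo/protobuf runtime dispatches
+// to them. A minimal round trip, assuming the message values shown, would be:
+//
+//	msg := &AuthRoleAddResponse{Header: &ResponseHeader{}}
+//	b, err := proto.Marshal(msg)       // drives Size and MarshalToSizedBuffer
+//	if err == nil {
+//		var out AuthRoleAddResponse
+//		err = proto.Unmarshal(b, &out) // drives XXX_Unmarshal
+//	}
+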
+func init() {
+	proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+	proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+	proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+	proto.RegisterEnum("etcdserverpb.DowngradeRequest_DowngradeAction", DowngradeRequest_DowngradeAction_name, DowngradeRequest_DowngradeAction_value)
+	proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+	proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+	proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+	proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+	proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+	proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+	proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+	proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+	proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+	proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+	proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+	proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+	proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+	proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+	proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+	proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+	proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+	proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+	proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+	proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+	proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+	proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+	proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+	proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
+	proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+	proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+	proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+	proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+	proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+	proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint")
+	proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest")
+	proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse")
+	proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+	proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+	proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+	proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+	proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+	proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+	proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+	proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+	proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+	proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+	proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+	proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+	proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+	proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+	proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+	proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+	proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest")
+	proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse")
+	proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+	proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+	proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+	proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+	proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+	proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+	proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+	proto.RegisterType((*DowngradeRequest)(nil), "etcdserverpb.DowngradeRequest")
+	proto.RegisterType((*DowngradeResponse)(nil), "etcdserverpb.DowngradeResponse")
+	proto.RegisterType((*DowngradeVersionTestRequest)(nil), "etcdserverpb.DowngradeVersionTestRequest")
+	proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+	proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+	proto.RegisterType((*DowngradeInfo)(nil), "etcdserverpb.DowngradeInfo")
+	proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+	proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+	proto.RegisterType((*AuthStatusRequest)(nil), "etcdserverpb.AuthStatusRequest")
+	proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+	proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+	proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+	proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+	proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+	proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+	proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+	proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+	proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+	proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+	proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+	proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+	proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+	proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+	proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+	proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+	proto.RegisterType((*AuthStatusResponse)(nil), "etcdserverpb.AuthStatusResponse")
+	proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+	proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+	proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+	proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+	proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+	proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+	proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+	proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+	proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+	proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+	proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+	proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+	proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+	proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+}
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
+
+var fileDescriptor_77a6da22d6a3feb1 = []byte{
+	// 4574 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3c, 0x5d, 0x6f, 0x1b, 0x57,
+	0x76, 0x1a, 0x92, 0x12, 0xc9, 0xc3, 0x0f, 0xd1, 0xd7, 0xb2, 0x4d, 0xd3, 0xb6, 0xac, 0x8c, 0xed,
+	0xc4, 0x71, 0x62, 0xd1, 0x96, 0xec, 0x64, 0xeb, 0x22, 0xe9, 0xd2, 0x12, 0x63, 0x6b, 0x2d, 0x4b,
+	0xca, 0x88, 0x76, 0x36, 0x2e, 0xb0, 0xea, 0x88, 0xbc, 0xa6, 0x66, 0x45, 0xce, 0x70, 0x67, 0x86,
+	0xb4, 0x94, 0x3e, 0x6c, 0xba, 0xed, 0x76, 0xb1, 0x2d, 0xb0, 0x40, 0x53, 0xa0, 0x58, 0x14, 0xed,
+	0x4b, 0x5b, 0xa0, 0x7d, 0x68, 0x8b, 0xf6, 0xa1, 0x0f, 0x45, 0x0b, 0xf4, 0xa1, 0x7d, 0x68, 0x1f,
+	0x0a, 0x14, 0xe8, 0x1f, 0x68, 0xd3, 0x7d, 0xea, 0xaf, 0x58, 0xdc, 0xaf, 0xb9, 0x77, 0xbe, 0x24,
+	0x67, 0xa5, 0x60, 0x5f, 0x62, 0xce, 0x3d, 0x9f, 0xf7, 0x9c, 0x7b, 0xcf, 0xb9, 0xf7, 0x9c, 0x1b,
+	0x41, 0xd1, 0x1d, 0x75, 0x17, 0x47, 0xae, 0xe3, 0x3b, 0xa8, 0x8c, 0xfd, 0x6e, 0xcf, 0xc3, 0xee,
+	0x04, 0xbb, 0xa3, 0xdd, 0xc6, 0x5c, 0xdf, 0xe9, 0x3b, 0x14, 0xd0, 0x24, 0xbf, 0x18, 0x4e, 0xa3,
+	0x4e, 0x70, 0x9a, 0xe6, 0xc8, 0x6a, 0x0e, 0x27, 0xdd, 0xee, 0x68, 0xb7, 0xb9, 0x3f, 0xe1, 0x90,
+	0x46, 0x00, 0x31, 0xc7, 0xfe, 0xde, 0x68, 0x97, 0xfe, 0xc3, 0x61, 0x0b, 0x01, 0x6c, 0x82, 0x5d,
+	0xcf, 0x72, 0xec, 0xd1, 0xae, 0xf8, 0xc5, 0x31, 0x2e, 0xf7, 0x1d, 0xa7, 0x3f, 0xc0, 0x8c, 0xde,
+	0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0x38, 0x94, 0xfd, 0xd3, 0xbd, 0xdd, 0xc7, 0xf6, 0x6d,
+	0x67, 0x84, 0x6d, 0x73, 0x64, 0x4d, 0x96, 0x9a, 0xce, 0x88, 0xe2, 0xc4, 0xf1, 0xf5, 0x9f, 0x68,
+	0x50, 0x35, 0xb0, 0x37, 0x72, 0x6c, 0x0f, 0x3f, 0xc6, 0x66, 0x0f, 0xbb, 0xe8, 0x0a, 0x40, 0x77,
+	0x30, 0xf6, 0x7c, 0xec, 0xee, 0x58, 0xbd, 0xba, 0xb6, 0xa0, 0xdd, 0xcc, 0x19, 0x45, 0x3e, 0xb2,
+	0xd6, 0x43, 0x97, 0xa0, 0x38, 0xc4, 0xc3, 0x5d, 0x06, 0xcd, 0x50, 0x68, 0x81, 0x0d, 0xac, 0xf5,
+	0x50, 0x03, 0x0a, 0x2e, 0x9e, 0x58, 0x44, 0xdd, 0x7a, 0x76, 0x41, 0xbb, 0x99, 0x35, 0x82, 0x6f,
+	0x42, 0xe8, 0x9a, 0x2f, 0xfd, 0x1d, 0x1f, 0xbb, 0xc3, 0x7a, 0x8e, 0x11, 0x92, 0x81, 0x0e, 0x76,
+	0x87, 0x0f, 0xf2, 0x3f, 0xf8, 0x87, 0x7a, 0x76, 0x79, 0xf1, 0x8e, 0xfe, 0xaf, 0xd3, 0x50, 0x36,
+	0x4c, 0xbb, 0x8f, 0x0d, 0xfc, 0xbd, 0x31, 0xf6, 0x7c, 0x54, 0x83, 0xec, 0x3e, 0x3e, 0xa4, 0x7a,
+	0x94, 0x0d, 0xf2, 0x93, 0x31, 0xb2, 0xfb, 0x78, 0x07, 0xdb, 0x4c, 0x83, 0x32, 0x61, 0x64, 0xf7,
+	0x71, 0xdb, 0xee, 0xa1, 0x39, 0x98, 0x1e, 0x58, 0x43, 0xcb, 0xe7, 0xe2, 0xd9, 0x47, 0x48, 0xaf,
+	0x5c, 0x44, 0xaf, 0x15, 0x00, 0xcf, 0x71, 0xfd, 0x1d, 0xc7, 0xed, 0x61, 0xb7, 0x3e, 0xbd, 0xa0,
+	0xdd, 0xac, 0x2e, 0x5d, 0x5f, 0x54, 0x3d, 0xbc, 0xa8, 0x2a, 0xb4, 0xb8, 0xed, 0xb8, 0xfe, 0x26,
+	0xc1, 0x35, 0x8a, 0x9e, 0xf8, 0x89, 0x3e, 0x82, 0x12, 0x65, 0xe2, 0x9b, 0x6e, 0x1f, 0xfb, 0xf5,
+	0x19, 0xca, 0xe5, 0xc6, 0x31, 0x5c, 0x3a, 0x14, 0xd9, 0xa0, 0xe2, 0xd9, 0x6f, 0xa4, 0x43, 0xd9,
+	0xc3, 0xae, 0x65, 0x0e, 0xac, 0xcf, 0xcc, 0xdd, 0x01, 0xae, 0xe7, 0x17, 0xb4, 0x9b, 0x05, 0x23,
+	0x34, 0x46, 0xe6, 0xbf, 0x8f, 0x0f, 0xbd, 0x1d, 0xc7, 0x1e, 0x1c, 0xd6, 0x0b, 0x14, 0xa1, 0x40,
+	0x06, 0x36, 0xed, 0xc1, 0x21, 0xf5, 0x9e, 0x33, 0xb6, 0x7d, 0x06, 0x2d, 0x52, 0x68, 0x91, 0x8e,
+	0x50, 0xf0, 0x5d, 0xa8, 0x0d, 0x2d, 0x7b, 0x67, 0xe8, 0xf4, 0x76, 0x02, 0x83, 0x00, 0x31, 0xc8,
+	0xc3, 0xfc, 0xef, 0x51, 0x0f, 0xdc, 0x35, 0xaa, 0x43, 0xcb, 0x7e, 0xea, 0xf4, 0x0c, 0x61, 0x1f,
+	0x42, 0x62, 0x1e, 0x84, 0x49, 0x4a, 0x51, 0x12, 0xf3, 0x40, 0x25, 0x79, 0x1f, 0xce, 0x12, 0x29,
+	0x5d, 0x17, 0x9b, 0x3e, 0x96, 0x54, 0xe5, 0x30, 0xd5, 0x99, 0xa1, 0x65, 0xaf, 0x50, 0x94, 0x10,
+	0xa1, 0x79, 0x10, 0x23, 0xac, 0x44, 0x09, 0xcd, 0x83, 0x30, 0xa1, 0xfe, 0x3e, 0x14, 0x03, 0xbf,
+	0xa0, 0x02, 0xe4, 0x36, 0x36, 0x37, 0xda, 0xb5, 0x29, 0x04, 0x30, 0xd3, 0xda, 0x5e, 0x69, 0x6f,
+	0xac, 0xd6, 0x34, 0x54, 0x82, 0xfc, 0x6a, 0x9b, 0x7d, 0x64, 0x1a, 0xf9, 0x2f, 0xf8, 0x7a, 0x7b,
+	0x02, 0x20, 0x5d, 0x81, 0xf2, 0x90, 0x7d, 0xd2, 0xfe, 0xb4, 0x36, 0x45, 0x90, 0x9f, 0xb7, 0x8d,
+	0xed, 0xb5, 0xcd, 0x8d, 0x9a, 0x46, 0xb8, 0xac, 0x18, 0xed, 0x56, 0xa7, 0x5d, 0xcb, 0x10, 0x8c,
+	0xa7, 0x9b, 0xab, 0xb5, 0x2c, 0x2a, 0xc2, 0xf4, 0xf3, 0xd6, 0xfa, 0xb3, 0x76, 0x2d, 0x17, 0x30,
+	0x93, 0xab, 0xf8, 0x4f, 0x34, 0xa8, 0x70, 0x77, 0xb3, 0xbd, 0x85, 0xee, 0xc1, 0xcc, 0x1e, 0xdd,
+	0x5f, 0x74, 0x25, 0x97, 0x96, 0x2e, 0x47, 0xd6, 0x46, 0x68, 0x0f, 0x1a, 0x1c, 0x17, 0xe9, 0x90,
+	0xdd, 0x9f, 0x78, 0xf5, 0xcc, 0x42, 0xf6, 0x66, 0x69, 0xa9, 0xb6, 0xc8, 0x22, 0xc9, 0xe2, 0x13,
+	0x7c, 0xf8, 0xdc, 0x1c, 0x8c, 0xb1, 0x41, 0x80, 0x08, 0x41, 0x6e, 0xe8, 0xb8, 0x98, 0x2e, 0xf8,
+	0x82, 0x41, 0x7f, 0x93, 0x5d, 0x40, 0x7d, 0xce, 0x17, 0x3b, 0xfb, 0x90, 0xea, 0xfd, 0xa7, 0x06,
+	0xb0, 0x35, 0xf6, 0xd3, 0xb7, 0xd8, 0x1c, 0x4c, 0x4f, 0x88, 0x04, 0xbe, 0xbd, 0xd8, 0x07, 0xdd,
+	0x5b, 0xd8, 0xf4, 0x70, 0xb0, 0xb7, 0xc8, 0x07, 0x5a, 0x80, 0xfc, 0xc8, 0xc5, 0x93, 0x9d, 0xfd,
+	0x09, 0x95, 0x56, 0x90, 0x7e, 0x9a, 0x21, 0xe3, 0x4f, 0x26, 0xe8, 0x16, 0x94, 0xad, 0xbe, 0xed,
+	0xb8, 0x78, 0x87, 0x31, 0x9d, 0x56, 0xd1, 0x96, 0x8c, 0x12, 0x03, 0xd2, 0x29, 0x29, 0xb8, 0x4c,
+	0xd4, 0x4c, 0x22, 0xee, 0x3a, 0x81, 0xc9, 0xf9, 0x7c, 0xae, 0x41, 0x89, 0xce, 0xe7, 0x44, 0xc6,
+	0x5e, 0x92, 0x13, 0xc9, 0x50, 0xb2, 0x98, 0xc1, 0x63, 0x53, 0x93, 0x2a, 0xd8, 0x80, 0x56, 0xf1,
+	0x00, 0xfb, 0xf8, 0x24, 0xc1, 0x4b, 0x31, 0x65, 0x36, 0xd1, 0x94, 0x52, 0xde, 0x5f, 0x68, 0x70,
+	0x36, 0x24, 0xf0, 0x44, 0x53, 0xaf, 0x43, 0xbe, 0x47, 0x99, 0x31, 0x9d, 0xb2, 0x86, 0xf8, 0x44,
+	0xf7, 0xa0, 0xc0, 0x55, 0xf2, 0xea, 0xd9, 0xe4, 0x65, 0x28, 0xb5, 0xcc, 0x33, 0x2d, 0x3d, 0xa9,
+	0xe6, 0x3f, 0x65, 0xa0, 0xc8, 0x8d, 0xb1, 0x39, 0x42, 0x2d, 0xa8, 0xb8, 0xec, 0x63, 0x87, 0xce,
+	0x99, 0xeb, 0xd8, 0x48, 0x8f, 0x93, 0x8f, 0xa7, 0x8c, 0x32, 0x27, 0xa1, 0xc3, 0xe8, 0x57, 0xa1,
+	0x24, 0x58, 0x8c, 0xc6, 0x3e, 0x77, 0x54, 0x3d, 0xcc, 0x40, 0x2e, 0xed, 0xc7, 0x53, 0x06, 0x70,
+	0xf4, 0xad, 0xb1, 0x8f, 0x3a, 0x30, 0x27, 0x88, 0xd9, 0xfc, 0xb8, 0x1a, 0x59, 0xca, 0x65, 0x21,
+	0xcc, 0x25, 0xee, 0xce, 0xc7, 0x53, 0x06, 0xe2, 0xf4, 0x0a, 0x10, 0xad, 0x4a, 0x95, 0xfc, 0x03,
+	0x96, 0x5f, 0x62, 0x2a, 0x75, 0x0e, 0x6c, 0xce, 0x44, 0x58, 0x6b, 0x59, 0xd1, 0xad, 0x73, 0x60,
+	0x07, 0x26, 0x7b, 0x58, 0x84, 0x3c, 0x1f, 0xd6, 0xff, 0x23, 0x03, 0x20, 0x3c, 0xb6, 0x39, 0x42,
+	0xab, 0x50, 0x75, 0xf9, 0x57, 0xc8, 0x7e, 0x97, 0x12, 0xed, 0xc7, 0x1d, 0x3d, 0x65, 0x54, 0x04,
+	0x11, 0x53, 0xf7, 0x43, 0x28, 0x07, 0x5c, 0xa4, 0x09, 0x2f, 0x26, 0x98, 0x30, 0xe0, 0x50, 0x12,
+	0x04, 0xc4, 0x88, 0x9f, 0xc0, 0xb9, 0x80, 0x3e, 0xc1, 0x8a, 0x6f, 0x1c, 0x61, 0xc5, 0x80, 0xe1,
+	0x59, 0xc1, 0x41, 0xb5, 0xe3, 0x23, 0x45, 0x31, 0x69, 0xc8, 0x8b, 0x09, 0x86, 0x64, 0x48, 0xaa,
+	0x25, 0x03, 0x0d, 0x43, 0xa6, 0x04, 0x92, 0xf6, 0xd9, 0xb8, 0xfe, 0x57, 0x39, 0xc8, 0xaf, 0x38,
+	0xc3, 0x91, 0xe9, 0x92, 0x45, 0x34, 0xe3, 0x62, 0x6f, 0x3c, 0xf0, 0xa9, 0x01, 0xab, 0x4b, 0xd7,
+	0xc2, 0x32, 0x38, 0x9a, 0xf8, 0xd7, 0xa0, 0xa8, 0x06, 0x27, 0x21, 0xc4, 0x3c, 0xcb, 0x67, 0x5e,
+	0x83, 0x98, 0xe7, 0x78, 0x4e, 0x22, 0x02, 0x42, 0x56, 0x06, 0x84, 0x06, 0xe4, 0xf9, 0x01, 0x8f,
+	0x05, 0xeb, 0xc7, 0x53, 0x86, 0x18, 0x40, 0x6f, 0xc3, 0x6c, 0x34, 0x15, 0x4e, 0x73, 0x9c, 0x6a,
+	0x37, 0x9c, 0x39, 0xaf, 0x41, 0x39, 0x94, 0xa1, 0x67, 0x38, 0x5e, 0x69, 0xa8, 0xe4, 0xe5, 0xf3,
+	0x22, 0xac, 0x93, 0x63, 0x45, 0xf9, 0xf1, 0x94, 0x08, 0xec, 0x57, 0x45, 0x60, 0x2f, 0xa8, 0x89,
+	0x96, 0xd8, 0x95, 0xc7, 0xf8, 0xeb, 0x6a, 0xd4, 0xfa, 0x26, 0x21, 0x0e, 0x90, 0x64, 0xf8, 0xd2,
+	0x0d, 0xa8, 0x84, 0x4c, 0x46, 0x72, 0x64, 0xfb, 0xe3, 0x67, 0xad, 0x75, 0x96, 0x50, 0x1f, 0xd1,
+	0x1c, 0x6a, 0xd4, 0x34, 0x92, 0xa0, 0xd7, 0xdb, 0xdb, 0xdb, 0xb5, 0x0c, 0x3a, 0x0f, 0xc5, 0x8d,
+	0xcd, 0xce, 0x0e, 0xc3, 0xca, 0x36, 0xf2, 0x7f, 0xcc, 0x22, 0x89, 0xcc, 0xcf, 0x9f, 0x06, 0x3c,
+	0x79, 0x8a, 0x56, 0x32, 0xf3, 0x94, 0x92, 0x99, 0x35, 0x91, 0x99, 0x33, 0x32, 0x33, 0x67, 0x11,
+	0x82, 0xe9, 0xf5, 0x76, 0x6b, 0x9b, 0x26, 0x69, 0xc6, 0x7a, 0x39, 0x9e, 0xad, 0x1f, 0x56, 0xa1,
+	0xcc, 0xdc, 0xb3, 0x33, 0xb6, 0xc9, 0x61, 0xe2, 0xaf, 0x35, 0x00, 0xb9, 0x61, 0x51, 0x13, 0xf2,
+	0x5d, 0xa6, 0x42, 0x5d, 0xa3, 0x11, 0xf0, 0x5c, 0xa2, 0xc7, 0x0d, 0x81, 0x85, 0xee, 0x42, 0xde,
+	0x1b, 0x77, 0xbb, 0xd8, 0x13, 0x99, 0xfb, 0x42, 0x34, 0x08, 0xf3, 0x80, 0x68, 0x08, 0x3c, 0x42,
+	0xf2, 0xd2, 0xb4, 0x06, 0x63, 0x9a, 0xc7, 0x8f, 0x26, 0xe1, 0x78, 0x32, 0xc6, 0xfe, 0x99, 0x06,
+	0x25, 0x65, 0x5b, 0xfc, 0x82, 0x29, 0xe0, 0x32, 0x14, 0xa9, 0x32, 0xb8, 0xc7, 0x93, 0x40, 0xc1,
+	0x90, 0x03, 0xe8, 0x3d, 0x28, 0x8a, 0x9d, 0x24, 0xf2, 0x40, 0x3d, 0x99, 0xed, 0xe6, 0xc8, 0x90,
+	0xa8, 0x52, 0xc9, 0x0e, 0x9c, 0xa1, 0x76, 0xea, 0x92, 0xdb, 0x87, 0xb0, 0xac, 0x7a, 0x2c, 0xd7,
+	0x22, 0xc7, 0xf2, 0x06, 0x14, 0x46, 0x7b, 0x87, 0x9e, 0xd5, 0x35, 0x07, 0x5c, 0x9d, 0xe0, 0x5b,
+	0x72, 0xdd, 0x06, 0xa4, 0x72, 0x3d, 0x89, 0x01, 0x24, 0xd3, 0xf3, 0x50, 0x7a, 0x6c, 0x7a, 0x7b,
+	0x5c, 0x49, 0x39, 0x7e, 0x0f, 0x2a, 0x64, 0xfc, 0xc9, 0xf3, 0xd7, 0x50, 0x5f, 0x50, 0x2d, 0xeb,
+	0xff, 0xac, 0x41, 0x55, 0x90, 0x9d, 0xc8, 0x41, 0x08, 0x72, 0x7b, 0xa6, 0xb7, 0x47, 0x8d, 0x51,
+	0x31, 0xe8, 0x6f, 0xf4, 0x36, 0xd4, 0xba, 0x6c, 0xfe, 0x3b, 0x91, 0x7b, 0xd7, 0x2c, 0x1f, 0x0f,
+	0xf6, 0xfe, 0xbb, 0x50, 0x21, 0x24, 0x3b, 0xe1, 0x7b, 0x90, 0xd8, 0xc6, 0xef, 0x19, 0xe5, 0x3d,
+	0x3a, 0xe7, 0xa8, 0xfa, 0x26, 0x94, 0x99, 0x31, 0x4e, 0x5b, 0x77, 0x69, 0xd7, 0x06, 0xcc, 0x6e,
+	0xdb, 0xe6, 0xc8, 0xdb, 0x73, 0xfc, 0x88, 0xcd, 0x97, 0xf5, 0xbf, 0xd7, 0xa0, 0x26, 0x81, 0x27,
+	0xd2, 0xe1, 0x2d, 0x98, 0x75, 0xf1, 0xd0, 0xb4, 0x6c, 0xcb, 0xee, 0xef, 0xec, 0x1e, 0xfa, 0xd8,
+	0xe3, 0xd7, 0xd7, 0x6a, 0x30, 0xfc, 0x90, 0x8c, 0x12, 0x65, 0x77, 0x07, 0xce, 0x2e, 0x0f, 0xd2,
+	0xf4, 0x37, 0x7a, 0x23, 0x1c, 0xa5, 0x8b, 0xd2, 0x6e, 0x62, 0x5c, 0xea, 0xfc, 0xd3, 0x0c, 0x94,
+	0x3f, 0x31, 0xfd, 0xae, 0x58, 0x41, 0x68, 0x0d, 0xaa, 0x41, 0x18, 0xa7, 0x23, 0x5c, 0xef, 0xc8,
+	0x81, 0x83, 0xd2, 0x88, 0x7b, 0x8d, 0x38, 0x70, 0x54, 0xba, 0xea, 0x00, 0x65, 0x65, 0xda, 0x5d,
+	0x3c, 0x08, 0x58, 0x65, 0xd2, 0x59, 0x51, 0x44, 0x95, 0x95, 0x3a, 0x80, 0xbe, 0x0d, 0xb5, 0x91,
+	0xeb, 0xf4, 0x5d, 0xec, 0x79, 0x01, 0x33, 0x96, 0xc2, 0xf5, 0x04, 0x66, 0x5b, 0x1c, 0x35, 0x72,
+	0x8a, 0xb9, 0xf7, 0x78, 0xca, 0x98, 0x1d, 0x85, 0x61, 0x32, 0xb0, 0xce, 0xca, 0xf3, 0x1e, 0x8b,
+	0xac, 0x3f, 0xca, 0x02, 0x8a, 0x4f, 0xf3, 0xab, 0x1e, 0x93, 0x6f, 0x40, 0xd5, 0xf3, 0x4d, 0x37,
+	0xb6, 0xe6, 0x2b, 0x74, 0x34, 0x58, 0xf1, 0x6f, 0x41, 0xa0, 0xd9, 0x8e, 0xed, 0xf8, 0xd6, 0xcb,
+	0x43, 0x76, 0x41, 0x31, 0xaa, 0x62, 0x78, 0x83, 0x8e, 0xa2, 0x0d, 0xc8, 0xbf, 0xb4, 0x06, 0x3e,
+	0x76, 0xbd, 0xfa, 0xf4, 0x42, 0xf6, 0x66, 0x75, 0xe9, 0x9d, 0xe3, 0x1c, 0xb3, 0xf8, 0x11, 0xc5,
+	0xef, 0x1c, 0x8e, 0xd4, 0xd3, 0x2f, 0x67, 0xa2, 0x1e, 0xe3, 0x67, 0x92, 0x6f, 0x44, 0x3a, 0x14,
+	0x5e, 0x11, 0xa6, 0x3b, 0x56, 0x8f, 0xe6, 0xe2, 0x60, 0x1f, 0xde, 0x33, 0xf2, 0x14, 0xb0, 0xd6,
+	0x43, 0xd7, 0xa0, 0xf0, 0xd2, 0x35, 0xfb, 0x43, 0x6c, 0xfb, 0xec, 0x96, 0x2f, 0x71, 0x02, 0x80,
+	0xbe, 0x08, 0x20, 0x55, 0x21, 0x99, 0x6f, 0x63, 0x73, 0xeb, 0x59, 0xa7, 0x36, 0x85, 0xca, 0x50,
+	0xd8, 0xd8, 0x5c, 0x6d, 0xaf, 0xb7, 0x49, 0x6e, 0x14, 0x39, 0xef, 0xae, 0xdc, 0x74, 0x2d, 0xe1,
+	0x88, 0xd0, 0x9a, 0x50, 0xf5, 0xd2, 0xc2, 0x97, 0x6e, 0xa1, 0x97, 0x60, 0x71, 0x57, 0xbf, 0x0a,
+	0x73, 0x49, 0x4b, 0x43, 0x20, 0xdc, 0xd3, 0xff, 0x2d, 0x03, 0x15, 0xbe, 0x11, 0x4e, 0xb4, 0x73,
+	0x2f, 0x2a, 0x5a, 0xf1, 0xeb, 0x89, 0x30, 0x52, 0x1d, 0xf2, 0x6c, 0x83, 0xf4, 0xf8, 0xfd, 0x57,
+	0x7c, 0x92, 0xe0, 0xcc, 0xd6, 0x3b, 0xee, 0x71, 0xb7, 0x07, 0xdf, 0x89, 0x61, 0x73, 0x3a, 0x35,
+	0x6c, 0x06, 0x1b, 0xce, 0xf4, 0xf8, 0xc1, 0xaa, 0x28, 0x5d, 0x51, 0x16, 0x9b, 0x8a, 0x00, 0x43,
+	0x3e, 0xcb, 0xa7, 0xf8, 0x0c, 0xdd, 0x80, 0x19, 0x3c, 0xc1, 0xb6, 0xef, 0xd5, 0x4b, 0x34, 0x91,
+	0x56, 0xc4, 0x85, 0xaa, 0x4d, 0x46, 0x0d, 0x0e, 0x94, 0xae, 0xfa, 0x10, 0xce, 0xd0, 0xfb, 0xee,
+	0x23, 0xd7, 0xb4, 0xd5, 0x3b, 0x7b, 0xa7, 0xb3, 0xce, 0xd3, 0x0e, 0xf9, 0x89, 0xaa, 0x90, 0x59,
+	0x5b, 0xe5, 0xf6, 0xc9, 0xac, 0xad, 0x4a, 0xfa, 0xdf, 0xd7, 0x00, 0xa9, 0x0c, 0x4e, 0xe4, 0x8b,
+	0x88, 0x14, 0xa1, 0x47, 0x56, 0xea, 0x31, 0x07, 0xd3, 0xd8, 0x75, 0x1d, 0x97, 0x05, 0x4a, 0x83,
+	0x7d, 0x48, 0x6d, 0x6e, 0x73, 0x65, 0x0c, 0x3c, 0x71, 0xf6, 0x83, 0x08, 0xc0, 0xd8, 0x6a, 0x71,
+	0xe5, 0x3b, 0x70, 0x36, 0x84, 0x7e, 0x3a, 0x29, 0x7e, 0x13, 0x66, 0x29, 0xd7, 0x95, 0x3d, 0xdc,
+	0xdd, 0x1f, 0x39, 0x96, 0x1d, 0xd3, 0x00, 0x5d, 0x23, 0xb1, 0x4b, 0xa4, 0x0b, 0x32, 0x45, 0x36,
+	0xe7, 0x72, 0x30, 0xd8, 0xe9, 0xac, 0xcb, 0xa5, 0xbe, 0x0b, 0xe7, 0x23, 0x0c, 0xc5, 0xcc, 0x7e,
+	0x0d, 0x4a, 0xdd, 0x60, 0xd0, 0xe3, 0x27, 0xc8, 0x2b, 0x61, 0x75, 0xa3, 0xa4, 0x2a, 0x85, 0x94,
+	0xf1, 0x6d, 0xb8, 0x10, 0x93, 0x71, 0x1a, 0xe6, 0xb8, 0xa7, 0xdf, 0x81, 0x73, 0x94, 0xf3, 0x13,
+	0x8c, 0x47, 0xad, 0x81, 0x35, 0x39, 0xde, 0x2d, 0x87, 0x7c, 0xbe, 0x0a, 0xc5, 0xd7, 0xbb, 0xac,
+	0xa4, 0xe8, 0x36, 0x17, 0xdd, 0xb1, 0x86, 0xb8, 0xe3, 0xac, 0xa7, 0x6b, 0x4b, 0x12, 0xf9, 0x3e,
+	0x3e, 0xf4, 0xf8, 0xf1, 0x91, 0xfe, 0x96, 0xd1, 0xeb, 0x6f, 0x35, 0x6e, 0x4e, 0x95, 0xcf, 0xd7,
+	0xbc, 0x35, 0xe6, 0x01, 0xfa, 0x64, 0x0f, 0xe2, 0x1e, 0x01, 0xb0, 0xda, 0x9c, 0x32, 0x12, 0x28,
+	0x4c, 0xb2, 0x50, 0x39, 0xaa, 0xf0, 0x15, 0xbe, 0x71, 0xe8, 0x7f, 0xbc, 0xd8, 0x49, 0xe9, 0x4d,
+	0x28, 0x51, 0xc8, 0xb6, 0x6f, 0xfa, 0x63, 0x2f, 0xcd, 0x73, 0xcb, 0xfa, 0x8f, 0x34, 0xbe, 0xa3,
+	0x04, 0x9f, 0x13, 0xcd, 0xf9, 0x2e, 0xcc, 0xd0, 0x1b, 0xa2, 0xb8, 0xe9, 0x5c, 0x4c, 0x58, 0xd8,
+	0x4c, 0x23, 0x83, 0x23, 0x2a, 0xe7, 0x24, 0x0d, 0x66, 0x9e, 0xd2, 0xce, 0x81, 0xa2, 0x6d, 0x4e,
+	0x78, 0xce, 0x36, 0x87, 0xac, 0xfc, 0x58, 0x34, 0xe8, 0x6f, 0x7a, 0x21, 0xc0, 0xd8, 0x7d, 0x66,
+	0xac, 0xb3, 0x1b, 0x48, 0xd1, 0x08, 0xbe, 0x89, 0x61, 0xbb, 0x03, 0x0b, 0xdb, 0x3e, 0x85, 0xe6,
+	0x28, 0x54, 0x19, 0x41, 0x37, 0xa0, 0x68, 0x79, 0xeb, 0xd8, 0x74, 0x6d, 0x5e, 0xe2, 0x57, 0x02,
+	0xb3, 0x84, 0xc8, 0x35, 0xf6, 0x1d, 0xa8, 0x31, 0xcd, 0x5a, 0xbd, 0x9e, 0x72, 0xda, 0x0f, 0xe4,
+	0x6b, 0x11, 0xf9, 0x21, 0xfe, 0x99, 0xe3, 0xf9, 0xff, 0x9d, 0x06, 0x67, 0x14, 0x01, 0x27, 0x72,
+	0xc1, 0xbb, 0x30, 0xc3, 0xfa, 0x2f, 0xfc, 0x28, 0x38, 0x17, 0xa6, 0x62, 0x62, 0x0c, 0x8e, 0x83,
+	0x16, 0x21, 0xcf, 0x7e, 0x89, 0x6b, 0x5c, 0x32, 0xba, 0x40, 0x92, 0x2a, 0x2f, 0xc2, 0x59, 0x0e,
+	0xc3, 0x43, 0x27, 0x69, 0xcf, 0xe5, 0xc2, 0x11, 0xe2, 0x87, 0x1a, 0xcc, 0x85, 0x09, 0x4e, 0x34,
+	0x4b, 0x45, 0xef, 0xcc, 0x57, 0xd2, 0xfb, 0x5b, 0x42, 0xef, 0x67, 0xa3, 0x9e, 0x72, 0xe4, 0x8c,
+	0xae, 0x38, 0xd5, 0xbb, 0x99, 0xb0, 0x77, 0x25, 0xaf, 0x9f, 0x04, 0x73, 0x12, 0xcc, 0x4e, 0x34,
+	0xa7, 0xf7, 0x5f, 0x6b, 0x4e, 0xca, 0x11, 0x2c, 0x36, 0xb9, 0x35, 0xb1, 0x8c, 0xd6, 0x2d, 0x2f,
+	0xc8, 0x38, 0xef, 0x40, 0x79, 0x60, 0xd9, 0xd8, 0x74, 0x79, 0x0f, 0x49, 0x53, 0xd7, 0xe3, 0x7d,
+	0x23, 0x04, 0x94, 0xac, 0x7e, 0x5b, 0x03, 0xa4, 0xf2, 0xfa, 0xe5, 0x78, 0xab, 0x29, 0x0c, 0xbc,
+	0xe5, 0x3a, 0x43, 0xc7, 0x3f, 0x6e, 0x99, 0xdd, 0xd3, 0x7f, 0x57, 0x83, 0x73, 0x11, 0x8a, 0x5f,
+	0x86, 0xe6, 0xf7, 0xf4, 0xcb, 0x70, 0x66, 0x15, 0x8b, 0x33, 0x5e, 0xac, 0x76, 0xb0, 0x0d, 0x48,
+	0x85, 0x9e, 0xce, 0x29, 0xe6, 0x1b, 0x70, 0xe6, 0xa9, 0x33, 0x21, 0x81, 0x9c, 0x80, 0x65, 0x98,
+	0x62, 0xc5, 0xac, 0xc0, 0x5e, 0xc1, 0xb7, 0x0c, 0xbd, 0xdb, 0x80, 0x54, 0xca, 0xd3, 0x50, 0x67,
+	0x59, 0xff, 0x5f, 0x0d, 0xca, 0xad, 0x81, 0xe9, 0x0e, 0x85, 0x2a, 0x1f, 0xc2, 0x0c, 0xab, 0xcc,
+	0xf0, 0x32, 0xeb, 0x9b, 0x61, 0x7e, 0x2a, 0x2e, 0xfb, 0x68, 0xb1, 0x3a, 0x0e, 0xa7, 0x22, 0x53,
+	0xe1, 0x9d, 0xe5, 0xd5, 0x48, 0xa7, 0x79, 0x15, 0xdd, 0x86, 0x69, 0x93, 0x90, 0xd0, 0xf4, 0x5a,
+	0x8d, 0x96, 0xcb, 0x28, 0x37, 0x72, 0x25, 0x32, 0x18, 0x96, 0xfe, 0x01, 0x94, 0x14, 0x09, 0x28,
+	0x0f, 0xd9, 0x47, 0x6d, 0x7e, 0x4d, 0x6a, 0xad, 0x74, 0xd6, 0x9e, 0xb3, 0x12, 0x62, 0x15, 0x60,
+	0xb5, 0x1d, 0x7c, 0x67, 0x12, 0x1a, 0x7b, 0x26, 0xe7, 0xc3, 0xf3, 0x96, 0xaa, 0xa1, 0x96, 0xa6,
+	0x61, 0xe6, 0x75, 0x34, 0x94, 0x22, 0x7e, 0x4b, 0x83, 0x0a, 0x37, 0xcd, 0x49, 0x53, 0x33, 0xe5,
+	0x9c, 0x92, 0x9a, 0x95, 0x69, 0x18, 0x1c, 0x51, 0xea, 0xf0, 0x2f, 0x1a, 0xd4, 0x56, 0x9d, 0x57,
+	0x76, 0xdf, 0x35, 0x7b, 0xc1, 0x1e, 0xfc, 0x28, 0xe2, 0xce, 0xc5, 0x48, 0xa5, 0x3f, 0x82, 0x2f,
+	0x07, 0x22, 0x6e, 0xad, 0xcb, 0x5a, 0x0a, 0xcb, 0xef, 0xe2, 0x53, 0xff, 0x26, 0xcc, 0x46, 0x88,
+	0x88, 0x83, 0x9e, 0xb7, 0xd6, 0xd7, 0x56, 0x89, 0x43, 0x68, 0xbd, 0xb7, 0xbd, 0xd1, 0x7a, 0xb8,
+	0xde, 0xe6, 0x5d, 0xd9, 0xd6, 0xc6, 0x4a, 0x7b, 0x5d, 0x3a, 0xea, 0xbe, 0x98, 0xc1, 0x7d, 0x7d,
+	0x00, 0x67, 0x14, 0x85, 0x4e, 0xda, 0x1c, 0x4b, 0xd6, 0x57, 0x4a, 0xfb, 0x06, 0x5c, 0x0a, 0xa4,
+	0x3d, 0x67, 0xc0, 0x0e, 0xf6, 0xd4, 0xcb, 0xda, 0x84, 0x0b, 0x2d, 0x1a, 0xe4, 0xa7, 0xa0, 0x7c,
+	0x4f, 0xaf, 0x43, 0x85, 0x9f, 0x8f, 0xa2, 0x21, 0xe3, 0xcf, 0x73, 0x50, 0x15, 0xa0, 0xaf, 0x47,
+	0x7f, 0x74, 0x1e, 0x66, 0x7a, 0xbb, 0xdb, 0xd6, 0x67, 0xa2, 0xa3, 0xcb, 0xbf, 0xc8, 0xf8, 0x80,
+	0xc9, 0x61, 0xef, 0x34, 0xf8, 0x17, 0xba, 0xcc, 0x9e, 0x70, 0xac, 0xd9, 0x3d, 0x7c, 0x40, 0x8f,
+	0x51, 0x39, 0x43, 0x0e, 0xd0, 0x72, 0x28, 0x7f, 0xcf, 0x41, 0x6f, 0xc9, 0xca, 0xfb, 0x0e, 0xb4,
+	0x0c, 0x35, 0xf2, 0xbb, 0x35, 0x1a, 0x0d, 0x2c, 0xdc, 0x63, 0x0c, 0xc8, 0x05, 0x39, 0x27, 0xcf,
+	0x49, 0x31, 0x04, 0x74, 0x15, 0x66, 0xe8, 0xe5, 0xd1, 0xab, 0x17, 0x48, 0x46, 0x96, 0xa8, 0x7c,
+	0x18, 0xbd, 0x0d, 0x25, 0xa6, 0xf1, 0x9a, 0xfd, 0xcc, 0xc3, 0xf4, 0xb5, 0x83, 0x52, 0x49, 0x51,
+	0x61, 0xe1, 0x13, 0x1a, 0xa4, 0x9d, 0xd0, 0x50, 0x13, 0xaa, 0x9e, 0xef, 0xb8, 0x66, 0x5f, 0xb8,
+	0x91, 0x3e, 0x75, 0x50, 0xca, 0x7d, 0x11, 0xb0, 0x54, 0xe1, 0xe3, 0xb1, 0xe3, 0x9b, 0xe1, 0x27,
+	0x0e, 0xef, 0x19, 0x2a, 0x0c, 0x7d, 0x0b, 0x2a, 0x3d, 0xb1, 0x48, 0xd6, 0xec, 0x97, 0x0e, 0x7d,
+	0xd6, 0x10, 0xeb, 0xde, 0xad, 0xaa, 0x28, 0x92, 0x53, 0x98, 0x54, 0xbd, 0xc9, 0x56, 0x42, 0x14,
+	0xc4, 0xdb, 0xd8, 0x26, 0xa9, 0x9d, 0x55, 0x70, 0x0a, 0x86, 0xf8, 0x44, 0xd7, 0xa1, 0xc2, 0x32,
+	0xc1, 0xf3, 0xd0, 0x6a, 0x08, 0x0f, 0x92, 0x3c, 0xd6, 0x1a, 0xfb, 0x7b, 0x6d, 0x4a, 0x14, 0x5b,
+	0x94, 0x57, 0x00, 0x11, 0xe8, 0xaa, 0xe5, 0x25, 0x82, 0x39, 0x71, 0xe2, 0x8a, 0xbe, 0xaf, 0x6f,
+	0xc0, 0x59, 0x02, 0xc5, 0xb6, 0x6f, 0x75, 0x95, 0xa3, 0x98, 0x38, 0xec, 0x6b, 0x91, 0xc3, 0xbe,
+	0xe9, 0x79, 0xaf, 0x1c, 0xb7, 0xc7, 0xd5, 0x0c, 0xbe, 0xa5, 0xb4, 0x7f, 0xd4, 0x98, 0x36, 0xcf,
+	0xbc, 0xd0, 0x41, 0xfd, 0x2b, 0xf2, 0x43, 0xbf, 0x02, 0x79, 0xfe, 0x40, 0x8a, 0xd7, 0x3f, 0xcf,
+	0x2f, 0xb2, 0x87, 0x59, 0x8b, 0x9c, 0xf1, 0x26, 0x83, 0x2a, 0x35, 0x3a, 0x8e, 0x4f, 0x96, 0xcb,
+	0x9e, 0xe9, 0xed, 0xe1, 0xde, 0x96, 0x60, 0x1e, 0xaa, 0x0e, 0xdf, 0x37, 0x22, 0x60, 0xa9, 0xfb,
+	0x5d, 0xa9, 0xfa, 0x23, 0xec, 0x1f, 0xa1, 0xba, 0xda, 0x7f, 0x38, 0x27, 0x48, 0x78, 0xdb, 0xf4,
+	0x75, 0xa8, 0x7e, 0xac, 0xc1, 0x15, 0x41, 0xb6, 0xb2, 0x67, 0xda, 0x7d, 0x2c, 0x94, 0xf9, 0x45,
+	0xed, 0x15, 0x9f, 0x74, 0xf6, 0x35, 0x27, 0xfd, 0x04, 0xea, 0xc1, 0xa4, 0x69, 0x2d, 0xca, 0x19,
+	0xa8, 0x93, 0x18, 0x7b, 0x41, 0x90, 0xa4, 0xbf, 0xc9, 0x98, 0xeb, 0x0c, 0x82, 0x6b, 0x20, 0xf9,
+	0x2d, 0x99, 0xad, 0xc3, 0x45, 0xc1, 0x8c, 0x17, 0x87, 0xc2, 0xdc, 0x62, 0x73, 0x3a, 0x92, 0x1b,
+	0xf7, 0x07, 0xe1, 0x71, 0xf4, 0x52, 0x4a, 0x24, 0x09, 0xbb, 0x90, 0x4a, 0xd1, 0x92, 0xa4, 0xcc,
+	0xb3, 0x1d, 0x40, 0x74, 0x56, 0x4e, 0xec, 0x31, 0x38, 0x61, 0x99, 0x08, 0xe7, 0x4b, 0x80, 0xc0,
+	0x63, 0x4b, 0x20, 0x5d, 0x2a, 0x86, 0xf9, 0x40, 0x51, 0x62, 0xf6, 0x2d, 0xec, 0x0e, 0x2d, 0xcf,
+	0x53, 0x1a, 0x71, 0x49, 0xe6, 0x7a, 0x13, 0x72, 0x23, 0xcc, 0x8f, 0x2f, 0xa5, 0x25, 0x24, 0xf6,
+	0x84, 0x42, 0x4c, 0xe1, 0x52, 0xcc, 0x10, 0xae, 0x0a, 0x31, 0xcc, 0x21, 0x89, 0x72, 0xa2, 0x6a,
+	0x8a, 0xe2, 0x7f, 0x26, 0xa5, 0xf8, 0x9f, 0x0d, 0x17, 0xff, 0x43, 0x47, 0x6a, 0x35, 0x50, 0x9d,
+	0xce, 0x91, 0xba, 0xc3, 0x1c, 0x10, 0xc4, 0xb7, 0xd3, 0xe1, 0xfa, 0x07, 0x3c, 0x50, 0x9d, 0x56,
+	0x3a, 0x17, 0x01, 0x3e, 0x13, 0x0e, 0xf0, 0x3a, 0x94, 0x89, 0x93, 0x0c, 0xb5, 0x2b, 0x92, 0x33,
+	0x42, 0x63, 0x32, 0x18, 0xef, 0xc3, 0x5c, 0x38, 0x18, 0x9f, 0x48, 0xa9, 0x39, 0x98, 0xf6, 0x9d,
+	0x7d, 0x2c, 0x72, 0x0a, 0xfb, 0x88, 0x99, 0x35, 0x08, 0xd4, 0xa7, 0x63, 0xd6, 0xef, 0x4a, 0xae,
+	0x74, 0x03, 0x9e, 0x74, 0x06, 0x64, 0x39, 0x8a, 0xdb, 0x3f, 0xfb, 0x90, 0xb2, 0x3e, 0x81, 0xf3,
+	0xd1, 0xe0, 0x7b, 0x3a, 0x93, 0xd8, 0x61, 0x9b, 0x33, 0x29, 0x3c, 0x9f, 0x8e, 0x80, 0x17, 0x32,
+	0x4e, 0x2a, 0x41, 0xf7, 0x74, 0x78, 0xff, 0x3a, 0x34, 0x92, 0x62, 0xf0, 0xa9, 0xee, 0xc5, 0x20,
+	0x24, 0x9f, 0x0e, 0xd7, 0x1f, 0x6a, 0x92, 0xad, 0xba, 0x6a, 0x3e, 0xf8, 0x2a, 0x6c, 0x45, 0xae,
+	0xbb, 0x13, 0x2c, 0x9f, 0x66, 0x10, 0x2d, 0xb3, 0xc9, 0xd1, 0x52, 0x92, 0x50, 0x44, 0xb1, 0xff,
+	0x64, 0xa8, 0xff, 0x3a, 0x57, 0x2f, 0x17, 0x26, 0xf3, 0xce, 0x49, 0x85, 0x91, 0xf4, 0x1c, 0x08,
+	0xa3, 0x1f, 0xb1, 0xad, 0xa2, 0x26, 0xa9, 0xd3, 0x71, 0xdd, 0x6f, 0xc8, 0x04, 0x13, 0xcb, 0x63,
+	0xa7, 0x23, 0xc1, 0x84, 0x85, 0xf4, 0x14, 0x76, 0x2a, 0x22, 0x6e, 0xb5, 0xa0, 0x18, 0xdc, 0xfd,
+	0x95, 0x97, 0xca, 0x25, 0xc8, 0x6f, 0x6c, 0x6e, 0x6f, 0xb5, 0x56, 0xc8, 0xd5, 0x76, 0x0e, 0xf2,
+	0x2b, 0x9b, 0x86, 0xf1, 0x6c, 0xab, 0x43, 0xee, 0xb6, 0xd1, 0x87, 0x4b, 0x4b, 0x3f, 0xcb, 0x42,
+	0xe6, 0xc9, 0x73, 0xf4, 0x29, 0x4c, 0xb3, 0x87, 0x73, 0x47, 0xbc, 0x9f, 0x6c, 0x1c, 0xf5, 0x36,
+	0x50, 0xbf, 0xf0, 0x83, 0xff, 0xfe, 0xd9, 0x1f, 0x66, 0xce, 0xe8, 0xe5, 0xe6, 0x64, 0xb9, 0xb9,
+	0x3f, 0x69, 0xd2, 0x24, 0xfb, 0x40, 0xbb, 0x85, 0x3e, 0x86, 0xec, 0xd6, 0xd8, 0x47, 0xa9, 0xef,
+	0x2a, 0x1b, 0xe9, 0xcf, 0x05, 0xf5, 0x73, 0x94, 0xe9, 0xac, 0x0e, 0x9c, 0xe9, 0x68, 0xec, 0x13,
+	0x96, 0xdf, 0x83, 0x92, 0xfa, 0xd8, 0xef, 0xd8, 0xc7, 0x96, 0x8d, 0xe3, 0x1f, 0x12, 0xea, 0x57,
+	0xa8, 0xa8, 0x0b, 0x3a, 0xe2, 0xa2, 0xd8, 0x73, 0x44, 0x75, 0x16, 0x9d, 0x03, 0x1b, 0xa5, 0x3e,
+	0xc5, 0x6c, 0xa4, 0xbf, 0x2d, 0x8c, 0xcd, 0xc2, 0x3f, 0xb0, 0x09, 0xcb, 0xef, 0xf2, 0x47, 0x84,
+	0x5d, 0x1f, 0x5d, 0x4d, 0x78, 0x05, 0xa6, 0xbe, 0x6e, 0x6a, 0x2c, 0xa4, 0x23, 0x70, 0x21, 0x97,
+	0xa9, 0x90, 0xf3, 0xfa, 0x19, 0x2e, 0xa4, 0x1b, 0xa0, 0x3c, 0xd0, 0x6e, 0x2d, 0x75, 0x61, 0x9a,
+	0x76, 0xcf, 0xd1, 0x0b, 0xf1, 0xa3, 0x91, 0xf0, 0x2e, 0x21, 0xc5, 0xd1, 0xa1, 0xbe, 0xbb, 0x3e,
+	0x47, 0x05, 0x55, 0xf5, 0x22, 0x11, 0x44, 0x7b, 0xe7, 0x0f, 0xb4, 0x5b, 0x37, 0xb5, 0x3b, 0xda,
+	0xd2, 0xdf, 0x4c, 0xc3, 0x34, 0xed, 0xd2, 0xa0, 0x7d, 0x00, 0xd9, 0x25, 0x8e, 0xce, 0x2e, 0xd6,
+	0x80, 0x8e, 0xce, 0x2e, 0xde, 0x60, 0xd6, 0x1b, 0x54, 0xe8, 0x9c, 0x3e, 0x4b, 0x84, 0xd2, 0xe6,
+	0x4f, 0x93, 0xf6, 0xba, 0x88, 0x1d, 0x7f, 0xac, 0xf1, 0x76, 0x15, 0xdb, 0x66, 0x28, 0x89, 0x5b,
+	0xa8, 0x43, 0x1c, 0x5d, 0x0e, 0x09, 0x4d, 0x61, 0xfd, 0x3e, 0x15, 0xd8, 0xd4, 0x6b, 0x52, 0xa0,
+	0x4b, 0x31, 0x1e, 0x68, 0xb7, 0x5e, 0xd4, 0xf5, 0xb3, 0xdc, 0xca, 0x11, 0x08, 0xfa, 0x3e, 0x54,
+	0xc3, 0xbd, 0x4c, 0x74, 0x2d, 0x41, 0x56, 0xb4, 0x37, 0xda, 0xb8, 0x7e, 0x34, 0x12, 0xd7, 0x69,
+	0x9e, 0xea, 0xc4, 0x85, 0x33, 0xc9, 0xfb, 0x18, 0x8f, 0x4c, 0x82, 0xc4, 0x7d, 0x80, 0xfe, 0x54,
+	0xe3, 0xed, 0x68, 0xd9, 0x8a, 0x44, 0x49, 0xdc, 0x63, 0x1d, 0xcf, 0xc6, 0x8d, 0x63, 0xb0, 0xb8,
+	0x12, 0x1f, 0x50, 0x25, 0xde, 0xd7, 0xe7, 0xa4, 0x12, 0xbe, 0x35, 0xc4, 0xbe, 0xc3, 0xb5, 0x78,
+	0x71, 0x59, 0xbf, 0x10, 0x32, 0x4e, 0x08, 0x2a, 0x9d, 0xc5, 0x5a, 0x86, 0x89, 0xce, 0x0a, 0x75,
+	0x25, 0x13, 0x9d, 0x15, 0xee, 0x37, 0x26, 0x39, 0x8b, 0x37, 0x08, 0x13, 0x9c, 0x15, 0x40, 0x96,
+	0xfe, 0x3f, 0x07, 0xf9, 0x15, 0xf6, 0x3f, 0x23, 0x21, 0x07, 0x8a, 0x41, 0x13, 0x0d, 0xcd, 0x27,
+	0xd5, 0xe9, 0xe5, 0x55, 0xae, 0x71, 0x35, 0x15, 0xce, 0x15, 0x7a, 0x83, 0x2a, 0x74, 0x49, 0x3f,
+	0x4f, 0x24, 0xf3, 0xff, 0xdf, 0xa9, 0xc9, 0xaa, 0xb9, 0x4d, 0xb3, 0xd7, 0x23, 0x86, 0xf8, 0x4d,
+	0x28, 0xab, 0x2d, 0x2d, 0xf4, 0x46, 0x62, 0x6f, 0x40, 0xed, 0x8f, 0x35, 0xf4, 0xa3, 0x50, 0xb8,
+	0xe4, 0xeb, 0x54, 0xf2, 0xbc, 0x7e, 0x31, 0x41, 0xb2, 0x4b, 0x51, 0x43, 0xc2, 0x59, 0xef, 0x29,
+	0x59, 0x78, 0xa8, 0xc9, 0x95, 0x2c, 0x3c, 0xdc, 0xba, 0x3a, 0x52, 0xf8, 0x98, 0xa2, 0x12, 0xe1,
+	0x1e, 0x80, 0x6c, 0x0e, 0xa1, 0x44, 0x5b, 0x2a, 0x17, 0xd6, 0x68, 0x70, 0x88, 0xf7, 0x95, 0x74,
+	0x9d, 0x8a, 0xe5, 0xeb, 0x2e, 0x22, 0x76, 0x60, 0x79, 0x3e, 0xdb, 0x98, 0x95, 0x50, 0x6b, 0x07,
+	0x25, 0xce, 0x27, 0xdc, 0x29, 0x6a, 0x5c, 0x3b, 0x12, 0x87, 0x4b, 0xbf, 0x41, 0xa5, 0x5f, 0xd5,
+	0x1b, 0x09, 0xd2, 0x47, 0x0c, 0x97, 0x2c, 0xb6, 0xcf, 0xf3, 0x50, 0x7a, 0x6a, 0x5a, 0xb6, 0x8f,
+	0x6d, 0xd3, 0xee, 0x62, 0xb4, 0x0b, 0xd3, 0x34, 0x77, 0x47, 0x03, 0xb1, 0xda, 0xc9, 0x88, 0x06,
+	0xe2, 0x50, 0x29, 0x5f, 0x5f, 0xa0, 0x82, 0x1b, 0xfa, 0x39, 0x22, 0x78, 0x28, 0x59, 0x37, 0x59,
+	0x13, 0x40, 0xbb, 0x85, 0x5e, 0xc2, 0x0c, 0x6f, 0xe1, 0x47, 0x18, 0x85, 0x8a, 0x6a, 0x8d, 0xcb,
+	0xc9, 0xc0, 0xa4, 0xb5, 0xac, 0x8a, 0xf1, 0x28, 0x1e, 0x91, 0x33, 0x01, 0x90, 0x1d, 0xa9, 0xa8,
+	0x47, 0x63, 0x9d, 0xac, 0xc6, 0x42, 0x3a, 0x42, 0x92, 0x4d, 0x55, 0x99, 0xbd, 0x00, 0x97, 0xc8,
+	0xfd, 0x0e, 0xe4, 0x1e, 0x9b, 0xde, 0x1e, 0x8a, 0xe4, 0x5e, 0xe5, 0xc5, 0x6d, 0xa3, 0x91, 0x04,
+	0xe2, 0x52, 0xae, 0x52, 0x29, 0x17, 0x59, 0x28, 0x53, 0xa5, 0xd0, 0x37, 0xa5, 0xcc, 0x7e, 0xec,
+	0xb9, 0x6d, 0xd4, 0x7e, 0xa1, 0xb7, 0xbb, 0x51, 0xfb, 0x85, 0x5f, 0xe8, 0xa6, 0xdb, 0x8f, 0x48,
+	0xd9, 0x9f, 0x10, 0x39, 0x23, 0x28, 0x88, 0x87, 0xa9, 0x28, 0xf2, 0x9c, 0x27, 0xf2, 0x9a, 0xb5,
+	0x31, 0x9f, 0x06, 0xe6, 0xd2, 0xae, 0x51, 0x69, 0x57, 0xf4, 0x7a, 0xcc, 0x5b, 0x1c, 0xf3, 0x81,
+	0x76, 0xeb, 0x8e, 0x86, 0xbe, 0x0f, 0x20, 0x9b, 0x76, 0xb1, 0x3d, 0x18, 0x6d, 0x04, 0xc6, 0xf6,
+	0x60, 0xac, 0xdf, 0xa7, 0x2f, 0x52, 0xb9, 0x37, 0xf5, 0x6b, 0x51, 0xb9, 0xbe, 0x6b, 0xda, 0xde,
+	0x4b, 0xec, 0xde, 0x66, 0x75, 0x7f, 0x6f, 0xcf, 0x1a, 0x91, 0x29, 0xbb, 0x50, 0x0c, 0x6a, 0xcd,
+	0xd1, 0x78, 0x1b, 0xed, 0xfe, 0x44, 0xe3, 0x6d, 0xac, 0x19, 0x13, 0x0e, 0x3c, 0xa1, 0xf5, 0x22,
+	0x50, 0xc9, 0x16, 0xfc, 0xcb, 0x1a, 0xe4, 0xc8, 0x91, 0x9c, 0x1c, 0x4f, 0x64, 0xb9, 0x27, 0x3a,
+	0xfb, 0x58, 0xc5, 0x3a, 0x3a, 0xfb, 0x78, 0xa5, 0x28, 0x7c, 0x3c, 0x21, 0xd7, 0xb5, 0x26, 0xab,
+	0xa3, 0x90, 0x99, 0x3a, 0x50, 0x52, 0xca, 0x40, 0x28, 0x81, 0x59, 0xb8, 0x02, 0x1e, 0x4d, 0x78,
+	0x09, 0x35, 0x24, 0xfd, 0x12, 0x95, 0x77, 0x8e, 0x25, 0x3c, 0x2a, 0xaf, 0xc7, 0x30, 0x88, 0x40,
+	0x3e, 0x3b, 0xbe, 0xf3, 0x13, 0x66, 0x17, 0xde, 0xfd, 0x0b, 0xe9, 0x08, 0xa9, 0xb3, 0x93, 0x5b,
+	0xff, 0x15, 0x94, 0xd5, 0xd2, 0x0f, 0x4a, 0x50, 0x3e, 0x52, 0xa3, 0x8f, 0x66, 0x92, 0xa4, 0xca,
+	0x51, 0x38, 0xb6, 0x51, 0x91, 0xa6, 0x82, 0x46, 0x04, 0x0f, 0x20, 0xcf, 0x4b, 0x40, 0x49, 0x26,
+	0x0d, 0x97, 0xf1, 0x93, 0x4c, 0x1a, 0xa9, 0x1f, 0x85, 0xcf, 0xcf, 0x54, 0x22, 0xb9, 0x8a, 0x8a,
+	0x6c, 0xcd, 0xa5, 0x3d, 0xc2, 0x7e, 0x9a, 0x34, 0x59, 0xb6, 0x4d, 0x93, 0xa6, 0x54, 0x08, 0xd2,
+	0xa4, 0xf5, 0xb1, 0xcf, 0xe3, 0x81, 0xb8, 0x5e, 0xa3, 0x14, 0x66, 0x6a, 0x86, 0xd4, 0x8f, 0x42,
+	0x49, 0xba, 0xde, 0x48, 0x81, 0x22, 0x3d, 0x1e, 0x00, 0xc8, 0x72, 0x54, 0xf4, 0xcc, 0x9a, 0xd8,
+	0x29, 0x88, 0x9e, 0x59, 0x93, 0x2b, 0x5a, 0xe1, 0x18, 0x2b, 0xe5, 0xb2, 0xdb, 0x15, 0x91, 0xfc,
+	0x85, 0x06, 0x28, 0x5e, 0xb0, 0x42, 0xef, 0x24, 0x73, 0x4f, 0xec, 0x3a, 0x34, 0xde, 0x7d, 0x3d,
+	0xe4, 0xa4, 0x80, 0x2c, 0x55, 0xea, 0x52, 0xec, 0xd1, 0x2b, 0xa2, 0xd4, 0xe7, 0x1a, 0x54, 0x42,
+	0x45, 0x2e, 0xf4, 0x66, 0x8a, 0x4f, 0x23, 0xad, 0x87, 0xc6, 0x5b, 0xc7, 0xe2, 0x25, 0x1d, 0xe6,
+	0x95, 0x15, 0x20, 0x6e, 0x35, 0xbf, 0xa3, 0x41, 0x35, 0x5c, 0x0b, 0x43, 0x29, 0xbc, 0x63, 0x1d,
+	0x8b, 0xc6, 0xcd, 0xe3, 0x11, 0x8f, 0x76, 0x8f, 0xbc, 0xd0, 0x0c, 0x20, 0xcf, 0x8b, 0x66, 0x49,
+	0x0b, 0x3f, 0xdc, 0xe2, 0x48, 0x5a, 0xf8, 0x91, 0x8a, 0x5b, 0xc2, 0xc2, 0x77, 0x9d, 0x01, 0x56,
+	0xb6, 0x19, 0xaf, 0xa5, 0xa5, 0x49, 0x3b, 0x7a, 0x9b, 0x45, 0x0a, 0x71, 0x69, 0xd2, 0xe4, 0x36,
+	0x13, 0x25, 0x33, 0x94, 0xc2, 0xec, 0x98, 0x6d, 0x16, 0xad, 0xb8, 0x25, 0x6c, 0x33, 0x2a, 0x50,
+	0xd9, 0x66, 0xb2, 0x94, 0x95, 0xb4, 0xcd, 0x62, 0xdd, 0x98, 0xa4, 0x6d, 0x16, 0xaf, 0x86, 0x25,
+	0xf8, 0x91, 0xca, 0x0d, 0x6d, 0xb3, 0xb3, 0x09, 0xc5, 0x2e, 0xf4, 0x6e, 0x8a, 0x11, 0x13, 0x7b,
+	0x3b, 0x8d, 0xdb, 0xaf, 0x89, 0x9d, 0xba, 0xc6, 0x99, 0xf9, 0xc5, 0x1a, 0xff, 0x23, 0x0d, 0xe6,
+	0x92, 0xea, 0x63, 0x28, 0x45, 0x4e, 0x4a, 0x2b, 0xa8, 0xb1, 0xf8, 0xba, 0xe8, 0x47, 0x5b, 0x2b,
+	0x58, 0xf5, 0x0f, 0xfb, 0x5f, 0xb4, 0x9a, 0x2f, 0xae, 0xc2, 0x15, 0x98, 0x69, 0x8d, 0xac, 0x27,
+	0xf8, 0x10, 0x9d, 0x2d, 0x64, 0x1a, 0x15, 0xc2, 0xd7, 0x71, 0xad, 0xcf, 0xe8, 0x5f, 0xbd, 0x58,
+	0xc8, 0xec, 0x96, 0x01, 0x02, 0x84, 0xa9, 0x7f, 0xff, 0x72, 0x5e, 0xfb, 0xaf, 0x2f, 0xe7, 0xb5,
+	0xff, 0xf9, 0x72, 0x5e, 0xfb, 0xe9, 0xff, 0xcd, 0x4f, 0xbd, 0xb8, 0xd6, 0x77, 0xa8, 0x5a, 0x8b,
+	0x96, 0xd3, 0x94, 0x7f, 0x89, 0x63, 0xb9, 0xa9, 0xaa, 0xba, 0x3b, 0x43, 0xff, 0x74, 0xc6, 0xf2,
+	0xcf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x82, 0x9b, 0xab, 0xde, 0x11, 0x44, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// KVClient is the client API for KV service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type KVClient interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
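+
+// Usage sketch (illustrative only; conn, ctx, and the key are assumptions):
+// with a dialed *grpc.ClientConn named conn, a typical unary call looks like:
+//
+//	kv := NewKVClient(conn)
+//	resp, err := kv.Range(ctx, &RangeRequest{Key: []byte("foo")})
+//	if err != nil {
+//		// handle the RPC error
+//	}
+//	for _, pair := range resp.Kvs {
+//		_ = pair // each entry is a key-value pair returned from the store
+//	}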
+
+type kVClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+	return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+	out := new(RangeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+	out := new(PutResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+	out := new(DeleteRangeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+	out := new(TxnResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+	out := new(CompactionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KVServer is the server API for KV service.
+type KVServer interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(context.Context, *RangeRequest) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(context.Context, *PutRequest) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+// UnimplementedKVServer can be embedded to have forward compatible implementations.
+type UnimplementedKVServer struct {
+}
+
+func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
+}
+func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
+}
+func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
+}
+func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
+}
+func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
+}
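+
+// Embedding sketch (illustrative only; rangeOnlyServer is an assumed name):
+// a partial implementation stays forward compatible by embedding
+// UnimplementedKVServer and overriding only the methods it actually serves:
+//
+//	type rangeOnlyServer struct {
+//		UnimplementedKVServer
+//	}
+//
+//	func (s *rangeOnlyServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+//		return &RangeResponse{}, nil // real lookup logic would go here
+//	}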
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+	s.RegisterService(&_KV_serviceDesc, srv)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Range(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Range",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PutRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Put(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Put",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Put(ctx, req.(*PutRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteRangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).DeleteRange(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/DeleteRange",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TxnRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Txn(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Txn",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CompactionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Compact(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Compact",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _KV_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.KV",
+	HandlerType: (*KVServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Range",
+			Handler:    _KV_Range_Handler,
+		},
+		{
+			MethodName: "Put",
+			Handler:    _KV_Put_Handler,
+		},
+		{
+			MethodName: "DeleteRange",
+			Handler:    _KV_DeleteRange_Handler,
+		},
+		{
+			MethodName: "Txn",
+			Handler:    _KV_Txn_Handler,
+		},
+		{
+			MethodName: "Compact",
+			Handler:    _KV_Compact_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
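+
+// Illustrative usage sketch (not part of the generated code): assuming a
+// *grpc.ClientConn `conn` already dialed to an etcd member and a context
+// `ctx`, a minimal unary Range call through the generated KV client could
+// look like:
+//
+//	kv := etcdserverpb.NewKVClient(conn)
+//	resp, err := kv.Range(ctx, &etcdserverpb.RangeRequest{Key: []byte("foo")})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, pair := range resp.Kvs {
+//		fmt.Printf("%s=%s\n", pair.Key, pair.Value)
+//	}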
+
+// WatchClient is the client API for Watch service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type WatchClient interface {
+	// Watch watches for events that are happening or have happened. Both input and output
+	// are streams; the input stream is for creating and canceling watchers and the output
+	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+	// for several watches at once. The entire event history can be watched starting from the
+	// last compaction revision.
+	Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
+}
+
+type watchClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewWatchClient(cc *grpc.ClientConn) WatchClient {
+	return &watchClient{cc}
+}
+
+func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &watchWatchClient{stream}
+	return x, nil
+}
+
+type Watch_WatchClient interface {
+	Send(*WatchRequest) error
+	Recv() (*WatchResponse, error)
+	grpc.ClientStream
+}
+
+type watchWatchClient struct {
+	grpc.ClientStream
+}
+
+func (x *watchWatchClient) Send(m *WatchRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *watchWatchClient) Recv() (*WatchResponse, error) {
+	m := new(WatchResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// WatchServer is the server API for Watch service.
+type WatchServer interface {
+	// Watch watches for events that are happening or have happened. Both input and output
+	// are streams; the input stream is for creating and canceling watchers and the output
+	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+	// for several watches at once. The entire event history can be watched starting from the
+	// last compaction revision.
+	Watch(Watch_WatchServer) error
+}
+
+// UnimplementedWatchServer can be embedded to have forward compatible implementations.
+type UnimplementedWatchServer struct {
+}
+
+func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+	s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(WatchServer).Watch(&watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+	Send(*WatchResponse) error
+	Recv() (*WatchRequest, error)
+	grpc.ServerStream
+}
+
+type watchWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *watchWatchServer) Recv() (*WatchRequest, error) {
+	m := new(WatchRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Watch",
+	HandlerType: (*WatchServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Watch_Watch_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
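+
+// Illustrative usage sketch (not part of the generated code): Watch is a
+// bidirectional stream, so the client first sends a WatchCreateRequest and
+// then receives event batches until it cancels or the stream ends. Assuming
+// a dialed *grpc.ClientConn `conn` and a context `ctx`:
+//
+//	stream, err := etcdserverpb.NewWatchClient(conn).Watch(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	create := &etcdserverpb.WatchRequest{RequestUnion: &etcdserverpb.WatchRequest_CreateRequest{
+//		CreateRequest: &etcdserverpb.WatchCreateRequest{Key: []byte("foo")},
+//	}}
+//	if err := stream.Send(create); err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		resp, err := stream.Recv() // blocks until the server sends events
+//		if err != nil {
+//			break
+//		}
+//		_ = resp.Events
+//	}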
+
+// LeaseClient is the client API for Lease service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LeaseClient interface {
+	// LeaseGrant creates a lease that expires if the server does not receive a keepAlive
+	// within a given time-to-live (TTL) period. All keys attached to the lease will expire and be
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+	return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+	out := new(LeaseGrantResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+	out := new(LeaseRevokeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &leaseLeaseKeepAliveClient{stream}
+	return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+	Send(*LeaseKeepAliveRequest) error
+	Recv() (*LeaseKeepAliveResponse, error)
+	grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+	grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+	m := new(LeaseKeepAliveResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+	out := new(LeaseTimeToLiveResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+	out := new(LeaseLeasesResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// LeaseServer is the server API for Lease service.
+type LeaseServer interface {
+	// LeaseGrant creates a lease that expires if the server does not receive a keepAlive
+	// within a given time-to-live (TTL) period. All keys attached to the lease will expire and be
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
+type UnimplementedLeaseServer struct {
+}
+
+func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
+	return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+	s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseGrantRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseGrant(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseRevokeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseRevoke(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+	Send(*LeaseKeepAliveResponse) error
+	Recv() (*LeaseKeepAliveRequest, error)
+	grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+	grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+	m := new(LeaseKeepAliveRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseTimeToLiveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseLeasesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseLeases(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Lease",
+	HandlerType: (*LeaseServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "LeaseGrant",
+			Handler:    _Lease_LeaseGrant_Handler,
+		},
+		{
+			MethodName: "LeaseRevoke",
+			Handler:    _Lease_LeaseRevoke_Handler,
+		},
+		{
+			MethodName: "LeaseTimeToLive",
+			Handler:    _Lease_LeaseTimeToLive_Handler,
+		},
+		{
+			MethodName: "LeaseLeases",
+			Handler:    _Lease_LeaseLeases_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "LeaseKeepAlive",
+			Handler:       _Lease_LeaseKeepAlive_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
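+
+// Illustrative usage sketch (not part of the generated code): a lease is
+// granted with a TTL in seconds and refreshed over the bidirectional
+// LeaseKeepAlive stream. Assuming a dialed *grpc.ClientConn `conn` and a
+// context `ctx`:
+//
+//	lc := etcdserverpb.NewLeaseClient(conn)
+//	grant, err := lc.LeaseGrant(ctx, &etcdserverpb.LeaseGrantRequest{TTL: 10})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	ka, err := lc.LeaseKeepAlive(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// One refresh round trip; real code would repeat this before the TTL lapses.
+//	_ = ka.Send(&etcdserverpb.LeaseKeepAliveRequest{ID: grant.ID})
+//	resp, _ := ka.Recv() // resp.TTL reports the remaining time to live
+//	_ = resp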
+
+// ClusterClient is the client API for Cluster service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ClusterClient interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+	// MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+	MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error)
+}
+
+type clusterClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+	return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+	out := new(MemberAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+	out := new(MemberRemoveResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+	out := new(MemberUpdateResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+	out := new(MemberListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) {
+	out := new(MemberPromoteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ClusterServer is the server API for Cluster service.
+type ClusterServer interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+	// MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+	MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error)
+}
+
+// UnimplementedClusterServer can be embedded to have forward compatible implementations.
+type UnimplementedClusterServer struct {
+}
+
+func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
+}
+func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
+}
+func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
+}
+func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
+}
+func (*UnimplementedClusterServer) MemberPromote(ctx context.Context, req *MemberPromoteRequest) (*MemberPromoteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberPromote not implemented")
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+	s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberRemoveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberRemove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberUpdateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberPromoteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberPromote(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberPromote",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Cluster",
+	HandlerType: (*ClusterServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "MemberAdd",
+			Handler:    _Cluster_MemberAdd_Handler,
+		},
+		{
+			MethodName: "MemberRemove",
+			Handler:    _Cluster_MemberRemove_Handler,
+		},
+		{
+			MethodName: "MemberUpdate",
+			Handler:    _Cluster_MemberUpdate_Handler,
+		},
+		{
+			MethodName: "MemberList",
+			Handler:    _Cluster_MemberList_Handler,
+		},
+		{
+			MethodName: "MemberPromote",
+			Handler:    _Cluster_MemberPromote_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
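+
+// Illustrative usage sketch (not part of the generated code): membership
+// calls are plain unary RPCs. Assuming a dialed *grpc.ClientConn `conn` and
+// a context `ctx`, listing the current members could look like:
+//
+//	resp, err := etcdserverpb.NewClusterClient(conn).MemberList(ctx, &etcdserverpb.MemberListRequest{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, m := range resp.Members {
+//		fmt.Printf("%x %s %v\n", m.ID, m.Name, m.ClientURLs)
+//	}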
+
+// MaintenanceClient is the client API for Maintenance service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MaintenanceClient interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+	// Hash computes the hash of the whole backend keyspace,
+	// including the key, lease, and other buckets in storage.
+	// This is designed for testing ONLY!
+	// Do not rely on this in production with ongoing transactions,
+	// since the Hash operation does not hold MVCC locks.
+	// Use the "HashKV" API instead for "key" bucket consistency checks.
+	Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	// It only iterates over the "key" bucket in backend storage.
+	HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+	// MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+	MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+	// Downgrade requests a downgrade of the cluster version, verifies the
+	// feasibility of a downgrade, or cancels an in-progress downgrade.
+	// Supported since etcd 3.5.
+	Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error)
+}
+
+type maintenanceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+	return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+	out := new(AlarmResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+	out := new(StatusResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+	out := new(DefragmentResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+	out := new(HashResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+	out := new(HashKVResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &maintenanceSnapshotClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Maintenance_SnapshotClient interface {
+	Recv() (*SnapshotResponse, error)
+	grpc.ClientStream
+}
+
+type maintenanceSnapshotClient struct {
+	grpc.ClientStream
+}
+
+func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
+	m := new(SnapshotResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
+	out := new(MoveLeaderResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) {
+	out := new(DowngradeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Downgrade", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MaintenanceServer is the server API for Maintenance service.
+type MaintenanceServer interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
+	// Hash computes the hash of the whole backend keyspace,
+	// including the key, lease, and other buckets in storage.
+	// This is designed for testing ONLY!
+	// Do not rely on this in production with ongoing transactions,
+	// since the Hash operation does not hold MVCC locks.
+	// Use the "HashKV" API instead for "key" bucket consistency checks.
+	Hash(context.Context, *HashRequest) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	// It only iterates over the "key" bucket in backend storage.
+	HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
+	// MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+	MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
+	// Downgrade requests a downgrade of the cluster version, verifies the
+	// feasibility of a downgrade, or cancels an in-progress downgrade.
+	// Supported since etcd 3.5.
+	Downgrade(context.Context, *DowngradeRequest) (*DowngradeResponse, error)
+}
+
+// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
+type UnimplementedMaintenanceServer struct {
+}
+
+func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
+}
+func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
+}
+func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
+}
+func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
+}
+func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
+	return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
+}
+func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
+}
+func (*UnimplementedMaintenanceServer) Downgrade(ctx context.Context, req *DowngradeRequest) (*DowngradeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Downgrade not implemented")
+}
+
+func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
+	s.RegisterService(&_Maintenance_serviceDesc, srv)
+}
+
+func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AlarmRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Alarm(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Alarm",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StatusRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Status(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Status",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DefragmentRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Defragment(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Defragment",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HashRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Hash(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Hash",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HashKVRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).HashKV(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/HashKV",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(SnapshotRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
+}
+
+type Maintenance_SnapshotServer interface {
+	Send(*SnapshotResponse) error
+	grpc.ServerStream
+}
+
+type maintenanceSnapshotServer struct {
+	grpc.ServerStream
+}
+
+func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MoveLeaderRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).MoveLeader(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Downgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DowngradeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Downgrade(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Downgrade",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Downgrade(ctx, req.(*DowngradeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Maintenance_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Maintenance",
+	HandlerType: (*MaintenanceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Alarm",
+			Handler:    _Maintenance_Alarm_Handler,
+		},
+		{
+			MethodName: "Status",
+			Handler:    _Maintenance_Status_Handler,
+		},
+		{
+			MethodName: "Defragment",
+			Handler:    _Maintenance_Defragment_Handler,
+		},
+		{
+			MethodName: "Hash",
+			Handler:    _Maintenance_Hash_Handler,
+		},
+		{
+			MethodName: "HashKV",
+			Handler:    _Maintenance_HashKV_Handler,
+		},
+		{
+			MethodName: "MoveLeader",
+			Handler:    _Maintenance_MoveLeader_Handler,
+		},
+		{
+			MethodName: "Downgrade",
+			Handler:    _Maintenance_Downgrade_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Snapshot",
+			Handler:       _Maintenance_Snapshot_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
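+
+// Illustrative usage sketch (not part of the generated code): Snapshot is a
+// server-streaming RPC, so the backend snapshot arrives as a sequence of
+// blobs terminated by io.EOF. Assuming a dialed *grpc.ClientConn `conn`, a
+// context `ctx`, and an io.Writer `w` for the snapshot file:
+//
+//	stream, err := etcdserverpb.NewMaintenanceClient(conn).Snapshot(ctx, &etcdserverpb.SnapshotRequest{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break // snapshot fully received
+//		}
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		if _, err := w.Write(resp.Blob); err != nil {
+//			log.Fatal(err)
+//		}
+//	}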
+
+// AuthClient is the client API for Auth service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AuthClient interface {
+	// AuthEnable enables authentication.
+	AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
+	// AuthDisable disables authentication.
+	AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
+	// AuthStatus displays authentication status.
+	AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error)
+	// Authenticate processes an authenticate request.
+	Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
+	// UserAdd adds a new user. User name cannot be empty.
+	UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
+	// UserGet gets detailed user information.
+	UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
+	// UserList gets a list of all users.
+	UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
+	// UserDelete deletes a specified user.
+	UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
+	// UserChangePassword changes the password of a specified user.
+	UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+	UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role from a specified user.
+	UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
+	// RoleAdd adds a new role. Role name cannot be empty.
+	RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
+	// RoleGet gets detailed role information.
+	RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
+	// RoleDelete deletes a specified role.
+	RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
+	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
+	RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
+	// RoleRevokePermission revokes a key or range permission of a specified role.
+	RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
+}
+
+type authClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewAuthClient(cc *grpc.ClientConn) AuthClient {
+	return &authClient{cc}
+}
+
+func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
+	out := new(AuthEnableResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
+	out := new(AuthDisableResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) {
+	out := new(AuthStatusResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
+	out := new(AuthenticateResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
+	out := new(AuthUserAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
+	out := new(AuthUserGetResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
+	out := new(AuthUserListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
+	out := new(AuthUserDeleteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
+	out := new(AuthUserChangePasswordResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
+	out := new(AuthUserGrantRoleResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
+	out := new(AuthUserRevokeRoleResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
+	out := new(AuthRoleAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
+	out := new(AuthRoleGetResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
+	out := new(AuthRoleListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
+	out := new(AuthRoleDeleteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
+	out := new(AuthRoleGrantPermissionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
+	out := new(AuthRoleRevokePermissionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
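+
+// Illustrative usage sketch (not part of the generated code): Authenticate
+// exchanges a user name and password for a token; etcd expects that token on
+// subsequent RPCs in the "token" gRPC metadata key (attached here with
+// google.golang.org/grpc/metadata). Assuming a dialed *grpc.ClientConn
+// `conn`, a context `ctx`, and hypothetical credentials:
+//
+//	resp, err := etcdserverpb.NewAuthClient(conn).Authenticate(ctx, &etcdserverpb.AuthenticateRequest{
+//		Name:     "root",
+//		Password: "secret",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	authedCtx := metadata.AppendToOutgoingContext(ctx, "token", resp.Token)
+//	_ = authedCtx // use authedCtx for authenticated calls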
+
+// AuthServer is the server API for Auth service.
+type AuthServer interface {
+	// AuthEnable enables authentication.
+	AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
+	// AuthDisable disables authentication.
+	AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
+	// AuthStatus displays authentication status.
+	AuthStatus(context.Context, *AuthStatusRequest) (*AuthStatusResponse, error)
+	// Authenticate processes an authenticate request.
+	Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+	// UserAdd adds a new user. User name cannot be empty.
+	UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+	// UserGet gets detailed user information.
+	UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+	// UserList gets a list of all users.
+	UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+	// UserDelete deletes a specified user.
+	UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+	// UserChangePassword changes the password of a specified user.
+	UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+	UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role from a specified user.
+	UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+	// RoleAdd adds a new role. Role name cannot be empty.
+	RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+	// RoleGet gets detailed role information.
+	RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+	RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+	// RoleDelete deletes a specified role.
+	RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
+	RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+	// RoleRevokePermission revokes a key or range permission of a specified role.
+	RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
+}
+
+// UnimplementedAuthServer can be embedded to have forward compatible implementations.
+type UnimplementedAuthServer struct {
+}
+
+func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
+}
+func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
+}
+func (*UnimplementedAuthServer) AuthStatus(ctx context.Context, req *AuthStatusRequest) (*AuthStatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthStatus not implemented")
+}
+func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
+}
+func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
+}
+func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
+}
+func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
+}
+func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
+}
+func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
+}
+func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
+}
+func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
+}
+func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
+}
+func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
+}
+func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
+}
+func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
+}
+func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
+}
+func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
+}
+
+func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
+	s.RegisterService(&_Auth_serviceDesc, srv)
+}
+
+func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthEnableRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthEnable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthEnable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthDisableRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthDisable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthDisable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthStatusRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthStatus(ctx, req.(*AuthStatusRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthenticateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).Authenticate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/Authenticate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserGetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserGet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserGet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserDeleteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserDelete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserDelete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserChangePasswordRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserChangePassword(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserChangePassword",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserGrantRoleRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserGrantRole(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserGrantRole",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserRevokeRoleRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserRevokeRole(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleGetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleGet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleGet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleDeleteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleDelete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleDelete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleGrantPermissionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleGrantPermission(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleRevokePermissionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleRevokePermission(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Auth_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Auth",
+	HandlerType: (*AuthServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "AuthEnable",
+			Handler:    _Auth_AuthEnable_Handler,
+		},
+		{
+			MethodName: "AuthDisable",
+			Handler:    _Auth_AuthDisable_Handler,
+		},
+		{
+			MethodName: "AuthStatus",
+			Handler:    _Auth_AuthStatus_Handler,
+		},
+		{
+			MethodName: "Authenticate",
+			Handler:    _Auth_Authenticate_Handler,
+		},
+		{
+			MethodName: "UserAdd",
+			Handler:    _Auth_UserAdd_Handler,
+		},
+		{
+			MethodName: "UserGet",
+			Handler:    _Auth_UserGet_Handler,
+		},
+		{
+			MethodName: "UserList",
+			Handler:    _Auth_UserList_Handler,
+		},
+		{
+			MethodName: "UserDelete",
+			Handler:    _Auth_UserDelete_Handler,
+		},
+		{
+			MethodName: "UserChangePassword",
+			Handler:    _Auth_UserChangePassword_Handler,
+		},
+		{
+			MethodName: "UserGrantRole",
+			Handler:    _Auth_UserGrantRole_Handler,
+		},
+		{
+			MethodName: "UserRevokeRole",
+			Handler:    _Auth_UserRevokeRole_Handler,
+		},
+		{
+			MethodName: "RoleAdd",
+			Handler:    _Auth_RoleAdd_Handler,
+		},
+		{
+			MethodName: "RoleGet",
+			Handler:    _Auth_RoleGet_Handler,
+		},
+		{
+			MethodName: "RoleList",
+			Handler:    _Auth_RoleList_Handler,
+		},
+		{
+			MethodName: "RoleDelete",
+			Handler:    _Auth_RoleDelete_Handler,
+		},
+		{
+			MethodName: "RoleGrantPermission",
+			Handler:    _Auth_RoleGrantPermission_Handler,
+		},
+		{
+			MethodName: "RoleRevokePermission",
+			Handler:    _Auth_RoleRevokePermission_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
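+
+// _Auth_serviceDesc is what RegisterAuthServer hands to
+// (*grpc.Server).RegisterService: it maps each method name under
+// "/etcdserverpb.Auth/" to its generated shim. Streams is empty because the
+// Auth service has only unary RPCs, and Metadata names the source proto file.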
+
+func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RaftTerm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.MemberId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ClusterId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
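+
+// The trio above is the gogo/protobuf marshal surface generated for every
+// message in this file: Marshal allocates exactly Size() bytes and
+// MarshalToSizedBuffer fills them back-to-front, emitting fields in
+// descending field-number order so the finished buffer reads in ascending
+// order. The byte written after each value is the protobuf tag,
+// (field_number << 3) | wire_type: 0x20 is raft_term (field 4, varint) and
+// 0x8 is cluster_id (field 1, varint). Minimal usage sketch (field values
+// are illustrative):
+//
+//	hdr := &ResponseHeader{ClusterId: 1, Revision: 7}
+//	buf, err := hdr.Marshal() // buf holds the wire encoding; len(buf) == hdr.Size()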
+
+func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.MaxCreateRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
+		i--
+		dAtA[i] = 0x68
+	}
+	if m.MinCreateRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
+		i--
+		dAtA[i] = 0x60
+	}
+	if m.MaxModRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
+		i--
+		dAtA[i] = 0x58
+	}
+	if m.MinModRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
+		i--
+		dAtA[i] = 0x50
+	}
+	if m.CountOnly {
+		i--
+		if m.CountOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x48
+	}
+	if m.KeysOnly {
+		i--
+		if m.KeysOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.Serializable {
+		i--
+		if m.Serializable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.SortTarget != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.SortOrder != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Limit != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
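+
+// Booleans are plain varints on the wire: when set, a single 0/1 byte is
+// written under a varint tag (0x48 is count_only, field 9). The inner
+// re-test of the field looks redundant inside the outer if, but it is the
+// generator's one shared bool-writing template; since proto3 skips false
+// values entirely, the else branch here is never taken.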
+
+func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Count != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Count))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.More {
+		i--
+		if m.More {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Kvs) > 0 {
+		for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
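+
+// Embedded and repeated messages are length-delimited: the submessage is
+// marshaled into dAtA[:i] first, its byte size is then prepended as a
+// varint, and finally the tag (0xa for header, field 1; 0x12 for each kvs
+// element, field 2). Repeated fields such as Kvs are iterated from last to
+// first because the buffer fills backwards, which leaves the elements in
+// their original order on the wire.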
+
+func (m *PutRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.IgnoreLease {
+		i--
+		if m.IgnoreLease {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.IgnoreValue {
+		i--
+		if m.IgnoreValue {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Lease != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PutResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv != nil {
+		{
+			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PrevKvs) > 0 {
+		for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Deleted != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RequestOp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Request != nil {
+		{
+			size := m.Request.Size()
+			i -= size
+			if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
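+
+// RequestOp.Request is the generated oneof interface; marshaling just
+// delegates to whichever wrapper type (RequestOp_RequestRange,
+// RequestOp_RequestPut, ...) is set, and each wrapper writes its own field
+// tag, so an unset union contributes no bytes. Constructing one looks like
+// this (key value illustrative):
+//
+//	op := &RequestOp{Request: &RequestOp_RequestPut{
+//		RequestPut: &PutRequest{Key: []byte("k")},
+//	}}
+//	buf, err := op.Marshal()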
+
+func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestRange != nil {
+		{
+			size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestPut != nil {
+		{
+			size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestDeleteRange != nil {
+		{
+			size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestTxn != nil {
+		{
+			size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Response != nil {
+		{
+			size := m.Response.Size()
+			i -= size
+			if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseRange != nil {
+		{
+			size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponsePut != nil {
+		{
+			size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseDeleteRange != nil {
+		{
+			size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseTxn != nil {
+		{
+			size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Compare) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x4
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.TargetUnion != nil {
+		{
+			size := m.TargetUnion.Size()
+			i -= size
+			if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Target != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Target))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Result != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Result))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
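+
+// range_end is field 64, so its tag (64<<3 | 2 = 0x202) no longer fits in
+// one byte; the varint encoding is 0x82 0x04, and because the buffer fills
+// backwards the code writes 0x4 first and 0x82 second, leaving the bytes in
+// the correct order.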
+
+func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.Version))
+	i--
+	dAtA[i] = 0x20
+	return len(dAtA) - i, nil
+}
+func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
+	i--
+	dAtA[i] = 0x28
+	return len(dAtA) - i, nil
+}
+func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
+	i--
+	dAtA[i] = 0x30
+	return len(dAtA) - i, nil
+}
+func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Value != nil {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+	i--
+	dAtA[i] = 0x40
+	return len(dAtA) - i, nil
+}
+func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Failure) > 0 {
+		for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Success) > 0 {
+		for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Compare) > 0 {
+		for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Responses) > 0 {
+		for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Succeeded {
+		i--
+		if m.Succeeded {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Physical {
+		i--
+		if m.Physical {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
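+
+// HashRequest has no fields, so its marshal emits nothing beyond any
+// XXX_unrecognized bytes preserved from decoding a newer peer's message.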
+
+func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.HashRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.HashRevision))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.CompactRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Hash != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Hash != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.Blob) > 0 {
+		i -= len(m.Blob)
+		copy(dAtA[i:], m.Blob)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.RemainingBytes != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RequestUnion != nil {
+		{
+			size := m.RequestUnion.Size()
+			i -= size
+			if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.CreateRequest != nil {
+		{
+			size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.CancelRequest != nil {
+		{
+			size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ProgressRequest != nil {
+		{
+			size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Fragment {
+		i--
+		if m.Fragment {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.Filters) > 0 {
+		dAtA22 := make([]byte, len(m.Filters)*10)
+		var j21 int
+		for _, num := range m.Filters {
+			for num >= 1<<7 {
+				dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j21++
+			}
+			dAtA22[j21] = uint8(num)
+			j21++
+		}
+		i -= j21
+		copy(dAtA[i:], dAtA22[:j21])
+		i = encodeVarintRpc(dAtA, i, uint64(j21))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.ProgressNotify {
+		i--
+		if m.ProgressNotify {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.StartRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
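+
+// Filters is a packed repeated enum: each value is varint-encoded by hand
+// into the scratch buffer dAtA22 (sized len*10, the maximum width of a
+// 64-bit varint), and the whole run is then copied in and length-prefixed
+// under a single length-delimited tag (0x2a, field 5). The inner loop is the
+// standard varint scheme: seven payload bits per byte, high bit set on every
+// byte except the last.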
+
+func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Events) > 0 {
+		for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if m.Fragment {
+		i--
+		if m.Fragment {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if len(m.CancelReason) > 0 {
+		i -= len(m.CancelReason)
+		copy(dAtA[i:], m.CancelReason)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.CompactRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Canceled {
+		i--
+		if m.Canceled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Created {
+		i--
+		if m.Created {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Error) > 0 {
+		i -= len(m.Error)
+		copy(dAtA[i:], m.Error)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCheckpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Remaining_TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Checkpoints) > 0 {
+		for iNdEx := len(m.Checkpoints) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Checkpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Keys {
+		i--
+		if m.Keys {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Keys) > 0 {
+		for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Keys[iNdEx])
+			copy(dAtA[i:], m.Keys[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.GrantedTTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Leases) > 0 {
+		for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.IsLearner {
+		i--
+		if m.IsLearner {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if len(m.ClientURLs) > 0 {
+		for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ClientURLs[iNdEx])
+			copy(dAtA[i:], m.ClientURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.IsLearner {
+		i--
+		if m.IsLearner {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
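+
+// A minimal usage sketch (illustrative values only; Marshal and the
+// matching generated Unmarshal are the intended entry points for every
+// message in this file):
+//
+//	req := &MemberAddRequest{PeerURLs: []string{"http://10.0.0.1:2380"}}
+//	wire, err := req.Marshal() // wire holds the encoding, len(wire) == req.Size()
+//	var decoded MemberAddRequest
+//	err = decoded.Unmarshal(wire) // round-trips the request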
+
+func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Member != nil {
+		{
+			size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Linearizable {
+		i--
+		if m.Linearizable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberPromoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberPromoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.TargetID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Alarm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.MemberID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Action != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Alarm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.MemberID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Alarms) > 0 {
+		for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DowngradeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DowngradeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Action != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DowngradeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DowngradeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DowngradeVersionTestRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DowngradeVersionTestRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeVersionTestRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Ver) > 0 {
+		i -= len(m.Ver)
+		copy(dAtA[i:], m.Ver)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Ver)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.DowngradeInfo != nil {
+		{
+			size, err := m.DowngradeInfo.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6a
+	}
+	if m.DbSizeQuota != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeQuota))
+		i--
+		dAtA[i] = 0x60
+	}
+	if len(m.StorageVersion) > 0 {
+		i -= len(m.StorageVersion)
+		copy(dAtA[i:], m.StorageVersion)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.StorageVersion)))
+		i--
+		dAtA[i] = 0x5a
+	}
+	if m.IsLearner {
+		i--
+		if m.IsLearner {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x50
+	}
+	if m.DbSizeInUse != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse))
+		i--
+		dAtA[i] = 0x48
+	}
+	if len(m.Errors) > 0 {
+		for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Errors[iNdEx])
+			copy(dAtA[i:], m.Errors[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Errors[iNdEx])))
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.RaftAppliedIndex != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex))
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.RaftTerm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.RaftIndex != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Leader != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.DbSize != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DowngradeInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DowngradeInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.TargetVersion) > 0 {
+		i -= len(m.TargetVersion)
+		copy(dAtA[i:], m.TargetVersion)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.TargetVersion)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Enabled {
+		i--
+		if m.Enabled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthStatusRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthStatusRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.HashedPassword) > 0 {
+		i -= len(m.HashedPassword)
+		copy(dAtA[i:], m.HashedPassword)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Options != nil {
+		{
+			size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.HashedPassword) > 0 {
+		i -= len(m.HashedPassword)
+		copy(dAtA[i:], m.HashedPassword)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.User) > 0 {
+		i -= len(m.User)
+		copy(dAtA[i:], m.User)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Perm != nil {
+		{
+			size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthStatusResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthStatusResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.AuthRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.AuthRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Enabled {
+		i--
+		if m.Enabled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Token) > 0 {
+		i -= len(m.Token)
+		copy(dAtA[i:], m.Token)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Perm) > 0 {
+		for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Users) > 0 {
+		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Users[iNdEx])
+			copy(dAtA[i:], m.Users[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
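+// encodeVarintRpc writes v as a protobuf base-128 varint so that its last
+// byte lands just before offset, and returns the index of the varint's
+// first byte. Each byte carries 7 bits of payload, low bits first, with the
+// high bit set on every byte except the last; 300, for example, encodes as
+// 0xac 0x02.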
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRpc(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
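+
+// The Size methods below compute the exact wire size the marshalers rely
+// on: a length-delimited field costs 1 tag byte + sovRpc(l) length bytes +
+// l payload bytes, a varint field costs 1 tag byte + sovRpc(value), and a
+// set boolean costs a flat 2 bytes (tag + value). Unrecognized fields are
+// carried verbatim, hence the len(m.XXX_unrecognized) term.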
+func (m *ResponseHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ClusterId != 0 {
+		n += 1 + sovRpc(uint64(m.ClusterId))
+	}
+	if m.MemberId != 0 {
+		n += 1 + sovRpc(uint64(m.MemberId))
+	}
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.RaftTerm != 0 {
+		n += 1 + sovRpc(uint64(m.RaftTerm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RangeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Limit != 0 {
+		n += 1 + sovRpc(uint64(m.Limit))
+	}
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.SortOrder != 0 {
+		n += 1 + sovRpc(uint64(m.SortOrder))
+	}
+	if m.SortTarget != 0 {
+		n += 1 + sovRpc(uint64(m.SortTarget))
+	}
+	if m.Serializable {
+		n += 2
+	}
+	if m.KeysOnly {
+		n += 2
+	}
+	if m.CountOnly {
+		n += 2
+	}
+	if m.MinModRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MinModRevision))
+	}
+	if m.MaxModRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MaxModRevision))
+	}
+	if m.MinCreateRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MinCreateRevision))
+	}
+	if m.MaxCreateRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MaxCreateRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RangeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Kvs) > 0 {
+		for _, e := range m.Kvs {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.More {
+		n += 2
+	}
+	if m.Count != 0 {
+		n += 1 + sovRpc(uint64(m.Count))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *PutRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Lease != 0 {
+		n += 1 + sovRpc(uint64(m.Lease))
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.IgnoreValue {
+		n += 2
+	}
+	if m.IgnoreLease {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *PutResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.PrevKv != nil {
+		l = m.PrevKv.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DeleteRangeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DeleteRangeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Deleted != 0 {
+		n += 1 + sovRpc(uint64(m.Deleted))
+	}
+	if len(m.PrevKvs) > 0 {
+		for _, e := range m.PrevKvs {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
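+// RequestOp and ResponseOp model proto oneof unions: the wrapper delegates to
+// whichever single variant is currently set, and each variant wrapper type
+// sizes (and marshals) itself as an ordinary embedded message.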
+func (m *RequestOp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Request != nil {
+		n += m.Request.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RequestOp_RequestRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestRange != nil {
+		l = m.RequestRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestPut) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestPut != nil {
+		l = m.RequestPut.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestDeleteRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestDeleteRange != nil {
+		l = m.RequestDeleteRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestTxn) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestTxn != nil {
+		l = m.RequestTxn.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Response != nil {
+		n += m.Response.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ResponseOp_ResponseRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseRange != nil {
+		l = m.ResponseRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponsePut) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponsePut != nil {
+		l = m.ResponsePut.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseDeleteRange != nil {
+		l = m.ResponseDeleteRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponseTxn) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseTxn != nil {
+		l = m.ResponseTxn.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *Compare) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Result != 0 {
+		n += 1 + sovRpc(uint64(m.Result))
+	}
+	if m.Target != 0 {
+		n += 1 + sovRpc(uint64(m.Target))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.TargetUnion != nil {
+		n += m.TargetUnion.Size()
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Compare_Version) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.Version))
+	return n
+}
+func (m *Compare_CreateRevision) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.CreateRevision))
+	return n
+}
+func (m *Compare_ModRevision) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.ModRevision))
+	return n
+}
+func (m *Compare_Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != nil {
+		l = len(m.Value)
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *Compare_Lease) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.Lease))
+	return n
+}
+func (m *TxnRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Compare) > 0 {
+		for _, e := range m.Compare {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.Success) > 0 {
+		for _, e := range m.Success {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.Failure) > 0 {
+		for _, e := range m.Failure {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *TxnResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Succeeded {
+		n += 2
+	}
+	if len(m.Responses) > 0 {
+		for _, e := range m.Responses {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *CompactionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.Physical {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *CompactionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashKVRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashKVResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Hash != 0 {
+		n += 1 + sovRpc(uint64(m.Hash))
+	}
+	if m.CompactRevision != 0 {
+		n += 1 + sovRpc(uint64(m.CompactRevision))
+	}
+	if m.HashRevision != 0 {
+		n += 1 + sovRpc(uint64(m.HashRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Hash != 0 {
+		n += 1 + sovRpc(uint64(m.Hash))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.RemainingBytes != 0 {
+		n += 1 + sovRpc(uint64(m.RemainingBytes))
+	}
+	l = len(m.Blob)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestUnion != nil {
+		n += m.RequestUnion.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchRequest_CreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CreateRequest != nil {
+		l = m.CreateRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchRequest_CancelRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CancelRequest != nil {
+		l = m.CancelRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchRequest_ProgressRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ProgressRequest != nil {
+		l = m.ProgressRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchCreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.StartRevision != 0 {
+		n += 1 + sovRpc(uint64(m.StartRevision))
+	}
+	if m.ProgressNotify {
+		n += 2
+	}
+	if len(m.Filters) > 0 {
+		l = 0
+		for _, e := range m.Filters {
+			l += sovRpc(uint64(e))
+		}
+		n += 1 + sovRpc(uint64(l)) + l
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.Fragment {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchCancelRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchProgressRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.Created {
+		n += 2
+	}
+	if m.Canceled {
+		n += 2
+	}
+	if m.CompactRevision != 0 {
+		n += 1 + sovRpc(uint64(m.CompactRevision))
+	}
+	l = len(m.CancelReason)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Fragment {
+		n += 2
+	}
+	if len(m.Events) > 0 {
+		for _, e := range m.Events {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseGrantRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseGrantResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	l = len(m.Error)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseRevokeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseRevokeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseCheckpoint) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.Remaining_TTL != 0 {
+		n += 1 + sovRpc(uint64(m.Remaining_TTL))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseCheckpointRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Checkpoints) > 0 {
+		for _, e := range m.Checkpoints {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseCheckpointResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseKeepAliveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseKeepAliveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseTimeToLiveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.Keys {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseTimeToLiveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.GrantedTTL != 0 {
+		n += 1 + sovRpc(uint64(m.GrantedTTL))
+	}
+	if len(m.Keys) > 0 {
+		for _, b := range m.Keys {
+			l = len(b)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseLeasesRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseLeasesResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Leases) > 0 {
+		for _, e := range m.Leases {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Member) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.ClientURLs) > 0 {
+		for _, s := range m.ClientURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.IsLearner {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.IsLearner {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Member != nil {
+		l = m.Member.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberRemoveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberRemoveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberUpdateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberUpdateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Linearizable {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberPromoteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberPromoteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DefragmentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DefragmentResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MoveLeaderRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TargetID != 0 {
+		n += 1 + sovRpc(uint64(m.TargetID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MoveLeaderResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovRpc(uint64(m.Action))
+	}
+	if m.MemberID != 0 {
+		n += 1 + sovRpc(uint64(m.MemberID))
+	}
+	if m.Alarm != 0 {
+		n += 1 + sovRpc(uint64(m.Alarm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmMember) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.MemberID != 0 {
+		n += 1 + sovRpc(uint64(m.MemberID))
+	}
+	if m.Alarm != 0 {
+		n += 1 + sovRpc(uint64(m.Alarm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Alarms) > 0 {
+		for _, e := range m.Alarms {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DowngradeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovRpc(uint64(m.Action))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DowngradeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DowngradeVersionTestRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Ver)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.DbSize != 0 {
+		n += 1 + sovRpc(uint64(m.DbSize))
+	}
+	if m.Leader != 0 {
+		n += 1 + sovRpc(uint64(m.Leader))
+	}
+	if m.RaftIndex != 0 {
+		n += 1 + sovRpc(uint64(m.RaftIndex))
+	}
+	if m.RaftTerm != 0 {
+		n += 1 + sovRpc(uint64(m.RaftTerm))
+	}
+	if m.RaftAppliedIndex != 0 {
+		n += 1 + sovRpc(uint64(m.RaftAppliedIndex))
+	}
+	if len(m.Errors) > 0 {
+		for _, s := range m.Errors {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.DbSizeInUse != 0 {
+		n += 1 + sovRpc(uint64(m.DbSizeInUse))
+	}
+	if m.IsLearner {
+		n += 2
+	}
+	l = len(m.StorageVersion)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.DbSizeQuota != 0 {
+		n += 1 + sovRpc(uint64(m.DbSizeQuota))
+	}
+	if m.DowngradeInfo != nil {
+		l = m.DowngradeInfo.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DowngradeInfo) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Enabled {
+		n += 2
+	}
+	l = len(m.TargetVersion)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthEnableRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthDisableRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthStatusRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthenticateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.HashedPassword)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserDeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserChangePasswordRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.HashedPassword)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGrantRoleRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserRevokeRoleRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleDeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Perm != nil {
+		l = m.Perm.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthEnableResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthDisableResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthStatusResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Enabled {
+		n += 2
+	}
+	if m.AuthRevision != 0 {
+		n += 1 + sovRpc(uint64(m.AuthRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthenticateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Token)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserDeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserChangePasswordResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGrantRoleResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserRevokeRoleResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Perm) > 0 {
+		for _, e := range m.Perm {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Users) > 0 {
+		for _, s := range m.Users {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleDeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
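+// sovRpc ("size of varint") computes the encoded length of x in bytes: one
+// byte per started group of 7 significant bits. The x|1 guards against
+// Len64(0) == 0, since the value zero still occupies one byte on the wire.
+// sozRpc is the zigzag counterpart used for sint-encoded values.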
+func sovRpc(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRpc(x uint64) (n int) {
+	return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
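+// The Unmarshal methods walk the wire format directly: each iteration decodes
+// a varint key, splits it into fieldNum (key >> 3) and wireType (key & 7),
+// and dispatches on the field number. Unrecognized fields are skipped with
+// skipRpc and preserved in XXX_unrecognized so unknown data survives a
+// decode/re-encode round trip. wireType 4 (end-group) is rejected because
+// these messages contain no groups.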
+func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+			}
+			m.ClusterId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ClusterId |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
+			}
+			m.MemberId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberId |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+			}
+			m.RaftTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
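+// Length-delimited bytes fields are copied with append(dst[:0], src...) so an
+// existing backing array is reused when unmarshalling into a recycled message,
+// and a present-but-empty field is normalized to a non-nil empty slice.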
+func (m *RangeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+			}
+			m.Limit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Limit |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
+			}
+			m.SortOrder = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
+			}
+			m.SortTarget = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Serializable = bool(v != 0)
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.KeysOnly = bool(v != 0)
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.CountOnly = bool(v != 0)
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
+			}
+			m.MinModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
+			}
+			m.MaxModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
+			}
+			m.MinCreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinCreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
+			}
+			m.MaxCreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxCreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
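+// Repeated message fields (here Kvs) append a freshly allocated element and
+// then unmarshal the length-delimited payload into it, one entry per
+// occurrence of the field on the wire.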
+func (m *RangeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
+			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.More = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PutRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			m.Lease = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lease |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IgnoreValue = bool(v != 0)
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IgnoreLease = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PutResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PrevKv == nil {
+				m.PrevKv = &mvccpb.KeyValue{}
+			}
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
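+
+// Length-delimited fields (wire type 2) first decode a byte-length varint,
+// then bound-check postIndex = iNdEx + byteLen against both signed overflow
+// and the buffer length before slicing. Bytes fields such as Key and
+// RangeEnd are filled with append(m.Key[:0], dAtA[iNdEx:postIndex]...), so
+// the message owns a copy of the data rather than aliasing dAtA, and the
+// trailing nil check normalizes an empty decoded value to a non-nil empty
+// slice so a field that appeared on the wire is never left nil.
+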
+func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+			}
+			m.Deleted = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Deleted |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
+			if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
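+
+// Repeated message fields such as PrevKvs append a freshly allocated
+// element and then unmarshal the length-delimited payload into
+// m.PrevKvs[len(m.PrevKvs)-1] in place, so each occurrence of the field on
+// the wire contributes exactly one slice element without an extra copy.
+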
+func (m *RequestOp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RangeRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestRange{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &PutRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestPut{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DeleteRangeRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestDeleteRange{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &TxnRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestTxn{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
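+
+// RequestOp.Request (and ResponseOp.Response below) are protobuf oneofs.
+// The generated code models each arm as a single-field wrapper struct
+// (RequestOp_RequestRange, RequestOp_RequestPut, ...) satisfying a common
+// interface: decoding an arm allocates the inner message, unmarshals the
+// sub-slice into it, and stores the wrapper in m.Request. If several arms
+// appear in one buffer the wrapper is simply replaced, matching protobuf's
+// last-one-wins rule for oneof fields.
+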
+func (m *ResponseOp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RangeResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseRange{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &PutResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponsePut{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DeleteRangeResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseDeleteRange{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &TxnResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseTxn{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Compare) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Compare: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+			}
+			m.Result = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Result |= Compare_CompareResult(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+			}
+			m.Target = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Target |= Compare_CompareTarget(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_Version{v}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_CreateRevision{v}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_ModRevision{v}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := make([]byte, postIndex-iNdEx)
+			copy(v, dAtA[iNdEx:postIndex])
+			m.TargetUnion = &Compare_Value{v}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_Lease{v}
+		case 64:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
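+
+// Compare.TargetUnion shows the scalar side of oneof decoding: the integer
+// arms (Version, CreateRevision, ModRevision, Lease) decode a varint into a
+// local int64 and wrap it, while the Value arm copies the bytes into a
+// freshly allocated slice so the stored union member never aliases dAtA.
+// RangeEnd uses field number 64, whose tag (64<<3|2 = 514, encoded as the
+// two bytes 0x82 0x04) no longer fits in a single byte; the ordinary varint
+// tag loop handles the longer tag transparently.
+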
+func (m *TxnRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Compare = append(m.Compare, &Compare{})
+			if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Success = append(m.Success, &RequestOp{})
+			if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Failure = append(m.Failure, &RequestOp{})
+			if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TxnResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Succeeded = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Responses = append(m.Responses, &ResponseOp{})
+			if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Physical = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
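+
+// Messages with no known fields (HashRequest here, and SnapshotRequest and
+// WatchProgressRequest below) still run the full tag loop: every field
+// falls through to the default branch, where skipRpc computes how many
+// bytes the unknown field occupies and the raw bytes are appended to
+// XXX_unrecognized. Fields added by a newer schema therefore survive a
+// decode/re-encode round trip instead of being dropped.
+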
+func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+			}
+			m.Hash = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Hash |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+			}
+			m.CompactRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CompactRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HashRevision", wireType)
+			}
+			m.HashRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HashRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+			}
+			m.Hash = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Hash |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
+			}
+			m.RemainingBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RemainingBytes |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
+			if m.Blob == nil {
+				m.Blob = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
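+
+// String fields such as SnapshotResponse.Version differ from bytes fields
+// only in the final step: string(dAtA[iNdEx:postIndex]) copies the
+// sub-slice into an immutable string, so no nil-normalization is needed
+// afterwards. The length is decoded as a uint64 and checked for negative
+// overflow after conversion to int.
+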
+func (m *WatchRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchCreateRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_CreateRequest{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchCancelRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_CancelRequest{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchProgressRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_ProgressRequest{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
+			}
+			m.StartRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StartRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ProgressNotify = bool(v != 0)
+		case 5:
+			if wireType == 0 {
+				var v WatchCreateRequest_FilterType
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Filters = append(m.Filters, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRpc
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRpc
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				if elementCount != 0 && len(m.Filters) == 0 {
+					m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v WatchCreateRequest_FilterType
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRpc
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Filters = append(m.Filters, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Fragment = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
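+
+// Filters is a repeated enum, which the wire format permits in two shapes:
+// wire type 0 carries one element per tag, while wire type 2 carries a
+// packed run of varints whose total byte length is given up front and is
+// consumed until iNdEx reaches postIndex. Note that, as generated here,
+// elementCount is declared but never assigned, so the preallocation branch
+// is effectively dead code and Filters grows by plain appends.
+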
+func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
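+		// WatchProgressRequest declares no fields, so every tag falls
+		// through to the unknown-field skip path below.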
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
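+			// Nested message: allocate the Header lazily, then recurse into
+			// its own generated Unmarshal over just the sub-slice covered by
+			// the length prefix.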
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Created = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Canceled = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+			}
+			m.CompactRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CompactRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
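+			// String field: the varint length was read above; the bounds
+			// checks guard against negative (overflowed) lengths and
+			// truncated input before the byte range is converted to a string.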
+			m.CancelReason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Fragment = bool(v != 0)
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
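+			// Repeated message: append an empty element first, then
+			// unmarshal directly into it to avoid an extra copy.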
+			m.Events = append(m.Events, &mvccpb.Event{})
+			if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType)
+			}
+			m.Remaining_TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Remaining_TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{})
+			if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Keys = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
+			}
+			m.GrantedTTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.GrantedTTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
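+			// Repeated bytes field: copy the payload into a fresh slice so
+			// the decoded message does not alias the caller's input buffer.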
+			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
+			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Leases = append(m.Leases, &LeaseStatus{})
+			if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Member: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IsLearner = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IsLearner = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Member == nil {
+				m.Member = &Member{}
+			}
+			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Linearizable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Linearizable = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
+			}
+			m.TargetID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TargetID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+			}
+			m.MemberID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			m.Alarm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Alarm |= AlarmType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmMember) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+			}
+			m.MemberID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			m.Alarm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Alarm |= AlarmType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Alarms = append(m.Alarms, &AlarmMember{})
+			if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DowngradeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DowngradeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DowngradeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= DowngradeRequest_DowngradeAction(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DowngradeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DowngradeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DowngradeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DowngradeVersionTestRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DowngradeVersionTestRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DowngradeVersionTestRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
+			}
+			m.DbSize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DbSize |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+			}
+			m.Leader = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Leader |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
+			}
+			m.RaftIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+			}
+			m.RaftTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType)
+			}
+			m.RaftAppliedIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftAppliedIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType)
+			}
+			m.DbSizeInUse = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DbSizeInUse |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IsLearner = bool(v != 0)
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StorageVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StorageVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DbSizeQuota", wireType)
+			}
+			m.DbSizeQuota = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DbSizeQuota |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfo", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DowngradeInfo == nil {
+				m.DowngradeInfo = &DowngradeInfo{}
+			}
+			if err := m.DowngradeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DowngradeInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DowngradeInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DowngradeInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Enabled = bool(v != 0)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.TargetVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthStatusRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthStatusRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &authpb.UserAddOptions{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HashedPassword = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.HashedPassword = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Perm == nil {
+				m.Perm = &authpb.Permission{}
+			}
+			if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
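+// Reader note (hand-added): message-typed fields such as Perm above are
+// length-delimited (wireType 2). The varint msglen is decoded first, the
+// target is allocated if nil (&authpb.Permission{}), and the field's own
+// Unmarshal runs on the dAtA[iNdEx:postIndex] sub-slice before iNdEx jumps
+// to postIndex.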
+func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
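+// Reader note (hand-added): bytes fields such as Key and RangeEnd above reuse
+// any existing backing array via append(m.Key[:0], ...) and are normalized to
+// an empty non-nil slice when the payload is zero length, distinguishing
+// "present but empty" from "absent".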
+func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthStatusResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthStatusResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Enabled = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+			}
+			m.AuthRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AuthRevision |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
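+// Reader note (hand-added): scalar fields use wireType 0. Enabled above is
+// decoded as a varint into v and coerced with v != 0, while AuthRevision
+// accumulates the varint directly into the uint64 field after resetting it
+// to zero.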
+func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Token = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Perm = append(m.Perm, &authpb.Permission{})
+			if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupRpc
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthRpc
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
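+
+// Illustrative note (not part of the generated API): skipRpc advances past one
+// field using only its wire type, which is how unknown fields end up preserved
+// verbatim in XXX_unrecognized. A minimal sketch with hypothetical bytes:
+//
+//	payload := []byte{0x08, 0x96, 0x01} // tag: field 1, wire type 0; varint 150
+//	if n, err := skipRpc(payload); err == nil {
+//		unknown := payload[:n] // n == 3: the whole field was consumed
+//		_ = unknown
+//	}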
+
+var (
+	ErrInvalidLengthRpc        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRpc          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
new file mode 100644
index 0000000..983dc01
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
@@ -0,0 +1,1436 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/mvccpb/kv.proto";
+import "etcd/api/authpb/auth.proto";
+import "etcd/api/versionpb/version.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+import "protoc-gen-openapiv2/options/annotations.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+  security_definitions: {
+    security: {
+      key: "ApiKey";
+      value: {
+        type: TYPE_API_KEY;
+        in: IN_HEADER;
+        name: "Authorization";
+      }
+    }
+  }
+  security: {
+    security_requirement: {
+      key: "ApiKey";
+      value: {};
+    }
+  }
+};
+
+service KV {
+  // Range gets the keys in the range from the key-value store.
+  rpc Range(RangeRequest) returns (RangeResponse) {
+      option (google.api.http) = {
+        post: "/v3/kv/range"
+        body: "*"
+    };
+  }
+
+  // Put puts the given key into the key-value store.
+  // A put request increments the revision of the key-value store
+  // and generates one event in the event history.
+  rpc Put(PutRequest) returns (PutResponse) {
+      option (google.api.http) = {
+        post: "/v3/kv/put"
+        body: "*"
+    };
+  }
+
+  // DeleteRange deletes the given range from the key-value store.
+  // A delete request increments the revision of the key-value store
+  // and generates a delete event in the event history for every deleted key.
+  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
+      option (google.api.http) = {
+        post: "/v3/kv/deleterange"
+        body: "*"
+    };
+  }
+
+  // Txn processes multiple requests in a single transaction.
+  // A txn request increments the revision of the key-value store
+  // and generates events with the same revision for every completed request.
+  // It is not allowed to modify the same key several times within one txn.
+  rpc Txn(TxnRequest) returns (TxnResponse) {
+      option (google.api.http) = {
+        post: "/v3/kv/txn"
+        body: "*"
+    };
+  }
+
+  // Compact compacts the event history in the etcd key-value store. The key-value
+  // store should be periodically compacted or the event history will continue to grow
+  // indefinitely.
+  rpc Compact(CompactionRequest) returns (CompactionResponse) {
+      option (google.api.http) = {
+        post: "/v3/kv/compaction"
+        body: "*"
+    };
+  }
+}
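+
+// Illustrative note: the KV RPCs above are usually reached through a client
+// library rather than hand-rolled gRPC. A minimal sketch, assuming the
+// go.etcd.io/etcd/client/v3 Go package (not part of this file):
+//
+//   cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
+//   if err != nil { /* handle */ }
+//   defer cli.Close()
+//   // Get issues a RangeRequest; WithPrefix() sets range_end to the key's successor.
+//   resp, err := cli.Get(context.TODO(), "foo", clientv3.WithPrefix())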
+
+service Watch {
+  // Watch watches for events happening or that have happened. Both input and output
+  // are streams; the input stream is for creating and canceling watchers and the output
+  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+  // for several watches at once. The entire event history can be watched starting from the
+  // last compaction revision.
+  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
+      option (google.api.http) = {
+        post: "/v3/watch"
+        body: "*"
+    };
+  }
+}
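+
+// Illustrative note: one Watch stream multiplexes many watchers; client
+// libraries hide the WatchRequest/WatchResponse framing. A minimal sketch,
+// again assuming go.etcd.io/etcd/client/v3:
+//
+//   wch := cli.Watch(context.TODO(), "foo/", clientv3.WithPrefix())
+//   for wresp := range wch {
+//       for _, ev := range wresp.Events {
+//           fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//       }
+//   }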
+
+service Lease {
+  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+  // within a given time to live period. All keys attached to the lease will be expired and
+  // deleted if the lease expires. Each expired key generates a delete event in the event history.
+  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
+      option (google.api.http) = {
+        post: "/v3/lease/grant"
+        body: "*"
+    };
+  }
+
+  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
+      option (google.api.http) = {
+        post: "/v3/lease/revoke"
+        body: "*"
+        additional_bindings {
+            post: "/v3/kv/lease/revoke"
+            body: "*"
+        }
+    };
+  }
+
+  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+  // to the server and streaming keep alive responses from the server to the client.
+  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
+      option (google.api.http) = {
+        post: "/v3/lease/keepalive"
+        body: "*"
+    };
+  }
+
+  // LeaseTimeToLive retrieves lease information.
+  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+      option (google.api.http) = {
+        post: "/v3/lease/timetolive"
+        body: "*"
+        additional_bindings {
+            post: "/v3/kv/lease/timetolive"
+            body: "*"
+        }
+    };
+  }
+
+  // LeaseLeases lists all existing leases.
+  rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
+      option (google.api.http) = {
+        post: "/v3/lease/leases"
+        body: "*"
+        additional_bindings {
+            post: "/v3/kv/lease/leases"
+            body: "*"
+        }
+    };
+  }
+}
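+
+// Illustrative note: a typical lease flow grants a TTL, attaches keys to the
+// lease, then keeps it alive. A minimal sketch, assuming go.etcd.io/etcd/client/v3:
+//
+//   lease, err := cli.Grant(context.TODO(), 10) // LeaseGrantRequest, TTL = 10s
+//   if err != nil { /* handle */ }
+//   _, err = cli.Put(context.TODO(), "job/1", "running", clientv3.WithLease(lease.ID))
+//   ka, err := cli.KeepAlive(context.TODO(), lease.ID) // streaming keepalives
+//   _ = ka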
+
+service Cluster {
+  // MemberAdd adds a member into the cluster.
+  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
+      option (google.api.http) = {
+        post: "/v3/cluster/member/add"
+        body: "*"
+    };
+  }
+
+  // MemberRemove removes an existing member from the cluster.
+  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
+      option (google.api.http) = {
+        post: "/v3/cluster/member/remove"
+        body: "*"
+    };
+  }
+
+  // MemberUpdate updates the member configuration.
+  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
+      option (google.api.http) = {
+        post: "/v3/cluster/member/update"
+        body: "*"
+    };
+  }
+
+  // MemberList lists all the members in the cluster.
+  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+      option (google.api.http) = {
+        post: "/v3/cluster/member/list"
+        body: "*"
+    };
+  }
+
+  // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+  rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) {
+      option (google.api.http) = {
+        post: "/v3/cluster/member/promote"
+        body: "*"
+    };
+  }
+}
+
+service Maintenance {
+  // Alarm activates, deactivates, and queries alarms regarding cluster health.
+  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/alarm"
+        body: "*"
+    };
+  }
+
+  // Status gets the status of the member.
+  rpc Status(StatusRequest) returns (StatusResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/status"
+        body: "*"
+    };
+  }
+
+  // Defragment defragments a member's backend database to recover storage space.
+  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/defragment"
+        body: "*"
+    };
+  }
+
+  // Hash computes the hash of the whole backend keyspace,
+  // including key, lease, and other buckets in storage.
+  // This is designed for testing ONLY!
+  // Do not rely on this in production with ongoing transactions,
+  // since the Hash operation does not hold MVCC locks.
+  // Use the "HashKV" API instead for "key" bucket consistency checks.
+  rpc Hash(HashRequest) returns (HashResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // HashKV computes the hash of all MVCC keys up to a given revision.
+  // It only iterates the "key" bucket in the backend storage.
+  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/hashkv"
+        body: "*"
+    };
+  }
+
+  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/snapshot"
+        body: "*"
+    };
+  }
+
+  // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+      option (google.api.http) = {
+        post: "/v3/maintenance/transfer-leadership"
+        body: "*"
+    };
+  }
+
+  // Downgrade requests a downgrade of the cluster version, verifies the
+  // feasibility of a downgrade, or cancels an in-progress downgrade.
+  // Supported since etcd 3.5.
+  rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) {
+    option (google.api.http) = {
+      post: "/v3/maintenance/downgrade"
+      body: "*"
+    };
+  }
+}
+
+service Auth {
+  // AuthEnable enables authentication.
+  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/enable"
+        body: "*"
+    };
+  }
+
+  // AuthDisable disables authentication.
+  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/disable"
+        body: "*"
+    };
+  }
+
+  // AuthStatus displays authentication status.
+  rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/status"
+        body: "*"
+    };
+  }
+
+  // Authenticate processes an authenticate request.
+  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/authenticate"
+        body: "*"
+    };
+  }
+
+  // UserAdd adds a new user. User name cannot be empty.
+  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/add"
+        body: "*"
+    };
+  }
+
+  // UserGet gets detailed user information.
+  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/get"
+        body: "*"
+    };
+  }
+
+  // UserList gets a list of all users.
+  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/list"
+        body: "*"
+    };
+  }
+
+  // UserDelete deletes a specified user.
+  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/delete"
+        body: "*"
+    };
+  }
+
+  // UserChangePassword changes the password of a specified user.
+  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/changepw"
+        body: "*"
+    };
+  }
+
+  // UserGrantRole grants a role to a specified user.
+  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/grant"
+        body: "*"
+    };
+  }
+
+  // UserRevokeRole revokes a role from a specified user.
+  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/user/revoke"
+        body: "*"
+    };
+  }
+
+  // RoleAdd adds a new role. Role name cannot be empty.
+  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/role/add"
+        body: "*"
+    };
+  }
+
+  // RoleGet gets detailed role information.
+  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/role/get"
+        body: "*"
+    };
+  }
+
+  // RoleList gets a list of all roles.
+  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/role/list"
+        body: "*"
+    };
+  }
+
+  // RoleDelete deletes a specified role.
+  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/role/delete"
+        body: "*"
+    };
+  }
+
+  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/role/grant"
+        body: "*"
+    };
+  }
+
+  // RoleRevokePermission revokes a key or range permission of a specified role.
+  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3/auth/role/revoke"
+        body: "*"
+    };
+  }
+}
+
+message ResponseHeader {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  // member_id is the ID of the member which sent the response.
+  uint64 member_id = 2;
+  // revision is the key-value store revision when the request was applied; it is
+  // unset (so 0) for calls that do not interact with the key-value store.
+  // For watch progress responses, the header.revision indicates progress. All future events
+  // received in this stream are guaranteed to have a higher revision number than the
+  // header.revision number.
+  int64 revision = 3;
+  // raft_term is the raft term when the request was applied.
+  uint64 raft_term = 4;
+}
+
+message RangeRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  enum SortOrder {
+    option (versionpb.etcd_version_enum) = "3.0";
+    NONE = 0; // default, no sorting
+    ASCEND = 1; // lowest target value first
+    DESCEND = 2; // highest target value first
+  }
+  enum SortTarget {
+    option (versionpb.etcd_version_enum) = "3.0";
+    KEY = 0;
+    VERSION = 1;
+    CREATE = 2;
+    MOD = 3;
+    VALUE = 4;
+  }
+
+  // key is the first key for the range. If range_end is not given, the request only looks up key.
+  bytes key = 1;
+  // range_end is the upper bound on the requested range [key, range_end).
+  // If range_end is '\0', the range is all keys >= key.
+  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then the range request gets all keys prefixed with key.
+  // If both key and range_end are '\0', then the range request returns all keys.
+  bytes range_end = 2;
+  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+  // it is treated as no limit.
+  int64 limit = 3;
+  // revision is the point-in-time of the key-value store to use for the range.
+  // If revision is less than or equal to zero, the range is over the newest key-value store.
+  // If the revision has been compacted, ErrCompacted is returned as a response.
+  int64 revision = 4;
+
+  // sort_order is the order for returned sorted results.
+  SortOrder sort_order = 5;
+
+  // sort_target is the key-value field to use for sorting.
+  SortTarget sort_target = 6;
+
+  // serializable sets the range request to use serializable member-local reads.
+  // Range requests are linearizable by default; linearizable requests have higher
+  // latency and lower throughput than serializable requests but reflect the current
+  // consensus of the cluster. For better performance, in exchange for possible stale reads,
+  // a serializable range request is served locally without needing to reach consensus
+  // with other nodes in the cluster.
+  bool serializable = 7;
+
+  // keys_only when set returns only the keys and not the values.
+  bool keys_only = 8;
+
+  // count_only when set returns only the count of the keys in the range.
+  bool count_only = 9;
+
+  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+  // lesser mod revisions will be filtered away.
+  int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"];
+
+  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+  // greater mod revisions will be filtered away.
+  int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"];
+
+  // min_create_revision is the lower bound for returned key create revisions; all keys with
+  // lesser create revisions will be filtered away.
+  int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"];
+
+  // max_create_revision is the upper bound for returned key create revisions; all keys with
+  // greater create revisions will be filtered away.
+  int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"];
+}
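+
+// Illustrative note: the "prefix" form of range_end described above is the key
+// with its last non-0xff byte incremented. A sketch of that computation in Go,
+// mirroring what etcd clients do (the helper name is hypothetical):
+//
+//   func prefixRangeEnd(key []byte) []byte {
+//       end := append([]byte(nil), key...)
+//       for i := len(end) - 1; i >= 0; i-- {
+//           if end[i] < 0xff {
+//               end[i]++
+//               return end[:i+1]
+//           }
+//       }
+//       return []byte{0} // '\0': range over the whole keyspace
+//   }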
+
+message RangeResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // kvs is the list of key-value pairs matched by the range request.
+  // kvs is empty when count is requested.
+  repeated mvccpb.KeyValue kvs = 2;
+  // more indicates if there are more keys to return in the requested range.
+  bool more = 3;
+  // count is set to the actual number of keys within the range when requested.
+  // Unlike Kvs, it is unaffected by limits and filters (e.g., Min/Max, Create/Modify, Revisions)
+  // and reflects the full count within the specified range.
+  int64 count = 4;
+}
+
+message PutRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // key is the key, in bytes, to put into the key-value store.
+  bytes key = 1;
+  // value is the value, in bytes, to associate with the key in the key-value store.
+  bytes value = 2;
+  // lease is the lease ID to associate with the key in the key-value store. A lease
+  // value of 0 indicates no lease.
+  int64 lease = 3;
+
+  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+  // The previous key-value pair will be returned in the put response.
+  bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"];
+
+  // If ignore_value is set, etcd updates the key using its current value.
+  // Returns an error if the key does not exist.
+  bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"];
+
+  // If ignore_lease is set, etcd updates the key using its current lease.
+  // Returns an error if the key does not exist.
+  bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"];
+}
+
+message PutResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // if prev_kv is set in the request, the previous key-value pair will be returned.
+  mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message DeleteRangeRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // key is the first key to delete in the range.
+  bytes key = 1;
+  // range_end is the key following the last key to delete for the range [key, range_end).
+  // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is key plus one (the lexical successor of the given key), then the
+  // range is all keys prefixed with the given key.
+  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+  bytes range_end = 2;
+
+  // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
+  // The previous key-value pairs will be returned in the delete response.
+  bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message DeleteRangeResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // deleted is the number of keys deleted by the delete range request.
+  int64 deleted = 2;
+  // if prev_kv is set in the request, the previous key-value pairs will be returned.
+  repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message RequestOp {
+  option (versionpb.etcd_version_msg) = "3.0";
+  // request is a union of request types accepted by a transaction.
+  oneof request {
+    RangeRequest request_range = 1;
+    PutRequest request_put = 2;
+    DeleteRangeRequest request_delete_range = 3;
+    TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"];
+  }
+}
+
+message ResponseOp {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // response is a union of response types returned by a transaction.
+  oneof response {
+    RangeResponse response_range = 1;
+    PutResponse response_put = 2;
+    DeleteRangeResponse response_delete_range = 3;
+    TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"];
+  }
+}
+
+message Compare {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  enum CompareResult {
+    option (versionpb.etcd_version_enum) = "3.0";
+
+    EQUAL = 0;
+    GREATER = 1;
+    LESS = 2;
+    NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"];
+  }
+  enum CompareTarget {
+    option (versionpb.etcd_version_enum) = "3.0";
+
+    VERSION = 0;
+    CREATE = 1;
+    MOD = 2;
+    VALUE = 3;
+    LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"];
+  }
+  // result is logical comparison operation for this comparison.
+  CompareResult result = 1;
+  // target is the key-value field to inspect for the comparison.
+  CompareTarget target = 2;
+  // key is the subject key for the comparison operation.
+  bytes key = 3;
+  oneof target_union {
+    // version is the version of the given key
+    int64 version = 4;
+    // create_revision is the creation revision of the given key
+    int64 create_revision = 5;
+    // mod_revision is the last modified revision of the given key.
+    int64 mod_revision = 6;
+    // value is the value of the given key, in bytes.
+    bytes value = 7;
+    // lease is the lease id of the given key.
+    int64 lease = 8 [(versionpb.etcd_version_field)="3.3"];
+    // leave room for more target_union field tags, jump to 64
+  }
+
+  // range_end compares the given target to all keys in the range [key, range_end).
+  // See RangeRequest for more details on key ranges.
+  bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"];
+  // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed
+// if guard evaluates to
+// true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+message TxnRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // compare is a list of predicates representing a conjunction of terms.
+  // If the comparisons succeed, then the success requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  // If the comparisons fail, then the failure requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  repeated Compare compare = 1;
+  // success is a list of requests which will be applied when compare evaluates to true.
+  repeated RequestOp success = 2;
+  // failure is a list of requests which will be applied when compare evaluates to false.
+  repeated RequestOp failure = 3;
+}
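+
+// Illustrative note: the compare/success/failure split above maps directly
+// onto the client transaction builder. A minimal sketch, assuming
+// go.etcd.io/etcd/client/v3:
+//
+//   resp, err := cli.Txn(context.TODO()).
+//       If(clientv3.Compare(clientv3.ModRevision("k"), "=", 5)). // compare
+//       Then(clientv3.OpPut("k", "new")).                        // success
+//       Else(clientv3.OpGet("k")).                               // failure
+//       Commit()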
+
+message TxnResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // succeeded is set to true if the compare evaluated to true, and false otherwise.
+  bool succeeded = 2;
+  // responses is a list of responses corresponding to the results from applying
+  // success if succeeded is true or failure if succeeded is false.
+  repeated ResponseOp responses = 3;
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // revision is the key-value store revision for the compaction operation.
+  int64 revision = 1;
+  // physical is set so the RPC will wait until the compaction is physically
+  // applied to the local database such that compacted entries are totally
+  // removed from the backend database.
+  bool physical = 2;
+}
+
+message CompactionResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message HashRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message HashKVRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
+  // revision is the key-value store revision for the hash operation.
+  int64 revision = 1;
+}
+
+message HashKVResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+  uint32 hash = 2;
+  // compact_revision is the compacted revision of key-value store when hash begins.
+  int64 compact_revision = 3;
+  // hash_revision is the revision up to which the hash is calculated.
+  int64 hash_revision = 4 [(versionpb.etcd_version_field)="3.6"];
+}
+
+message HashResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's KV's backend.
+  uint32 hash = 2;
+}
+
+message SnapshotRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
+}
+
+message SnapshotResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
+  // header has the current key-value store information. The first header in the snapshot
+  // stream indicates the point in time of the snapshot.
+  ResponseHeader header = 1;
+
+  // remaining_bytes is the number of blob bytes to be sent after this message
+  uint64 remaining_bytes = 2;
+
+  // blob contains the next chunk of the snapshot in the snapshot stream.
+  bytes blob = 3;
+
+  // version is the local version of the server that created the snapshot.
+  // In a cluster whose members run different binary versions, each member can return a different result.
+  // Informs which etcd server version should be used when restoring the snapshot.
+  string version = 4 [(versionpb.etcd_version_field)="3.6"];
+}
+
+message WatchRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+  // request_union is a request to either create a new watcher or cancel an existing watcher.
+  oneof request_union {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+    WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"];
+  }
+}
+
+message WatchCreateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // key is the key to register for watching.
+  bytes key = 1;
+
+  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+  // or equal to the key argument are watched.
+  // If range_end is key plus one (the lexical successor of the given key),
+  // then all keys prefixed with the given key will be watched.
+  bytes range_end = 2;
+
+  // start_revision is an optional revision to watch from (inclusive). If unset, it means "now".
+  int64 start_revision = 3;
+
+  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+  // no events to the new watcher if there are no recent events. It is useful when clients
+  // wish to recover a disconnected watcher starting from a recent known revision.
+  // The etcd server may decide how often it will send notifications based on current load.
+  bool progress_notify = 4;
+
+  enum FilterType {
+    option (versionpb.etcd_version_enum) = "3.1";
+
+    // filter out put event.
+    NOPUT = 0;
+    // filter out delete event.
+    NODELETE = 1;
+  }
+
+  // filters filter the events at server side before it sends back to the watcher.
+  repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"];
+
+  // If prev_kv is set, created watcher gets the previous KV before the event happens.
+  // If the previous KV is already compacted, nothing will be returned.
+  bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"];
+
+  // If watch_id is provided and non-zero, it will be assigned to this watcher.
+  // Since creating a watcher in etcd is not a synchronous operation,
+  // this can be used to ensure that ordering is correct when creating multiple
+  // watchers on the same stream. Creating a watcher with an ID already in
+  // use on the stream will cause an error to be returned.
+  int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"];
+
+  // fragment enables splitting large revisions into multiple watch responses.
+  bool fragment = 8 [(versionpb.etcd_version_field)="3.4"];
+}
+
+message WatchCancelRequest {
+  option (versionpb.etcd_version_msg) = "3.1";
+  // watch_id is the watcher id to cancel so that no more events are transmitted.
+  int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"];
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+  option (versionpb.etcd_version_msg) = "3.4";
+}
+
+message WatchResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // watch_id is the ID of the watcher that corresponds to the response.
+  int64 watch_id = 2;
+
+  // created is set to true if the response is for a create watch request.
+  // The client should record the watch_id and expect to receive events for
+  // the created watcher from the same stream.
+  // All events sent to the created watcher will attach with the same watch_id.
+  bool created = 3;
+
+  // canceled is set to true if the response is for a cancel watch request
+  // or if the start_revision has already been compacted.
+  // No further events will be sent to the canceled watcher.
+  bool canceled = 4;
+
+  // compact_revision is set to the minimum index if a watcher tries to watch
+  // at a compacted index.
+  //
+  // This happens when creating a watcher at a compacted revision or the watcher cannot
+  // catch up with the progress of the key-value store.
+  //
+  // The client should treat the watcher as canceled and should not try to create any
+  // watcher with the same start_revision again.
+  int64 compact_revision = 5;
+
+  // cancel_reason indicates the reason for canceling the watcher.
+  string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"];
+
+  // fragment is true if a large watch response was split over multiple responses.
+  bool fragment = 7 [(versionpb.etcd_version_field)="3.4"];
+
+  repeated mvccpb.Event events = 11;
+}
+
+message LeaseGrantRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+  int64 TTL = 1;
+  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+  int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // ID is the lease ID for the granted lease.
+  int64 ID = 2;
+  // TTL is the server chosen lease time-to-live in seconds.
+  int64 TTL = 3;
+  string error = 4;
+}
+
+message LeaseRevokeRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+  int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message LeaseCheckpoint {
+  option (versionpb.etcd_version_msg) = "3.4";
+
+  // ID is the lease ID to checkpoint.
+  int64 ID = 1;
+
+  // Remaining_TTL is the remaining time until expiry of the lease.
+  int64 remaining_TTL = 2;
+}
+
+message LeaseCheckpointRequest {
+  option (versionpb.etcd_version_msg) = "3.4";
+
+  repeated LeaseCheckpoint checkpoints = 1;
+}
+
+message LeaseCheckpointResponse {
+  option (versionpb.etcd_version_msg) = "3.4";
+
+  ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+  // ID is the lease ID for the lease to keep alive.
+  int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the new time-to-live for the lease.
+  int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+  option (versionpb.etcd_version_msg) = "3.1";
+  // ID is the lease ID for the lease.
+  int64 ID = 1;
+  // keys is true to query all the keys attached to this lease.
+  bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+  option (versionpb.etcd_version_msg) = "3.1";
+
+  ResponseHeader header = 1;
+  // ID is the lease ID from the time to live request.
+  int64 ID = 2;
+  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+  int64 TTL = 3;
+  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+  int64 grantedTTL = 4;
+  // Keys is the list of keys attached to this lease.
+  repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
+}
+
+message LeaseStatus {
+  option (versionpb.etcd_version_msg) = "3.3";
+
+  int64 ID = 1;
+  // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
+  ResponseHeader header = 1;
+  repeated LeaseStatus leases = 2;
+}
+
+message Member {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // ID is the member ID for this member.
+  uint64 ID = 1;
+  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+  string name = 2;
+  // peerURLs is the list of URLs the member exposes to the cluster for communication.
+  repeated string peerURLs = 3;
+  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+  repeated string clientURLs = 4;
+  // isLearner indicates if the member is raft learner.
+  bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"];
+}
+
+message MemberAddRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+  repeated string peerURLs = 1;
+  // isLearner indicates if the added member is raft learner.
+  bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"];
+}
+
+message MemberAddResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // member is the member information for the added member.
+  Member member = 2;
+  // members is a list of all members after adding the new member.
+  repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+  // ID is the member ID of the member to remove.
+  uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // members is a list of all members after removing the member.
+  repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // ID is the member ID of the member to update.
+  uint64 ID = 1;
+  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+  repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse{
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // members is a list of all members after updating the member.
+  repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message MemberListRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"];
+}
+
+message MemberListResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // members is a list of all members associated with the cluster.
+  repeated Member members = 2;
+}
+
+message MemberPromoteRequest {
+  option (versionpb.etcd_version_msg) = "3.4";
+  // ID is the member ID of the member to promote.
+  uint64 ID = 1;
+}
+
+message MemberPromoteResponse {
+  option (versionpb.etcd_version_msg) = "3.4";
+
+  ResponseHeader header = 1;
+  // members is a list of all members after promoting the member.
+  repeated Member members = 2;
+}
+
+message DefragmentRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message DefragmentResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+  option (versionpb.etcd_version_msg) = "3.3";
+  // targetID is the node ID for the new leader.
+  uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+  option (versionpb.etcd_version_msg) = "3.3";
+
+  ResponseHeader header = 1;
+}
+
+enum AlarmType {
+  option (versionpb.etcd_version_enum) = "3.0";
+
+  NONE = 0; // default, used to query if any alarm is active
+  NOSPACE = 1; // space quota is exhausted
+  CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected
+}
+
+message AlarmRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  enum AlarmAction {
+    option (versionpb.etcd_version_enum) = "3.0";
+
+    GET = 0;
+    ACTIVATE = 1;
+    DEACTIVATE = 2;
+  }
+  // action is the kind of alarm request to issue. The action
+  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+  // raised alarm.
+  AlarmAction action = 1;
+  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+  // alarm request covers all members.
+  uint64 memberID = 2;
+  // alarm is the type of alarm to consider for this request.
+  AlarmType alarm = 3;
+}
+
+message AlarmMember {
+  option (versionpb.etcd_version_msg) = "3.0";
+  // memberID is the ID of the member associated with the raised alarm.
+  uint64 memberID = 1;
+  // alarm is the type of alarm which has been raised.
+  AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // alarms is a list of alarms associated with the alarm request.
+  repeated AlarmMember alarms = 2;
+}
+
+message DowngradeRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  enum DowngradeAction {
+    option (versionpb.etcd_version_enum) = "3.5";
+
+    VALIDATE = 0;
+    ENABLE = 1;
+    CANCEL = 2;
+  }
+
+  // action is the kind of downgrade request to issue. The action may
+  // VALIDATE the target version, ENABLE a downgrade of the cluster version,
+  // or CANCEL the current downgrade job.
+  DowngradeAction action = 1;
+  // version is the target version to downgrade.
+  string version = 2;
+}
+
+message DowngradeResponse {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  ResponseHeader header = 1;
+  // version is the current cluster version.
+  string version = 2;
+}
+
+// DowngradeVersionTestRequest is used for testing only. The version in
+// this request will be read as the WAL record version. If the downgrade
+// target version is less than this version, then the downgrade (online)
+// or migration (offline) isn't safe and shouldn't be allowed.
+message DowngradeVersionTestRequest {
+  option (versionpb.etcd_version_msg) = "3.6";
+
+  string ver = 1;
+}
+
+message StatusRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message StatusResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // version is the cluster protocol version used by the responding member.
+  string version = 2;
+  // dbSize is the size of the backend database physically allocated, in bytes, of the responding member.
+  int64 dbSize = 3;
+  // leader is the member ID which the responding member believes is the current leader.
+  uint64 leader = 4;
+  // raftIndex is the current raft committed index of the responding member.
+  uint64 raftIndex = 5;
+  // raftTerm is the current raft term of the responding member.
+  uint64 raftTerm = 6;
+  // raftAppliedIndex is the current raft applied index of the responding member.
+  uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"];
+  // errors contains alarm/health information and status.
+  repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"];
+  // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
+  int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"];
+  // isLearner indicates if the member is raft learner.
+  bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"];
+  // storageVersion is the version of the db file. It might be updated with a delay relative to the target cluster version.
+  string storageVersion = 11 [(versionpb.etcd_version_field)="3.6"];
+  // dbSizeQuota is the configured etcd storage quota in bytes (the value passed to etcd instance by flag --quota-backend-bytes)
+  int64 dbSizeQuota = 12 [(versionpb.etcd_version_field)="3.6"];
+  // downgradeInfo indicates whether a downgrade is in progress.
+  DowngradeInfo downgradeInfo = 13 [(versionpb.etcd_version_field)="3.6"];
+}
+
+message DowngradeInfo {
+  // enabled indicates whether the cluster is enabled to downgrade.
+  bool enabled = 1;
+  // targetVersion is the target downgrade version.
+  string targetVersion = 2;
+}
+
+message AuthEnableRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthDisableRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthStatusRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
+}
+
+message AuthenticateRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserAddRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string name = 1;
+  string password = 2;
+  authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"];
+  string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"];
+}
+
+message AuthUserGetRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string name = 1;
+}
+
+message AuthUserDeleteRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+  // name is the name of the user to delete.
+  string name = 1;
+}
+
+message AuthUserChangePasswordRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // name is the name of the user whose password is being changed.
+  string name = 1;
+  // password is the new password for the user. Note that this field will be removed in the API layer.
+  string password = 2;
+  // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
+  string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"];
+}
+
+message AuthUserGrantRoleRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // user is the name of the user which should be granted a given role.
+  string user = 1;
+  // role is the name of the role to grant to the user.
+  string role = 2;
+}
+
+message AuthUserRevokeRoleRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string name = 1;
+  string role = 2;
+}
+
+message AuthRoleAddRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // name is the name of the role to add to the authentication system.
+  string name = 1;
+}
+
+message AuthRoleGetRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string role = 1;
+}
+
+message AuthUserListRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthRoleListRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthRoleDeleteRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string role = 1;
+}
+
+message AuthRoleGrantPermissionRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  // name is the name of the role which will be granted the permission.
+  string name = 1;
+  // perm is the permission to grant to the role.
+  authpb.Permission perm = 2;
+}
+
+message AuthRoleRevokePermissionRequest {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  string role = 1;
+  bytes key = 2;
+  bytes range_end = 3;
+}
+
+message AuthEnableResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthDisableResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthStatusResponse {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  ResponseHeader header = 1;
+  bool enabled = 2;
+  // authRevision is the current revision of auth store
+  uint64 authRevision = 3;
+}
+
+message AuthenticateResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+  // token is an authorized token that can be used in succeeding RPCs
+  string token = 2;
+}
+
+message AuthUserAddResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthUserGetResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserDeleteResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthUserChangePasswordResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthUserGrantRoleResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthUserRevokeRoleResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthRoleAddResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGetResponse {
+  ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"];
+
+  repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"];
+}
+
+message AuthRoleListResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserListResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+
+  repeated string users = 2;
+}
+
+message AuthRoleDeleteResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGrantPermissionResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
+
+message AuthRoleRevokePermissionResponse {
+  option (versionpb.etcd_version_msg) = "3.0";
+
+  ResponseHeader header = 1;
+}
diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go
new file mode 100644
index 0000000..85a2a74
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go
@@ -0,0 +1,1459 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: membership.proto
+
+package membershippb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+	_ "go.etcd.io/etcd/api/v3/versionpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+type RaftAttributes struct {
+	// peerURLs is the list of peers in the raft cluster.
+	PeerUrls []string `protobuf:"bytes,1,rep,name=peer_urls,json=peerUrls,proto3" json:"peer_urls,omitempty"`
+	// isLearner indicates if the member is raft learner.
+	IsLearner            bool     `protobuf:"varint,2,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RaftAttributes) Reset()         { *m = RaftAttributes{} }
+func (m *RaftAttributes) String() string { return proto.CompactTextString(m) }
+func (*RaftAttributes) ProtoMessage()    {}
+func (*RaftAttributes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_949fe0d019050ef5, []int{0}
+}
+func (m *RaftAttributes) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RaftAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RaftAttributes.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RaftAttributes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RaftAttributes.Merge(m, src)
+}
+func (m *RaftAttributes) XXX_Size() int {
+	return m.Size()
+}
+func (m *RaftAttributes) XXX_DiscardUnknown() {
+	xxx_messageInfo_RaftAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RaftAttributes proto.InternalMessageInfo
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+type Attributes struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	ClientUrls           []string `protobuf:"bytes,2,rep,name=client_urls,json=clientUrls,proto3" json:"client_urls,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Attributes) Reset()         { *m = Attributes{} }
+func (m *Attributes) String() string { return proto.CompactTextString(m) }
+func (*Attributes) ProtoMessage()    {}
+func (*Attributes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_949fe0d019050ef5, []int{1}
+}
+func (m *Attributes) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Attributes.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Attributes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Attributes.Merge(m, src)
+}
+func (m *Attributes) XXX_Size() int {
+	return m.Size()
+}
+func (m *Attributes) XXX_DiscardUnknown() {
+	xxx_messageInfo_Attributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Attributes proto.InternalMessageInfo
+
+type Member struct {
+	ID                   uint64          `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	RaftAttributes       *RaftAttributes `protobuf:"bytes,2,opt,name=raft_attributes,json=raftAttributes,proto3" json:"raft_attributes,omitempty"`
+	MemberAttributes     *Attributes     `protobuf:"bytes,3,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *Member) Reset()         { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage()    {}
+func (*Member) Descriptor() ([]byte, []int) {
+	return fileDescriptor_949fe0d019050ef5, []int{2}
+}
+func (m *Member) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Member.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Member) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Member.Merge(m, src)
+}
+func (m *Member) XXX_Size() int {
+	return m.Size()
+}
+func (m *Member) XXX_DiscardUnknown() {
+	xxx_messageInfo_Member.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Member proto.InternalMessageInfo
+
+type ClusterVersionSetRequest struct {
+	Ver                  string   `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ClusterVersionSetRequest) Reset()         { *m = ClusterVersionSetRequest{} }
+func (m *ClusterVersionSetRequest) String() string { return proto.CompactTextString(m) }
+func (*ClusterVersionSetRequest) ProtoMessage()    {}
+func (*ClusterVersionSetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_949fe0d019050ef5, []int{3}
+}
+func (m *ClusterVersionSetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClusterVersionSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ClusterVersionSetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ClusterVersionSetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClusterVersionSetRequest.Merge(m, src)
+}
+func (m *ClusterVersionSetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClusterVersionSetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClusterVersionSetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterVersionSetRequest proto.InternalMessageInfo
+
+type ClusterMemberAttrSetRequest struct {
+	Member_ID            uint64      `protobuf:"varint,1,opt,name=member_ID,json=memberID,proto3" json:"member_ID,omitempty"`
+	MemberAttributes     *Attributes `protobuf:"bytes,2,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *ClusterMemberAttrSetRequest) Reset()         { *m = ClusterMemberAttrSetRequest{} }
+func (m *ClusterMemberAttrSetRequest) String() string { return proto.CompactTextString(m) }
+func (*ClusterMemberAttrSetRequest) ProtoMessage()    {}
+func (*ClusterMemberAttrSetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_949fe0d019050ef5, []int{4}
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ClusterMemberAttrSetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ClusterMemberAttrSetRequest.Merge(m, src)
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *ClusterMemberAttrSetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ClusterMemberAttrSetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterMemberAttrSetRequest proto.InternalMessageInfo
+
+type DowngradeInfoSetRequest struct {
+	Enabled              bool     `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	Ver                  string   `protobuf:"bytes,2,opt,name=ver,proto3" json:"ver,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DowngradeInfoSetRequest) Reset()         { *m = DowngradeInfoSetRequest{} }
+func (m *DowngradeInfoSetRequest) String() string { return proto.CompactTextString(m) }
+func (*DowngradeInfoSetRequest) ProtoMessage()    {}
+func (*DowngradeInfoSetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_949fe0d019050ef5, []int{5}
+}
+func (m *DowngradeInfoSetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DowngradeInfoSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DowngradeInfoSetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DowngradeInfoSetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DowngradeInfoSetRequest.Merge(m, src)
+}
+func (m *DowngradeInfoSetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DowngradeInfoSetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DowngradeInfoSetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeInfoSetRequest proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*RaftAttributes)(nil), "membershippb.RaftAttributes")
+	proto.RegisterType((*Attributes)(nil), "membershippb.Attributes")
+	proto.RegisterType((*Member)(nil), "membershippb.Member")
+	proto.RegisterType((*ClusterVersionSetRequest)(nil), "membershippb.ClusterVersionSetRequest")
+	proto.RegisterType((*ClusterMemberAttrSetRequest)(nil), "membershippb.ClusterMemberAttrSetRequest")
+	proto.RegisterType((*DowngradeInfoSetRequest)(nil), "membershippb.DowngradeInfoSetRequest")
+}
+
+func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) }
+
+var fileDescriptor_949fe0d019050ef5 = []byte{
+	// 422 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
+	0x10, 0xed, 0x3a, 0x55, 0x6b, 0x4f, 0x51, 0x28, 0x2b, 0x24, 0xac, 0x06, 0x8c, 0x55, 0x2e, 0x39,
+	0xd9, 0x12, 0x51, 0x0f, 0x70, 0x03, 0xd2, 0x43, 0x10, 0xe5, 0xb0, 0xa8, 0x1c, 0xb8, 0x44, 0xeb,
+	0x66, 0x62, 0x56, 0x72, 0xbc, 0x66, 0x77, 0x5d, 0xee, 0x1c, 0xf9, 0x02, 0xfe, 0x82, 0x13, 0xff,
+	0xd0, 0x23, 0x9f, 0x00, 0xe1, 0x47, 0x90, 0x77, 0x9d, 0xd8, 0x11, 0x9c, 0x7a, 0x1b, 0x3f, 0xcf,
+	0xbc, 0x79, 0xef, 0xed, 0xc0, 0xf1, 0x0a, 0x57, 0x19, 0x2a, 0xfd, 0x51, 0x54, 0x49, 0xa5, 0xa4,
+	0x91, 0xf4, 0x4e, 0x87, 0x54, 0xd9, 0xc9, 0xfd, 0x5c, 0xe6, 0xd2, 0xfe, 0x48, 0x9b, 0xca, 0xf5,
+	0x9c, 0xc4, 0x68, 0xae, 0x16, 0x29, 0xaf, 0x44, 0x7a, 0x8d, 0x4a, 0x0b, 0x59, 0x56, 0xd9, 0xa6,
+	0x72, 0x1d, 0xa7, 0x97, 0x30, 0x64, 0x7c, 0x69, 0x5e, 0x18, 0xa3, 0x44, 0x56, 0x1b, 0xd4, 0x74,
+	0x04, 0x41, 0x85, 0xa8, 0xe6, 0xb5, 0x2a, 0x74, 0x48, 0xe2, 0xc1, 0x38, 0x60, 0x7e, 0x03, 0x5c,
+	0xaa, 0x42, 0xd3, 0x47, 0x00, 0x42, 0xcf, 0x0b, 0xe4, 0xaa, 0x44, 0x15, 0x7a, 0x31, 0x19, 0xfb,
+	0x2c, 0x10, 0xfa, 0x8d, 0x03, 0x9e, 0x1f, 0x7e, 0xf9, 0x11, 0x0e, 0x26, 0xc9, 0xd9, 0xe9, 0x6b,
+	0x80, 0x1e, 0x25, 0x85, 0xfd, 0x92, 0xaf, 0x30, 0x24, 0x31, 0x19, 0x07, 0xcc, 0xd6, 0xf4, 0x31,
+	0x1c, 0x5d, 0x15, 0x02, 0x4b, 0xe3, 0x16, 0x79, 0x76, 0x11, 0x38, 0xa8, 0x59, 0xd5, 0x71, 0x7d,
+	0x27, 0x70, 0x70, 0x61, 0xbd, 0xd2, 0x21, 0x78, 0xb3, 0xa9, 0xa5, 0xd9, 0x67, 0xde, 0x6c, 0x4a,
+	0xcf, 0xe1, 0xae, 0xe2, 0x4b, 0x33, 0xe7, 0xdb, 0x5d, 0x56, 0xd3, 0xd1, 0xd3, 0x87, 0x49, 0x3f,
+	0x9d, 0x64, 0xd7, 0x22, 0x1b, 0xaa, 0x5d, 0xcb, 0xe7, 0x70, 0xcf, 0xb5, 0xf7, 0x89, 0x06, 0x96,
+	0x28, 0xdc, 0x25, 0xea, 0x91, 0xb4, 0x2f, 0xd2, 0x21, 0x9d, 0xe2, 0x33, 0x08, 0x5f, 0x15, 0xb5,
+	0x36, 0xa8, 0xde, 0xbb, 0xb0, 0xdf, 0xa1, 0x61, 0xf8, 0xa9, 0x46, 0x6d, 0xe8, 0x31, 0x0c, 0xae,
+	0x51, 0xb5, 0x51, 0x34, 0x65, 0x37, 0xf6, 0x95, 0xc0, 0xa8, 0x9d, 0xbb, 0xd8, 0x72, 0xf7, 0x46,
+	0x47, 0x10, 0xb4, 0x32, 0xb7, 0x21, 0xf8, 0x0e, 0xb0, 0x51, 0xfc, 0xc7, 0x83, 0x77, 0x7b, 0x0f,
+	0x6f, 0xe1, 0xc1, 0x54, 0x7e, 0x2e, 0x73, 0xc5, 0x17, 0x38, 0x2b, 0x97, 0xb2, 0xa7, 0x23, 0x84,
+	0x43, 0x2c, 0x79, 0x56, 0xe0, 0xc2, 0xaa, 0xf0, 0xd9, 0xe6, 0x73, 0x63, 0xce, 0xfb, 0xd7, 0xdc,
+	0xcb, 0x67, 0x37, 0xbf, 0xa3, 0xbd, 0x9b, 0x75, 0x44, 0x7e, 0xae, 0x23, 0xf2, 0x6b, 0x1d, 0x91,
+	0x6f, 0x7f, 0xa2, 0xbd, 0x0f, 0x4f, 0x72, 0x99, 0x34, 0x37, 0x9a, 0x08, 0x99, 0x76, 0xb7, 0x3a,
+	0x49, 0xfb, 0x82, 0xb3, 0x03, 0x7b, 0xaa, 0x93, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x56,
+	0x21, 0x97, 0x04, 0x03, 0x00, 0x00,
+}
+
+func (m *RaftAttributes) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RaftAttributes) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RaftAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.IsLearner {
+		i--
+		if m.IsLearner {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.PeerUrls) > 0 {
+		for iNdEx := len(m.PeerUrls) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerUrls[iNdEx])
+			copy(dAtA[i:], m.PeerUrls[iNdEx])
+			i = encodeVarintMembership(dAtA, i, uint64(len(m.PeerUrls[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Attributes) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Attributes) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.ClientUrls) > 0 {
+		for iNdEx := len(m.ClientUrls) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ClientUrls[iNdEx])
+			copy(dAtA[i:], m.ClientUrls[iNdEx])
+			i = encodeVarintMembership(dAtA, i, uint64(len(m.ClientUrls[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintMembership(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.MemberAttributes != nil {
+		{
+			size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintMembership(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.RaftAttributes != nil {
+		{
+			size, err := m.RaftAttributes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintMembership(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintMembership(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterVersionSetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterVersionSetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterVersionSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Ver) > 0 {
+		i -= len(m.Ver)
+		copy(dAtA[i:], m.Ver)
+		i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterMemberAttrSetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterMemberAttrSetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterMemberAttrSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.MemberAttributes != nil {
+		{
+			size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintMembership(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Member_ID != 0 {
+		i = encodeVarintMembership(dAtA, i, uint64(m.Member_ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DowngradeInfoSetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DowngradeInfoSetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeInfoSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Ver) > 0 {
+		i -= len(m.Ver)
+		copy(dAtA[i:], m.Ver)
+		i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Enabled {
+		i--
+		if m.Enabled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintMembership(dAtA []byte, offset int, v uint64) int {
+	offset -= sovMembership(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *RaftAttributes) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.PeerUrls) > 0 {
+		for _, s := range m.PeerUrls {
+			l = len(s)
+			n += 1 + l + sovMembership(uint64(l))
+		}
+	}
+	if m.IsLearner {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Attributes) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovMembership(uint64(l))
+	}
+	if len(m.ClientUrls) > 0 {
+		for _, s := range m.ClientUrls {
+			l = len(s)
+			n += 1 + l + sovMembership(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Member) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovMembership(uint64(m.ID))
+	}
+	if m.RaftAttributes != nil {
+		l = m.RaftAttributes.Size()
+		n += 1 + l + sovMembership(uint64(l))
+	}
+	if m.MemberAttributes != nil {
+		l = m.MemberAttributes.Size()
+		n += 1 + l + sovMembership(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ClusterVersionSetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Ver)
+	if l > 0 {
+		n += 1 + l + sovMembership(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ClusterMemberAttrSetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Member_ID != 0 {
+		n += 1 + sovMembership(uint64(m.Member_ID))
+	}
+	if m.MemberAttributes != nil {
+		l = m.MemberAttributes.Size()
+		n += 1 + l + sovMembership(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DowngradeInfoSetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Enabled {
+		n += 2
+	}
+	l = len(m.Ver)
+	if l > 0 {
+		n += 1 + l + sovMembership(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovMembership(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozMembership(x uint64) (n int) {
+	return sovMembership(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RaftAttributes) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RaftAttributes: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RaftAttributes: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IsLearner = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMembership(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Attributes) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Attributes: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMembership(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Member: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftAttributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RaftAttributes == nil {
+				m.RaftAttributes = &RaftAttributes{}
+			}
+			if err := m.RaftAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.MemberAttributes == nil {
+				m.MemberAttributes = &Attributes{}
+			}
+			if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMembership(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ClusterVersionSetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ClusterVersionSetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ClusterVersionSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMembership(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ClusterMemberAttrSetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ClusterMemberAttrSetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ClusterMemberAttrSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Member_ID", wireType)
+			}
+			m.Member_ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Member_ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.MemberAttributes == nil {
+				m.MemberAttributes = &Attributes{}
+			}
+			if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMembership(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DowngradeInfoSetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DowngradeInfoSetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DowngradeInfoSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Enabled = bool(v != 0)
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMembership
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ver = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMembership(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthMembership
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipMembership(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowMembership
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMembership
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthMembership
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupMembership
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthMembership
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthMembership        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowMembership          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupMembership = fmt.Errorf("proto: unexpected end of group")
+)
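
Two helpers above carry the whole encoding scheme: sovMembership computes a varint's byte length from bits.Len64, and encodeVarintMembership writes the varint ending just before the given offset, which is why every MarshalToSizedBuffer walks fields from the highest tag down and fills the buffer back to front. A standalone sketch of the same scheme (sov and encodeVarint are hypothetical local names mirroring the vendored helpers):

package main

import (
	"fmt"
	"math/bits"
)

// sov returns the proto varint length of x: ceil(bitlen/7),
// with x|1 forcing a minimum of one byte for x == 0.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint writes v as a varint ending just before offset and
// returns the new, lower offset, matching the back-to-front style
// of the generated MarshalToSizedBuffer methods.
func encodeVarint(dst []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80) // low 7 bits plus continuation bit
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 10)
	n := encodeVarint(buf, len(buf), 300)
	fmt.Printf("% x\n", buf[n:]) // ac 02: 300 split into two 7-bit groups
}
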
diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto
new file mode 100644
index 0000000..cbccfef
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto
@@ -0,0 +1,58 @@
+syntax = "proto3";
+package membershippb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/versionpb/version.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/membershippb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+message RaftAttributes {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  // peerURLs is the list of peers in the raft cluster.
+  repeated string peer_urls = 1;
+  // isLearner indicates if the member is a raft learner.
+  bool is_learner = 2;
+}
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+message Attributes {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  string name = 1;
+  repeated string client_urls = 2;
+}
+
+message Member {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  uint64 ID = 1;
+  RaftAttributes raft_attributes = 2;
+  Attributes member_attributes = 3;
+}
+
+message ClusterVersionSetRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  string ver = 1;
+}
+
+message ClusterMemberAttrSetRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  uint64 member_ID = 1;
+  Attributes member_attributes = 2;
+}
+
+message DowngradeInfoSetRequest {
+  option (versionpb.etcd_version_msg) = "3.5";
+
+  bool enabled = 1;
+  string ver = 2;
+}
\ No newline at end of file
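
The .proto source above is what the generated membership.pb.go implements: Member nests both attribute messages, with ID as a varint field and the two attributes as length-delimited submessages. A hedged usage sketch, assuming the vendored go.etcd.io/etcd/api/v3/membershippb import path:

package main

import (
	"fmt"

	pb "go.etcd.io/etcd/api/v3/membershippb" // assumed vendored import path
)

func main() {
	m := &pb.Member{
		ID: 0x1234,
		RaftAttributes: &pb.RaftAttributes{
			PeerUrls:  []string{"http://127.0.0.1:2380"},
			IsLearner: true, // raft learner flag
		},
		MemberAttributes: &pb.Attributes{
			Name:       "infra1",
			ClientUrls: []string{"http://127.0.0.1:2379"},
		},
	}
	raw, err := m.Marshal()
	if err != nil {
		panic(err)
	}
	got := &pb.Member{}
	if err := got.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(got.MemberAttributes.Name, got.RaftAttributes.IsLearner) // infra1 true
}
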
diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go
new file mode 100644
index 0000000..2fed424
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go
@@ -0,0 +1,800 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kv.proto
+
+package mvccpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Event_EventType int32
+
+const (
+	PUT    Event_EventType = 0
+	DELETE Event_EventType = 1
+)
+
+var Event_EventType_name = map[int32]string{
+	0: "PUT",
+	1: "DELETE",
+}
+
+var Event_EventType_value = map[string]int32{
+	"PUT":    0,
+	"DELETE": 1,
+}
+
+func (x Event_EventType) String() string {
+	return proto.EnumName(Event_EventType_name, int32(x))
+}
+
+func (Event_EventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{1, 0}
+}
+
+type KeyValue struct {
+	// key is the key in bytes. An empty key is not allowed.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// create_revision is the revision of last creation on this key.
+	CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
+	// mod_revision is the revision of last modification on this key.
+	ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
+	// version is the version of the key. A deletion resets
+	// the version to zero and any modification of the key
+	// increases its version.
+	Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+	// value is the value held by the key, in bytes.
+	Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+	// lease is the ID of the lease that is attached to the key.
+	// When the attached lease expires, the key will be deleted.
+	// If lease is 0, then no lease is attached to the key.
+	Lease                int64    `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *KeyValue) Reset()         { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage()    {}
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{0}
+}
+func (m *KeyValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KeyValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KeyValue.Merge(m, src)
+}
+func (m *KeyValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *KeyValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_KeyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyValue proto.InternalMessageInfo
+
+type Event struct {
+	// type is the kind of event. If type is a PUT, it indicates
+	// new data has been stored to the key. If type is a DELETE,
+	// it indicates the key was deleted.
+	Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
+	// kv holds the KeyValue for the event.
+	// A PUT event contains current kv pair.
+	// A PUT event with kv.Version=1 indicates the creation of a key.
+	// A DELETE/EXPIRE event contains the deleted key with
+	// its modification revision set to the revision of deletion.
+	Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
+	// prev_kv holds the key-value pair before the event happens.
+	PrevKv               *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Event) Reset()         { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage()    {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{1}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
+	proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
+	proto.RegisterType((*Event)(nil), "mvccpb.Event")
+}
+
+func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
+
+var fileDescriptor_2216fe83c9c12408 = []byte{
+	// 327 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
+	0x10, 0xc6, 0xb3, 0x46, 0xa3, 0xff, 0x51, 0xfc, 0x87, 0x45, 0x68, 0x28, 0x34, 0xa4, 0x5e, 0x6a,
+	0x29, 0x24, 0xa0, 0x87, 0xde, 0x4b, 0x73, 0xb2, 0x87, 0x12, 0x6c, 0x0f, 0xbd, 0x48, 0x8c, 0x83,
+	0x84, 0xa8, 0x1b, 0x62, 0xba, 0x90, 0x37, 0xe9, 0xbd, 0xf7, 0x3e, 0x87, 0x47, 0x1f, 0xa1, 0xda,
+	0x17, 0x29, 0x3b, 0x5b, 0xed, 0xa5, 0x97, 0xdd, 0x99, 0xef, 0xfb, 0xb1, 0xf3, 0x0d, 0x0b, 0xad,
+	0x4c, 0xfa, 0x79, 0x21, 0x4a, 0xc1, 0xad, 0x95, 0x4c, 0x92, 0x7c, 0x76, 0xde, 0x5b, 0x88, 0x85,
+	0x20, 0x29, 0x50, 0x95, 0x76, 0xfb, 0x1f, 0x0c, 0x5a, 0x63, 0xac, 0x9e, 0xe3, 0xe5, 0x2b, 0x72,
+	0x1b, 0xcc, 0x0c, 0x2b, 0x87, 0x79, 0x6c, 0xd0, 0x89, 0x54, 0xc9, 0xaf, 0xe0, 0x7f, 0x52, 0x60,
+	0x5c, 0xe2, 0xb4, 0x40, 0x99, 0x6e, 0x52, 0xb1, 0x76, 0x6a, 0x1e, 0x1b, 0x98, 0x51, 0x57, 0xcb,
+	0xd1, 0x8f, 0xca, 0x2f, 0xa1, 0xb3, 0x12, 0xf3, 0x5f, 0xca, 0x24, 0xaa, 0xbd, 0x12, 0xf3, 0x13,
+	0xe2, 0x40, 0x53, 0x62, 0x41, 0x6e, 0x9d, 0xdc, 0x63, 0xcb, 0x7b, 0xd0, 0x90, 0x2a, 0x80, 0xd3,
+	0xa0, 0xc9, 0xba, 0x51, 0xea, 0x12, 0xe3, 0x0d, 0x3a, 0x16, 0xd1, 0xba, 0xe9, 0xbf, 0x33, 0x68,
+	0x84, 0x12, 0xd7, 0x25, 0xbf, 0x81, 0x7a, 0x59, 0xe5, 0x48, 0x71, 0xbb, 0xc3, 0x33, 0x5f, 0xef,
+	0xe9, 0x93, 0xa9, 0xcf, 0x49, 0x95, 0x63, 0x44, 0x10, 0xf7, 0xa0, 0x96, 0x49, 0xca, 0xde, 0x1e,
+	0xda, 0x47, 0xf4, 0xb8, 0x78, 0x54, 0xcb, 0x24, 0xbf, 0x86, 0x66, 0x5e, 0xa0, 0x9c, 0x66, 0x92,
+	0xc2, 0xff, 0x85, 0x59, 0x0a, 0x18, 0xcb, 0xbe, 0x07, 0xff, 0x4e, 0xef, 0xf3, 0x26, 0x98, 0x8f,
+	0x4f, 0x13, 0xdb, 0xe0, 0x00, 0xd6, 0x7d, 0xf8, 0x10, 0x4e, 0x42, 0x9b, 0xdd, 0xdd, 0x6e, 0xf7,
+	0xae, 0xb1, 0xdb, 0xbb, 0xc6, 0xf6, 0xe0, 0xb2, 0xdd, 0xc1, 0x65, 0x9f, 0x07, 0x97, 0xbd, 0x7d,
+	0xb9, 0xc6, 0xcb, 0xc5, 0x42, 0xf8, 0x58, 0x26, 0x73, 0x3f, 0x15, 0x81, 0xba, 0x83, 0x38, 0x4f,
+	0x03, 0x39, 0x0a, 0xf4, 0xac, 0x99, 0x45, 0xdf, 0x32, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x78,
+	0x06, 0x46, 0xf5, 0xc0, 0x01, 0x00, 0x00,
+}
+
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Lease != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Lease))
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Version != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Version))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.ModRevision != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.CreateRevision != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv != nil {
+		{
+			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintKv(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Kv != nil {
+		{
+			size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintKv(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Type != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Type))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
+	offset -= sovKv(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *KeyValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.CreateRevision != 0 {
+		n += 1 + sovKv(uint64(m.CreateRevision))
+	}
+	if m.ModRevision != 0 {
+		n += 1 + sovKv(uint64(m.ModRevision))
+	}
+	if m.Version != 0 {
+		n += 1 + sovKv(uint64(m.Version))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.Lease != 0 {
+		n += 1 + sovKv(uint64(m.Lease))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Event) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Type != 0 {
+		n += 1 + sovKv(uint64(m.Type))
+	}
+	if m.Kv != nil {
+		l = m.Kv.Size()
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.PrevKv != nil {
+		l = m.PrevKv.Size()
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovKv(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozKv(x uint64) (n int) {
+	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+			}
+			m.CreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+			}
+			m.ModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			m.Version = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Version |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			m.Lease = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lease |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipKv(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Event: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= Event_EventType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Kv == nil {
+				m.Kv = &KeyValue{}
+			}
+			if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PrevKv == nil {
+				m.PrevKv = &KeyValue{}
+			}
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipKv(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipKv(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthKv
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupKv
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthKv
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthKv        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowKv          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group")
+)
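
Editor's note: the generated Unmarshal and skipKv routines above all share one primitive, base-128 varint decoding, where each byte contributes seven payload bits and a set high bit means "more bytes follow". A minimal standalone sketch of that loop, with the same overflow and EOF guards but outside any generated type (the function name is illustrative only):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one protobuf varint from data, returning the value and
// the number of bytes consumed. It mirrors the loop used by the generated
// Unmarshal code above.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift // low 7 bits carry payload
		if b < 0x80 {                    // high bit clear: last byte
			return value, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	v, n, _ := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded in two bytes
	fmt.Println(v, n)                           // 300 2
}
```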
diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto
new file mode 100644
index 0000000..a93479c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+package mvccpb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/mvccpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message KeyValue {
+  // key is the key in bytes. An empty key is not allowed.
+  bytes key = 1;
+  // create_revision is the revision of last creation on this key.
+  int64 create_revision = 2;
+  // mod_revision is the revision of last modification on this key.
+  int64 mod_revision = 3;
+  // version is the version of the key. A deletion resets
+  // the version to zero and any modification of the key
+  // increases its version.
+  int64 version = 4;
+  // value is the value held by the key, in bytes.
+  bytes value = 5;
+  // lease is the ID of the lease attached to the key.
+  // When the attached lease expires, the key will be deleted.
+  // If lease is 0, then no lease is attached to the key.
+  int64 lease = 6;
+}
+
+message Event {
+  enum EventType {
+    PUT = 0;
+    DELETE = 1;
+  }
+  // type is the kind of event. If type is a PUT, it indicates
+  // new data has been stored to the key. If type is a DELETE,
+  // it indicates the key was deleted.
+  EventType type = 1;
+  // kv holds the KeyValue for the event.
+  // A PUT event contains current kv pair.
+  // A PUT event with kv.Version=1 indicates the creation of a key.
+  // A DELETE/EXPIRE event contains the deleted key with
+  // its modification revision set to the revision of deletion.
+  KeyValue kv = 2;
+
+  // prev_kv holds the key-value pair before the event happens.
+  KeyValue prev_kv = 3;
+}
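
Editor's note: for reference, a small round-trip sketch using the gogo-generated Marshal/Unmarshal methods on the KeyValue message defined above; the key/value contents are illustrative only:

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/mvccpb"
)

func main() {
	kv := &mvccpb.KeyValue{
		Key:            []byte("foo"),
		Value:          []byte("bar"),
		CreateRevision: 2,
		ModRevision:    2,
		Version:        1, // Version == 1 marks the creation of the key
	}
	data, err := kv.Marshal()
	if err != nil {
		panic(err)
	}
	var out mvccpb.KeyValue
	if err := out.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("%s=%s rev=%d\n", out.Key, out.Value, out.ModRevision)
}
```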
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
rename to vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go
diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go
new file mode 100644
index 0000000..781c73b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go
@@ -0,0 +1,279 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// server-side error
+var (
+	ErrGRPCEmptyKey                = status.Error(codes.InvalidArgument, "etcdserver: key is not provided")
+	ErrGRPCKeyNotFound             = status.Error(codes.InvalidArgument, "etcdserver: key not found")
+	ErrGRPCValueProvided           = status.Error(codes.InvalidArgument, "etcdserver: value is provided")
+	ErrGRPCLeaseProvided           = status.Error(codes.InvalidArgument, "etcdserver: lease is provided")
+	ErrGRPCTooManyOps              = status.Error(codes.InvalidArgument, "etcdserver: too many operations in txn request")
+	ErrGRPCDuplicateKey            = status.Error(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
+	ErrGRPCInvalidClientAPIVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid client api version")
+	ErrGRPCInvalidSortOption       = status.Error(codes.InvalidArgument, "etcdserver: invalid sort option")
+	ErrGRPCCompacted               = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
+	ErrGRPCFutureRev               = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
+	ErrGRPCNoSpace                 = status.Error(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
+
+	ErrGRPCLeaseNotFound    = status.Error(codes.NotFound, "etcdserver: requested lease not found")
+	ErrGRPCLeaseExist       = status.Error(codes.FailedPrecondition, "etcdserver: lease already exists")
+	ErrGRPCLeaseTTLTooLarge = status.Error(codes.OutOfRange, "etcdserver: too large lease TTL")
+
+	ErrGRPCWatchCanceled = status.Error(codes.Canceled, "etcdserver: watch canceled")
+
+	ErrGRPCMemberExist            = status.Error(codes.FailedPrecondition, "etcdserver: member ID already exist")
+	ErrGRPCPeerURLExist           = status.Error(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
+	ErrGRPCMemberNotEnoughStarted = status.Error(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
+	ErrGRPCMemberBadURLs          = status.Error(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
+	ErrGRPCMemberNotFound         = status.Error(codes.NotFound, "etcdserver: member not found")
+	ErrGRPCMemberNotLearner       = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member")
+	ErrGRPCLearnerNotReady        = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader")
+	ErrGRPCTooManyLearners        = status.Error(codes.FailedPrecondition, "etcdserver: too many learner members in cluster")
+	ErrGRPCClusterIDMismatch      = status.Error(codes.FailedPrecondition, "etcdserver: cluster ID mismatch")
+	//revive:disable:var-naming
+	// Deprecated: Please use ErrGRPCClusterIDMismatch.
+	ErrGRPCClusterIdMismatch = ErrGRPCClusterIDMismatch
+	//revive:enable:var-naming
+
+	ErrGRPCRequestTooLarge        = status.Error(codes.InvalidArgument, "etcdserver: request is too large")
+	ErrGRPCRequestTooManyRequests = status.Error(codes.ResourceExhausted, "etcdserver: too many requests")
+
+	ErrGRPCRootUserNotExist     = status.Error(codes.FailedPrecondition, "etcdserver: root user does not exist")
+	ErrGRPCRootRoleNotExist     = status.Error(codes.FailedPrecondition, "etcdserver: root user does not have root role")
+	ErrGRPCUserAlreadyExist     = status.Error(codes.FailedPrecondition, "etcdserver: user name already exists")
+	ErrGRPCUserEmpty            = status.Error(codes.InvalidArgument, "etcdserver: user name is empty")
+	ErrGRPCUserNotFound         = status.Error(codes.FailedPrecondition, "etcdserver: user name not found")
+	ErrGRPCRoleAlreadyExist     = status.Error(codes.FailedPrecondition, "etcdserver: role name already exists")
+	ErrGRPCRoleNotFound         = status.Error(codes.FailedPrecondition, "etcdserver: role name not found")
+	ErrGRPCRoleEmpty            = status.Error(codes.InvalidArgument, "etcdserver: role name is empty")
+	ErrGRPCAuthFailed           = status.Error(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
+	ErrGRPCPermissionNotGiven   = status.Error(codes.InvalidArgument, "etcdserver: permission not given")
+	ErrGRPCPermissionDenied     = status.Error(codes.PermissionDenied, "etcdserver: permission denied")
+	ErrGRPCRoleNotGranted       = status.Error(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
+	ErrGRPCPermissionNotGranted = status.Error(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
+	ErrGRPCAuthNotEnabled       = status.Error(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
+	ErrGRPCInvalidAuthToken     = status.Error(codes.Unauthenticated, "etcdserver: invalid auth token")
+	ErrGRPCInvalidAuthMgmt      = status.Error(codes.InvalidArgument, "etcdserver: invalid auth management")
+	ErrGRPCAuthOldRevision      = status.Error(codes.InvalidArgument, "etcdserver: revision of auth store is old")
+
+	ErrGRPCNoLeader                   = status.Error(codes.Unavailable, "etcdserver: no leader")
+	ErrGRPCNotLeader                  = status.Error(codes.FailedPrecondition, "etcdserver: not leader")
+	ErrGRPCLeaderChanged              = status.Error(codes.Unavailable, "etcdserver: leader changed")
+	ErrGRPCNotCapable                 = status.Error(codes.FailedPrecondition, "etcdserver: not capable")
+	ErrGRPCStopped                    = status.Error(codes.Unavailable, "etcdserver: server stopped")
+	ErrGRPCTimeout                    = status.Error(codes.Unavailable, "etcdserver: request timed out")
+	ErrGRPCTimeoutDueToLeaderFail     = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
+	ErrGRPCTimeoutDueToConnectionLost = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost")
+	ErrGRPCTimeoutWaitAppliedIndex    = status.Error(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long")
+	ErrGRPCUnhealthy                  = status.Error(codes.Unavailable, "etcdserver: unhealthy cluster")
+	ErrGRPCCorrupt                    = status.Error(codes.DataLoss, "etcdserver: corrupt cluster")
+	ErrGRPCNotSupportedForLearner     = status.Error(codes.FailedPrecondition, "etcdserver: rpc not supported for learner")
+	ErrGRPCBadLeaderTransferee        = status.Error(codes.FailedPrecondition, "etcdserver: bad leader transferee")
+
+	ErrGRPCWrongDowngradeVersionFormat   = status.Error(codes.InvalidArgument, "etcdserver: wrong downgrade target version format")
+	ErrGRPCInvalidDowngradeTargetVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid downgrade target version")
+	ErrGRPCClusterVersionUnavailable     = status.Error(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade")
+	ErrGRPCDowngradeInProcess            = status.Error(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress")
+	ErrGRPCNoInflightDowngrade           = status.Error(codes.FailedPrecondition, "etcdserver: no inflight downgrade job")
+
+	ErrGRPCCanceled         = status.Error(codes.Canceled, "etcdserver: request canceled")
+	ErrGRPCDeadlineExceeded = status.Error(codes.DeadlineExceeded, "etcdserver: context deadline exceeded")
+
+	errStringToError = map[string]error{
+		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
+		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
+		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
+		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
+
+		ErrorDesc(ErrGRPCTooManyOps):        ErrGRPCTooManyOps,
+		ErrorDesc(ErrGRPCDuplicateKey):      ErrGRPCDuplicateKey,
+		ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption,
+		ErrorDesc(ErrGRPCCompacted):         ErrGRPCCompacted,
+		ErrorDesc(ErrGRPCFutureRev):         ErrGRPCFutureRev,
+		ErrorDesc(ErrGRPCNoSpace):           ErrGRPCNoSpace,
+
+		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
+		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
+		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
+
+		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
+		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
+		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
+		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
+		ErrorDesc(ErrGRPCMemberNotLearner):       ErrGRPCMemberNotLearner,
+		ErrorDesc(ErrGRPCLearnerNotReady):        ErrGRPCLearnerNotReady,
+		ErrorDesc(ErrGRPCTooManyLearners):        ErrGRPCTooManyLearners,
+		ErrorDesc(ErrGRPCClusterIDMismatch):      ErrGRPCClusterIDMismatch,
+
+		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
+		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
+
+		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
+		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
+		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
+		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
+		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
+		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
+		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
+		ErrorDesc(ErrGRPCRoleEmpty):            ErrGRPCRoleEmpty,
+		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
+		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
+		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
+		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
+		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
+		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
+		ErrorDesc(ErrGRPCAuthOldRevision):      ErrGRPCAuthOldRevision,
+
+		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
+		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
+		ErrorDesc(ErrGRPCLeaderChanged):              ErrGRPCLeaderChanged,
+		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
+		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
+		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
+		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
+		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
+		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
+		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
+		ErrorDesc(ErrGRPCNotSupportedForLearner):     ErrGRPCNotSupportedForLearner,
+		ErrorDesc(ErrGRPCBadLeaderTransferee):        ErrGRPCBadLeaderTransferee,
+
+		ErrorDesc(ErrGRPCClusterVersionUnavailable):     ErrGRPCClusterVersionUnavailable,
+		ErrorDesc(ErrGRPCWrongDowngradeVersionFormat):   ErrGRPCWrongDowngradeVersionFormat,
+		ErrorDesc(ErrGRPCInvalidDowngradeTargetVersion): ErrGRPCInvalidDowngradeTargetVersion,
+		ErrorDesc(ErrGRPCDowngradeInProcess):            ErrGRPCDowngradeInProcess,
+		ErrorDesc(ErrGRPCNoInflightDowngrade):           ErrGRPCNoInflightDowngrade,
+	}
+)
+
+// client-side error
+var (
+	ErrEmptyKey          = Error(ErrGRPCEmptyKey)
+	ErrKeyNotFound       = Error(ErrGRPCKeyNotFound)
+	ErrValueProvided     = Error(ErrGRPCValueProvided)
+	ErrLeaseProvided     = Error(ErrGRPCLeaseProvided)
+	ErrTooManyOps        = Error(ErrGRPCTooManyOps)
+	ErrDuplicateKey      = Error(ErrGRPCDuplicateKey)
+	ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption)
+	ErrCompacted         = Error(ErrGRPCCompacted)
+	ErrFutureRev         = Error(ErrGRPCFutureRev)
+	ErrNoSpace           = Error(ErrGRPCNoSpace)
+
+	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
+	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
+	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
+
+	ErrMemberExist            = Error(ErrGRPCMemberExist)
+	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
+	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
+	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
+	ErrMemberNotLearner       = Error(ErrGRPCMemberNotLearner)
+	ErrMemberLearnerNotReady  = Error(ErrGRPCLearnerNotReady)
+	ErrTooManyLearners        = Error(ErrGRPCTooManyLearners)
+
+	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
+
+	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
+	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
+	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
+	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
+	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
+	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
+	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
+	ErrRoleEmpty            = Error(ErrGRPCRoleEmpty)
+	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
+	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
+	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
+	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
+	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
+	ErrAuthOldRevision      = Error(ErrGRPCAuthOldRevision)
+	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
+	ErrClusterIDMismatch    = Error(ErrGRPCClusterIDMismatch)
+	//revive:disable:var-naming
+	// Deprecated: Please use ErrClusterIDMismatch.
+	ErrClusterIdMismatch = ErrClusterIDMismatch
+	//revive:enable:var-naming
+
+	ErrNoLeader                   = Error(ErrGRPCNoLeader)
+	ErrNotLeader                  = Error(ErrGRPCNotLeader)
+	ErrLeaderChanged              = Error(ErrGRPCLeaderChanged)
+	ErrNotCapable                 = Error(ErrGRPCNotCapable)
+	ErrStopped                    = Error(ErrGRPCStopped)
+	ErrTimeout                    = Error(ErrGRPCTimeout)
+	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
+	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
+	ErrTimeoutWaitAppliedIndex    = Error(ErrGRPCTimeoutWaitAppliedIndex)
+	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
+	ErrCorrupt                    = Error(ErrGRPCCorrupt)
+	ErrBadLeaderTransferee        = Error(ErrGRPCBadLeaderTransferee)
+
+	ErrClusterVersionUnavailable     = Error(ErrGRPCClusterVersionUnavailable)
+	ErrWrongDowngradeVersionFormat   = Error(ErrGRPCWrongDowngradeVersionFormat)
+	ErrInvalidDowngradeTargetVersion = Error(ErrGRPCInvalidDowngradeTargetVersion)
+	ErrDowngradeInProcess            = Error(ErrGRPCDowngradeInProcess)
+	ErrNoInflightDowngrade           = Error(ErrGRPCNoInflightDowngrade)
+)
+
+// EtcdError defines gRPC server errors.
+// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
+type EtcdError struct {
+	code codes.Code
+	desc string
+}
+
+// Code returns grpc/codes.Code.
+// TODO: define clientv3/codes.Code.
+func (e EtcdError) Code() codes.Code {
+	return e.code
+}
+
+func (e EtcdError) Error() string {
+	return e.desc
+}
+
+func Error(err error) error {
+	if err == nil {
+		return nil
+	}
+	verr, ok := errStringToError[ErrorDesc(err)]
+	if !ok { // not gRPC error
+		return err
+	}
+	ev, ok := status.FromError(verr)
+	var desc string
+	if ok {
+		desc = ev.Message()
+	} else {
+		desc = verr.Error()
+	}
+	return EtcdError{code: ev.Code(), desc: desc}
+}
+
+func ErrorDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
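
Editor's note: a hedged sketch of the intended client-side flow. rpctypes.Error normalizes a gRPC status error whose description matches one of the server-side sentinels into the corresponding EtcdError, which is a comparable struct and can therefore be matched against the client-side values. The classify helper is illustrative, not part of the package:

```go
package main

import (
	"errors"
	"fmt"

	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
)

func classify(err error) string {
	if err == nil {
		return "ok"
	}
	// rpctypes.Error returns the matching EtcdError for known server-side
	// errors and passes anything else through unchanged.
	switch rpctypes.Error(err) {
	case rpctypes.ErrEmptyKey:
		return "empty key"
	case rpctypes.ErrCompacted:
		return "revision compacted"
	default:
		return "unclassified: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(rpctypes.ErrGRPCEmptyKey)) // empty key
	fmt.Println(classify(errors.New("boom")))       // unclassified: boom
}
```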
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
rename to vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go
diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go
new file mode 100644
index 0000000..e5770af
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	TokenFieldNameGRPC    = "token"
+	TokenFieldNameSwagger = "authorization"
+)
+
+// TokenFieldNameGRPCKey is used as a key of context to store token.
+type TokenFieldNameGRPCKey struct{}
diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go
new file mode 100644
index 0000000..9e7bc64
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go
@@ -0,0 +1,85 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version implements etcd version parsing and contains latest version
+// information.
+package version
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+var (
+	// MinClusterVersion is the min cluster version this etcd binary is compatible with.
+	MinClusterVersion = "3.0.0"
+	Version           = "3.6.5"
+	APIVersion        = "unknown"
+
+	// Git SHA Value will be set during build
+	GitSHA = "Not provided (use ./build instead of go build)"
+)
+
+// Get all constant versions defined in a centralized place.
+var (
+	V3_0 = semver.Version{Major: 3, Minor: 0}
+	V3_1 = semver.Version{Major: 3, Minor: 1}
+	V3_2 = semver.Version{Major: 3, Minor: 2}
+	V3_3 = semver.Version{Major: 3, Minor: 3}
+	V3_4 = semver.Version{Major: 3, Minor: 4}
+	V3_5 = semver.Version{Major: 3, Minor: 5}
+	V3_6 = semver.Version{Major: 3, Minor: 6}
+	V3_7 = semver.Version{Major: 3, Minor: 7}
+	V4_0 = semver.Version{Major: 4, Minor: 0}
+
+	// AllVersions keeps all the versions in ascending order.
+	AllVersions = []semver.Version{V3_0, V3_1, V3_2, V3_3, V3_4, V3_5, V3_6, V3_7, V4_0}
+)
+
+func init() {
+	ver, err := semver.NewVersion(Version)
+	if err == nil {
+		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+	}
+}
+
+type Versions struct {
+	Server  string `json:"etcdserver"`
+	Cluster string `json:"etcdcluster"`
+	Storage string `json:"storage"`
+	// TODO: raft state machine version
+}
+
+// Cluster only keeps the major.minor.
+func Cluster(v string) string {
+	vs := strings.Split(v, ".")
+	if len(vs) <= 2 {
+		return v
+	}
+	return fmt.Sprintf("%s.%s", vs[0], vs[1])
+}
+
+func Compare(ver1, ver2 semver.Version) int {
+	return ver1.Compare(ver2)
+}
+
+func LessThan(ver1, ver2 semver.Version) bool {
+	return ver1.LessThan(ver2)
+}
+
+func Equal(ver1, ver2 semver.Version) bool {
+	return ver1.Equal(ver2)
+}
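
Editor's note: a short usage sketch for the helpers above, trimming a cluster version string to major.minor and comparing against MinClusterVersion; it assumes only this package and go-semver:

```go
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"

	"go.etcd.io/etcd/api/v3/version"
)

func main() {
	fmt.Println(version.Cluster("3.6.5")) // "3.6"

	min := *semver.New(version.MinClusterVersion)
	if version.LessThan(min, version.V3_6) {
		fmt.Println("3.6 clusters satisfy the minimum cluster version")
	}
}
```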
diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go
new file mode 100644
index 0000000..71c74eb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go
@@ -0,0 +1,91 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: version.proto
+
+package versionpb
+
+import (
+	fmt "fmt"
+	math "math"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+var E_EtcdVersionMsg = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         50000,
+	Name:          "versionpb.etcd_version_msg",
+	Tag:           "bytes,50000,opt,name=etcd_version_msg",
+	Filename:      "version.proto",
+}
+
+var E_EtcdVersionField = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         50001,
+	Name:          "versionpb.etcd_version_field",
+	Tag:           "bytes,50001,opt,name=etcd_version_field",
+	Filename:      "version.proto",
+}
+
+var E_EtcdVersionEnum = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         50002,
+	Name:          "versionpb.etcd_version_enum",
+	Tag:           "bytes,50002,opt,name=etcd_version_enum",
+	Filename:      "version.proto",
+}
+
+var E_EtcdVersionEnumValue = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumValueOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         50003,
+	Name:          "versionpb.etcd_version_enum_value",
+	Tag:           "bytes,50003,opt,name=etcd_version_enum_value",
+	Filename:      "version.proto",
+}
+
+func init() {
+	proto.RegisterExtension(E_EtcdVersionMsg)
+	proto.RegisterExtension(E_EtcdVersionField)
+	proto.RegisterExtension(E_EtcdVersionEnum)
+	proto.RegisterExtension(E_EtcdVersionEnumValue)
+}
+
+func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) }
+
+var fileDescriptor_7d2c07d79758f814 = []byte{
+	// 284 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xd1, 0xc1, 0x4a, 0xc3, 0x30,
+	0x18, 0x07, 0x70, 0x83, 0x20, 0x2c, 0xa0, 0xce, 0x30, 0x50, 0x86, 0xd6, 0x7a, 0xf3, 0x94, 0x80,
+	0xbb, 0xed, 0x28, 0xe8, 0xad, 0x2a, 0x1e, 0x76, 0x10, 0xa4, 0xb4, 0x6b, 0x16, 0x02, 0x6d, 0xbf,
+	0xd0, 0xb4, 0x7d, 0x04, 0xd9, 0x23, 0xf8, 0x48, 0x1e, 0xa7, 0xbe, 0x80, 0xd4, 0x17, 0x91, 0xa4,
+	0xa9, 0xac, 0xd6, 0x53, 0xfb, 0x7d, 0xdf, 0xff, 0xff, 0xeb, 0xa1, 0x78, 0xbf, 0xe6, 0x85, 0x96,
+	0x90, 0x53, 0x55, 0x40, 0x09, 0x64, 0xe4, 0x46, 0x15, 0x4f, 0x27, 0x02, 0x04, 0xd8, 0x2d, 0x33,
+	0x6f, 0x6d, 0x60, 0xea, 0x0b, 0x00, 0x91, 0x72, 0x66, 0xa7, 0xb8, 0x5a, 0xb1, 0x84, 0xeb, 0x65,
+	0x21, 0x55, 0x09, 0x45, 0x9b, 0x98, 0xdf, 0xe1, 0x31, 0x2f, 0x97, 0x49, 0xe8, 0xa4, 0x30, 0xd3,
+	0x82, 0x9c, 0xd3, 0xb6, 0x46, 0xbb, 0x1a, 0x0d, 0xb8, 0xd6, 0x91, 0xe0, 0xf7, 0xaa, 0x94, 0x90,
+	0xeb, 0x93, 0xcd, 0xcb, 0xae, 0x8f, 0x2e, 0x47, 0x8f, 0x07, 0xa6, 0xba, 0x68, 0x9b, 0x81, 0x16,
+	0x6b, 0x84, 0xe6, 0x0f, 0x98, 0xf4, 0xbc, 0x95, 0xe4, 0x69, 0x42, 0xce, 0x06, 0xe2, 0xad, 0xd9,
+	0x77, 0xde, 0xbb, 0xf3, 0xc6, 0x5b, 0x9e, 0x0d, 0x18, 0x31, 0xc0, 0x47, 0x3d, 0x91, 0xe7, 0x55,
+	0x46, 0x4e, 0x07, 0xe0, 0x4d, 0x5e, 0x65, 0x9d, 0xf7, 0xe1, 0xbc, 0xc3, 0x2d, 0xcf, 0xdc, 0x0d,
+	0xf7, 0x8c, 0x8f, 0x07, 0x5c, 0x58, 0x47, 0x69, 0xc5, 0xc9, 0xc5, 0xbf, 0xe8, 0xc2, 0xdc, 0x3a,
+	0xf9, 0xd3, 0xc9, 0x93, 0x3f, 0xb2, 0x0d, 0xad, 0x11, 0xba, 0xbe, 0x7a, 0x6b, 0x3c, 0xb4, 0x69,
+	0x3c, 0xf4, 0xd5, 0x78, 0xe8, 0xf5, 0xdb, 0xdb, 0x79, 0xf2, 0x05, 0x50, 0x93, 0xa6, 0x12, 0x98,
+	0x79, 0xb2, 0x48, 0x49, 0x56, 0xcf, 0xd8, 0xef, 0xbf, 0x8b, 0xf7, 0xec, 0xf7, 0x66, 0x3f, 0x01,
+	0x00, 0x00, 0xff, 0xff, 0xe8, 0x02, 0x15, 0xc0, 0xde, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto
new file mode 100644
index 0000000..c81b2f5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+package versionpb;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/versionpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// Indicates etcd version that introduced the message, used to determine minimal etcd version required to interpret wal that includes this message.
+extend google.protobuf.MessageOptions {
+  optional string etcd_version_msg = 50000;
+}
+
+// Indicates etcd version that introduced the field, used to determine minimal etcd version required to interpret wal that sets this field.
+extend google.protobuf.FieldOptions {
+  optional string etcd_version_field = 50001;
+}
+
+// Indicates etcd version that introduced the enum, used to determine minimal etcd version required to interpret wal that uses this enum.
+extend google.protobuf.EnumOptions {
+  optional string etcd_version_enum = 50002;
+}
+
+// Indicates etcd version that introduced the enum value, used to determine minimal etcd version required to interpret wal that sets this enum value.
+extend google.protobuf.EnumValueOptions {
+  optional string etcd_version_enum_value = 50003;
+}
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go
new file mode 100644
index 0000000..42221f4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package fileutil
+
+import "os"
+
+const (
+	// PrivateDirMode grants the owner permission to make/remove files inside the directory.
+	PrivateDirMode = 0o700
+)
+
+// OpenDir opens a directory for syncing.
+func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go
new file mode 100644
index 0000000..0cb2280
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+const (
+	// PrivateDirMode grants the owner permission to make/remove files inside the directory.
+	PrivateDirMode = 0o777
+)
+
+// OpenDir opens a directory on Windows with write access for syncing.
+func OpenDir(path string) (*os.File, error) {
+	fd, err := openDir(path)
+	if err != nil {
+		return nil, err
+	}
+	return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDir(path string) (fd syscall.Handle, err error) {
+	if len(path) == 0 {
+		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return syscall.InvalidHandle, err
+	}
+	access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+	createmode := uint32(syscall.OPEN_EXISTING)
+	fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+	return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}
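
Editor's note: both OpenDir variants exist so callers can fsync a directory after creating or renaming files inside it, making the directory entry itself durable. A sketch of that pattern (syncDir is a hypothetical helper, not part of the package):

```go
package main

import (
	"fmt"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

// syncDir opens dir and fsyncs it so that recent entry changes
// (file creations, renames) survive a crash.
func syncDir(dir string) error {
	d, err := fileutil.OpenDir(dir)
	if err != nil {
		return err
	}
	defer d.Close()
	return d.Sync()
}

func main() {
	if err := syncDir(os.TempDir()); err != nil {
		fmt.Println("sync failed:", err)
	}
}
```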
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go
new file mode 100644
index 0000000..69dde5a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
new file mode 100644
index 0000000..5524888
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"bufio"
+	"io"
+	"io/fs"
+	"os"
+)
+
+// FileReader is a wrapper of io.Reader. It also provides file info.
+type FileReader interface {
+	io.Reader
+	FileInfo() (fs.FileInfo, error)
+}
+
+type fileReader struct {
+	*os.File
+}
+
+func NewFileReader(f *os.File) FileReader {
+	return &fileReader{f}
+}
+
+func (fr *fileReader) FileInfo() (fs.FileInfo, error) {
+	return fr.Stat()
+}
+
+// FileBufReader is a wrapper of bufio.Reader. It also provides file info.
+type FileBufReader struct {
+	*bufio.Reader
+	fi fs.FileInfo
+}
+
+func NewFileBufReader(fr FileReader) *FileBufReader {
+	bufReader := bufio.NewReader(fr)
+	fi, err := fr.FileInfo()
+	if err != nil {
+		// This should never happen.
+		panic(err)
+	}
+	return &FileBufReader{bufReader, fi}
+}
+
+func (fbr *FileBufReader) FileInfo() fs.FileInfo {
+	return fbr.fi
+}
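
Editor's note: a small usage sketch for the wrappers above, showing buffered reads that still expose the underlying file's metadata. The input path is hypothetical:

```go
package main

import (
	"fmt"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	f, err := os.Open("/etc/hostname") // hypothetical input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fbr := fileutil.NewFileBufReader(fileutil.NewFileReader(f))
	line, _ := fbr.ReadString('\n') // buffered read via embedded *bufio.Reader
	fmt.Printf("%q (%d bytes on disk)\n", line, fbr.FileInfo().Size())
}
```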
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
new file mode 100644
index 0000000..36394a3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
@@ -0,0 +1,183 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+
+	"go.uber.org/zap"
+
+	"go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+const (
+	// PrivateFileMode grants the owner permission to read/write a file.
+	PrivateFileMode = 0o600
+)
+
+// IsDirWriteable checks if dir is writable by writing and then removing a file
+// inside it. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+	f, err := filepath.Abs(filepath.Join(dir, ".touch"))
+	if err != nil {
+		return err
+	}
+	if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+		return err
+	}
+	return os.Remove(f)
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(lg *zap.Logger, dir string) error {
+	verify.Assert(lg != nil, "nil log isn't allowed")
+	// If path is already a directory, MkdirAll does nothing and returns nil, so,
+	// first check if dir exists with an expected permission mode.
+	if Exist(dir) {
+		err := CheckDirPermission(dir, PrivateDirMode)
+		if err != nil {
+			lg.Warn("check file permission", zap.Error(err))
+		}
+	} else {
+		err := os.MkdirAll(dir, PrivateDirMode)
+		if err != nil {
+			// if mkdirAll("a/text") and "text" is not
+			// a directory, this will return syscall.ENOTDIR
+			return err
+		}
+	}
+
+	return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory is not empty.
+func CreateDirAll(lg *zap.Logger, dir string) error {
+	err := TouchDirAll(lg, dir)
+	if err == nil {
+		var ns []string
+		ns, err = ReadDir(dir)
+		if err != nil {
+			return err
+		}
+		if len(ns) != 0 {
+			err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+		}
+	}
+	return err
+}
+
+// Exist returns true if a file or directory exists.
+func Exist(name string) bool {
+	_, err := os.Stat(name)
+	return err == nil
+}
+
+// DirEmpty returns true if the directory is empty and accessible.
+func DirEmpty(name string) bool {
+	ns, err := ReadDir(name)
+	return len(ns) == 0 && err == nil
+}
+
+// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
+// shorten the length of the file.
+func ZeroToEnd(f *os.File) error {
+	// TODO: support FALLOC_FL_ZERO_RANGE
+	off, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	lenf, lerr := f.Seek(0, io.SeekEnd)
+	if lerr != nil {
+		return lerr
+	}
+	if err = f.Truncate(off); err != nil {
+		return err
+	}
+	// make sure blocks remain allocated
+	if err = Preallocate(f, lenf, true); err != nil {
+		return err
+	}
+	_, err = f.Seek(off, io.SeekStart)
+	return err
+}
+
+// CheckDirPermission checks the permission on an existing dir.
+// Returns an error if the dir does not exist or exists with a different permission than specified.
+func CheckDirPermission(dir string, perm os.FileMode) error {
+	if !Exist(dir) {
+		return fmt.Errorf("directory %q empty, cannot check permission", dir)
+	}
+	// check the existing permission on the directory
+	dirInfo, err := os.Stat(dir)
+	if err != nil {
+		return err
+	}
+	dirMode := dirInfo.Mode().Perm()
+	if dirMode != perm {
+		err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
+		return err
+	}
+	return nil
+}
+
+// RemoveMatchFile deletes every file in an existing dir for which matchFunc returns true.
+// Returns an error if the dir does not exist or removing any matched file fails.
+func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) bool) error {
+	if lg == nil {
+		lg = zap.NewNop()
+	}
+	if !Exist(dir) {
+		return fmt.Errorf("directory %s does not exist", dir)
+	}
+	fileNames, err := ReadDir(dir)
+	if err != nil {
+		return err
+	}
+	var removeFailedFiles []string
+	for _, fileName := range fileNames {
+		if matchFunc(fileName) {
+			file := filepath.Join(dir, fileName)
+			if err = os.Remove(file); err != nil {
+				removeFailedFiles = append(removeFailedFiles, fileName)
+				lg.Error("remove file failed",
+					zap.String("file", file),
+					zap.Error(err))
+			}
+		}
+	}
+	if len(removeFailedFiles) != 0 {
+		return fmt.Errorf("remove file(s) %v error", removeFailedFiles)
+	}
+	return nil
+}
+
+// ListFiles lists the files under dir for which matchFunc returns true.
+// Returns an error if the dir does not exist.
+func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) {
+	var files []string
+	err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
+		if matchFunc(path) {
+			files = append(files, path)
+		}
+		return nil
+	})
+	return files, err
+}
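
Editor's note: a hedged usage sketch for the directory helpers above, assuming a throwaway path — create (or validate) a 0700 data directory and verify it is writable:

```go
package main

import (
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	lg := zap.NewExample()
	dir := "/tmp/demo-data" // hypothetical path, adjust as needed
	if err := fileutil.TouchDirAll(lg, dir); err != nil {
		panic(err)
	}
	fmt.Println("writable:", fileutil.IsDirWriteable(dir) == nil)
}
```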
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go
new file mode 100644
index 0000000..dd2fa54
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go
@@ -0,0 +1,24 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+)
+
+var ErrLocked = errors.New("fileutil: file already locked")
+
+type LockedFile struct{ *os.File }
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go
new file mode 100644
index 0000000..178c987
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !plan9 && !solaris
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+		f.Close()
+		if errors.Is(err, syscall.EWOULDBLOCK) {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go
new file mode 100644
index 0000000..609ac39
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package fileutil
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// This used to call syscall.Flock() but that call fails with EBADF on NFS.
+// An alternative is lockf() which works on NFS but that call lets a process lock
+// the same file twice. Instead, use Linux's non-standard open file descriptor
+// locks which will block if the process already holds the file lock.
+
+var (
+	wrlck = syscall.Flock_t{
+		Type:   syscall.F_WRLCK,
+		Whence: int16(io.SeekStart),
+		Start:  0,
+		Len:    0,
+	}
+
+	linuxTryLockFile = flockTryLockFile
+	linuxLockFile    = flockLockFile
+)
+
+func init() {
+	// use open file descriptor locks if the system supports it
+	getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
+	if err := syscall.FcntlFlock(0, unix.F_OFD_GETLK, &getlk); err == nil {
+		linuxTryLockFile = ofdTryLockFile
+		linuxLockFile = ofdLockFile
+	}
+}
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return linuxTryLockFile(path, flag, perm)
+}
+
+func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%w)", path, err)
+	}
+
+	flock := wrlck
+	if err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &flock); err != nil {
+		f.Close()
+		if errors.Is(err, syscall.EWOULDBLOCK) {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return linuxLockFile(path, flag, perm)
+}
+
+func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, fmt.Errorf("ofdLockFile failed to open %q (%w)", path, err)
+	}
+
+	flock := wrlck
+	err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLKW, &flock)
+	if err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_plan9.go
new file mode 100644
index 0000000..fee6a7c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_plan9.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, ErrLocked
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	if err := os.Chmod(path, syscall.DMEXCL|PrivateFileMode); err != nil {
+		return nil, err
+	}
+	for {
+		f, err := os.OpenFile(path, flag, perm)
+		if err == nil {
+			return &LockedFile{f}, nil
+		}
+		time.Sleep(10 * time.Millisecond)
+	}
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go
new file mode 100644
index 0000000..2e892fe
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build solaris
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	lock.Pid = 0
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
+		f.Close()
+		if err == syscall.EAGAIN {
+			err = ErrLocked
+		}
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	var lock syscall.Flock_t
+	lock.Start = 0
+	lock.Len = 0
+	lock.Pid = 0
+	lock.Type = syscall.F_WRLCK
+	lock.Whence = 0
+	f, err := os.OpenFile(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go
new file mode 100644
index 0000000..05db536
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !plan9 && !solaris && !linux
+
+package fileutil
+
+import (
+	"os"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return flockTryLockFile(path, flag, perm)
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	return flockLockFile(path, flag, perm)
+}
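
Editor's note: across all of these platform variants the exported surface is the same pair, TryLockFile and LockFile. A usage sketch for the non-blocking form; the lock path is hypothetical:

```go
package main

import (
	"errors"
	"fmt"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	lf, err := fileutil.TryLockFile("/tmp/demo.lock",
		os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if errors.Is(err, fileutil.ErrLocked) {
		fmt.Println("another process holds the lock")
		return
	}
	if err != nil {
		panic(err)
	}
	// LockedFile embeds *os.File; closing it releases the lock.
	defer lf.Close()
	fmt.Println("lock acquired")
}
```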
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go
new file mode 100644
index 0000000..51010bd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go
@@ -0,0 +1,97 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package fileutil
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/windows"
+)
+
+var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file")
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := open(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+	f, err := open(path, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	if err := lockFile(windows.Handle(f.Fd()), 0); err != nil {
+		f.Close()
+		return nil, err
+	}
+	return &LockedFile{f}, nil
+}
+
+func open(path string, flag int, perm os.FileMode) (*os.File, error) {
+	if path == "" {
+		return nil, errors.New("cannot open empty filename")
+	}
+	var access uint32
+	switch flag {
+	case syscall.O_RDONLY:
+		access = syscall.GENERIC_READ
+	case syscall.O_WRONLY:
+		access = syscall.GENERIC_WRITE
+	case syscall.O_RDWR:
+		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+	case syscall.O_WRONLY | syscall.O_CREAT:
+		access = syscall.GENERIC_ALL
+	default:
+		panic(fmt.Errorf("flag %v is not supported", flag))
+	}
+	fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
+		access,
+		syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+		nil,
+		syscall.OPEN_ALWAYS,
+		syscall.FILE_ATTRIBUTE_NORMAL,
+		0)
+	if err != nil {
+		return nil, err
+	}
+	return os.NewFile(uintptr(fd), path), nil
+}
+
+func lockFile(fd windows.Handle, flags uint32) error {
+	if fd == windows.InvalidHandle {
+		return nil
+	}
+	err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{})
+	if err == nil {
+		return nil
+	} else if err.Error() == errLocked.Error() {
+		return ErrLocked
+	} else if err != windows.ERROR_LOCK_VIOLATION {
+		return err
+	}
+	return nil
+}
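
Not part of the patch: a minimal usage sketch of the lock helpers vendored above, with a hypothetical lock path. TryLockFile returns fileutil.ErrLocked immediately when another process holds the lock, while LockFile blocks until the lock can be acquired.

package main

import (
	"errors"
	"fmt"
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	// Non-blocking acquire; /tmp/demo.lock is a hypothetical path.
	lf, err := fileutil.TryLockFile("/tmp/demo.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err != nil {
		if errors.Is(err, fileutil.ErrLocked) {
			fmt.Println("lock is held by another process")
			return
		}
		panic(err)
	}
	// LockedFile embeds *os.File; closing it releases the lock.
	defer lf.Close()
	fmt.Println("lock acquired:", lf.Name())
}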
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go
new file mode 100644
index 0000000..aadbff7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"io"
+	"os"
+)
+
+// Preallocate tries to allocate the space for the given file. This

+// operation is only supported on darwin and linux by a few
+// filesystems (APFS, btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+	if sizeInBytes == 0 {
+		// fallocate will return EINVAL if length is 0; skip
+		return nil
+	}
+	if extendFile {
+		return preallocExtend(f, sizeInBytes)
+	}
+	return preallocFixed(f, sizeInBytes)
+}
+
+func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
+	curOff, err := f.Seek(0, io.SeekCurrent)
+	if err != nil {
+		return err
+	}
+	size, err := f.Seek(sizeInBytes, io.SeekEnd)
+	if err != nil {
+		return err
+	}
+	if _, err = f.Seek(curOff, io.SeekStart); err != nil {
+		return err
+	}
+	if sizeInBytes > size {
+		return nil
+	}
+	return f.Truncate(sizeInBytes)
+}
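
As an illustrative aside (not part of the diff), this is how a caller might preallocate a file with the helper above; the 64 MiB size and path are hypothetical. With extendFile=true the file grows to the requested size; on filesystems without native preallocation the platform-specific helpers fall back to seek-and-truncate or become no-ops.

package main

import (
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	f, err := os.Create("/tmp/prealloc.demo") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Reserve 64 MiB up front and extend the file to that size.
	if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
		panic(err)
	}
}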
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go
new file mode 100644
index 0000000..72430ec
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	if err := preallocFixed(f, sizeInBytes); err != nil {
+		return err
+	}
+	return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+	// allocate all requested space or no space at all
+	// TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
+	fstore := &unix.Fstore_t{
+		Flags:   unix.F_ALLOCATEALL,
+		Posmode: unix.F_PEOFPOSMODE,
+		Length:  sizeInBytes,
+	}
+	err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, fstore)
+	if err == nil || errors.Is(err, unix.ENOTSUP) {
+		return nil
+	}
+
+	// wrong argument to fallocate syscall
+	if err == unix.EINVAL {
+		// filesystem "st_blocks" are allocated in the units of
+		// "Allocation Block Size" (run "diskutil info /" command)
+		var stat syscall.Stat_t
+		syscall.Fstat(int(f.Fd()), &stat)
+
+		// syscall.Statfs_t.Bsize is the "optimal transfer block size"
+		// and matches the 4,096-byte allocation block size used by
+		// recent macOS filesystems
+		var statfs syscall.Statfs_t
+		syscall.Fstatfs(int(f.Fd()), &statfs)
+		blockSize := int64(statfs.Bsize)
+
+		if stat.Blocks*blockSize >= sizeInBytes {
+			// enough blocks are already allocated
+			return nil
+		}
+	}
+	return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go
new file mode 100644
index 0000000..b0a8166
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package fileutil
+
+import (
+	"errors"
+	"os"
+	"syscall"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	// use mode = 0 to change size
+	err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
+	if err != nil {
+		var errno syscall.Errno
+		// not supported; fallback
+		// fallocate EINTRs frequently in some environments; fallback
+		if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+			return preallocExtendTrunc(f, sizeInBytes)
+		}
+	}
+	return err
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+	// use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
+	err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
+	if err != nil {
+		var errno syscall.Errno
+		// treat not supported as nil error
+		if errors.As(err, &errno) && errno == syscall.ENOTSUP {
+			return nil
+		}
+	}
+	return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go
new file mode 100644
index 0000000..e7fd937
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux && !darwin
+
+package fileutil
+
+import "os"
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+	return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
new file mode 100644
index 0000000..026ea03
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
@@ -0,0 +1,120 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"go.uber.org/zap"
+)
+
+func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
+	return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true)
+}
+
+func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+	doneC := make(chan struct{})
+	errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true)
+	return doneC, errC
+}
+
+func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+	doneC := make(chan struct{})
+	errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false)
+	return doneC, errC
+}
+
+// purgeFile is the internal implementation of PurgeFile; it posts purged
+// file names to purgec if purgec is non-nil. If donec is non-nil, the
+// function closes it on exit to notify the caller.
+func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error {
+	if lg == nil {
+		lg = zap.NewNop()
+	}
+	errC := make(chan error, 1)
+	lg.Info("started to purge file",
+		zap.String("dir", dirname),
+		zap.String("suffix", suffix),
+		zap.Uint("max", max),
+		zap.Duration("interval", interval))
+
+	go func() {
+		if donec != nil {
+			defer close(donec)
+		}
+		for {
+			fnamesWithSuffix, err := readDirWithSuffix(dirname, suffix)
+			if err != nil {
+				errC <- err
+				return
+			}
+			nPurged := 0
+			for nPurged < len(fnamesWithSuffix)-int(max) {
+				f := filepath.Join(dirname, fnamesWithSuffix[nPurged])
+				var l *LockedFile
+				if flock {
+					l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode)
+					if err != nil {
+						lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err))
+						break
+					}
+				}
+				if err = os.Remove(f); err != nil {
+					lg.Error("failed to remove file", zap.String("path", f), zap.Error(err))
+					errC <- err
+					return
+				}
+				if flock {
+					if err = l.Close(); err != nil {
+						lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+						errC <- err
+						return
+					}
+				}
+				lg.Info("purged", zap.String("path", f))
+				nPurged++
+			}
+
+			if purgec != nil {
+				for i := 0; i < nPurged; i++ {
+					purgec <- fnamesWithSuffix[i]
+				}
+			}
+			select {
+			case <-time.After(interval):
+			case <-stop:
+				return
+			}
+		}
+	}()
+	return errC
+}
+
+func readDirWithSuffix(dirname string, suffix string) ([]string, error) {
+	fnames, err := ReadDir(dirname)
+	if err != nil {
+		return nil, err
+	}
+	// filter in place (ref. https://go.dev/wiki/SliceTricks#filtering-without-allocating)
+	fnamesWithSuffix := fnames[:0]
+	for _, fname := range fnames {
+		if strings.HasSuffix(fname, suffix) {
+			fnamesWithSuffix = append(fnamesWithSuffix, fname)
+		}
+	}
+	return fnamesWithSuffix, nil
+}
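
A sketch of driving the purge loop above (illustrative only; the directory, suffix, and retention values are hypothetical). PurgeFile spawns a goroutine that keeps at most max matching files and reports failures on the returned error channel.

package main

import (
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	lg, _ := zap.NewProduction()
	stop := make(chan struct{})

	// Keep at most 5 "*.snap" files in /tmp/snapshots, checking every 30s.
	errc := fileutil.PurgeFile(lg, "/tmp/snapshots", ".snap", 5, 30*time.Second, stop)

	select {
	case err := <-errc: // the loop exits on the first purge failure
		lg.Error("purge loop exited", zap.Error(err))
	case <-time.After(time.Minute):
		close(stop) // shut the purge goroutine down
	}
}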
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go
new file mode 100644
index 0000000..2eeaa89
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+	"os"
+	"path/filepath"
+	"sort"
+)
+
+// ReadDirOp represents a read-directory operation.
+type ReadDirOp struct {
+	ext string
+}
+
+// ReadDirOption configures archiver operations.
+type ReadDirOption func(*ReadDirOp)
+
+// WithExt filters file names by their extensions.
+// (e.g. WithExt(".wal") to list only WAL files)
+func WithExt(ext string) ReadDirOption {
+	return func(op *ReadDirOp) { op.ext = ext }
+}
+
+func (op *ReadDirOp) applyOpts(opts []ReadDirOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
+	op := &ReadDirOp{}
+	op.applyOpts(opts)
+
+	dir, err := os.Open(d)
+	if err != nil {
+		return nil, err
+	}
+	defer dir.Close()
+
+	names, err := dir.Readdirnames(-1)
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(names)
+
+	if op.ext != "" {
+		tss := make([]string, 0)
+		for _, v := range names {
+			if filepath.Ext(v) == op.ext {
+				tss = append(tss, v)
+			}
+		}
+		names = tss
+	}
+	return names, nil
+}
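
For example (hypothetical directory), listing only WAL files with the functional option above:

package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	// Names come back sorted; WithExt keeps only "*.wal" entries.
	names, err := fileutil.ReadDir("/var/lib/etcd/member/wal", fileutil.WithExt(".wal"))
	if err != nil {
		panic(err)
	}
	for _, n := range names {
		fmt.Println(n)
	}
}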
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go
new file mode 100644
index 0000000..670d01f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux && !darwin
+
+package fileutil
+
+import "os"
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform.
+func Fdatasync(f *os.File) error {
+	return f.Sync()
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go
new file mode 100644
index 0000000..7affa78
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+
+package fileutil
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive
+// may not write it to the persistent media for quite some time and it may be
+// written in an out-of-order sequence. Using F_FULLFSYNC ensures that the
+// physical drive's buffer will also get flushed to the media.
+func Fsync(f *os.File) error {
+	_, err := unix.FcntlInt(f.Fd(), unix.F_FULLFSYNC, 0)
+	return err
+}
+
+// Fdatasync on the darwin platform invokes fcntl(F_FULLFSYNC) for actual
+// persistence on the physical drive media.
+func Fdatasync(f *os.File) error {
+	return Fsync(f)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go
new file mode 100644
index 0000000..a317238
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package fileutil
+
+import (
+	"os"
+	"syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+	return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+	return syscall.Fdatasync(int(f.Fd()))
+}
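
An illustrative write-then-sync sequence using the wrappers above (hypothetical file path). On linux, Fdatasync skips metadata-only flushes, which is why write-ahead logs prefer it over a full Fsync.

package main

import (
	"os"

	"go.etcd.io/etcd/client/pkg/v3/fileutil"
)

func main() {
	f, err := os.OpenFile("/tmp/wal.demo", os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	if _, err := f.Write([]byte("entry\n")); err != nil {
		panic(err)
	}
	// Persist the data before acknowledging the write.
	if err := fileutil.Fdatasync(f); err != nil {
		panic(err)
	}
}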
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/logutil/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go
new file mode 100644
index 0000000..286d385
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import "fmt"
+
+const (
+	JSONLogFormat    = "json"
+	ConsoleLogFormat = "console"
+	//revive:disable:var-naming
+	// Deprecated: Please use JSONLogFormat.
+	JsonLogFormat = JSONLogFormat
+	//revive:enable:var-naming
+)
+
+var DefaultLogFormat = JSONLogFormat
+
+// ConvertToZapFormat validates and converts the given log format string.
+func ConvertToZapFormat(format string) (string, error) {
+	switch format {
+	case ConsoleLogFormat:
+		return ConsoleLogFormat, nil
+	case JSONLogFormat:
+		return JSONLogFormat, nil
+	case "":
+		return DefaultLogFormat, nil
+	default:
+		return "", fmt.Errorf("unknown log format: %s, supported values json, console", format)
+	}
+}
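
A quick sketch of the validation behavior (not part of the diff): the empty string resolves to the default format, and anything unknown is rejected.

package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	for _, f := range []string{"json", "console", "", "xml"} {
		got, err := logutil.ConvertToZapFormat(f)
		fmt.Printf("%q -> %q, err=%v\n", f, got, err)
	}
	// "" -> "json" (the default); "xml" -> error
}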
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go
new file mode 100644
index 0000000..6c95bcf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"go.uber.org/zap/zapcore"
+)
+
+var DefaultLogLevel = "info"
+
+// ConvertToZapLevel converts log level string to zapcore.Level.
+func ConvertToZapLevel(lvl string) zapcore.Level {
+	var level zapcore.Level
+	if err := level.Set(lvl); err != nil {
+		panic(err)
+	}
+	return level
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
new file mode 100644
index 0000000..befa575
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"slices"
+	"time"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// CreateDefaultZapLogger creates a logger with default zap configuration
+func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) {
+	lcfg := DefaultZapLoggerConfig
+	lcfg.Level = zap.NewAtomicLevelAt(level)
+	c, err := lcfg.Build()
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// DefaultZapLoggerConfig defines default zap logger configuration.
+var DefaultZapLoggerConfig = zap.Config{
+	Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
+
+	Development: false,
+	Sampling: &zap.SamplingConfig{
+		Initial:    100,
+		Thereafter: 100,
+	},
+
+	Encoding: DefaultLogFormat,
+
+	// copied from "zap.NewProductionEncoderConfig" with some updates
+	EncoderConfig: zapcore.EncoderConfig{
+		TimeKey:       "ts",
+		LevelKey:      "level",
+		NameKey:       "logger",
+		CallerKey:     "caller",
+		MessageKey:    "msg",
+		StacktraceKey: "stacktrace",
+		LineEnding:    zapcore.DefaultLineEnding,
+		EncodeLevel:   zapcore.LowercaseLevelEncoder,
+
+		// Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps
+		EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
+			enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700"))
+		},
+
+		EncodeDuration: zapcore.StringDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	},
+
+	// Use "/dev/null" to discard all
+	OutputPaths:      []string{"stderr"},
+	ErrorOutputPaths: []string{"stderr"},
+}
+
+// MergeOutputPaths merges logging output paths, resolving conflicts.
+func MergeOutputPaths(cfg zap.Config) zap.Config {
+	cfg.OutputPaths = mergePaths(cfg.OutputPaths)
+	cfg.ErrorOutputPaths = mergePaths(cfg.ErrorOutputPaths)
+	return cfg
+}
+
+func mergePaths(old []string) []string {
+	if len(old) == 0 {
+		// the original implementation ensures the result is non-nil
+		return []string{}
+	}
+	// use "/dev/null" to discard all
+	if slices.Contains(old, "/dev/null") {
+		return []string{"/dev/null"}
+	}
+	// clone a new one; don't modify the original, in case it matters.
+	dup := slices.Clone(old)
+	slices.Sort(dup)
+	return slices.Compact(dup)
+}
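
For illustration, constructing a logger from the default config above; output lands on stderr as JSON with the microsecond timestamps defined in EncodeTime.

package main

import (
	"go.uber.org/zap/zapcore"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	lg, err := logutil.CreateDefaultZapLogger(zapcore.InfoLevel)
	if err != nil {
		panic(err)
	}
	defer lg.Sync()

	lg.Info("logger ready") // {"level":"info","ts":"...","caller":"...","msg":"logger ready"}
}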
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go
new file mode 100644
index 0000000..06dc40d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package logutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/go-systemd/v22/journal"
+	"go.uber.org/zap/zapcore"
+
+	"go.etcd.io/etcd/client/pkg/v3/systemd"
+)
+
+// NewJournalWriter wraps "io.Writer" to redirect log output
+// to the local systemd journal. If sending to journald fails, it falls
+// back to writing to the original writer.
+// The decode overhead is only <30µs per write.
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
+func NewJournalWriter(wr io.Writer) (io.Writer, error) {
+	return &journalWriter{Writer: wr}, systemd.DialJournal()
+}
+
+type journalWriter struct {
+	io.Writer
+}
+
+// WARN: assumes that etcd uses the default field names in the zap encoder
+// config; make sure to keep this up-to-date!
+type logLine struct {
+	Level  string `json:"level"`
+	Caller string `json:"caller"`
+}
+
+func (w *journalWriter) Write(p []byte) (int, error) {
+	line := &logLine{}
+	if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
+		return 0, err
+	}
+
+	var pri journal.Priority
+	switch line.Level {
+	case zapcore.DebugLevel.String():
+		pri = journal.PriDebug
+	case zapcore.InfoLevel.String():
+		pri = journal.PriInfo
+
+	case zapcore.WarnLevel.String():
+		pri = journal.PriWarning
+	case zapcore.ErrorLevel.String():
+		pri = journal.PriErr
+
+	case zapcore.DPanicLevel.String():
+		pri = journal.PriCrit
+	case zapcore.PanicLevel.String():
+		pri = journal.PriCrit
+	case zapcore.FatalLevel.String():
+		pri = journal.PriCrit
+
+	default:
+		panic(fmt.Errorf("unknown log level: %q", line.Level))
+	}
+
+	err := journal.Send(string(p), pri, map[string]string{
+		"PACKAGE":           filepath.Dir(line.Caller),
+		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+	})
+	if err != nil {
+		// "journal" also falls back to stderr
+		// "fmt.Fprintln(os.Stderr, s)"
+		return w.Writer.Write(p)
+	}
+	return 0, nil
+}
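
A sketch of wiring the journal writer in (illustrative; non-windows only, per the build tag). Note that Write expects zap-style JSON with "level" and "caller" fields, so the payload below is shaped accordingly.

package main

import (
	"os"

	"go.etcd.io/etcd/client/pkg/v3/logutil"
)

func main() {
	w, err := logutil.NewJournalWriter(os.Stderr)
	if err != nil {
		// journald socket unreachable; keep writing to stderr directly
		w = os.Stderr
	}
	// The writer decodes the zap JSON to pick a journald priority.
	_, _ = w.Write([]byte(`{"level":"info","caller":"demo/main.go:10","msg":"hello"}` + "\n"))
}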
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/systemd/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go
new file mode 100644
index 0000000..494ce37
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package systemd
+
+import "net"
+
+// DialJournal returns no error if the process can dial the journal socket.
+// Returns an error if the dial failed, which indicates journald is not
+// available (e.g. when running embedded etcd inside a docker container).
+// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
+func DialJournal() error {
+	conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
+	if conn != nil {
+		defer conn.Close()
+	}
+	return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
new file mode 100644
index 0000000..e1f2175
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+	"crypto/tls"
+	"fmt"
+)
+
+// GetCipherSuite returns the corresponding cipher suite ID
+// and a boolean indicating whether it is supported.
+func GetCipherSuite(s string) (uint16, bool) {
+	for _, c := range tls.CipherSuites() {
+		if s == c.Name {
+			return c.ID, true
+		}
+	}
+	for _, c := range tls.InsecureCipherSuites() {
+		if s == c.Name {
+			return c.ID, true
+		}
+	}
+	switch s {
+	case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":
+		return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true
+	case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":
+		return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true
+	}
+	return 0, false
+}
+
+// GetCipherSuites returns list of corresponding cipher suite IDs.
+func GetCipherSuites(ss []string) ([]uint16, error) {
+	cs := make([]uint16, len(ss))
+	for i, s := range ss {
+		var ok bool
+		cs[i], ok = GetCipherSuite(s)
+		if !ok {
+			return nil, fmt.Errorf("unexpected TLS cipher suite %q", s)
+		}
+	}
+
+	return cs, nil
+}
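
An illustrative mapping from suite names to IDs for a tls.Config (the suite names below are standard Go names, not etcd-specific):

package main

import (
	"crypto/tls"
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	ids, err := tlsutil.GetCipherSuites([]string{
		"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
		"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
	})
	if err != nil {
		panic(err) // unknown suite names are rejected, not skipped
	}
	cfg := &tls.Config{CipherSuites: ids, MinVersion: tls.VersionTLS12}
	fmt.Printf("%#04x\n", cfg.CipherSuites)
}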
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go
new file mode 100644
index 0000000..3b6aa67
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tlsutil provides utility functions for handling TLS.
+package tlsutil
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go
new file mode 100644
index 0000000..0f79865
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"os"
+)
+
+// NewCertPool creates x509 certPool with provided CA files.
+func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
+	certPool := x509.NewCertPool()
+
+	for _, CAFile := range CAFiles {
+		pemByte, err := os.ReadFile(CAFile)
+		if err != nil {
+			return nil, err
+		}
+
+		for {
+			var block *pem.Block
+			block, pemByte = pem.Decode(pemByte)
+			if block == nil {
+				break
+			}
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return nil, err
+			}
+
+			certPool.AddCert(cert)
+		}
+	}
+
+	return certPool, nil
+}
+
+// NewCert generates a TLS cert by using the given cert, key, and parse function.
+func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
+	cert, err := os.ReadFile(certfile)
+	if err != nil {
+		return nil, err
+	}
+
+	key, err := os.ReadFile(keyfile)
+	if err != nil {
+		return nil, err
+	}
+
+	if parseFunc == nil {
+		parseFunc = tls.X509KeyPair
+	}
+
+	tlsCert, err := parseFunc(cert, key)
+	if err != nil {
+		return nil, err
+	}
+	return &tlsCert, nil
+}
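
A hedged sketch of building a client tls.Config from these helpers; the file paths are hypothetical, and a nil parseFunc means tls.X509KeyPair is used.

package main

import (
	"crypto/tls"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	pool, err := tlsutil.NewCertPool([]string{"/etc/etcd/ca.pem"}) // hypothetical CA bundle
	if err != nil {
		panic(err)
	}
	cert, err := tlsutil.NewCert("/etc/etcd/client.pem", "/etc/etcd/client-key.pem", nil)
	if err != nil {
		panic(err)
	}
	_ = &tls.Config{
		RootCAs:      pool,
		Certificates: []tls.Certificate{*cert},
		MinVersion:   tls.VersionTLS12,
	}
}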
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
new file mode 100644
index 0000000..ffcecd8
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+	"crypto/tls"
+	"fmt"
+)
+
+type TLSVersion string
+
+// Constants for TLS versions.
+const (
+	TLSVersionDefault TLSVersion = ""
+	TLSVersion12      TLSVersion = "TLS1.2"
+	TLSVersion13      TLSVersion = "TLS1.3"
+)
+
+// GetTLSVersion returns the corresponding tls.Version or error.
+func GetTLSVersion(version string) (uint16, error) {
+	var v uint16
+
+	switch version {
+	case string(TLSVersionDefault):
+		v = 0 // 0 means let Go decide.
+	case string(TLSVersion12):
+		v = tls.VersionTLS12
+	case string(TLSVersion13):
+		v = tls.VersionTLS13
+	default:
+		return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
+	}
+
+	return v, nil
+}
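
For example, mapping the accepted strings (the empty string defers the choice to Go):

package main

import (
	"fmt"

	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
)

func main() {
	for _, s := range []string{"", "TLS1.2", "TLS1.3", "SSL3.0"} {
		v, err := tlsutil.GetTLSVersion(s)
		fmt.Printf("%q -> 0x%04x, err=%v\n", s, v, err)
	}
	// "" -> 0x0000 (let Go decide), "SSL3.0" -> error
}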
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/doc.go
new file mode 100644
index 0000000..37658ce
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport implements various HTTP transport utilities based on the
+// Go net package.
+package transport
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go
new file mode 100644
index 0000000..d43ac4f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go
@@ -0,0 +1,118 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+)
+
+// NewKeepAliveListener returns a listener that enables TCP keepalive on accepted connections.
+// Be careful when wrapping a KeepAliveListener with another Listener if TLSInfo is not nil.
+// Some packages (like net/http) might expect the Listener to return a *tls.Conn to start the TLS handshake.
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+//
+// Note(ahrtr):
+// only `net.TCPConn` supports `SetKeepAlive` and `SetKeepAlivePeriod`
+// by default, so if you want to wrap multiple layers of net.Listener,
+// the `keepaliveListener` should be the one which is closest to the
+// original `net.Listener` implementation, namely `TCPListener`.
+func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
+	kal := &keepaliveListener{
+		Listener: l,
+	}
+
+	if scheme == "https" {
+		if tlscfg == nil {
+			return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not present")
+		}
+		return newTLSKeepaliveListener(kal, tlscfg), nil
+	}
+
+	return kal, nil
+}
+
+type keepaliveListener struct{ net.Listener }
+
+func (kln *keepaliveListener) Accept() (net.Conn, error) {
+	c, err := kln.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	kac, err := createKeepaliveConn(c)
+	if err != nil {
+		return nil, fmt.Errorf("create keepalive connection failed, %w", err)
+	}
+	// detection time: tcp_keepalive_time + tcp_keepalive_probes * tcp_keepalive_intvl
+	// default on linux:  30 + 8 * 30
+	// default on osx:    30 + 8 * 75
+	if err := kac.SetKeepAlive(true); err != nil {
+		return nil, fmt.Errorf("SetKeepAlive failed, %w", err)
+	}
+	if err := kac.SetKeepAlivePeriod(30 * time.Second); err != nil {
+		return nil, fmt.Errorf("SetKeepAlivePeriod failed, %w", err)
+	}
+	return kac, nil
+}
+
+func createKeepaliveConn(c net.Conn) (*keepAliveConn, error) {
+	tcpc, ok := c.(*net.TCPConn)
+	if !ok {
+		return nil, ErrNotTCP
+	}
+	return &keepAliveConn{tcpc}, nil
+}
+
+type keepAliveConn struct {
+	*net.TCPConn
+}
+
+// SetKeepAlive sets keepalive
+func (l *keepAliveConn) SetKeepAlive(doKeepAlive bool) error {
+	return l.TCPConn.SetKeepAlive(doKeepAlive)
+}
+
+// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
+type tlsKeepaliveListener struct {
+	net.Listener
+	config *tls.Config
+}
+
+// Accept waits for and returns the next incoming TLS connection.
+// The returned connection c is a *tls.Conn.
+func (l *tlsKeepaliveListener) Accept() (net.Conn, error) {
+	c, err := l.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+
+	c = tls.Server(c, l.config)
+	return c, nil
+}
+
+// newTLSKeepaliveListener creates a Listener which accepts connections from an inner
+// Listener and wraps each connection with Server.
+// The configuration config must be non-nil and must have
+// at least one certificate.
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
+	l := &tlsKeepaliveListener{}
+	l.Listener = inner
+	l.config = config
+	return l
+}
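
A sketch of serving HTTP over a keepalive-enabled listener (illustrative; port and handler are arbitrary). Per the note above, the keepalive wrapper sits directly on the TCP listener.

package main

import (
	"net"
	"net/http"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// "http" scheme: plain TCP keepalive (30s period) on every accepted conn;
	// pass "https" plus a *tls.Config to also wrap connections with TLS.
	kal, err := transport.NewKeepAliveListener(ln, "http", nil)
	if err != nil {
		panic(err)
	}
	panic(http.Serve(kal, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})))
}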
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go
new file mode 100644
index 0000000..024c6c2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build openbsd
+
+package transport
+
+import "time"
+
+// SetKeepAlivePeriod sets keepalive period
+func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
+	// OpenBSD has no user-settable per-socket TCP keepalive options.
+	// Refer to https://github.com/etcd-io/etcd/issues/15811.
+	return nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go
new file mode 100644
index 0000000..08061f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !openbsd
+
+package transport
+
+import "time"
+
+// SetKeepAlivePeriod sets keepalive period
+func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
+	return l.TCPConn.SetKeepAlivePeriod(d)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go
new file mode 100644
index 0000000..bf4c4e1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go
@@ -0,0 +1,84 @@
+// Copyright 2013 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides network utility functions, complementing the more
+// common ones in the net package.
+package transport
+
+import (
+	"errors"
+	"net"
+	"sync"
+	"time"
+)
+
+var ErrNotTCP = errors.New("only tcp connections have keepalive")
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+	return &limitListener{l, make(chan struct{}, n)}
+}
+
+type limitListener struct {
+	net.Listener
+	sem chan struct{}
+}
+
+func (l *limitListener) acquire() { l.sem <- struct{}{} }
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+	l.acquire()
+	c, err := l.Listener.Accept()
+	if err != nil {
+		l.release()
+		return nil, err
+	}
+	return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+type limitListenerConn struct {
+	net.Conn
+	releaseOnce sync.Once
+	release     func()
+}
+
+func (l *limitListenerConn) Close() error {
+	err := l.Conn.Close()
+	l.releaseOnce.Do(l.release)
+	return err
+}
+
+// SetKeepAlive sets keepalive
+//
+// Deprecated: use (*keepAliveConn) SetKeepAlive instead.
+func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
+	tcpc, ok := l.Conn.(*net.TCPConn)
+	if !ok {
+		return ErrNotTCP
+	}
+	return tcpc.SetKeepAlive(doKeepAlive)
+}
+
+// SetKeepAlivePeriod sets keepalive period
+//
+// Deprecated: use (*keepAliveConn) SetKeepAlivePeriod instead.
+func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
+	tcpc, ok := l.Conn.(*net.TCPConn)
+	if !ok {
+		return ErrNotTCP
+	}
+	return tcpc.SetKeepAlivePeriod(d)
+}
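
And a sketch of capping concurrent connections with the semaphore-based listener above (the limit of 100 is arbitrary):

package main

import (
	"net"
	"net/http"

	"go.etcd.io/etcd/client/pkg/v3/transport"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	// Accept blocks once 100 connections are open and resumes as they close.
	panic(http.Serve(transport.LimitListener(ln, 100), http.DefaultServeMux))
}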
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
new file mode 100644
index 0000000..9c2d29b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
@@ -0,0 +1,622 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"go.uber.org/zap"
+
+	"go.etcd.io/etcd/client/pkg/v3/fileutil"
+	"go.etcd.io/etcd/client/pkg/v3/tlsutil"
+	"go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+// NewListener creates a new listener.
+func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
+	return newListener(addr, scheme, WithTLSInfo(tlsinfo))
+}
+
+// NewListenerWithOpts creates a new listener which accepts listener options.
+func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
+	return newListener(addr, scheme, opts...)
+}
+
+func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
+	if scheme == "unix" || scheme == "unixs" {
+		// unix sockets via unix://laddr
+		return NewUnixListener(addr)
+	}
+
+	lnOpts := newListenOpts(opts...)
+
+	switch {
+	case lnOpts.IsSocketOpts():
+		// new ListenConfig with socket options.
+		lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts)
+		// check for timeout
+		fallthrough
+	case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():
+		// timeout listener with socket options.
+		ln, err := newKeepAliveListener(&lnOpts.ListenConfig, addr)
+		if err != nil {
+			return nil, err
+		}
+		lnOpts.Listener = &rwTimeoutListener{
+			Listener:     ln,
+			readTimeout:  lnOpts.readTimeout,
+			writeTimeout: lnOpts.writeTimeout,
+		}
+	case lnOpts.IsTimeout():
+		ln, err := newKeepAliveListener(nil, addr)
+		if err != nil {
+			return nil, err
+		}
+		lnOpts.Listener = &rwTimeoutListener{
+			Listener:     ln,
+			readTimeout:  lnOpts.readTimeout,
+			writeTimeout: lnOpts.writeTimeout,
+		}
+	default:
+		ln, err := newKeepAliveListener(nil, addr)
+		if err != nil {
+			return nil, err
+		}
+		lnOpts.Listener = ln
+	}
+
+	// only skip if not passing TLSInfo
+	if lnOpts.skipTLSInfoCheck && !lnOpts.IsTLS() {
+		return lnOpts.Listener, nil
+	}
+	return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener)
+}
+
+func newKeepAliveListener(cfg *net.ListenConfig, addr string) (net.Listener, error) {
+	var ln net.Listener
+	var err error
+
+	if cfg != nil {
+		ln, err = cfg.Listen(context.TODO(), "tcp", addr)
+	} else {
+		ln, err = net.Listen("tcp", addr)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return NewKeepAliveListener(ln, "tcp", nil)
+}
+
+func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
+	if scheme != "https" && scheme != "unixs" {
+		return l, nil
+	}
+	if tlsinfo != nil && tlsinfo.SkipClientSANVerify {
+		return NewTLSListener(l, tlsinfo)
+	}
+	return newTLSListener(l, tlsinfo, checkSAN)
+}
+
+func newListenConfig(sopts *SocketOpts) net.ListenConfig {
+	lc := net.ListenConfig{}
+	if sopts != nil {
+		ctls := getControls(sopts)
+		if len(ctls) > 0 {
+			lc.Control = ctls.Control
+		}
+	}
+	return lc
+}
+
+type TLSInfo struct {
+	// CertFile is the _server_ cert; it will also be used as a _client_ certificate if ClientCertFile is empty
+	CertFile string
+	// KeyFile is the key for the CertFile
+	KeyFile string
+	// ClientCertFile is a _client_ cert for initiating connections when ClientCertAuth is defined. If ClientCertAuth
+	// is true but this value is empty, the CertFile will be used instead.
+	ClientCertFile string
+	// ClientKeyFile is the key for the ClientCertFile
+	ClientKeyFile string
+
+	TrustedCAFile       string
+	ClientCertAuth      bool
+	CRLFile             string
+	InsecureSkipVerify  bool
+	SkipClientSANVerify bool
+
+	// ServerName ensures the cert matches the given host in case of discovery / virtual hosting
+	ServerName string
+
+	// HandshakeFailure is optionally called when a connection fails to handshake. The
+	// connection will be closed immediately afterwards.
+	HandshakeFailure func(*tls.Conn, error)
+
+	// CipherSuites is a list of supported cipher suites.
+	// If empty, Go auto-populates it by default.
+	// Note that cipher suites are prioritized in the given order.
+	CipherSuites []uint16
+
+	// MinVersion is the minimum TLS version that is acceptable.
+	// If not set, the minimum version is TLS 1.2.
+	MinVersion uint16
+
+	// MaxVersion is the maximum TLS version that is acceptable.
+	// If not set, the default used by Go is selected (see tls.Config.MaxVersion).
+	MaxVersion uint16
+
+	selfCert bool
+
+	// parseFunc exists to simplify testing. Typically, parseFunc
+	// should be left nil. In that case, tls.X509KeyPair will be used.
+	parseFunc func([]byte, []byte) (tls.Certificate, error)
+
+	// AllowedCN is a CN which must be provided by a client.
+	//
+	// Deprecated: use AllowedCNs instead.
+	AllowedCN string
+
+	// AllowedHostname is an IP address or hostname that must match the TLS
+	// certificate provided by a client.
+	//
+	// Deprecated: use AllowedHostnames instead.
+	AllowedHostname string
+
+	// AllowedCNs is a list of acceptable CNs which must be provided by a client.
+	AllowedCNs []string
+
+	// AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the
+	// TLS certificate provided by a client.
+	AllowedHostnames []string
+
+	// Logger logs TLS errors.
+	// If nil, all logs are discarded.
+	Logger *zap.Logger
+
+	// EmptyCN indicates that the cert must have empty CN.
+	// If true, ClientConfig() will return an error for a cert with non empty CN.
+	EmptyCN bool
+
+	// LocalAddr is the local IP address to use when communicating with a peer.
+	LocalAddr string
+}
+
+func (info TLSInfo) String() string {
+	return fmt.Sprintf("cert = %s, key = %s, client-cert=%s, client-key=%s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.ClientCertFile, info.ClientKeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
+}
+
+func (info TLSInfo) Empty() bool {
+	return info.CertFile == "" && info.KeyFile == ""
+}
+
+func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (TLSInfo, error) {
+	verify.Assert(lg != nil, "nil log isn't allowed")
+
+	var err error
+	info := TLSInfo{Logger: lg}
+	if selfSignedCertValidity == 0 {
+		err = errors.New("selfSignedCertValidity is invalid, it should be greater than 0")
+		info.Logger.Warn(
+			"cannot generate cert",
+			zap.Error(err),
+		)
+		return info, err
+	}
+	err = fileutil.TouchDirAll(lg, dirpath)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot create cert directory",
+			zap.Error(err),
+		)
+		return info, err
+	}
+
+	certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem"))
+	if err != nil {
+		return info, err
+	}
+	keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem"))
+	if err != nil {
+		return info, err
+	}
+	_, errcert := os.Stat(certPath)
+	_, errkey := os.Stat(keyPath)
+	if errcert == nil && errkey == nil {
+		info.CertFile = certPath
+		info.KeyFile = keyPath
+		info.ClientCertFile = certPath
+		info.ClientKeyFile = keyPath
+		info.selfCert = true
+		return info, err
+	}
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot generate random number",
+			zap.Error(err),
+		)
+		return info, err
+	}
+
+	tmpl := x509.Certificate{
+		SerialNumber: serialNumber,
+		Subject:      pkix.Name{Organization: []string{"etcd"}},
+		NotBefore:    time.Now(),
+		NotAfter:     time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)),
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCRLSign,
+		ExtKeyUsage:           append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	info.Logger.Warn(
+		"automatically generate certificates",
+		zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter),
+	)
+
+	for _, host := range hosts {
+		h, _, _ := net.SplitHostPort(host)
+		if ip := net.ParseIP(h); ip != nil {
+			tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
+		} else {
+			tmpl.DNSNames = append(tmpl.DNSNames, h)
+		}
+	}
+
+	priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot generate ECDSA key",
+			zap.Error(err),
+		)
+		return info, err
+	}
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot generate x509 certificate",
+			zap.Error(err),
+		)
+		return info, err
+	}
+
+	certOut, err := os.Create(certPath)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot cert file",
+			zap.String("path", certPath),
+			zap.Error(err),
+		)
+		return info, err
+	}
+	pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+	certOut.Close()
+
+	info.Logger.Info("created cert file", zap.String("path", certPath))
+
+	b, err := x509.MarshalECPrivateKey(priv)
+	if err != nil {
+		return info, err
+	}
+	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot key file",
+			zap.String("path", keyPath),
+			zap.Error(err),
+		)
+		return info, err
+	}
+	pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
+	keyOut.Close()
+	info.Logger.Info("created key file", zap.String("path", keyPath))
+	return SelfCert(lg, dirpath, hosts, selfSignedCertValidity)
+}
+
+// baseConfig is called on initial TLS handshake start.
+//
+// Previously,
+// 1. Server has non-empty (*tls.Config).Certificates on client hello
+// 2. Server calls (*tls.Config).GetCertificate iff:
+//   - Server's (*tls.Config).Certificates is not empty, or
+//   - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+//
+// When (*tls.Config).Certificates is always populated on initial handshake,
+// client is expected to provide a valid matching SNI to pass the TLS
+// verification, thus trigger server (*tls.Config).GetCertificate to reload
+// TLS assets. However, a cert whose SAN field does not include domain names
+// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
+// it was never able to trigger TLS reload on initial handshake; first
+// ceritifcate object was being used, never being updated.
+//
+// Now, (*tls.Config).Certificates is created empty on initial TLS client
+// handshake, in order to trigger (*tls.Config).GetCertificate and populate
+// rest of the certificates on every new TLS connection, even when client
+// SNI is empty (e.g. cert only includes IPs).
+func (info TLSInfo) baseConfig() (*tls.Config, error) {
+	if info.KeyFile == "" || info.CertFile == "" {
+		return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
+	}
+	if info.Logger == nil {
+		info.Logger = zap.NewNop()
+	}
+
+	_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+	if err != nil {
+		return nil, err
+	}
+
+	// Perform prevalidation of the client cert and key if either is provided. This makes sure we crash before accepting any connections.
+	if (info.ClientKeyFile == "") != (info.ClientCertFile == "") {
+		return nil, fmt.Errorf("ClientKeyFile and ClientCertFile must both be present or both absent: key: %v, cert: %v]", info.ClientKeyFile, info.ClientCertFile)
+	}
+	if info.ClientCertFile != "" {
+		_, err := tlsutil.NewCert(info.ClientCertFile, info.ClientKeyFile, info.parseFunc)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	var minVersion uint16
+	if info.MinVersion != 0 {
+		minVersion = info.MinVersion
+	} else {
+		// Default minimum version is TLS 1.2, previous versions are insecure and deprecated.
+		minVersion = tls.VersionTLS12
+	}
+
+	cfg := &tls.Config{
+		MinVersion: minVersion,
+		MaxVersion: info.MaxVersion,
+		ServerName: info.ServerName,
+	}
+
+	if len(info.CipherSuites) > 0 {
+		cfg.CipherSuites = info.CipherSuites
+	}
+
+	// Client certificates may be verified by either an exact match on the CN,
+	// or a more general check of the CN and SANs.
+	var verifyCertificate func(*x509.Certificate) bool
+
+	if info.AllowedCN != "" && len(info.AllowedCNs) > 0 {
+		return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs)
+	}
+	if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 {
+		return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames)
+	}
+	if info.AllowedCN != "" && info.AllowedHostname != "" {
+		return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
+	}
+	if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 {
+		return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames)
+	}
+
+	if info.AllowedCN != "" {
+		info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead")
+		verifyCertificate = func(cert *x509.Certificate) bool {
+			return info.AllowedCN == cert.Subject.CommonName
+		}
+	}
+	if info.AllowedHostname != "" {
+		info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead")
+		verifyCertificate = func(cert *x509.Certificate) bool {
+			return cert.VerifyHostname(info.AllowedHostname) == nil
+		}
+	}
+	if len(info.AllowedCNs) > 0 {
+		verifyCertificate = func(cert *x509.Certificate) bool {
+			for _, allowedCN := range info.AllowedCNs {
+				if allowedCN == cert.Subject.CommonName {
+					return true
+				}
+			}
+			return false
+		}
+	}
+	if len(info.AllowedHostnames) > 0 {
+		verifyCertificate = func(cert *x509.Certificate) bool {
+			for _, allowedHostname := range info.AllowedHostnames {
+				if cert.VerifyHostname(allowedHostname) == nil {
+					return true
+				}
+			}
+			return false
+		}
+	}
+	if verifyCertificate != nil {
+		cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+			for _, chains := range verifiedChains {
+				if len(chains) != 0 {
+					if verifyCertificate(chains[0]) {
+						return nil
+					}
+				}
+			}
+			return errors.New("client certificate authentication failed")
+		}
+	}
+
+	// this only reloads certs when there's a client request
+	// TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
+	cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
+		cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+		if os.IsNotExist(err) {
+			info.Logger.Warn(
+				"failed to find peer cert files",
+				zap.String("cert-file", info.CertFile),
+				zap.String("key-file", info.KeyFile),
+				zap.Error(err),
+			)
+		} else if err != nil {
+			info.Logger.Warn(
+				"failed to create peer certificate",
+				zap.String("cert-file", info.CertFile),
+				zap.String("key-file", info.KeyFile),
+				zap.Error(err),
+			)
+		}
+		return cert, err
+	}
+	cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
+		certfile, keyfile := info.CertFile, info.KeyFile
+		if info.ClientCertFile != "" {
+			certfile, keyfile = info.ClientCertFile, info.ClientKeyFile
+		}
+		cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc)
+		if os.IsNotExist(err) {
+			info.Logger.Warn(
+				"failed to find client cert files",
+				zap.String("cert-file", certfile),
+				zap.String("key-file", keyfile),
+				zap.Error(err),
+			)
+		} else if err != nil {
+			info.Logger.Warn(
+				"failed to create client certificate",
+				zap.String("cert-file", certfile),
+				zap.String("key-file", keyfile),
+				zap.Error(err),
+			)
+		}
+		return cert, err
+	}
+	return cfg, nil
+}
+
+// cafiles returns a list of CA file paths.
+func (info TLSInfo) cafiles() []string {
+	cs := make([]string, 0)
+	if info.TrustedCAFile != "" {
+		cs = append(cs, info.TrustedCAFile)
+	}
+	return cs
+}
+
+// ServerConfig generates a tls.Config object for use by an HTTP server.
+func (info TLSInfo) ServerConfig() (*tls.Config, error) {
+	cfg, err := info.baseConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	if info.Logger == nil {
+		info.Logger = zap.NewNop()
+	}
+
+	cfg.ClientAuth = tls.NoClientCert
+	if info.TrustedCAFile != "" || info.ClientCertAuth {
+		cfg.ClientAuth = tls.RequireAndVerifyClientCert
+	}
+
+	cs := info.cafiles()
+	if len(cs) > 0 {
+		info.Logger.Info("Loading cert pool", zap.Strings("cs", cs),
+			zap.Any("tlsinfo", info))
+		cp, err := tlsutil.NewCertPool(cs)
+		if err != nil {
+			return nil, err
+		}
+		cfg.ClientCAs = cp
+	}
+
+	// "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
+	cfg.NextProtos = []string{"h2"}
+
+	return cfg, nil
+}
+
+// ClientConfig generates a tls.Config object for use by an HTTP client.
+func (info TLSInfo) ClientConfig() (*tls.Config, error) {
+	var cfg *tls.Config
+	var err error
+
+	if !info.Empty() {
+		cfg, err = info.baseConfig()
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		cfg = &tls.Config{ServerName: info.ServerName}
+	}
+	cfg.InsecureSkipVerify = info.InsecureSkipVerify
+
+	cs := info.cafiles()
+	if len(cs) > 0 {
+		cfg.RootCAs, err = tlsutil.NewCertPool(cs)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if info.selfCert {
+		cfg.InsecureSkipVerify = true
+	}
+
+	if info.EmptyCN {
+		hasNonEmptyCN := false
+		cn := ""
+		_, err := tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) {
+			var block *pem.Block
+			block, _ = pem.Decode(certPEMBlock)
+			cert, err := x509.ParseCertificate(block.Bytes)
+			if err != nil {
+				return tls.Certificate{}, err
+			}
+			if len(cert.Subject.CommonName) != 0 {
+				hasNonEmptyCN = true
+				cn = cert.Subject.CommonName
+			}
+			return tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+		})
+		if err != nil {
+			return nil, err
+		}
+		if hasNonEmptyCN {
+			return nil, fmt.Errorf("cert has non empty Common Name (%s): %s", cn, info.CertFile)
+		}
+	}
+
+	return cfg, nil
+}
+
+// IsClosedConnError returns true if the error originates from a closed listener or cmux.
+// Copied from golang.org/x/net/http2/http2.go.
+func IsClosedConnError(err error) bool {
+	// 'use of closed network connection' (Go <=1.8)
+	// 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
+	// 'mux: listener closed' (cmux.ErrListenerClosed)
+	return err != nil && strings.Contains(err.Error(), "closed")
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go
new file mode 100644
index 0000000..7536f6a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go
@@ -0,0 +1,90 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type ListenerOptions struct {
+	Listener     net.Listener
+	ListenConfig net.ListenConfig
+
+	socketOpts       *SocketOpts
+	tlsInfo          *TLSInfo
+	skipTLSInfoCheck bool
+	writeTimeout     time.Duration
+	readTimeout      time.Duration
+}
+
+func newListenOpts(opts ...ListenerOption) *ListenerOptions {
+	lnOpts := &ListenerOptions{}
+	lnOpts.applyOpts(opts)
+	return lnOpts
+}
+
+func (lo *ListenerOptions) applyOpts(opts []ListenerOption) {
+	for _, opt := range opts {
+		opt(lo)
+	}
+}
+
+// IsTimeout returns true if the listener has a read/write timeout defined.
+func (lo *ListenerOptions) IsTimeout() bool { return lo.readTimeout != 0 || lo.writeTimeout != 0 }
+
+// IsSocketOpts returns true if the listener options includes socket options.
+func (lo *ListenerOptions) IsSocketOpts() bool {
+	if lo.socketOpts == nil {
+		return false
+	}
+	return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress
+}
+
+// IsTLS returns true if the listener options include TLSInfo.
+func (lo *ListenerOptions) IsTLS() bool {
+	if lo.tlsInfo == nil {
+		return false
+	}
+	return !lo.tlsInfo.Empty()
+}
+
+// ListenerOption are options which can be applied to the listener.
+type ListenerOption func(*ListenerOptions)
+
+// WithTimeout allows for a read or write timeout to be applied to the listener.
+func WithTimeout(read, write time.Duration) ListenerOption {
+	return func(lo *ListenerOptions) {
+		lo.writeTimeout = write
+		lo.readTimeout = read
+	}
+}
+
+// WithSocketOpts defines socket options that will be applied to the listener.
+func WithSocketOpts(s *SocketOpts) ListenerOption {
+	return func(lo *ListenerOptions) { lo.socketOpts = s }
+}
+
+// WithTLSInfo adds TLS credentials to the listener.
+func WithTLSInfo(t *TLSInfo) ListenerOption {
+	return func(lo *ListenerOptions) { lo.tlsInfo = t }
+}
+
+// WithSkipTLSInfoCheck, when true, allows a transport to be created with an
+// https scheme without passing TLSInfo, circumventing the "not presented"
+// error. Skipping this check also requires that TLSInfo is not passed.
+func WithSkipTLSInfoCheck(skip bool) ListenerOption {
+	return func(lo *ListenerOptions) { lo.skipTLSInfoCheck = skip }
+}
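+
+// Usage sketch (illustrative; newListenOpts and the option helpers are
+// defined above, and the timeout/socket values are arbitrary):
+//
+//	lnOpts := newListenOpts(
+//		WithTimeout(5*time.Second, 5*time.Second),
+//		WithSocketOpts(&SocketOpts{ReusePort: true}),
+//	)
+//	_ = lnOpts.IsTimeout()    // true
+//	_ = lnOpts.IsSocketOpts() // true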
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go
new file mode 100644
index 0000000..2c94841
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go
@@ -0,0 +1,273 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"net"
+	"os"
+	"strings"
+	"sync"
+)
+
+// tlsListener overrides a TLS listener so it will reject client
+// certificates with insufficient SAN credentials or CRL-revoked
+// certificates.
+type tlsListener struct {
+	net.Listener
+	connc            chan net.Conn
+	donec            chan struct{}
+	err              error
+	handshakeFailure func(*tls.Conn, error)
+	check            tlsCheckFunc
+}
+
+type tlsCheckFunc func(context.Context, *tls.Conn) error
+
+// NewTLSListener handshakes TLS connections and performs optional CRL checking.
+func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) {
+	check := func(context.Context, *tls.Conn) error { return nil }
+	return newTLSListener(l, tlsinfo, check)
+}
+
+func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) {
+	if tlsinfo == nil || tlsinfo.Empty() {
+		l.Close()
+		return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String())
+	}
+	tlscfg, err := tlsinfo.ServerConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	hf := tlsinfo.HandshakeFailure
+	if hf == nil {
+		hf = func(*tls.Conn, error) {}
+	}
+
+	if len(tlsinfo.CRLFile) > 0 {
+		prevCheck := check
+		check = func(ctx context.Context, tlsConn *tls.Conn) error {
+			if err := prevCheck(ctx, tlsConn); err != nil {
+				return err
+			}
+			st := tlsConn.ConnectionState()
+			if certs := st.PeerCertificates; len(certs) > 0 {
+				return checkCRL(tlsinfo.CRLFile, certs)
+			}
+			return nil
+		}
+	}
+
+	tlsl := &tlsListener{
+		Listener:         tls.NewListener(l, tlscfg),
+		connc:            make(chan net.Conn),
+		donec:            make(chan struct{}),
+		handshakeFailure: hf,
+		check:            check,
+	}
+	go tlsl.acceptLoop()
+	return tlsl, nil
+}
+
+func (l *tlsListener) Accept() (net.Conn, error) {
+	select {
+	case conn := <-l.connc:
+		return conn, nil
+	case <-l.donec:
+		return nil, l.err
+	}
+}
+
+func checkSAN(ctx context.Context, tlsConn *tls.Conn) error {
+	st := tlsConn.ConnectionState()
+	if certs := st.PeerCertificates; len(certs) > 0 {
+		addr := tlsConn.RemoteAddr().String()
+		return checkCertSAN(ctx, certs[0], addr)
+	}
+	return nil
+}
+
+// acceptLoop launches each TLS handshake in a separate goroutine
+// to prevent a hanging TLS connection from blocking other connections.
+func (l *tlsListener) acceptLoop() {
+	var wg sync.WaitGroup
+	var pendingMu sync.Mutex
+
+	pending := make(map[net.Conn]struct{})
+	ctx, cancel := context.WithCancel(context.Background())
+	defer func() {
+		cancel()
+		pendingMu.Lock()
+		for c := range pending {
+			c.Close()
+		}
+		pendingMu.Unlock()
+		wg.Wait()
+		close(l.donec)
+	}()
+
+	for {
+		conn, err := l.Listener.Accept()
+		if err != nil {
+			l.err = err
+			return
+		}
+
+		pendingMu.Lock()
+		pending[conn] = struct{}{}
+		pendingMu.Unlock()
+
+		wg.Add(1)
+		go func() {
+			defer func() {
+				if conn != nil {
+					conn.Close()
+				}
+				wg.Done()
+			}()
+
+			tlsConn := conn.(*tls.Conn)
+			herr := tlsConn.Handshake()
+			pendingMu.Lock()
+			delete(pending, conn)
+			pendingMu.Unlock()
+
+			if herr != nil {
+				l.handshakeFailure(tlsConn, herr)
+				return
+			}
+			if err := l.check(ctx, tlsConn); err != nil {
+				l.handshakeFailure(tlsConn, err)
+				return
+			}
+
+			select {
+			case l.connc <- tlsConn:
+				conn = nil
+			case <-ctx.Done():
+			}
+		}()
+	}
+}
+
+func checkCRL(crlPath string, cert []*x509.Certificate) error {
+	// TODO: cache
+	crlBytes, err := os.ReadFile(crlPath)
+	if err != nil {
+		return err
+	}
+	certList, err := x509.ParseRevocationList(crlBytes)
+	if err != nil {
+		return err
+	}
+	revokedSerials := make(map[string]struct{})
+	for _, rc := range certList.RevokedCertificateEntries {
+		revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{}
+	}
+	for _, c := range cert {
+		serial := string(c.SerialNumber.Bytes())
+		if _, ok := revokedSerials[serial]; ok {
+			return fmt.Errorf("transport: certificate serial %x revoked", serial)
+		}
+	}
+	return nil
+}
+
+func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error {
+	if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 {
+		return nil
+	}
+	h, _, herr := net.SplitHostPort(remoteAddr)
+	if herr != nil {
+		return herr
+	}
+	if len(cert.IPAddresses) > 0 {
+		cerr := cert.VerifyHostname(h)
+		if cerr == nil {
+			return nil
+		}
+		if len(cert.DNSNames) == 0 {
+			return cerr
+		}
+	}
+	if len(cert.DNSNames) > 0 {
+		ok, err := isHostInDNS(ctx, h, cert.DNSNames)
+		if ok {
+			return nil
+		}
+		errStr := ""
+		if err != nil {
+			errStr = " (" + err.Error() + ")"
+		}
+		return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames)
+	}
+	return nil
+}
+
+func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
+	// reverse lookup
+	var names []string
+	var wildcards []string
+	for _, dns := range dnsNames {
+		if strings.HasPrefix(dns, "*.") {
+			wildcards = append(wildcards, dns[1:])
+		} else {
+			names = append(names, dns)
+		}
+	}
+	lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host)
+	for _, name := range lnames {
+		// strip trailing '.' from PTR record
+		if name[len(name)-1] == '.' {
+			name = name[:len(name)-1]
+		}
+		for _, wc := range wildcards {
+			if strings.HasSuffix(name, wc) {
+				return true, nil
+			}
+		}
+		for _, n := range names {
+			if n == name {
+				return true, nil
+			}
+		}
+	}
+	err = lerr
+
+	// forward lookup
+	for _, dns := range names {
+		addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
+		if lerr != nil {
+			err = lerr
+			continue
+		}
+		for _, addr := range addrs {
+			if addr == host {
+				return true, nil
+			}
+		}
+	}
+	return false, err
+}
+
+func (l *tlsListener) Close() error {
+	err := l.Listener.Close()
+	<-l.donec
+	return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go
new file mode 100644
index 0000000..49b48dc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"syscall"
+)
+
+type Controls []func(network, addr string, conn syscall.RawConn) error
+
+func (ctls Controls) Control(network, addr string, conn syscall.RawConn) error {
+	for _, s := range ctls {
+		if err := s(network, addr, conn); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type SocketOpts struct {
+	// ReusePort enables the socket option SO_REUSEPORT [1], which allows
+	// rebinding a port already in use. Keep in mind that flock can fail,
+	// in which case the lock on the data file could end up in an
+	// unexpected state; take care to protect against lock races.
+	// [1] https://man7.org/linux/man-pages/man7/socket.7.html
+	ReusePort bool `json:"reuse-port"`
+	// ReuseAddress enables the socket option SO_REUSEADDR, which allows
+	// binding to an address in the `TIME_WAIT` state. Useful to improve MTTR
+	// in cases where etcd is slow to restart due to excessive `TIME_WAIT`.
+	// [1] https://man7.org/linux/man-pages/man7/socket.7.html
+	ReuseAddress bool `json:"reuse-address"`
+}
+
+func getControls(sopts *SocketOpts) Controls {
+	ctls := Controls{}
+	if sopts.ReuseAddress {
+		ctls = append(ctls, setReuseAddress)
+	}
+	if sopts.ReusePort {
+		ctls = append(ctls, setReusePort)
+	}
+	return ctls
+}
+
+func (sopts *SocketOpts) Empty() bool {
+	return !sopts.ReuseAddress && !sopts.ReusePort
+}
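+
+// Usage sketch (an assumption about the call site, mirroring how the
+// listener code consumes getControls): the Controls chain plugs directly
+// into net.ListenConfig.
+//
+//	sopts := &SocketOpts{ReusePort: true}
+//	lc := net.ListenConfig{Control: getControls(sopts).Control}
+//	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:0")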
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
new file mode 100644
index 0000000..149ad51
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build solaris
+
+package transport
+
+import (
+	"errors"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+	return errors.New("port reuse is not supported on Solaris")
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+	return conn.Control(func(fd uintptr) {
+		syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+	})
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
new file mode 100644
index 0000000..385eadb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !solaris && !wasm && !js
+
+package transport
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, conn syscall.RawConn) error {
+	return conn.Control(func(fd uintptr) {
+		syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+	})
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+	return conn.Control(func(fd uintptr) {
+		syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+	})
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go
new file mode 100644
index 0000000..c6590b1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasm || js
+
+package transport
+
+import (
+	"errors"
+	"syscall"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+	return errors.New("port reuse is not supported on WASM")
+}
+
+func setReuseAddress(network, addr string, conn syscall.RawConn) error {
+	return errors.New("address reuse is not supported on WASM")
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go
new file mode 100644
index 0000000..2670b4d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package transport
+
+import (
+	"errors"
+	"syscall"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+	return errors.New("port reuse is not supported on Windows")
+}
+
+// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as
+// there is no protection against port hijacking.
+func setReuseAddress(network, addr string, conn syscall.RawConn) error {
+	return errors.New("address reuse is not supported on Windows")
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go
new file mode 100644
index 0000000..80e3293
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go
@@ -0,0 +1,44 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type timeoutConn struct {
+	net.Conn
+	writeTimeout time.Duration
+	readTimeout  time.Duration
+}
+
+func (c timeoutConn) Write(b []byte) (n int, err error) {
+	if c.writeTimeout > 0 {
+		if err := c.SetWriteDeadline(time.Now().Add(c.writeTimeout)); err != nil {
+			return 0, err
+		}
+	}
+	return c.Conn.Write(b)
+}
+
+func (c timeoutConn) Read(b []byte) (n int, err error) {
+	if c.readTimeout > 0 {
+		if err := c.SetReadDeadline(time.Now().Add(c.readTimeout)); err != nil {
+			return 0, err
+		}
+	}
+	return c.Conn.Read(b)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go
new file mode 100644
index 0000000..9c0245d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+type rwTimeoutDialer struct {
+	wtimeoutd  time.Duration
+	rdtimeoutd time.Duration
+	net.Dialer
+}
+
+func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
+	conn, err := d.Dialer.Dial(network, address)
+	tconn := &timeoutConn{
+		readTimeout:  d.rdtimeoutd,
+		writeTimeout: d.wtimeoutd,
+		Conn:         conn,
+	}
+	return tconn, err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go
new file mode 100644
index 0000000..5d74bd7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"time"
+)
+
+// NewTimeoutListener returns a listener that listens on the given address.
+// If read/write on the accepted connection blocks longer than its time limit,
+// it will return a timeout error.
+func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, readTimeout, writeTimeout time.Duration) (net.Listener, error) {
+	return newListener(addr, scheme, WithTimeout(readTimeout, writeTimeout), WithTLSInfo(tlsinfo))
+}
+
+type rwTimeoutListener struct {
+	net.Listener
+	writeTimeout time.Duration
+	readTimeout  time.Duration
+}
+
+func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
+	c, err := rwln.Listener.Accept()
+	if err != nil {
+		return nil, err
+	}
+	return timeoutConn{
+		Conn:         c,
+		writeTimeout: rwln.writeTimeout,
+		readTimeout:  rwln.readTimeout,
+	}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_transport.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_transport.go
new file mode 100644
index 0000000..ea16b4c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_transport.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// NewTimeoutTransport returns a transport created using the given TLS info.
+// If read/write on the created connection blocks longer than its time limit,
+// it will return a timeout error.
+// If a read/write timeout is set, the transport will not be able to reuse connections.
+func NewTimeoutTransport(info TLSInfo, dialtimeoutd, rdtimeoutd, wtimeoutd time.Duration) (*http.Transport, error) {
+	tr, err := NewTransport(info, dialtimeoutd)
+	if err != nil {
+		return nil, err
+	}
+
+	if rdtimeoutd != 0 || wtimeoutd != 0 {
+		// The timed-out connection will time out soon after it goes idle, so it
+		// should not be put back into the http transport as an idle connection for future use.
+		tr.MaxIdleConnsPerHost = -1
+	} else {
+		// allow more idle connections between peers to avoid unnecessary port allocation.
+		tr.MaxIdleConnsPerHost = 1024
+	}
+
+	tr.Dial = (&rwTimeoutDialer{
+		Dialer: net.Dialer{
+			Timeout:   dialtimeoutd,
+			KeepAlive: 30 * time.Second,
+		},
+		rdtimeoutd: rdtimeoutd,
+		wtimeoutd:  wtimeoutd,
+	}).Dial
+	return tr, nil
+}
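+
+// Usage sketch (timeout values are illustrative): reads or writes on the
+// resulting connections fail once they block longer than five seconds.
+//
+//	tr, err := NewTimeoutTransport(TLSInfo{}, time.Second, 5*time.Second, 5*time.Second)
+//	if err == nil {
+//		cli := &http.Client{Transport: tr}
+//		_ = cli
+//	}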
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go
new file mode 100644
index 0000000..d537586
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ValidateSecureEndpoints scans the given endpoints against tls info, returning only those
+// endpoints that could be validated as secure.
+func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
+	t, err := NewTransport(tlsInfo, 5*time.Second)
+	if err != nil {
+		return nil, err
+	}
+	defer t.CloseIdleConnections()
+
+	var errs []string
+	var endpoints []string
+	for _, ep := range eps {
+		if !strings.HasPrefix(ep, "https://") {
+			errs = append(errs, fmt.Sprintf("%q is insecure", ep))
+			continue
+		}
+		conn, cerr := t.DialContext(context.Background(), "tcp", ep[len("https://"):])
+		if cerr != nil {
+			errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
+			continue
+		}
+		conn.Close()
+		endpoints = append(endpoints, ep)
+	}
+	if len(errs) != 0 {
+		err = errors.New(strings.Join(errs, ","))
+	}
+	return endpoints, err
+}
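+
+// Usage sketch (tlsInfo is a caller-supplied TLSInfo; the endpoints are
+// illustrative): insecure or unreachable endpoints are dropped, with the
+// reasons aggregated into the returned error.
+//
+//	eps, err := ValidateSecureEndpoints(tlsInfo, []string{
+//		"https://10.0.0.1:2379",
+//		"http://10.0.0.2:2379", // rejected: not https
+//	})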
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go
new file mode 100644
index 0000000..67170d7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go
@@ -0,0 +1,86 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"strings"
+	"time"
+)
+
+type unixTransport struct{ *http.Transport }
+
+func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
+	cfg, err := info.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	var ipAddr net.Addr
+	if info.LocalAddr != "" {
+		ipAddr, err = net.ResolveTCPAddr("tcp", info.LocalAddr+":0")
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	t := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		DialContext: (&net.Dialer{
+			Timeout:   dialtimeoutd,
+			LocalAddr: ipAddr,
+			// value taken from http.DefaultTransport
+			KeepAlive: 30 * time.Second,
+		}).DialContext,
+		// value taken from http.DefaultTransport
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     cfg,
+	}
+
+	dialer := &net.Dialer{
+		Timeout:   dialtimeoutd,
+		KeepAlive: 30 * time.Second,
+	}
+
+	dialContext := func(ctx context.Context, _, addr string) (net.Conn, error) {
+		return dialer.DialContext(ctx, "unix", addr)
+	}
+	tu := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		DialContext:         dialContext,
+		TLSHandshakeTimeout: 10 * time.Second,
+		TLSClientConfig:     cfg,
+		// The cost of reopening connections on unix sockets is low, and they are mostly used in testing.
+		// Long-lived unix-transport connections were leading to 'leak' test flakes.
+		// Alternatively, the returned Transport (t) should override CloseIdleConnections to
+		// forward it to 'tu' as well.
+		IdleConnTimeout: time.Microsecond,
+	}
+	ut := &unixTransport{tu}
+
+	t.RegisterProtocol("unix", ut)
+	t.RegisterProtocol("unixs", ut)
+
+	return t, nil
+}
+
+func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	url := *req.URL
+	req.URL = &url
+	req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
+	return urt.Transport.RoundTrip(req)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/unix_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/unix_listener.go
new file mode 100644
index 0000000..123e203
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/unix_listener.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"net"
+	"os"
+)
+
+type unixListener struct{ net.Listener }
+
+func NewUnixListener(addr string) (net.Listener, error) {
+	if err := os.Remove(addr); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	l, err := net.Listen("unix", addr)
+	if err != nil {
+		return nil, err
+	}
+	return &unixListener{l}, nil
+}
+
+func (ul *unixListener) Close() error {
+	if err := os.Remove(ul.Addr().String()); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return ul.Listener.Close()
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/types/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go
new file mode 100644
index 0000000..7a09647
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"strconv"
+	"strings"
+)
+
+// ID represents a generic identifier which is canonically
+// stored as a uint64 but is typically represented as a
+// base-16 string for input/output.
+type ID uint64
+
+func (i ID) String() string {
+	return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString attempts to create an ID from a base-16 string.
+func IDFromString(s string) (ID, error) {
+	i, err := strconv.ParseUint(s, 16, 64)
+	return ID(i), err
+}
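+
+// Example (sketch): String and IDFromString round-trip through base-16.
+//
+//	id := ID(0xdeadbeef)
+//	s := id.String()           // "deadbeef"
+//	back, _ := IDFromString(s) // back == ID(0xdeadbeef)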
+
+// IDSlice implements the sort interface
+type IDSlice []ID
+
+func (p IDSlice) Len() int           { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func (p IDSlice) String() string {
+	var b strings.Builder
+	if p.Len() > 0 {
+		b.WriteString(p[0].String())
+	}
+
+	for i := 1; i < p.Len(); i++ {
+		b.WriteString(",")
+		b.WriteString(p[i].String())
+	}
+
+	return b.String()
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go
new file mode 100644
index 0000000..3e69c8d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go
@@ -0,0 +1,195 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+type Set interface {
+	Add(string)
+	Remove(string)
+	Contains(string) bool
+	Equals(Set) bool
+	Length() int
+	Values() []string
+	Copy() Set
+	Sub(Set) Set
+}
+
+func NewUnsafeSet(values ...string) *unsafeSet {
+	set := &unsafeSet{make(map[string]struct{})}
+	for _, v := range values {
+		set.Add(v)
+	}
+	return set
+}
+
+func NewThreadsafeSet(values ...string) *tsafeSet {
+	us := NewUnsafeSet(values...)
+	return &tsafeSet{us, sync.RWMutex{}}
+}
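+
+// Example (sketch): basic set operations; Sub returns a new set and
+// leaves its receiver untouched.
+//
+//	s := NewThreadsafeSet("a", "b")
+//	s.Add("c")
+//	d := s.Sub(NewThreadsafeSet("b")) // d contains "a" and "c"
+//	_ = d.Contains("a")               // true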
+
+type unsafeSet struct {
+	d map[string]struct{}
+}
+
+// Add adds a new value to the set (no-op if the value is already present)
+func (us *unsafeSet) Add(value string) {
+	us.d[value] = struct{}{}
+}
+
+// Remove removes the given value from the set
+func (us *unsafeSet) Remove(value string) {
+	delete(us.d, value)
+}
+
+// Contains returns whether the set contains the given value
+func (us *unsafeSet) Contains(value string) (exists bool) {
+	_, exists = us.d[value]
+	return exists
+}
+
+// ContainsAll returns whether the set contains all given values
+func (us *unsafeSet) ContainsAll(values []string) bool {
+	for _, s := range values {
+		if !us.Contains(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equals returns whether the contents of two sets are identical
+func (us *unsafeSet) Equals(other Set) bool {
+	v1 := sort.StringSlice(us.Values())
+	v2 := sort.StringSlice(other.Values())
+	v1.Sort()
+	v2.Sort()
+	return reflect.DeepEqual(v1, v2)
+}
+
+// Length returns the number of elements in the set
+func (us *unsafeSet) Length() int {
+	return len(us.d)
+}
+
+// Values returns the values of the Set in an unspecified order.
+func (us *unsafeSet) Values() (values []string) {
+	values = make([]string, 0, len(us.d))
+	for val := range us.d {
+		values = append(values, val)
+	}
+	return values
+}
+
+// Copy creates a new Set containing the values of the first
+func (us *unsafeSet) Copy() Set {
+	cp := NewUnsafeSet()
+	for val := range us.d {
+		cp.Add(val)
+	}
+
+	return cp
+}
+
+// Sub returns a copy of the set with all elements of other removed
+func (us *unsafeSet) Sub(other Set) Set {
+	oValues := other.Values()
+	result := us.Copy().(*unsafeSet)
+
+	for _, val := range oValues {
+		if _, ok := result.d[val]; !ok {
+			continue
+		}
+		delete(result.d, val)
+	}
+
+	return result
+}
+
+type tsafeSet struct {
+	us *unsafeSet
+	m  sync.RWMutex
+}
+
+func (ts *tsafeSet) Add(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Add(value)
+}
+
+func (ts *tsafeSet) Remove(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Remove(value)
+}
+
+func (ts *tsafeSet) Contains(value string) (exists bool) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Contains(value)
+}
+
+func (ts *tsafeSet) Equals(other Set) bool {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+
+	// If ts and other represent the same variable, avoid calling
+	// ts.us.Equals(other), to avoid double RLock bug
+	if _other, ok := other.(*tsafeSet); ok {
+		if _other == ts {
+			return true
+		}
+	}
+	return ts.us.Equals(other)
+}
+
+func (ts *tsafeSet) Length() int {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Length()
+}
+
+func (ts *tsafeSet) Values() (values []string) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Values()
+}
+
+func (ts *tsafeSet) Copy() Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Copy().(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
+
+func (ts *tsafeSet) Sub(other Set) Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+
+	// If ts and other represent the same variable, avoid calling
+	// ts.us.Sub(other), to avoid double RLock bug
+	if _other, ok := other.(*tsafeSet); ok {
+		if _other == ts {
+			usResult := NewUnsafeSet()
+			return &tsafeSet{usResult, sync.RWMutex{}}
+		}
+	}
+	usResult := ts.us.Sub(other).(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/types/slice.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go
new file mode 100644
index 0000000..49a3896
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"sort"
+	"strings"
+)
+
+type URLs []url.URL
+
+func NewURLs(strs []string) (URLs, error) {
+	all := make([]url.URL, len(strs))
+	if len(all) == 0 {
+		return nil, errors.New("no valid URLs given")
+	}
+	for i, in := range strs {
+		in = strings.TrimSpace(in)
+		u, err := url.Parse(in)
+		if err != nil {
+			return nil, err
+		}
+
+		switch u.Scheme {
+		case "http", "https":
+			if _, _, err := net.SplitHostPort(u.Host); err != nil {
+				return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+			}
+
+			if u.Path != "" {
+				return nil, fmt.Errorf("URL must not contain a path: %s", in)
+			}
+		case "unix", "unixs":
+			break
+		default:
+			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+		}
+		all[i] = *u
+	}
+	us := URLs(all)
+	us.Sort()
+	return us, nil
+}
+
+func MustNewURLs(strs []string) URLs {
+	urls, err := NewURLs(strs)
+	if err != nil {
+		panic(err)
+	}
+	return urls
+}
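+
+// Example (sketch): http/https URLs must be of the form host:port with no
+// path, while unix/unixs URLs pass through unchecked.
+//
+//	us, err := NewURLs([]string{"https://10.0.0.1:2379", "unix:///tmp/etcd.sock"})
+//	// err == nil; us is sorted
+//	_, err = NewURLs([]string{"https://10.0.0.1:2379/path"})
+//	// err: "URL must not contain a path: ..."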
+
+func (us URLs) String() string {
+	return strings.Join(us.StringSlice(), ",")
+}
+
+func (us *URLs) Sort() {
+	sort.Sort(us)
+}
+func (us URLs) Len() int           { return len(us) }
+func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
+func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }
+
+func (us URLs) StringSlice() []string {
+	out := make([]string, len(us))
+	for i := range us {
+		out[i] = us[i].String()
+	}
+
+	return out
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go
new file mode 100644
index 0000000..a7b2097
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+	"fmt"
+	"os"
+	"strings"
+)
+
+const envVerify = "ETCD_VERIFY"
+
+type VerificationType string
+
+const (
+	envVerifyValueAll    VerificationType = "all"
+	envVerifyValueAssert VerificationType = "assert"
+)
+
+func getEnvVerify() string {
+	return strings.ToLower(os.Getenv(envVerify))
+}
+
+func IsVerificationEnabled(verification VerificationType) bool {
+	env := getEnvVerify()
+	return env == string(envVerifyValueAll) || env == strings.ToLower(string(verification))
+}
+
+// EnableVerifications sets `envVerify` and returns a function that
+// can be used to restore the original settings.
+func EnableVerifications(verification VerificationType) func() {
+	previousEnv := getEnvVerify()
+	os.Setenv(envVerify, string(verification))
+	return func() {
+		os.Setenv(envVerify, previousEnv)
+	}
+}
+
+// EnableAllVerifications enables verification and returns a function
+// that can be used to restore the original settings.
+func EnableAllVerifications() func() {
+	return EnableVerifications(envVerifyValueAll)
+}
+
+// DisableVerifications unsets `envVerify` and returns a function that
+// can be used to restore the original settings.
+func DisableVerifications() func() {
+	previousEnv := getEnvVerify()
+	os.Unsetenv(envVerify)
+	return func() {
+		os.Setenv(envVerify, previousEnv)
+	}
+}
+
+// Verify performs verification if assertions are enabled.
+// In the default setup it runs in tests and is skipped in production code.
+func Verify(f func()) {
+	if IsVerificationEnabled(envVerifyValueAssert) {
+		f()
+	}
+}
+
+// Assert will panic with a given formatted message if the given condition is false.
+func Assert(condition bool, msg string, v ...any) {
+	if !condition {
+		panic(fmt.Sprintf("assertion failed: "+msg, v...))
+	}
+}
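+
+// Usage sketch (assumes ETCD_VERIFY=assert in the environment, or a test
+// that called EnableAllVerifications; index and lastIndex are hypothetical):
+//
+//	Verify(func() {
+//		Assert(index <= lastIndex, "index %d ahead of last index %d", index, lastIndex)
+//	})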
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/client/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/etcd/client/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/client/v3/OWNERS b/vendor/go.etcd.io/etcd/client/v3/OWNERS
new file mode 100644
index 0000000..2b7f28b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+  - area/clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md
new file mode 100644
index 0000000..af0087e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/README.md
@@ -0,0 +1,89 @@
+# etcd/client/v3
+
+[![Docs](https://img.shields.io/badge/docs-latest-green.svg)](https://etcd.io/docs)
+[![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/go.etcd.io/etcd/client/v3)
+
+`etcd/clientv3` is the official Go etcd client for v3.
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/client/v3
+```
+
+## Get started
+
+Create a client using `clientv3.New`:
+
+```go
+import (
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func main() {
+	cli, err := clientv3.New(clientv3.Config{
+		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+		DialTimeout: 5 * time.Second,
+	})
+	if err != nil {
+		// handle error!
+	}
+	defer cli.Close()
+}
+```
+
+etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls, and `clientv3` uses
+[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client
+after using it; otherwise the connection will leak goroutines. To specify a client request
+timeout, pass `context.WithTimeout` to APIs:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), timeout)
+resp, err := cli.Put(ctx, "sample_key", "sample_value")
+cancel()
+if err != nil {
+    // handle error!
+}
+// use the response
+```
+
+For full compatibility, it is recommended to install released versions of clients using Go modules.
+
+## Error Handling
+
+The etcd client returns two types of errors:
+
+1. context error: canceled or deadline exceeded.
+2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/api/v3rpc/rpctypes).
+
+Here is example code for handling client errors:
+
+```go
+resp, err := cli.Put(ctx, "", "")
+if err != nil {
+	switch err {
+	case context.Canceled:
+		log.Fatalf("ctx is canceled by another routine: %v", err)
+	case context.DeadlineExceeded:
+		log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
+	case rpctypes.ErrEmptyKey:
+		log.Fatalf("client-side error: %v", err)
+	default:
+		log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
+	}
+}
+```
+
+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/main/tests/integration/clientv3/examples/example_metrics_test.go).
+
+## Namespacing
+
+The [namespace](https://godoc.org/go.etcd.io/etcd/client/v3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
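+
+A minimal sketch (assuming a `cli` created as in the example above; the `"my-app/"` prefix is illustrative):
+
+```go
+import "go.etcd.io/etcd/client/v3/namespace"
+
+// Wrap the client's KV, Watcher, and Lease so every key is transparently
+// prefixed with "my-app/".
+cli.KV = namespace.NewKV(cli.KV, "my-app/")
+cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-app/")
+cli.Lease = namespace.NewLease(cli.Lease, "my-app/")
+
+// This Put actually writes the key "my-app/abc".
+cli.Put(context.TODO(), "abc", "123")
+```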
+
+## Request size limit
+
+The client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize`, in bytes. If neither is given, the send limit defaults to 2 MiB (including gRPC overhead bytes) and the receive limit defaults to `math.MaxInt32`.
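+
+A hedged sketch of raising both limits (the sizes here are arbitrary):
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints:          []string{"localhost:2379"},
+	MaxCallSendMsgSize: 8 * 1024 * 1024,  // 8 MiB send limit
+	MaxCallRecvMsgSize: 16 * 1024 * 1024, // 16 MiB receive limit
+})
+```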
+
+## Examples
+
+More code [examples](https://github.com/etcd-io/etcd/tree/main/tests/integration/clientv3/examples) can be found at [GoDoc](https://pkg.go.dev/go.etcd.io/etcd/client/v3).
diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go
new file mode 100644
index 0000000..382172b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/auth.go
@@ -0,0 +1,237 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc"
+
+	"go.etcd.io/etcd/api/v3/authpb"
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type (
+	AuthEnableResponse               pb.AuthEnableResponse
+	AuthDisableResponse              pb.AuthDisableResponse
+	AuthStatusResponse               pb.AuthStatusResponse
+	AuthenticateResponse             pb.AuthenticateResponse
+	AuthUserAddResponse              pb.AuthUserAddResponse
+	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
+	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
+	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
+	AuthUserGetResponse              pb.AuthUserGetResponse
+	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
+	AuthRoleAddResponse              pb.AuthRoleAddResponse
+	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
+	AuthRoleGetResponse              pb.AuthRoleGetResponse
+	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
+	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
+	AuthUserListResponse             pb.AuthUserListResponse
+	AuthRoleListResponse             pb.AuthRoleListResponse
+
+	PermissionType authpb.Permission_Type
+	Permission     authpb.Permission
+)
+
+const (
+	PermRead      = authpb.READ
+	PermWrite     = authpb.WRITE
+	PermReadWrite = authpb.READWRITE
+)
+
+type UserAddOptions authpb.UserAddOptions
+
+type Auth interface {
+	// Authenticate logs in and gets a token.
+	Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error)
+
+	// AuthEnable enables auth of an etcd cluster.
+	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
+
+	// AuthDisable disables auth of an etcd cluster.
+	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
+
+	// AuthStatus returns the status of auth of an etcd cluster.
+	AuthStatus(ctx context.Context) (*AuthStatusResponse, error)
+
+	// UserAdd adds a new user to an etcd cluster.
+	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+
+	// UserAddWithOptions adds a new user to an etcd cluster with some options.
+	UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
+
+	// UserDelete deletes a user from an etcd cluster.
+	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+
+	// UserChangePassword changes a user's password.
+	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+
+	// UserGrantRole grants a role to a user.
+	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+
+	// UserGet gets detailed information about a user.
+	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+
+	// UserList gets a list of all users.
+	UserList(ctx context.Context) (*AuthUserListResponse, error)
+
+	// UserRevokeRole revokes a role from a user.
+	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+
+	// RoleAdd adds a new role to an etcd cluster.
+	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+
+	// RoleGrantPermission grants a permission to a role.
+	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+
+	// RoleGet gets detailed information about a role.
+	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
+
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context) (*AuthRoleListResponse, error)
+
+	// RoleRevokePermission revokes a permission from a role.
+	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
+
+	// RoleDelete deletes a role.
+	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
+}
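+
+// A minimal usage sketch (assuming a connected *Client named cli; the user,
+// role and password names are illustrative). etcd requires a root user with
+// the root role before auth can be enabled:
+//
+//	_, err := cli.UserAdd(ctx, "root", "secret")
+//	_, err = cli.UserGrantRole(ctx, "root", "root")
+//	_, err = cli.AuthEnable(ctx)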
+
+type authClient struct {
+	remote   pb.AuthClient
+	callOpts []grpc.CallOption
+}
+
+func NewAuth(c *Client) Auth {
+	api := &authClient{remote: RetryAuthClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth {
+	api := &authClient{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
+	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthenticateResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
+	return (*AuthEnableResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
+	return (*AuthDisableResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) {
+	resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...)
+	return (*AuthStatusResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
+	return (*AuthUserAddResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
+	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
+	return (*AuthUserAddResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+	resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
+	return (*AuthUserDeleteResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+	resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
+	return (*AuthUserChangePasswordResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+	resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
+	return (*AuthUserGrantRoleResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
+	return (*AuthUserGetResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
+	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
+	return (*AuthUserListResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
+	resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
+	return (*AuthUserRevokeRoleResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
+	resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
+	return (*AuthRoleAddResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
+	perm := &authpb.Permission{
+		Key:      []byte(key),
+		RangeEnd: []byte(rangeEnd),
+		PermType: authpb.Permission_Type(permType),
+	}
+	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
+	return (*AuthRoleGrantPermissionResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
+	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
+	return (*AuthRoleGetResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
+	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
+	return (*AuthRoleListResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
+	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
+	return (*AuthRoleRevokePermissionResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
+	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
+	return (*AuthRoleDeleteResponse)(resp), ContextError(ctx, err)
+}
+
+func StrToPermissionType(s string) (PermissionType, error) {
+	val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
+	if ok {
+		return PermissionType(val), nil
+	}
+	return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go
new file mode 100644
index 0000000..24f5988
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/client.go
@@ -0,0 +1,655 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coreos/go-semver/semver"
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	grpccredentials "google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/credentials/insecure"
+	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/status"
+
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	"go.etcd.io/etcd/api/v3/version"
+	"go.etcd.io/etcd/client/pkg/v3/logutil"
+	"go.etcd.io/etcd/client/pkg/v3/verify"
+	"go.etcd.io/etcd/client/v3/credentials"
+	"go.etcd.io/etcd/client/v3/internal/endpoint"
+	"go.etcd.io/etcd/client/v3/internal/resolver"
+)
+
+var (
+	ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
+	ErrOldCluster           = errors.New("etcdclient: old cluster version")
+)
+
+// Client provides and manages an etcd v3 client session.
+type Client struct {
+	Cluster
+	KV
+	Lease
+	Watcher
+	Auth
+	Maintenance
+
+	conn *grpc.ClientConn
+
+	cfg      Config
+	creds    grpccredentials.TransportCredentials
+	resolver *resolver.EtcdManualResolver
+
+	epMu      *sync.RWMutex
+	endpoints []string
+
+	ctx    context.Context
+	cancel context.CancelFunc
+
+	// Username is a user name for authentication.
+	Username string
+	// Password is a password for authentication.
+	Password        string
+	authTokenBundle credentials.PerRPCCredentialsBundle
+
+	callOpts []grpc.CallOption
+
+	lgMu *sync.RWMutex
+	lg   *zap.Logger
+}
+
+// New creates a new etcdv3 client from a given configuration.
+func New(cfg Config) (*Client, error) {
+	if len(cfg.Endpoints) == 0 {
+		return nil, ErrNoAvailableEndpoints
+	}
+
+	return newClient(&cfg)
+}
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context, opts ...Option) *Client {
+	cctx, cancel := context.WithCancel(ctx)
+	c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), epMu: new(sync.RWMutex)}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.lg == nil {
+		c.lg = zap.NewNop()
+	}
+	return c
+}
+
+// Option is a function type that can be passed as an argument to NewCtxClient to configure the client.
+type Option func(*Client)
+
+// NewFromURL creates a new etcdv3 client from a URL.
+func NewFromURL(url string) (*Client, error) {
+	return New(Config{Endpoints: []string{url}})
+}
+
+// NewFromURLs creates a new etcdv3 client from URLs.
+func NewFromURLs(urls []string) (*Client, error) {
+	return New(Config{Endpoints: urls})
+}
+
+// WithZapLogger is a NewCtxClient option that overrides the logger
+func WithZapLogger(lg *zap.Logger) Option {
+	return func(c *Client) {
+		c.lg = lg
+	}
+}
+
+// WithLogger overrides the logger.
+//
+// Deprecated: Please use WithZapLogger or the Logger field in clientv3.Config.
+//
+// Does not change grpcLogger, which can be explicitly configured
+// using the grpc_zap.ReplaceGrpcLoggerV2(..) method.
+func (c *Client) WithLogger(lg *zap.Logger) *Client {
+	c.lgMu.Lock()
+	c.lg = lg
+	c.lgMu.Unlock()
+	return c
+}
+
+// GetLogger gets the logger.
+// NOTE: This method is for internal use of etcd-client library and should not be used as general-purpose logger.
+func (c *Client) GetLogger() *zap.Logger {
+	c.lgMu.RLock()
+	l := c.lg
+	c.lgMu.RUnlock()
+	return l
+}
+
+// Close shuts down the client's etcd connections.
+func (c *Client) Close() error {
+	c.cancel()
+	if c.Watcher != nil {
+		c.Watcher.Close()
+	}
+	if c.Lease != nil {
+		c.Lease.Close()
+	}
+	if c.conn != nil {
+		return ContextError(c.ctx, c.conn.Close())
+	}
+	return c.ctx.Err()
+}
+
+// Ctx is a context for "out of band" messages (e.g., for sending
+// "clean up" message when another context is canceled). It is
+// canceled on client Close().
+func (c *Client) Ctx() context.Context { return c.ctx }
+
+// Endpoints lists the registered endpoints for the client.
+func (c *Client) Endpoints() []string {
+	// copy the slice; protect original endpoints from being changed
+	c.epMu.RLock()
+	defer c.epMu.RUnlock()
+	eps := make([]string, len(c.endpoints))
+	copy(eps, c.endpoints)
+	return eps
+}
+
+// SetEndpoints updates client's endpoints.
+func (c *Client) SetEndpoints(eps ...string) {
+	c.epMu.Lock()
+	defer c.epMu.Unlock()
+	c.endpoints = eps
+
+	c.resolver.SetEndpoints(eps)
+}
+
+// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
+func (c *Client) Sync(ctx context.Context) error {
+	mresp, err := c.MemberList(ctx)
+	if err != nil {
+		return err
+	}
+	var eps []string
+	for _, m := range mresp.Members {
+		if len(m.Name) != 0 && !m.IsLearner {
+			eps = append(eps, m.ClientURLs...)
+		}
+	}
+	// The linearizable `MemberList` returned successfully, so the
+	// endpoints shouldn't be empty.
+	verify.Verify(func() {
+		if len(eps) == 0 {
+			panic("empty endpoints returned from etcd cluster")
+		}
+	})
+	c.SetEndpoints(eps...)
+	c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps))
+	return nil
+}
+
+func (c *Client) autoSync() {
+	if c.cfg.AutoSyncInterval == time.Duration(0) {
+		return
+	}
+
+	for {
+		select {
+		case <-c.ctx.Done():
+			return
+		case <-time.After(c.cfg.AutoSyncInterval):
+			ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
+			err := c.Sync(ctx)
+			cancel()
+			if err != nil && !errors.Is(err, c.ctx.Err()) {
+				c.lg.Info("Auto sync endpoints failed.", zap.Error(err))
+			}
+		}
+	}
+}
+
+// dialSetupOpts gives the dial opts prior to any authentication.
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption {
+	var opts []grpc.DialOption
+
+	if c.cfg.DialKeepAliveTime > 0 {
+		params := keepalive.ClientParameters{
+			Time:                c.cfg.DialKeepAliveTime,
+			Timeout:             c.cfg.DialKeepAliveTimeout,
+			PermitWithoutStream: c.cfg.PermitWithoutStream,
+		}
+		opts = append(opts, grpc.WithKeepaliveParams(params))
+	}
+	opts = append(opts, dopts...)
+
+	if creds != nil {
+		opts = append(opts, grpc.WithTransportCredentials(creds))
+	} else {
+		opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+	}
+
+	unaryMaxRetries := defaultUnaryMaxRetries
+	if c.cfg.MaxUnaryRetries > 0 {
+		unaryMaxRetries = c.cfg.MaxUnaryRetries
+	}
+
+	backoffWaitBetween := defaultBackoffWaitBetween
+	if c.cfg.BackoffWaitBetween > 0 {
+		backoffWaitBetween = c.cfg.BackoffWaitBetween
+	}
+
+	backoffJitterFraction := defaultBackoffJitterFraction
+	if c.cfg.BackoffJitterFraction > 0 {
+		backoffJitterFraction = c.cfg.BackoffJitterFraction
+	}
+
+	// Interceptor retry and backoff.
+	// TODO: Replace all of clientv3/retry.go with RetryPolicy:
+	// https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130
+	rrBackoff := withBackoff(c.roundRobinQuorumBackoff(backoffWaitBetween, backoffJitterFraction))
+	opts = append(opts,
+		// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
+		// Streams that are safe to retry are enabled individually.
+		grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)),
+		grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(unaryMaxRetries), rrBackoff)),
+	)
+
+	return opts
+}
+
+// Dial connects to a single endpoint using the client's config.
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+	creds := c.credentialsForEndpoint(ep)
+
+	// Use an ad-hoc resolver to guarantee that only the explicitly
+	// given endpoint is used.
+	return c.dial(creds, grpc.WithResolvers(resolver.New(ep)))
+}
+
+func (c *Client) getToken(ctx context.Context) error {
+	var err error // return the last error in case of failure
+
+	if c.Username == "" || c.Password == "" {
+		return nil
+	}
+
+	resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
+	if err != nil {
+		if errors.Is(err, rpctypes.ErrAuthNotEnabled) {
+			c.authTokenBundle.UpdateAuthToken("")
+			return nil
+		}
+		return err
+	}
+	c.authTokenBundle.UpdateAuthToken(resp.Token)
+	return nil
+}
+
+// dialWithBalancer dials the client's current load-balanced resolver group. The scheme of the host
+// of the provided endpoint determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	creds := c.credentialsForEndpoint(c.Endpoints()[0])
+	opts := append(dopts, grpc.WithResolvers(c.resolver))
+	return c.dial(creds, opts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	opts := c.dialSetupOpts(creds, dopts...)
+
+	if c.authTokenBundle != nil {
+		opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
+	}
+
+	opts = append(opts, c.cfg.DialOptions...)
+
+	dctx := c.ctx
+	if c.cfg.DialTimeout > 0 {
+		var cancel context.CancelFunc
+		dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+		defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+	}
+	target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0]))
+	conn, err := grpc.DialContext(dctx, target, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return conn, nil
+}
+
+func authority(endpoint string) string {
+	spl := strings.SplitN(endpoint, "://", 2)
+	if len(spl) < 2 {
+		if strings.HasPrefix(endpoint, "unix:") {
+			return endpoint[len("unix:"):]
+		}
+		if strings.HasPrefix(endpoint, "unixs:") {
+			return endpoint[len("unixs:"):]
+		}
+		return endpoint
+	}
+	return spl[1]
+}
+
+func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
+	r := endpoint.RequiresCredentials(ep)
+	switch r {
+	case endpoint.CredsDrop:
+		return nil
+	case endpoint.CredsOptional:
+		return c.creds
+	case endpoint.CredsRequire:
+		if c.creds != nil {
+			return c.creds
+		}
+		return credentials.NewTransportCredential(nil)
+	default:
+		panic(fmt.Errorf("unsupported CredsRequirement: %v", r))
+	}
+}
+
+func newClient(cfg *Config) (*Client, error) {
+	if cfg == nil {
+		cfg = &Config{}
+	}
+	var creds grpccredentials.TransportCredentials
+	if cfg.TLS != nil {
+		creds = credentials.NewTransportCredential(cfg.TLS)
+	}
+
+	// use a temporary skeleton client to bootstrap first connection
+	baseCtx := context.TODO()
+	if cfg.Context != nil {
+		baseCtx = cfg.Context
+	}
+
+	ctx, cancel := context.WithCancel(baseCtx)
+	client := &Client{
+		conn:     nil,
+		cfg:      *cfg,
+		creds:    creds,
+		ctx:      ctx,
+		cancel:   cancel,
+		epMu:     new(sync.RWMutex),
+		callOpts: defaultCallOpts,
+		lgMu:     new(sync.RWMutex),
+	}
+
+	var err error
+	if cfg.Logger != nil {
+		client.lg = cfg.Logger
+	} else if cfg.LogConfig != nil {
+		client.lg, err = cfg.LogConfig.Build()
+	} else {
+		client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+		if client.lg != nil {
+			client.lg = client.lg.Named("etcd-client")
+		}
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if cfg.Username != "" && cfg.Password != "" {
+		client.Username = cfg.Username
+		client.Password = cfg.Password
+		client.authTokenBundle = credentials.NewPerRPCCredentialBundle()
+	}
+	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+		}
+		callOpts := []grpc.CallOption{
+			defaultWaitForReady,
+			defaultMaxCallSendMsgSize,
+			defaultMaxCallRecvMsgSize,
+		}
+		if cfg.MaxCallSendMsgSize > 0 {
+			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+		}
+		if cfg.MaxCallRecvMsgSize > 0 {
+			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+		}
+		client.callOpts = callOpts
+	}
+
+	client.resolver = resolver.New(cfg.Endpoints...)
+
+	if len(cfg.Endpoints) < 1 {
+		client.cancel()
+		return nil, errors.New("at least one Endpoint is required in client config")
+	}
+	client.SetEndpoints(cfg.Endpoints...)
+
+	// Use a provided endpoint target so that, for https:// endpoints without any TLS config given,
+	// grpc will assume the certificate server name is the endpoint host.
+	conn, err := client.dialWithBalancer()
+	if err != nil {
+		client.cancel()
+		client.resolver.Close()
+		// TODO: Error like `fmt.Errorf(dialing [%s] failed: %v, strings.Join(cfg.Endpoints, ";"), err)` would help with debugging a lot.
+		return nil, err
+	}
+	client.conn = conn
+
+	client.Cluster = NewCluster(client)
+	client.KV = NewKV(client)
+	client.Lease = NewLease(client)
+	client.Watcher = NewWatcher(client)
+	client.Auth = NewAuth(client)
+	client.Maintenance = NewMaintenance(client)
+
+	// get token with established connection
+	ctx, cancel = client.ctx, func() {}
+	if client.cfg.DialTimeout > 0 {
+		ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout)
+	}
+	err = client.getToken(ctx)
+	if err != nil {
+		client.Close()
+		cancel()
+		// TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err)
+		return nil, err
+	}
+	cancel()
+
+	if cfg.RejectOldCluster {
+		if err := client.checkVersion(); err != nil {
+			client.Close()
+			return nil, err
+		}
+	}
+
+	go client.autoSync()
+	return client, nil
+}
+
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+	return func(attempt uint) time.Duration {
+		// after each round robin across quorum, backoff for our wait between duration
+		n := uint(len(c.Endpoints()))
+		quorum := (n/2 + 1)
+		if attempt%quorum == 0 {
+			c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+			return jitterUp(waitBetween, jitterFraction)
+		}
+		c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+		return 0
+	}
+}
+
+// minSupportedVersion returns the minimum version supported, which is the previous minor release.
+func minSupportedVersion() *semver.Version {
+	ver := semver.Must(semver.NewVersion(version.Version))
+	// consider only major and minor version
+	ver = &semver.Version{Major: ver.Major, Minor: ver.Minor}
+	for i := range version.AllVersions {
+		if version.AllVersions[i].Equal(*ver) {
+			if i == 0 {
+				return ver
+			}
+			return &version.AllVersions[i-1]
+		}
+	}
+	panic("current version is not in the version list")
+}
+
+func (c *Client) checkVersion() (err error) {
+	var wg sync.WaitGroup
+
+	eps := c.Endpoints()
+	errc := make(chan error, len(eps))
+	ctx, cancel := context.WithCancel(c.ctx)
+	if c.cfg.DialTimeout > 0 {
+		cancel()
+		ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+	}
+
+	wg.Add(len(eps))
+	for _, ep := range eps {
+		// if cluster is current, any endpoint gives a recent version
+		go func(e string) {
+			defer wg.Done()
+			resp, rerr := c.Status(ctx, e)
+			if rerr != nil {
+				errc <- rerr
+				return
+			}
+			vs, serr := semver.NewVersion(resp.Version)
+			if serr != nil {
+				errc <- serr
+				return
+			}
+
+			if vs.LessThan(*minSupportedVersion()) {
+				rerr = ErrOldCluster
+			}
+			errc <- rerr
+		}(ep)
+	}
+	// wait for success
+	for range eps {
+		if err = <-errc; err != nil {
+			break
+		}
+	}
+	cancel()
+	wg.Wait()
+	return err
+}
+
+// ActiveConnection returns the current in-use connection
+func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
+
+// isHaltErr returns true if the given error and context indicate no forward
+// progress can be made, even after reconnecting.
+func isHaltErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return true
+	}
+	if err == nil {
+		return false
+	}
+	ev, _ := status.FromError(err)
+	// Unavailable codes mean the system will be right back.
+	// (e.g., can't connect, lost leader)
+	// Treat Internal codes as if something failed, leaving the
+	// system in an inconsistent state, but retrying could make progress.
+	// (e.g., failed in middle of send, corrupted frame)
+	// TODO: are permanent Internal errors possible from grpc?
+	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
+}
+
+// isUnavailableErr returns true if the given error is an unavailable error
+func isUnavailableErr(ctx context.Context, err error) bool {
+	if ctx != nil && ctx.Err() != nil {
+		return false
+	}
+	if err == nil {
+		return false
+	}
+	ev, ok := status.FromError(err)
+	if ok {
+		// Unavailable codes mean the system will be right back.
+		// (e.g., can't connect, lost leader)
+		return ev.Code() == codes.Unavailable
+	}
+	return false
+}
+
+// ContextError converts the error into an EtcdError if the error message matches one of
+// the defined messages; otherwise, it tries to retrieve the context error.
+func ContextError(ctx context.Context, err error) error {
+	if err == nil {
+		return nil
+	}
+	err = rpctypes.Error(err)
+	var serverErr rpctypes.EtcdError
+	if errors.As(err, &serverErr) {
+		return err
+	}
+	if ev, ok := status.FromError(err); ok {
+		code := ev.Code()
+		switch code {
+		case codes.DeadlineExceeded:
+			fallthrough
+		case codes.Canceled:
+			if ctx.Err() != nil {
+				err = ctx.Err()
+			}
+		}
+	}
+	return err
+}
+
+func canceledByCaller(stopCtx context.Context, err error) bool {
+	if stopCtx.Err() == nil || err == nil {
+		return false
+	}
+
+	return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
+}
+
+// IsConnCanceled returns true if the error is from a closed gRPC connection.
+// ref. https://github.com/grpc/grpc-go/pull/1854
+func IsConnCanceled(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// >= gRPC v1.23.x
+	s, ok := status.FromError(err)
+	if ok {
+		// connection is canceled or server has already closed the connection
+		return s.Code() == codes.Canceled || s.Message() == "transport is closing"
+	}
+
+	// >= gRPC v1.10.x
+	if errors.Is(err, context.Canceled) {
+		return true
+	}
+
+	// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
+	return strings.Contains(err.Error(), "grpc: the client connection is closing")
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go
new file mode 100644
index 0000000..1b7e833
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+type (
+	Member                pb.Member
+	MemberListResponse    pb.MemberListResponse
+	MemberAddResponse     pb.MemberAddResponse
+	MemberRemoveResponse  pb.MemberRemoveResponse
+	MemberUpdateResponse  pb.MemberUpdateResponse
+	MemberPromoteResponse pb.MemberPromoteResponse
+)
+
+type Cluster interface {
+	// MemberList lists the current cluster membership.
+	MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error)
+
+	// MemberAdd adds a new member into the cluster.
+	MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+	// MemberAddAsLearner adds a new learner member into the cluster.
+	MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
+
+	// MemberUpdate updates the peer addresses of the member.
+	MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
+
+	// MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+	MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
+}
+
+type cluster struct {
+	remote   pb.ClusterClient
+	callOpts []grpc.CallOption
+}
+
+func NewCluster(c *Client) Cluster {
+	api := &cluster{remote: RetryClusterClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
+	api := &cluster{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+	return c.memberAdd(ctx, peerAddrs, false)
+}
+
+func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+	return c.memberAdd(ctx, peerAddrs, true)
+}
+
+func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
+	r := &pb.MemberAddRequest{
+		PeerURLs:  peerAddrs,
+		IsLearner: isLearner,
+	}
+	resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*MemberAddResponse)(resp), nil
+}
+
+func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
+	r := &pb.MemberRemoveRequest{ID: id}
+	resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*MemberRemoveResponse)(resp), nil
+}
+
+func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+	// fail-fast before panic in rafthttp
+	if _, err := types.NewURLs(peerAddrs); err != nil {
+		return nil, err
+	}
+
+	// it is safe to retry on update.
+	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+	resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
+	if err == nil {
+		return (*MemberUpdateResponse)(resp), nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+func (c *cluster) MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) {
+	opt := OpGet("", opts...)
+	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: !opt.serializable}, c.callOpts...)
+	if err == nil {
+		return (*MemberListResponse)(resp), nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
+	r := &pb.MemberPromoteRequest{ID: id}
+	resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*MemberPromoteResponse)(resp), nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/compact_op.go b/vendor/go.etcd.io/etcd/client/v3/compact_op.go
new file mode 100644
index 0000000..a6e660a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/compact_op.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+// CompactOp represents a compact operation.
+type CompactOp struct {
+	revision int64
+	physical bool
+}
+
+// CompactOption configures compact operation.
+type CompactOption func(*CompactOp)
+
+func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpCompact wraps a slice of CompactOptions to create a CompactOp.
+func OpCompact(rev int64, opts ...CompactOption) CompactOp {
+	ret := CompactOp{revision: rev}
+	ret.applyCompactOpts(opts)
+	return ret
+}
+
+func (op CompactOp) toRequest() *pb.CompactionRequest {
+	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
+}
+
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
+func WithCompactPhysical() CompactOption {
+	return func(op *CompactOp) { op.physical = true }
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/compare.go b/vendor/go.etcd.io/etcd/client/v3/compare.go
new file mode 100644
index 0000000..663fdb4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/compare.go
@@ -0,0 +1,142 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type (
+	CompareTarget int
+	CompareResult int
+)
+
+const (
+	CompareVersion CompareTarget = iota
+	CompareCreated
+	CompareModified
+	CompareValue
+)
+
+type Cmp pb.Compare
+
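+// Compare fills in cmp's result and target value. For example (illustrative),
+// Compare(CreateRevision("k"), "=", 0) holds in a Txn only if the key "k"
+// does not exist yet.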
+func Compare(cmp Cmp, result string, v any) Cmp {
+	var r pb.Compare_CompareResult
+
+	switch result {
+	case "=":
+		r = pb.Compare_EQUAL
+	case "!=":
+		r = pb.Compare_NOT_EQUAL
+	case ">":
+		r = pb.Compare_GREATER
+	case "<":
+		r = pb.Compare_LESS
+	default:
+		panic("Unknown result op")
+	}
+
+	cmp.Result = r
+	switch cmp.Target {
+	case pb.Compare_VALUE:
+		val, ok := v.(string)
+		if !ok {
+			panic("bad compare value")
+		}
+		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
+	case pb.Compare_VERSION:
+		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
+	case pb.Compare_CREATE:
+		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+	case pb.Compare_MOD:
+		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+	case pb.Compare_LEASE:
+		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+	default:
+		panic("Unknown compare type")
+	}
+	return cmp
+}
+
+func Value(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
+		return tu.Value
+	}
+	return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+	cmp.RangeEnd = []byte(end)
+	return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+	cmp.RangeEnd = getPrefix(cmp.Key)
+	return cmp
+}
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val any) int64 {
+	if v, ok := val.(int64); ok {
+		return v
+	}
+	if v, ok := val.(int); ok {
+		return int64(v)
+	}
+	panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val any) int64 {
+	if v, ok := val.(LeaseID); ok {
+		return int64(v)
+	}
+	return mustInt64(val)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/config.go b/vendor/go.etcd.io/etcd/client/v3/config.go
new file mode 100644
index 0000000..8351828
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/config.go
@@ -0,0 +1,228 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"crypto/tls"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+
+	"go.etcd.io/etcd/client/pkg/v3/transport"
+)
+
+type Config struct {
+	// Endpoints is a list of URLs.
+	Endpoints []string `json:"endpoints"`
+
+	// AutoSyncInterval is the interval at which the client updates its endpoints
+	// with the cluster's latest members. 0 disables auto-sync (the default).
+	AutoSyncInterval time.Duration `json:"auto-sync-interval"`
+
+	// DialTimeout is the timeout for failing to establish a connection.
+	DialTimeout time.Duration `json:"dial-timeout"`
+
+	// DialKeepAliveTime is the time after which client pings the server to see if
+	// transport is alive.
+	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
+
+	// DialKeepAliveTimeout is the time that the client waits for a response for the
+	// keep-alive probe. If the response is not received in this time, the connection is closed.
+	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
+
+	// MaxCallSendMsgSize is the client-side request send limit in bytes.
+	// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
+	// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
+	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+	MaxCallSendMsgSize int
+
+	// MaxCallRecvMsgSize is the client-side response receive limit.
+	// If 0, it defaults to "math.MaxInt32", because range response can
+	// easily exceed request send limits.
+	// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
+	// ("--max-recv-bytes" flag to etcd).
+	MaxCallRecvMsgSize int
+
+	// TLS holds the client secure credentials, if any.
+	TLS *tls.Config
+
+	// Username is a user name for authentication.
+	Username string `json:"username"`
+
+	// Password is a password for authentication.
+	Password string `json:"password"`
+
+	// RejectOldCluster when set will refuse to create a client against an outdated cluster.
+	RejectOldCluster bool `json:"reject-old-cluster"`
+
+	// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+	// For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
+	// Without this, Dial returns immediately and connecting to the server happens in the background.
+	DialOptions []grpc.DialOption
+
+	// Context is the default client context; it can be used to cancel grpc dial out and
+	// other operations that do not have an explicit context.
+	Context context.Context
+
+	// Logger sets client-side logger.
+	// If nil, fallback to building LogConfig.
+	Logger *zap.Logger
+
+	// LogConfig configures client-side logger.
+	// If nil, use the default logger.
+	// TODO: configure gRPC logger
+	LogConfig *zap.Config
+
+	// PermitWithoutStream when set will allow the client to send keepalive pings to the server without any active streams (RPCs).
+	PermitWithoutStream bool `json:"permit-without-stream"`
+
+	// MaxUnaryRetries is the maximum number of retries for unary RPCs.
+	MaxUnaryRetries uint `json:"max-unary-retries"`
+
+	// BackoffWaitBetween is the wait time before retrying an RPC.
+	BackoffWaitBetween time.Duration `json:"backoff-wait-between"`
+
+	// BackoffJitterFraction is the jitter fraction to randomize backoff wait time.
+	BackoffJitterFraction float64 `json:"backoff-jitter-fraction"`
+
+	// TODO: support custom balancer picker
+}
+
+// ConfigSpec is the configuration from users, which comes from command-line flags,
+// environment variables or config file. It is a fully declarative configuration,
+// and can be serialized & deserialized to/from JSON.
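+//
+// A sample JSON form (illustrative; durations marshal as nanoseconds):
+//
+//	{"endpoints": ["localhost:2379"], "dial-timeout": 2000000000,
+//	 "auth": {"username": "root", "password": "secret"}}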
+type ConfigSpec struct {
+	Endpoints          []string      `json:"endpoints"`
+	RequestTimeout     time.Duration `json:"request-timeout"`
+	DialTimeout        time.Duration `json:"dial-timeout"`
+	KeepAliveTime      time.Duration `json:"keepalive-time"`
+	KeepAliveTimeout   time.Duration `json:"keepalive-timeout"`
+	MaxCallSendMsgSize int           `json:"max-request-bytes"`
+	MaxCallRecvMsgSize int           `json:"max-recv-bytes"`
+	Secure             *SecureConfig `json:"secure"`
+	Auth               *AuthConfig   `json:"auth"`
+}
+
+type SecureConfig struct {
+	Cert       string `json:"cert"`
+	Key        string `json:"key"`
+	Cacert     string `json:"cacert"`
+	ServerName string `json:"server-name"`
+
+	InsecureTransport  bool `json:"insecure-transport"`
+	InsecureSkipVerify bool `json:"insecure-skip-tls-verify"`
+}
+
+type AuthConfig struct {
+	Username string `json:"username"`
+	Password string `json:"password"`
+}
+
+func (cs *ConfigSpec) Clone() *ConfigSpec {
+	if cs == nil {
+		return nil
+	}
+
+	clone := *cs
+
+	if len(cs.Endpoints) > 0 {
+		clone.Endpoints = make([]string, len(cs.Endpoints))
+		copy(clone.Endpoints, cs.Endpoints)
+	}
+
+	if cs.Secure != nil {
+		clone.Secure = &SecureConfig{}
+		*clone.Secure = *cs.Secure
+	}
+	if cs.Auth != nil {
+		clone.Auth = &AuthConfig{}
+		*clone.Auth = *cs.Auth
+	}
+
+	return &clone
+}
+
+func (cfg AuthConfig) Empty() bool {
+	return cfg.Username == "" && cfg.Password == ""
+}
+
+// NewClientConfig creates a Config based on the provided ConfigSpec.
+func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) {
+	tlsCfg, err := newTLSConfig(confSpec.Secure, lg)
+	if err != nil {
+		return nil, err
+	}
+
+	cfg := &Config{
+		Endpoints:            confSpec.Endpoints,
+		DialTimeout:          confSpec.DialTimeout,
+		DialKeepAliveTime:    confSpec.KeepAliveTime,
+		DialKeepAliveTimeout: confSpec.KeepAliveTimeout,
+		MaxCallSendMsgSize:   confSpec.MaxCallSendMsgSize,
+		MaxCallRecvMsgSize:   confSpec.MaxCallRecvMsgSize,
+		TLS:                  tlsCfg,
+	}
+
+	if confSpec.Auth != nil {
+		cfg.Username = confSpec.Auth.Username
+		cfg.Password = confSpec.Auth.Password
+	}
+
+	return cfg, nil
+}
+
+func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) {
+	var (
+		tlsCfg *tls.Config
+		err    error
+	)
+
+	if scfg == nil {
+		return nil, nil
+	}
+
+	if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" {
+		cfgtls := &transport.TLSInfo{
+			CertFile:      scfg.Cert,
+			KeyFile:       scfg.Key,
+			TrustedCAFile: scfg.Cacert,
+			ServerName:    scfg.ServerName,
+			Logger:        lg,
+		}
+		if tlsCfg, err = cfgtls.ClientConfig(); err != nil {
+			return nil, err
+		}
+	}
+
+	// If key/cert is not given but the user wants a secure connection, we
+	// should still set up an empty TLS configuration for gRPC to set up a
+	// secure connection.
+	if tlsCfg == nil && !scfg.InsecureTransport {
+		tlsCfg = &tls.Config{}
+	}
+
+	// If the user wants to skip TLS verification then we should set
+	// the InsecureSkipVerify flag in the TLS configuration.
+	if scfg.InsecureSkipVerify {
+		if tlsCfg == nil {
+			tlsCfg = &tls.Config{}
+		}
+		tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify
+	}
+
+	return tlsCfg, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go
new file mode 100644
index 0000000..a2d8b45
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements the gRPC credentials interface with etcd-specific logic,
+// e.g., client handshake with a custom authority parameter.
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"sync"
+
+	grpccredentials "google.golang.org/grpc/credentials"
+
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+func NewTransportCredential(cfg *tls.Config) grpccredentials.TransportCredentials {
+	return grpccredentials.NewTLS(cfg)
+}
+
+// PerRPCCredentialsBundle defines the gRPC per-RPC credentials interface.
+type PerRPCCredentialsBundle interface {
+	UpdateAuthToken(token string)
+	PerRPCCredentials() grpccredentials.PerRPCCredentials
+}
+
+func NewPerRPCCredentialBundle() PerRPCCredentialsBundle {
+	return &perRPCCredentialBundle{
+		rc: &perRPCCredential{},
+	}
+}
+
+// perRPCCredentialBundle implements `PerRPCCredentialsBundle` interface.
+type perRPCCredentialBundle struct {
+	rc *perRPCCredential
+}
+
+func (b *perRPCCredentialBundle) UpdateAuthToken(token string) {
+	if b.rc == nil {
+		return
+	}
+	b.rc.UpdateAuthToken(token)
+}
+
+func (b *perRPCCredentialBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+	return b.rc
+}
+
+// perRPCCredential implements `grpccredentials.PerRPCCredentials` interface.
+type perRPCCredential struct {
+	authToken   string
+	authTokenMu sync.RWMutex
+}
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	rc.authTokenMu.RLock()
+	authToken := rc.authToken
+	rc.authTokenMu.RUnlock()
+	if authToken == "" {
+		return nil, nil
+	}
+	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+	rc.authTokenMu.Lock()
+	rc.authToken = token
+	rc.authTokenMu.Unlock()
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/ctx.go b/vendor/go.etcd.io/etcd/client/v3/ctx.go
new file mode 100644
index 0000000..38cee6c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/ctx.go
@@ -0,0 +1,51 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+	"go.etcd.io/etcd/api/v3/version"
+)
+
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
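+//
+// A typical use (illustrative):
+//
+//	ctx := clientv3.WithRequireLeader(context.Background())
+//	resp, err := cli.Get(ctx, "sample_key")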
+func WithRequireLeader(ctx context.Context) context.Context {
+	md, ok := metadata.FromOutgoingContext(ctx)
+	if !ok { // no outgoing metadata ctx key, create one
+		md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+		return metadata.NewOutgoingContext(ctx, md)
+	}
+	copied := md.Copy() // avoid racy updates
+	// overwrite/add 'hasleader' key/value
+	copied.Set(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+	return metadata.NewOutgoingContext(ctx, copied)
+}
+
+// withVersion embeds the client version into the outgoing context metadata.
+func withVersion(ctx context.Context) context.Context {
+	md, ok := metadata.FromOutgoingContext(ctx)
+	if !ok { // no outgoing metadata ctx key, create one
+		md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
+		return metadata.NewOutgoingContext(ctx, md)
+	}
+	copied := md.Copy() // avoid racy updates
+	// overwrite/add version key/value
+	copied.Set(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
+	return metadata.NewOutgoingContext(ctx, copied)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go
new file mode 100644
index 0000000..bd820d3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/doc.go
@@ -0,0 +1,105 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package clientv3 implements the official Go etcd client for v3.
+//
+// Create client using `clientv3.New`:
+//
+//	// expect dial time-out on ipv4 blackhole
+//	_, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"http://254.0.0.1:12345"},
+//		DialTimeout: 2 * time.Second,
+//	})
+//
+//	// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
+//	if err == context.DeadlineExceeded {
+//		// handle errors
+//	}
+//
+//	// etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
+//	if err == grpc.ErrClientConnTimeout {
+//		// handle errors
+//	}
+//
+//	cli, err := clientv3.New(clientv3.Config{
+//		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+//		DialTimeout: 5 * time.Second,
+//	})
+//	if err != nil {
+//		// handle error!
+//	}
+//	defer cli.Close()
+//
+// Make sure to close the client after using it. If the client is not closed, the
+// connection will leak goroutines.
+//
+// To specify a client request timeout, wrap the context with context.WithTimeout:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+//	defer cancel()
+//	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
+//	if err != nil {
+//	    // handle error!
+//	}
+//	// use the response
+//
+// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
+// Clients are safe for concurrent use by multiple goroutines.
+//
+// etcd client returns 2 types of errors:
+//
+//  1. context error: canceled or deadline exceeded.
+//  2. gRPC error: e.g. when the clock drifts on the server side before the client's context deadline is exceeded.
+//     See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
+//
+// Here is the example code to handle client errors:
+//
+//	resp, err := kvc.Put(ctx, "", "")
+//	if err != nil {
+//		if err == context.Canceled {
+//			// ctx is canceled by another routine
+//		} else if err == context.DeadlineExceeded {
+//			// ctx is attached with a deadline and it exceeded
+//		} else if err == rpctypes.ErrEmptyKey {
+//			// client-side error: key is not provided
+//		} else if ev, ok := status.FromError(err); ok {
+//			code := ev.Code()
+//			if code == codes.DeadlineExceeded {
+//				// server-side context might have timed-out first (due to clock skew)
+//				// while original client-side context is not timed-out yet
+//			}
+//		} else {
+//			// bad cluster endpoints, which are not etcd servers
+//		}
+//	}
+//
+//	go func() { cli.Close() }()
+//	_, err := kvc.Get(ctx, "a")
+//	if err != nil {
+//		// with etcd clientv3 <= v3.3
+//		if err == context.Canceled {
+//			// grpc balancer calls 'Get' with an inflight client.Close
+//		} else if err == grpc.ErrClientConnClosing { // <= gRPC v1.7.x
+//			// grpc balancer calls 'Get' after client.Close.
+//		}
+//		// with etcd clientv3 >= v3.4
+//		if clientv3.IsConnCanceled(err) {
+//			// gRPC client connection is closed
+//		}
+//	}
+//
+// The grpc load balancer is registered statically and is shared across etcd clients.
+// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
+// variable.  E.g. "ETCD_CLIENT_DEBUG=1".
+package clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
new file mode 100644
index 0000000..2c45b5e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package endpoint
+
+import (
+	"fmt"
+	"net"
+	"net/url"
+	"path"
+	"strings"
+)
+
+type CredsRequirement int
+
+const (
+	// CredsRequire - Credentials/certificate required for this type of connection.
+	CredsRequire CredsRequirement = iota
+	// CredsDrop - Credentials/certificate not needed and should get ignored.
+	CredsDrop
+	// CredsOptional - Credentials/certificate might be used if supplied.
+	CredsOptional
+)
+
+func extractHostFromHostPort(ep string) string {
+	host, _, err := net.SplitHostPort(ep)
+	if err != nil {
+		return ep
+	}
+	return host
+}
+
+// mustSplit2 returns the two values from strings.SplitN(s, sep, 2).
+// It panics if sep is not found in s.
+func mustSplit2(s, sep string) (string, string) {
+	spl := strings.SplitN(s, sep, 2)
+	if len(spl) < 2 {
+		panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep))
+	}
+	return spl[0], spl[1]
+}
+
+func schemeToCredsRequirement(schema string) CredsRequirement {
+	switch schema {
+	case "https", "unixs":
+		return CredsRequire
+	case "http":
+		return CredsDrop
+	case "unix":
+		// Preserving previous behavior from:
+		// https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212
+		// that likely was a bug due to missing 'fallthrough'.
+		// At the same time it seems legit to let the users decide whether they
+		// want credential control or not (and 'unixs' schema is not a standard thing).
+		return CredsOptional
+	case "":
+		return CredsOptional
+	default:
+		return CredsOptional
+	}
+}
+
+// translateEndpoint translates endpoint names supported by the etcd server into
+// endpoints as supported by grpc, with additional information
+// (server_name for cert validation, requireCreds - whether certs are needed).
+// The main differences:
+//   - etcd supports unixs & https names as opposed to unix & http to
+//     distinguish the need to configure certificates.
+//   - etcd supports http(s) names as opposed to the tcp scheme supported by the grpc/dial method.
+//   - etcd supports the unix(s)://local-file naming schema
+//     (as opposed to the unix:local-file canonical name used by grpc for current dir files).
+//   - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
+//     is considered the serverName - to allow local testing of cert-protected communication.
+//
+// See more:
+//   - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
+//   - https://golang.org/pkg/net/#Dial
+//   - https://github.com/grpc/grpc/blob/master/doc/naming.md
+func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) {
+	if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") {
+		if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") {
+			// absolute path case
+			schema, absolutePath := mustSplit2(ep, "://")
+			return "unix://" + absolutePath, path.Base(absolutePath), schemeToCredsRequirement(schema)
+		}
+		if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") {
+			// legacy etcd local path
+			schema, localPath := mustSplit2(ep, "://")
+			return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema)
+		}
+		schema, localPath := mustSplit2(ep, ":")
+		return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema)
+	}
+
+	if strings.Contains(ep, "://") {
+		url, err := url.Parse(ep)
+		if err != nil {
+			return ep, ep, CredsOptional
+		}
+		if url.Scheme == "http" || url.Scheme == "https" {
+			return url.Host, url.Host, schemeToCredsRequirement(url.Scheme)
+		}
+		return ep, url.Host, schemeToCredsRequirement(url.Scheme)
+	}
+	// Handles plain addresses like 10.0.0.44:437.
+	return ep, ep, CredsOptional
+}
+
+// RequiresCredentials returns whether given endpoint requires
+// credentials/certificates for connection.
+func RequiresCredentials(ep string) CredsRequirement {
+	_, _, requireCreds := translateEndpoint(ep)
+	return requireCreds
+}
+
+// Interpret parses an endpoint of the form
+// ((http|https)://<host>*|(unix|unixs)://<path>)
+// and returns low-level address (supported by 'net') to connect to,
+// and a server name used for x509 certificate matching.
+func Interpret(ep string) (address string, serverName string) {
+	addr, serverName, _ := translateEndpoint(ep)
+	return addr, serverName
+}
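To make the translation rules above concrete, here is a sketch of expected input/output pairs for Interpret. Because the package is internal to go.etcd.io/etcd/client/v3, the snippet is written as an in-package test; the expected values are derived from translateEndpoint's cases above, not taken from upstream fixtures:

package endpoint

import "testing"

// TestInterpretSketch illustrates the mapping rules documented above.
// The expected pairs are illustrative assumptions based on translateEndpoint.
func TestInterpretSketch(t *testing.T) {
	cases := []struct{ ep, wantAddr, wantServerName string }{
		{"http://10.0.0.1:2379", "10.0.0.1:2379", "10.0.0.1:2379"},      // scheme stripped for grpc
		{"unix:///tmp/etcd.sock", "unix:///tmp/etcd.sock", "etcd.sock"}, // absolute path
		{"unix://etcd.sock", "unix:etcd.sock", "etcd.sock"},             // legacy etcd local path
		{"10.0.0.44:437", "10.0.0.44:437", "10.0.0.44:437"},             // plain address
	}
	for _, c := range cases {
		addr, serverName := Interpret(c.ep)
		if addr != c.wantAddr || serverName != c.wantServerName {
			t.Errorf("Interpret(%q) = (%q, %q), want (%q, %q)",
				c.ep, addr, serverName, c.wantAddr, c.wantServerName)
		}
	}
}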
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go
new file mode 100644
index 0000000..403b745
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resolver
+
+import (
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/resolver/manual"
+	"google.golang.org/grpc/serviceconfig"
+
+	"go.etcd.io/etcd/client/v3/internal/endpoint"
+)
+
+const (
+	Schema = "etcd-endpoints"
+)
+
+// EtcdManualResolver is a Resolver (and resolver.Builder) that can be updated
+// using SetEndpoints.
+type EtcdManualResolver struct {
+	*manual.Resolver
+	endpoints     []string
+	serviceConfig *serviceconfig.ParseResult
+}
+
+func New(endpoints ...string) *EtcdManualResolver {
+	r := manual.NewBuilderWithScheme(Schema)
+	return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil}
+}
+
+// Build builds the embedded manual resolver and seeds it with the stored
+// endpoints; EtcdManualResolver serves as both the builder and the resolver.
+func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`)
+	if r.serviceConfig.Err != nil {
+		return nil, r.serviceConfig.Err
+	}
+	res, err := r.Resolver.Build(target, cc, opts)
+	if err != nil {
+		return nil, err
+	}
+	// Populates endpoints stored in r into ClientConn (cc).
+	r.updateState()
+	return res, nil
+}
+
+func (r *EtcdManualResolver) SetEndpoints(endpoints []string) {
+	r.endpoints = endpoints
+	r.updateState()
+}
+
+func (r EtcdManualResolver) updateState() {
+	if r.CC != nil {
+		eps := make([]resolver.Endpoint, len(r.endpoints))
+		for i, ep := range r.endpoints {
+			addr, serverName := endpoint.Interpret(ep)
+			eps[i] = resolver.Endpoint{Addresses: []resolver.Address{
+				{Addr: addr, ServerName: serverName},
+			}}
+		}
+		state := resolver.State{
+			Endpoints:     eps,
+			ServiceConfig: r.serviceConfig,
+		}
+		r.UpdateState(state)
+	}
+}
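A minimal sketch of the intended resolver lifecycle, assuming in-module access (the package is internal). The endpoint strings are placeholders, and the grpc.Dial wiring is only indicated in comments:

package resolver

// lifecycleSketch shows the intended call order; the endpoint strings are
// placeholders, and the grpc dial step is omitted.
func lifecycleSketch() {
	r := New("10.0.0.1:2379", "10.0.0.2:2379")

	// grpc.Dial(Schema+":///", grpc.WithResolvers(r), ...) would invoke
	// r.Build, which installs the round_robin service config and publishes
	// the initial endpoint state to the ClientConn.

	// Membership changed: publish a new endpoint list to the balancer.
	r.SetEndpoints([]string{"10.0.0.3:2379"})
}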
diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go
new file mode 100644
index 0000000..8d0c595
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/kv.go
@@ -0,0 +1,185 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+
+	"google.golang.org/grpc"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+type (
+	CompactResponse pb.CompactionResponse
+	PutResponse     pb.PutResponse
+	GetResponse     pb.RangeResponse
+	DeleteResponse  pb.DeleteRangeResponse
+	TxnResponse     pb.TxnResponse
+)
+
+type KV interface {
+	// Put puts a key-value pair into etcd.
+	// Note that key and value can be arbitrary byte arrays; a string is
+	// an immutable representation of such a byte array.
+	// To get a string of bytes, do string([]byte{0x10, 0x20}).
+	Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
+
+	// Get retrieves keys.
+	// By default, Get will return the value for "key", if any.
+	// When passed WithRange(end), Get will return the keys in the range [key, end).
+	// When passed WithFromKey(), Get returns keys greater than or equal to key.
+	// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
+	// if the required revision is compacted, the request will fail with ErrCompacted.
+	// When passed WithLimit(limit), the number of returned keys is bounded by limit.
+	// When passed WithSort(), the keys will be sorted.
+	Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
+
+	// Delete deletes a key, or optionally using WithRange(end), [key, end).
+	Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
+
+	// Compact compacts etcd KV history before the given rev.
+	Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
+
+	// Do applies a single Op on KV without a transaction.
+	// Do is useful when creating arbitrary operations to be issued at a
+	// later time; the user can range over the operations, calling Do to
+	// execute them. Get/Put/Delete, on the other hand, are best suited
+	// for when the operation should be issued at the time of declaration.
+	Do(ctx context.Context, op Op) (OpResponse, error)
+
+	// Txn creates a transaction.
+	Txn(ctx context.Context) Txn
+}
+
+type OpResponse struct {
+	put *PutResponse
+	get *GetResponse
+	del *DeleteResponse
+	txn *TxnResponse
+}
+
+func (op OpResponse) Put() *PutResponse    { return op.put }
+func (op OpResponse) Get() *GetResponse    { return op.get }
+func (op OpResponse) Del() *DeleteResponse { return op.del }
+func (op OpResponse) Txn() *TxnResponse    { return op.txn }
+
+func (resp *PutResponse) OpResponse() OpResponse {
+	return OpResponse{put: resp}
+}
+
+func (resp *GetResponse) OpResponse() OpResponse {
+	return OpResponse{get: resp}
+}
+
+func (resp *DeleteResponse) OpResponse() OpResponse {
+	return OpResponse{del: resp}
+}
+
+func (resp *TxnResponse) OpResponse() OpResponse {
+	return OpResponse{txn: resp}
+}
+
+type kv struct {
+	remote   pb.KVClient
+	callOpts []grpc.CallOption
+}
+
+func NewKV(c *Client) KV {
+	api := &kv{remote: RetryKVClient(c)}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
+	api := &kv{remote: remote}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
+	r, err := kv.Do(ctx, OpPut(key, val, opts...))
+	return r.put, ContextError(ctx, err)
+}
+
+func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
+	r, err := kv.Do(ctx, OpGet(key, opts...))
+	return r.get, ContextError(ctx, err)
+}
+
+func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
+	r, err := kv.Do(ctx, OpDelete(key, opts...))
+	return r.del, ContextError(ctx, err)
+}
+
+func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
+	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*CompactResponse)(resp), err
+}
+
+func (kv *kv) Txn(ctx context.Context) Txn {
+	return &txn{
+		kv:       kv,
+		ctx:      ctx,
+		callOpts: kv.callOpts,
+	}
+}
+
+func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
+	var err error
+	switch op.t {
+	case tRange:
+		if op.IsSortOptionValid() {
+			var resp *pb.RangeResponse
+			resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
+			if err == nil {
+				return OpResponse{get: (*GetResponse)(resp)}, nil
+			}
+		} else {
+			err = rpctypes.ErrInvalidSortOption
+		}
+	case tPut:
+		var resp *pb.PutResponse
+		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+		resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
+		if err == nil {
+			return OpResponse{put: (*PutResponse)(resp)}, nil
+		}
+	case tDeleteRange:
+		var resp *pb.DeleteRangeResponse
+		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+		resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
+		if err == nil {
+			return OpResponse{del: (*DeleteResponse)(resp)}, nil
+		}
+	case tTxn:
+		var resp *pb.TxnResponse
+		resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
+		if err == nil {
+			return OpResponse{txn: (*TxnResponse)(resp)}, nil
+		}
+	default:
+		panic("Unknown op")
+	}
+	return OpResponse{}, ContextError(ctx, err)
+}
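A hedged usage sketch of the KV interface above: create a client, put a key, and read back everything under a prefix. The endpoint address and key names are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Put a key, then range over the "foo/" prefix.
	if _, err := cli.Put(ctx, "foo/bar", "baz"); err != nil {
		panic(err)
	}
	resp, err := cli.Get(ctx, "foo/", clientv3.WithPrefix())
	if err != nil {
		panic(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
	}
}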
diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go
new file mode 100644
index 0000000..11b5834
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/lease.go
@@ -0,0 +1,626 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+type (
+	LeaseRevokeResponse pb.LeaseRevokeResponse
+	LeaseID             int64
+)
+
+// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
+type LeaseGrantResponse struct {
+	*pb.ResponseHeader
+	ID    LeaseID
+	TTL   int64
+	Error string
+}
+
+// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
+type LeaseKeepAliveResponse struct {
+	*pb.ResponseHeader
+	ID  LeaseID
+	TTL int64
+}
+
+// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
+type LeaseTimeToLiveResponse struct {
+	*pb.ResponseHeader
+	ID LeaseID `json:"id"`
+
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease returns -1.
+	TTL int64 `json:"ttl"`
+
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `json:"granted-ttl"`
+
+	// Keys is the list of keys attached to this lease.
+	Keys [][]byte `json:"keys"`
+}
+
+// LeaseStatus represents a lease status.
+type LeaseStatus struct {
+	ID LeaseID `json:"id"`
+	// TODO: TTL int64
+}
+
+// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
+type LeaseLeasesResponse struct {
+	*pb.ResponseHeader
+	Leases []LeaseStatus `json:"leases"`
+}
+
+const (
+	// defaultTTL is the assumed lease TTL used for the first keepalive
+	// deadline before the actual TTL is known to the client.
+	defaultTTL = 5 * time.Second
+	// NoLease is a lease ID for the absence of a lease.
+	NoLease LeaseID = 0
+
+	// retryConnWait is how long to wait before retrying a request after an error
+	retryConnWait = 500 * time.Millisecond
+)
+
+// LeaseResponseChSize is the size of buffer to store unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
+// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
+//
+// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
+type ErrKeepAliveHalted struct {
+	Reason error
+}
+
+func (e ErrKeepAliveHalted) Error() string {
+	s := "etcdclient: leases keep alive halted"
+	if e.Reason != nil {
+		s += ": " + e.Reason.Error()
+	}
+	return s
+}
+
+type Lease interface {
+	// Grant creates a new lease.
+	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
+
+	// Revoke revokes the given lease.
+	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
+
+	// TimeToLive retrieves the lease information of the given lease ID.
+	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
+
+	// Leases retrieves all leases.
+	Leases(ctx context.Context) (*LeaseLeasesResponse, error)
+
+	// KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
+	// to the channel are not consumed promptly the channel may become full. When full, the lease
+	// client will continue sending keep alive requests to the etcd server, but will drop responses
+	// until there is capacity on the channel to send more responses.
+	//
+	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or
+	// is canceled by the caller (e.g. context.Canceled), KeepAlive returns an ErrKeepAliveHalted error
+	// containing the error reason.
+	//
+	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
+	// alive stream is interrupted in some way the client cannot handle itself, or
+	// if the given context "ctx" is canceled or timed out.
+	//
+	// TODO(v4.0): post errors to last keep alive message before closing
+	// (see https://github.com/etcd-io/etcd/pull/7866)
+	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
+
+	// KeepAliveOnce renews the lease once. The response corresponds to the
+	// first message from calling KeepAlive. If the response has a recoverable
+	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
+	//
+	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
+	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
+
+	// Close releases all resources Lease keeps for efficient communication
+	// with the etcd server.
+	Close() error
+}
+
+type lessor struct {
+	mu sync.Mutex // guards all fields
+
+	// donec is closed and loopErr is set when recvKeepAliveLoop stops
+	donec   chan struct{}
+	loopErr error
+
+	remote pb.LeaseClient
+
+	stream       pb.Lease_LeaseKeepAliveClient
+	streamCancel context.CancelFunc
+
+	stopCtx    context.Context
+	stopCancel context.CancelFunc
+
+	keepAlives map[LeaseID]*keepAlive
+
+	// firstKeepAliveTimeout is the timeout for the first keepalive request
+	// before the actual TTL is known to the lease client
+	firstKeepAliveTimeout time.Duration
+
+	// firstKeepAliveOnce ensures stream starts after first KeepAlive call.
+	firstKeepAliveOnce sync.Once
+
+	callOpts []grpc.CallOption
+
+	lg *zap.Logger
+}
+
+// keepAlive multiplexes a keepalive for a lease over multiple channels
+type keepAlive struct {
+	chs  []chan<- *LeaseKeepAliveResponse
+	ctxs []context.Context
+	// deadline is the time the keep alive channels close if no response is received
+	deadline time.Time
+	// nextKeepAlive is when to send the next keep alive message
+	nextKeepAlive time.Time
+	// donec is closed on lease revoke, expiration, or cancel.
+	donec chan struct{}
+}
+
+func NewLease(c *Client) Lease {
+	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
+}
+
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
+	l := &lessor{
+		donec:                 make(chan struct{}),
+		keepAlives:            make(map[LeaseID]*keepAlive),
+		remote:                remote,
+		firstKeepAliveTimeout: keepAliveTimeout,
+	}
+	if l.firstKeepAliveTimeout == time.Second {
+		l.firstKeepAliveTimeout = defaultTTL
+	}
+	if c != nil {
+		l.lg = c.lg
+		l.callOpts = c.callOpts
+	}
+	reqLeaderCtx := WithRequireLeader(context.Background())
+	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
+	return l
+}
+
+func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
+	r := &pb.LeaseGrantRequest{TTL: ttl}
+	resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
+	if err == nil {
+		gresp := &LeaseGrantResponse{
+			ResponseHeader: resp.GetHeader(),
+			ID:             LeaseID(resp.ID),
+			TTL:            resp.TTL,
+			Error:          resp.Error,
+		}
+		return gresp, nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
+	r := &pb.LeaseRevokeRequest{ID: int64(id)}
+	resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
+	if err == nil {
+		return (*LeaseRevokeResponse)(resp), nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+	r := toLeaseTimeToLiveRequest(id, opts...)
+	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	gresp := &LeaseTimeToLiveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+		GrantedTTL:     resp.GrantedTTL,
+		Keys:           resp.Keys,
+	}
+	return gresp, nil
+}
+
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+	resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
+	if err == nil {
+		leases := make([]LeaseStatus, len(resp.Leases))
+		for i := range resp.Leases {
+			leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+		}
+		return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+// To identify the context passed to `KeepAlive`, a key/value pair is
+// attached to the context. The key is a `keepAliveCtxKey` object, and
+// the value is the pointer to the context object itself, ensuring
+// uniqueness as each context has a unique memory address.
+type keepAliveCtxKey struct{}
+
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
+
+	l.mu.Lock()
+	// ensure that recvKeepAliveLoop is still running
+	select {
+	case <-l.donec:
+		err := l.loopErr
+		l.mu.Unlock()
+		close(ch)
+		return ch, ErrKeepAliveHalted{Reason: err}
+	default:
+	}
+	ka, ok := l.keepAlives[id]
+
+	if ctx.Done() != nil {
+		ctx = context.WithValue(ctx, keepAliveCtxKey{}, &ctx)
+	}
+	if !ok {
+		// create fresh keep alive
+		ka = &keepAlive{
+			chs:           []chan<- *LeaseKeepAliveResponse{ch},
+			ctxs:          []context.Context{ctx},
+			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
+			nextKeepAlive: time.Now(),
+			donec:         make(chan struct{}),
+		}
+		l.keepAlives[id] = ka
+	} else {
+		// add channel and context to existing keep alive
+		ka.ctxs = append(ka.ctxs, ctx)
+		ka.chs = append(ka.chs, ch)
+	}
+	l.mu.Unlock()
+
+	if ctx.Done() != nil {
+		go l.keepAliveCtxCloser(ctx, id, ka.donec)
+	}
+	l.firstKeepAliveOnce.Do(func() {
+		go l.recvKeepAliveLoop()
+		go l.deadlineLoop()
+	})
+
+	return ch, nil
+}
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+	for {
+		resp, err := l.keepAliveOnce(ctx, id)
+		if err == nil {
+			if resp.TTL <= 0 {
+				err = rpctypes.ErrLeaseNotFound
+			}
+			return resp, err
+		}
+		if isHaltErr(ctx, err) {
+			return nil, ContextError(ctx, err)
+		}
+	}
+}
+
+func (l *lessor) Close() error {
+	l.stopCancel()
+	// close for synchronous teardown if stream goroutines never launched
+	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+	<-l.donec
+	return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
+	select {
+	case <-donec:
+		return
+	case <-l.donec:
+		return
+	case <-ctx.Done():
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[id]
+	if !ok {
+		return
+	}
+
+	// close channel and remove context if still associated with keep alive
+	for i, c := range ka.ctxs {
+		if c.Value(keepAliveCtxKey{}) == ctx.Value(keepAliveCtxKey{}) {
+			close(ka.chs[i])
+			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+			break
+		}
+	}
+	// remove the keepalive if no more listeners remain
+	if len(ka.chs) == 0 {
+		delete(l.keepAlives, id)
+	}
+}
+
+// closeRequireLeader scans keepAlives for ctxs that have require leader
+// and closes the associated channels.
+func (l *lessor) closeRequireLeader() {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	for _, ka := range l.keepAlives {
+		reqIdxs := 0
+		// find all required leader channels, close, mark as nil
+		for i, ctx := range ka.ctxs {
+			md, ok := metadata.FromOutgoingContext(ctx)
+			if !ok {
+				continue
+			}
+			ks := md[rpctypes.MetadataRequireLeaderKey]
+			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+				continue
+			}
+			close(ka.chs[i])
+			ka.chs[i] = nil
+			reqIdxs++
+		}
+		if reqIdxs == 0 {
+			continue
+		}
+		// remove all channels that required a leader from keepalive
+		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+		newCtxs := make([]context.Context, len(newChs))
+		newIdx := 0
+		for i := range ka.chs {
+			if ka.chs[i] == nil {
+				continue
+			}
+			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i] // pair each surviving channel with its own context
+			newIdx++
+		}
+		ka.chs, ka.ctxs = newChs, newCtxs
+	}
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) {
+	cctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+
+	defer func() {
+		if cerr := stream.CloseSend(); cerr != nil {
+			if ferr == nil {
+				ferr = ContextError(ctx, cerr)
+			}
+			return
+		}
+	}()
+
+	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+
+	resp, rerr := stream.Recv()
+	if rerr != nil {
+		return nil, ContextError(ctx, rerr)
+	}
+
+	karesp = &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+	return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+	defer func() {
+		l.mu.Lock()
+		close(l.donec)
+		l.loopErr = gerr
+		for _, ka := range l.keepAlives {
+			ka.close()
+		}
+		l.keepAlives = make(map[LeaseID]*keepAlive)
+		l.mu.Unlock()
+	}()
+
+	for {
+		stream, err := l.resetRecv()
+		if err != nil {
+			l.lg.Warn("error occurred during lease keep alive loop",
+				zap.Error(err),
+			)
+			if canceledByCaller(l.stopCtx, err) {
+				return err
+			}
+		} else {
+			for {
+				resp, err := stream.Recv()
+				if err != nil {
+					if canceledByCaller(l.stopCtx, err) {
+						return err
+					}
+
+					if errors.Is(ContextError(l.stopCtx, err), rpctypes.ErrNoLeader) {
+						l.closeRequireLeader()
+					}
+					break
+				}
+
+				l.recvKeepAlive(resp)
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+		case <-l.stopCtx.Done():
+			return l.stopCtx.Err()
+		}
+	}
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+	sctx, cancel := context.WithCancel(l.stopCtx)
+	stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
+	if err != nil {
+		cancel()
+		return nil, err
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	if l.stream != nil && l.streamCancel != nil {
+		l.streamCancel()
+	}
+
+	l.streamCancel = cancel
+	l.stream = stream
+
+	go l.sendKeepAliveLoop(stream)
+	return stream, nil
+}
+
+// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
+func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
+	karesp := &LeaseKeepAliveResponse{
+		ResponseHeader: resp.GetHeader(),
+		ID:             LeaseID(resp.ID),
+		TTL:            resp.TTL,
+	}
+
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	ka, ok := l.keepAlives[karesp.ID]
+	if !ok {
+		return
+	}
+
+	if karesp.TTL <= 0 {
+		// lease expired; close all keep alive channels
+		delete(l.keepAlives, karesp.ID)
+		ka.close()
+		return
+	}
+
+	// send update to all channels
+	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
+	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
+	for _, ch := range ka.chs {
+		select {
+		case ch <- karesp:
+		default:
+			if l.lg != nil {
+				l.lg.Warn("lease keepalive response queue is full; dropping response send",
+					zap.Int("queue-size", len(ch)),
+					zap.Int("queue-capacity", cap(ch)),
+				)
+			}
+		}
+		// still advance in order to rate-limit keep-alive sends
+		ka.nextKeepAlive = nextKeepAlive
+	}
+}
+
+// deadlineLoop reaps any keep alive channels that have not received a response
+// within the lease TTL
+func (l *lessor) deadlineLoop() {
+	timer := time.NewTimer(time.Second)
+	defer timer.Stop()
+	for {
+		timer.Reset(time.Second)
+		select {
+		case <-timer.C:
+		case <-l.donec:
+			return
+		}
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.deadline.Before(now) {
+				// waited too long for response; lease may be expired
+				ka.close()
+				delete(l.keepAlives, id)
+			}
+		}
+		l.mu.Unlock()
+	}
+}
+
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
+func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
+	for {
+		var tosend []LeaseID
+
+		now := time.Now()
+		l.mu.Lock()
+		for id, ka := range l.keepAlives {
+			if ka.nextKeepAlive.Before(now) {
+				tosend = append(tosend, id)
+			}
+		}
+		l.mu.Unlock()
+
+		for _, id := range tosend {
+			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
+			if err := stream.Send(r); err != nil {
+				l.lg.Warn("error occurred during lease keep alive request sending",
+					zap.Error(err),
+				)
+				return
+			}
+		}
+
+		select {
+		case <-time.After(retryConnWait):
+		case <-stream.Context().Done():
+			return
+		case <-l.donec:
+			return
+		case <-l.stopCtx.Done():
+			return
+		}
+	}
+}
+
+func (ka *keepAlive) close() {
+	close(ka.donec)
+	for _, ch := range ka.chs {
+		close(ch)
+	}
+}
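A hedged end-to-end sketch of the lease flow implemented above: grant a lease, attach it to a key with WithLease, and consume keepalive responses until the channel closes. The endpoint, key, and TTL values are placeholders:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// Grant a 10-second lease and attach a key to it.
	grant, err := cli.Grant(ctx, 10)
	if err != nil {
		panic(err)
	}
	if _, err := cli.Put(ctx, "service/instance", "addr", clientv3.WithLease(grant.ID)); err != nil {
		panic(err)
	}

	// Keep the lease alive; the channel closes when the lease expires,
	// the context ends, or the keep alive stream cannot be recovered.
	ch, err := cli.KeepAlive(ctx, grant.ID)
	if err != nil {
		panic(err)
	}
	for ka := range ch {
		fmt.Println("lease renewed, remaining TTL:", ka.TTL)
	}
}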
diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go
new file mode 100644
index 0000000..300363c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/logger.go
@@ -0,0 +1,60 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"log"
+	"os"
+
+	"go.uber.org/zap/zapcore"
+	"go.uber.org/zap/zapgrpc"
+	"google.golang.org/grpc/grpclog"
+
+	"go.etcd.io/etcd/client/pkg/v3/logutil"
+)
+
+func init() {
+	// We override grpc logger only when the environment variable is set
+	// in order to not interfere by default with user's code or other libraries.
+	if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+		lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+		if err != nil {
+			panic(err)
+		}
+		lg = lg.Named("etcd-client")
+		grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+	}
+}
+
+// SetLogger sets grpc logger.
+//
+// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2.
+func SetLogger(l grpclog.LoggerV2) {
+	grpclog.SetLoggerV2(l)
+}
+
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into zap log level.
+func etcdClientDebugLevel() zapcore.Level {
+	envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+	if envLevel == "" || envLevel == "true" {
+		return zapcore.InfoLevel
+	}
+	var l zapcore.Level
+	if err := l.Set(envLevel); err != nil {
+		log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
+		return zapcore.InfoLevel
+	}
+	return l
+}
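Note that init above reads ETCD_CLIENT_DEBUG once, at package import time, so the variable must be set in the process environment before the program starts (e.g. ETCD_CLIENT_DEBUG=debug ./app); per etcdClientDebugLevel, accepted values are the zapcore level names plus "true". A sketch of the pitfall, with nothing assumed beyond the code in this diff:

package main

import (
	"os"

	clientv3 "go.etcd.io/etcd/client/v3" // init() has already consulted ETCD_CLIENT_DEBUG here
)

func main() {
	// Too late: clientv3's init ran at import time, so setting the variable
	// here does not install the debug grpc logger for this process. Launch
	// the process with the variable set instead.
	os.Setenv("ETCD_CLIENT_DEBUG", "debug")

	_ = clientv3.Config{} // placeholder use of the package
}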
diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
new file mode 100644
index 0000000..00aaacd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
@@ -0,0 +1,350 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type (
+	DefragmentResponse pb.DefragmentResponse
+	AlarmResponse      pb.AlarmResponse
+	AlarmMember        pb.AlarmMember
+	StatusResponse     pb.StatusResponse
+	HashKVResponse     pb.HashKVResponse
+	MoveLeaderResponse pb.MoveLeaderResponse
+	DowngradeResponse  pb.DowngradeResponse
+
+	DowngradeAction pb.DowngradeRequest_DowngradeAction
+)
+
+const (
+	DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE)
+	DowngradeEnable   = DowngradeAction(pb.DowngradeRequest_ENABLE)
+	DowngradeCancel   = DowngradeAction(pb.DowngradeRequest_CANCEL)
+)
+
+type Maintenance interface {
+	// AlarmList gets all active alarms.
+	AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+	// AlarmDisarm disarms a given alarm.
+	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+	// Defragment releases wasted space from internal fragmentation on a given etcd member.
+	// Defragment is only needed after deleting a large number of keys, when the user
+	// wants to reclaim the resources.
+	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
+	// at the same time.
+	// To defragment multiple members in the cluster, users need to call Defragment multiple
+	// times with different endpoints.
+	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+	// Status gets the status of the endpoint.
+	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+	// HashKV returns a hash of the KV state at the time of the RPC.
+	// If revision is zero, the hash is computed on all keys. If the revision
+	// is non-zero, the hash is computed on all keys at or below the given revision.
+	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
+	// SnapshotWithVersion returns a reader for a point-in-time snapshot and the version of etcd that created it.
+	// If the context "ctx" is canceled or timed out, reading from returned
+	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+	SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error)
+
+	// Snapshot provides a reader for a point-in-time snapshot of etcd.
+	// If the context "ctx" is canceled or timed out, reading from returned
+	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+	// Deprecated: use SnapshotWithVersion instead.
+	Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+	// MoveLeader requests current leader to transfer its leadership to the transferee.
+	// Request must be made to the leader.
+	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+
+	// Downgrade requests downgrades, verifies feasibility or cancels downgrade
+	// on the cluster version.
+	// Supported since etcd 3.5.
+	Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error)
+}
+
+// SnapshotResponse is the aggregated response from the snapshot stream.
+// The consumer is responsible for closing the stream by calling .Snapshot.Close().
+type SnapshotResponse struct {
+	// Header is the first header in the snapshot stream, has the current key-value store information
+	// and indicates the point in time of the snapshot.
+	Header *pb.ResponseHeader
+	// Snapshot exposes the io.ReadCloser interface for data stored in the Blob field in the snapshot stream.
+	Snapshot io.ReadCloser
+	// Version is the local version of the server that created the snapshot.
+	// In a cluster whose members run different binary versions, each member can return a different result.
+	// It informs which etcd server version should be used when restoring the snapshot.
+	// Supported on etcd >= v3.6.
+	Version string
+}
+
+type maintenance struct {
+	lg       *zap.Logger
+	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
+	remote   pb.MaintenanceClient
+	callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+	api := &maintenance{
+		lg: c.lg,
+		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+			conn, err := c.Dial(endpoint)
+			if err != nil {
+				return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %w", endpoint, err)
+			}
+
+			cancel := func() { conn.Close() }
+			return RetryMaintenanceClient(c, conn), cancel, nil
+		},
+		remote: RetryMaintenanceClient(c, c.conn),
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+	}
+	return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+	api := &maintenance{
+		dial: func(string) (pb.MaintenanceClient, func(), error) {
+			return remote, func() {}, nil
+		},
+		remote: remote,
+	}
+	if c != nil {
+		api.callOpts = c.callOpts
+		api.lg = c.lg
+	}
+	return api
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_GET,
+		MemberID: 0,                 // all
+		Alarm:    pb.AlarmType_NONE, // all
+	}
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+	req := &pb.AlarmRequest{
+		Action:   pb.AlarmRequest_DEACTIVATE,
+		MemberID: am.MemberID,
+		Alarm:    am.Alarm,
+	}
+
+	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+		ar, err := m.AlarmList(ctx)
+		if err != nil {
+			return nil, ContextError(ctx, err)
+		}
+		ret := AlarmResponse{}
+		for _, am := range ar.Alarms {
+			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+			if derr != nil {
+				return nil, ContextError(ctx, derr)
+			}
+			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+		}
+		return &ret, nil
+	}
+
+	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+	if err == nil {
+		return (*AlarmResponse)(resp), nil
+	}
+	return nil, ContextError(ctx, err)
+}
+
+func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*DefragmentResponse)(resp), nil
+}
+
+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*StatusResponse)(resp), nil
+}
+
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+	remote, cancel, err := m.dial(endpoint)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	defer cancel()
+	resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+	return (*HashKVResponse)(resp), nil
+}
+
+func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) {
+	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+
+	m.lg.Info("opened snapshot stream; downloading")
+	pr, pw := io.Pipe()
+
+	resp, err := ss.Recv()
+	if err != nil {
+		m.logAndCloseWithError(err, pw)
+		return nil, err
+	}
+	go func() {
+		// Saving response is blocking
+		err := m.save(resp, pw)
+		if err != nil {
+			m.logAndCloseWithError(err, pw)
+			return
+		}
+		for {
+			sresp, err := ss.Recv()
+			if err != nil {
+				m.logAndCloseWithError(err, pw)
+				return
+			}
+
+			err = m.save(sresp, pw)
+			if err != nil {
+				m.logAndCloseWithError(err, pw)
+				return
+			}
+		}
+	}()
+
+	return &SnapshotResponse{
+		Header:   resp.GetHeader(),
+		Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr},
+		Version:  resp.GetVersion(),
+	}, nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+	if err != nil {
+		return nil, ContextError(ctx, err)
+	}
+
+	m.lg.Info("opened snapshot stream; downloading")
+	pr, pw := io.Pipe()
+
+	go func() {
+		for {
+			resp, err := ss.Recv()
+			if err != nil {
+				m.logAndCloseWithError(err, pw)
+				return
+			}
+			err = m.save(resp, pw)
+			if err != nil {
+				m.logAndCloseWithError(err, pw)
+				return
+			}
+		}
+	}()
+	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
+}
+
+func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) {
+	switch {
+	case errors.Is(err, io.EOF):
+		m.lg.Info("completed snapshot read; closing")
+	default:
+		m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
+	}
+	pw.CloseWithError(err)
+}
+
+func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error {
+	// Can "resp == nil && err == nil" happen before we receive the snapshot
+	// SHA digest? No: the server sends EOF with an empty response after it
+	// sends the SHA digest at the end.
+
+	if _, werr := pw.Write(resp.Blob); werr != nil {
+		return werr
+	}
+	return nil
+}
+
+type snapshotReadCloser struct {
+	ctx context.Context
+	io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+	n, err = rc.ReadCloser.Read(p)
+	return n, ContextError(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+	return (*MoveLeaderResponse)(resp), ContextError(ctx, err)
+}
+
+func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) {
+	var actionType pb.DowngradeRequest_DowngradeAction
+	switch action {
+	case DowngradeValidate:
+		actionType = pb.DowngradeRequest_VALIDATE
+	case DowngradeEnable:
+		actionType = pb.DowngradeRequest_ENABLE
+	case DowngradeCancel:
+		actionType = pb.DowngradeRequest_CANCEL
+	default:
+		return nil, errors.New("etcdclient: unknown downgrade action")
+	}
+	resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...)
+	return (*DowngradeResponse)(resp), ContextError(ctx, err)
+}
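Following the guidance above to defragment one member at a time, here is a hedged sketch that reports each endpoint's status and then defragments it. The endpoints are placeholders and error handling is abbreviated:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	endpoints := []string{"10.0.0.1:2379", "10.0.0.2:2379"} // placeholder endpoints

	cli, err := clientv3.New(clientv3.Config{Endpoints: endpoints, DialTimeout: 5 * time.Second})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Defragment members one at a time, as the Maintenance doc advises.
	for _, ep := range endpoints {
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		status, err := cli.Status(ctx, ep)
		if err == nil {
			fmt.Printf("%s: db size %d bytes\n", ep, status.DbSize)
			_, err = cli.Defragment(ctx, ep)
		}
		cancel()
		if err != nil {
			fmt.Printf("%s: maintenance failed: %v\n", ep, err)
		}
	}
}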
diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go
new file mode 100644
index 0000000..20cb34f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/op.go
@@ -0,0 +1,612 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+
+type opType int
+
+const (
+	// A default Op has opType 0, which is invalid.
+	tRange opType = iota + 1
+	tPut
+	tDeleteRange
+	tTxn
+)
+
+var noPrefixEnd = []byte{0}
+
+// Op represents an Operation that kv can execute.
+type Op struct {
+	t   opType
+	key []byte
+	end []byte
+
+	// for range
+	limit        int64
+	sort         *SortOption
+	serializable bool
+	keysOnly     bool
+	countOnly    bool
+	minModRev    int64
+	maxModRev    int64
+	minCreateRev int64
+	maxCreateRev int64
+
+	// for range, watch
+	rev int64
+
+	// for watch, put, delete
+	prevKV bool
+
+	// for watch
+	// fragmentation should be disabled by default
+	// if true, split watch events when the total size exceeds the
+	// "--max-request-bytes" flag value plus 512 bytes
+	fragment bool
+
+	// for put
+	ignoreValue bool
+	ignoreLease bool
+
+	// progressNotify is for progress updates.
+	progressNotify bool
+	// createdNotify is for created event
+	createdNotify bool
+	// filters for watchers
+	filterPut    bool
+	filterDelete bool
+
+	// for put
+	val     []byte
+	leaseID LeaseID
+
+	// txn
+	cmps    []Cmp
+	thenOps []Op
+	elseOps []Op
+
+	isOptsWithFromKey bool
+	isOptsWithPrefix  bool
+}
+
+// accessors / mutators
+
+// IsTxn returns true if the "Op" type is a transaction.
+func (op Op) IsTxn() bool {
+	return op.t == tTxn
+}
+
+// Txn returns the comparison (if) operations, "then" operations, and "else" operations.
+func (op Op) Txn() ([]Cmp, []Op, []Op) {
+	return op.cmps, op.thenOps, op.elseOps
+}
+
+// KeyBytes returns the byte slice holding the Op's key.
+func (op Op) KeyBytes() []byte { return op.key }
+
+// WithKeyBytes sets the byte slice for the Op's key.
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// Limit returns limit of the result, if any.
+func (op Op) Limit() int64 { return op.limit }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly }
+
+func (op Op) IsOptsWithFromKey() bool { return op.isOptsWithFromKey }
+
+func (op Op) IsOptsWithPrefix() bool { return op.isOptsWithPrefix }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
+func (op *Op) WithValueBytes(v []byte) { op.val = v }
+
+func (op Op) toRangeRequest() *pb.RangeRequest {
+	if op.t != tRange {
+		panic("op.t != tRange")
+	}
+	r := &pb.RangeRequest{
+		Key:               op.key,
+		RangeEnd:          op.end,
+		Limit:             op.limit,
+		Revision:          op.rev,
+		Serializable:      op.serializable,
+		KeysOnly:          op.keysOnly,
+		CountOnly:         op.countOnly,
+		MinModRevision:    op.minModRev,
+		MaxModRevision:    op.maxModRev,
+		MinCreateRevision: op.minCreateRev,
+		MaxCreateRevision: op.maxCreateRev,
+	}
+	if op.sort != nil {
+		r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
+		r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
+	}
+	return r
+}
+
+func (op Op) toTxnRequest() *pb.TxnRequest {
+	thenOps := make([]*pb.RequestOp, len(op.thenOps))
+	for i, tOp := range op.thenOps {
+		thenOps[i] = tOp.toRequestOp()
+	}
+	elseOps := make([]*pb.RequestOp, len(op.elseOps))
+	for i, eOp := range op.elseOps {
+		elseOps[i] = eOp.toRequestOp()
+	}
+	cmps := make([]*pb.Compare, len(op.cmps))
+	for i := range op.cmps {
+		cmps[i] = (*pb.Compare)(&op.cmps[i])
+	}
+	return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
+}
+
+func (op Op) toRequestOp() *pb.RequestOp {
+	switch op.t {
+	case tRange:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
+	case tPut:
+		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
+	case tDeleteRange:
+		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
+	case tTxn:
+		return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
+	default:
+		panic("Unknown Op")
+	}
+}
+
+func (op Op) isWrite() bool {
+	if op.t == tTxn {
+		for _, tOp := range op.thenOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		for _, tOp := range op.elseOps {
+			if tOp.isWrite() {
+				return true
+			}
+		}
+		return false
+	}
+	return op.t != tRange
+}
+
+func NewOp() *Op {
+	return &Op{key: []byte("")}
+}
+
+// OpGet returns a "get" operation based on the given key and operation options.
+func OpGet(key string, opts ...OpOption) Op {
+	// WithPrefix and WithFromKey are not supported together
+	if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) {
+		panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
+	}
+	ret := Op{t: tRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	return ret
+}
+
+// OpDelete returns a "delete" operation based on the given key and operation options.
+func OpDelete(key string, opts ...OpOption) Op {
+	// WithPrefix and WithFromKey are not supported together
+	if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) {
+		panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
+	}
+	ret := Op{t: tDeleteRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.leaseID != 0:
+		panic("unexpected lease in delete")
+	case ret.limit != 0:
+		panic("unexpected limit in delete")
+	case ret.rev != 0:
+		panic("unexpected revision in delete")
+	case ret.sort != nil:
+		panic("unexpected sort in delete")
+	case ret.serializable:
+		panic("unexpected serializable in delete")
+	case ret.countOnly:
+		panic("unexpected countOnly in delete")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in delete")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in delete")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in delete")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in delete")
+	}
+	return ret
+}
+
+// OpPut returns a "put" operation based on the given key-value pair and operation options.
+func OpPut(key, val string, opts ...OpOption) Op {
+	ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.end != nil:
+		panic("unexpected range in put")
+	case ret.limit != 0:
+		panic("unexpected limit in put")
+	case ret.rev != 0:
+		panic("unexpected revision in put")
+	case ret.sort != nil:
+		panic("unexpected sort in put")
+	case ret.serializable:
+		panic("unexpected serializable in put")
+	case ret.countOnly:
+		panic("unexpected countOnly in put")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in put")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in put")
+	case ret.filterDelete, ret.filterPut:
+		panic("unexpected filter in put")
+	case ret.createdNotify:
+		panic("unexpected createdNotify in put")
+	}
+	return ret
+}
+
+// OpTxn returns a "txn" operation based on the given transaction conditions.
+func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
+	return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
+}
+
+func opWatch(key string, opts ...OpOption) Op {
+	ret := Op{t: tRange, key: []byte(key)}
+	ret.applyOpts(opts)
+	switch {
+	case ret.leaseID != 0:
+		panic("unexpected lease in watch")
+	case ret.limit != 0:
+		panic("unexpected limit in watch")
+	case ret.sort != nil:
+		panic("unexpected sort in watch")
+	case ret.serializable:
+		panic("unexpected serializable in watch")
+	case ret.countOnly:
+		panic("unexpected countOnly in watch")
+	case ret.minModRev != 0, ret.maxModRev != 0:
+		panic("unexpected mod revision filter in watch")
+	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+		panic("unexpected create revision filter in watch")
+	}
+	return ret
+}
+
+func (op *Op) applyOpts(opts []OpOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// OpOption configures Operations like Get, Put, Delete.
+type OpOption func(*Op)
+
+// WithLease attaches a lease ID to a key in 'Put' request.
+func WithLease(leaseID LeaseID) OpOption {
+	return func(op *Op) { op.leaseID = leaseID }
+}
+
+// WithLimit limits the number of results to return from 'Get' request.
+// If WithLimit is given a 0 limit, it is treated as no limit.
+func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
+
+// WithRev specifies the store revision for a 'Get' request,
+// or the start revision of a 'Watch' request.
+func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
+
+// WithSort specifies the ordering in 'Get' request. It requires
+// 'WithRange' and/or 'WithPrefix' to be specified too.
+// 'target' specifies the target to sort by: key, version, revisions, value.
+// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
+func WithSort(target SortTarget, order SortOrder) OpOption {
+	return func(op *Op) {
+		if target == SortByKey && order == SortAscend {
+			// If order != SortNone, server fetches the entire key-space,
+			// and then applies the sort and limit, if provided.
+			// Since by default the server returns results sorted by keys
+			// in lexicographically ascending order, the client should ignore
+			// SortOrder if the target is SortByKey.
+			order = SortNone
+		}
+		op.sort = &SortOption{target, order}
+	}
+}
+
+// GetPrefixRangeEnd gets the range end of the prefix.
+// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
+func GetPrefixRangeEnd(prefix string) string {
+	return string(getPrefix([]byte(prefix)))
+}
+
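+// getPrefix computes the exclusive range end for a prefix scan: the smallest
+// key greater than every key carrying the given prefix, obtained by
+// incrementing the last byte below 0xff (e.g. getPrefix("foo") is "fop").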
+func getPrefix(key []byte) []byte {
+	end := make([]byte, len(key))
+	copy(end, key)
+	for i := len(end) - 1; i >= 0; i-- {
+		if end[i] < 0xff {
+			end[i] = end[i] + 1
+			end = end[:i+1]
+			return end
+		}
+	}
+	// next prefix does not exist (e.g., 0xffff);
+	// default to WithFromKey policy
+	return noPrefixEnd
+}
+
+// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
+// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
+// can return 'foo1', 'foo2', and so on.
+func WithPrefix() OpOption {
+	return func(op *Op) {
+		op.isOptsWithPrefix = true
+		if len(op.key) == 0 {
+			op.key, op.end = []byte{0}, []byte{0}
+			return
+		}
+		op.end = getPrefix(op.key)
+	}
+}
+
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
+// For example, 'Get' requests with 'WithRange(end)' returns
+// the keys in the range [key, end).
+// endKey must be lexicographically greater than start key.
+func WithRange(endKey string) OpOption {
+	return func(op *Op) { op.end = []byte(endKey) }
+}
+
+// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
+// to be keys equal to or greater than the key in the argument.
+func WithFromKey() OpOption {
+	return func(op *Op) {
+		if len(op.key) == 0 {
+			op.key = []byte{0}
+		}
+		op.end = []byte("\x00")
+		op.isOptsWithFromKey = true
+	}
+}
+
+// WithSerializable makes `Get` and `MemberList` requests serializable.
+// By default, they are linearizable. Serializable requests are better
+// for lower-latency requirements, but users should be aware that they
+// could get stale data with serializable requests.
+//
+// In some situations users may want to use serializable requests. For
+// example, when adding a new member to a one-node cluster, it's reasonable
+// and safe to use a serializable request before the newly added member
+// gets started.
+func WithSerializable() OpOption {
+	return func(op *Op) { op.serializable = true }
+}
+
+// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
+// values will be omitted.
+func WithKeysOnly() OpOption {
+	return func(op *Op) { op.keysOnly = true }
+}
+
+// WithCountOnly makes the 'Get' request return only the count of keys.
+func WithCountOnly() OpOption {
+	return func(op *Op) { op.countOnly = true }
+}
+
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the request's prefix given a sort order.
+func withTop(target SortTarget, order SortOrder) []OpOption {
+	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
+
+// WithProgressNotify makes the watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
+func WithProgressNotify() OpOption {
+	return func(op *Op) {
+		op.progressNotify = true
+	}
+}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+	return func(op *Op) {
+		op.createdNotify = true
+	}
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+	return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+	return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+	return func(op *Op) {
+		op.prevKV = true
+	}
+}
+
+// WithFragment enables receiving raw watch responses with fragmentation.
+// Fragmentation is disabled by default. If fragmentation is enabled,
+// the etcd watch server will split a watch response before sending it to clients
+// when the total size of the watch events exceeds the server-side request limit.
+// The default server-side request limit is 1.5 MiB, which can be configured
+// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
+// See "etcdserver/api/v3rpc/watch.go" for more details.
+func WithFragment() OpOption {
+	return func(op *Op) { op.fragment = true }
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+	return func(op *Op) {
+		op.ignoreValue = true
+	}
+}
+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.
+func WithIgnoreLease() OpOption {
+	return func(op *Op) {
+		op.ignoreLease = true
+	}
+}
+
+// LeaseOp represents an Operation that lease can execute.
+type LeaseOp struct {
+	id LeaseID
+
+	// for TimeToLive
+	attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+	for _, opt := range opts {
+		opt(op)
+	}
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+	return func(op *LeaseOp) { op.attachedKeys = true }
+}
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+	ret := &LeaseOp{id: id}
+	ret.applyOpts(opts)
+	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
+
+// IsOptsWithPrefix returns true if the WithPrefix option is set in the given opts.
+func IsOptsWithPrefix(opts []OpOption) bool {
+	ret := NewOp()
+	for _, opt := range opts {
+		opt(ret)
+	}
+
+	return ret.isOptsWithPrefix
+}
+
+// IsOptsWithFromKey returns true if the WithFromKey option is set in the given opts.
+func IsOptsWithFromKey(opts []OpOption) bool {
+	ret := NewOp()
+	for _, opt := range opts {
+		opt(ret)
+	}
+
+	return ret.isOptsWithFromKey
+}
+
+func (op Op) IsSortOptionValid() bool {
+	if op.sort != nil {
+		sortOrder := int32(op.sort.Order)
+		sortTarget := int32(op.sort.Target)
+
+		if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok {
+			return false
+		}
+
+		if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok {
+			return false
+		}
+	}
+	return true
+}
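A short, hedged sketch of how the OpOption helpers above compose in practice. It assumes an established *clientv3.Client named cli; nothing below is part of the vendored source, and the "jobs/" key prefix is a placeholder.

```go
package example

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// listNewest ranges over all keys under "jobs/", newest modification first,
// capped at 10 results. Note that WithSort(SortByKey, SortAscend) would be
// normalized to SortNone by the client, as documented in WithSort above.
func listNewest(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := cli.Get(ctx, "jobs/",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend),
		clientv3.WithLimit(10),
	)
	if err != nil {
		return err
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s (mod rev %d)\n", kv.Key, kv.ModRevision)
	}
	return nil
}
```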
diff --git a/vendor/go.etcd.io/etcd/client/v3/options.go b/vendor/go.etcd.io/etcd/client/v3/options.go
new file mode 100644
index 0000000..cc10a03
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/options.go
@@ -0,0 +1,69 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math"
+	"time"
+
+	"google.golang.org/grpc"
+)
+
+var (
+	// client-side retry handling of request failures where data was not written to the wire or
+	// where the server indicates it did not process the data. The gRPC default is "WaitForReady(false)",
+	// but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
+	// transient failures.
+	defaultWaitForReady = grpc.WaitForReady(true)
+
+	// client-side request send limit, gRPC default is math.MaxInt32
+	// Make sure that "client-side send limit < server-side default send/recv limit"
+	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
+	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+
+	// client-side response receive limit, gRPC default is 4 MiB.
+	// Make sure that "client-side receive limit >= server-side default send/recv limit"
+	// because a range response can easily exceed the request send limit.
+	// Defaults to math.MaxInt32; writes exceeding the server-side send limit fail anyway.
+	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+
+	// client-side non-streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultUnaryMaxRetries uint = 100
+
+	// client-side streaming retry limit, only applied to requests where the server responds with
+	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+	// If set to 0, retry is disabled.
+	defaultStreamMaxRetries = ^uint(0) // max uint
+
+	// client-side retry backoff wait between requests.
+	defaultBackoffWaitBetween = 25 * time.Millisecond
+
+	// client-side retry backoff default jitter fraction.
+	defaultBackoffJitterFraction = 0.10
+)
+
+// defaultCallOpts defines a list of default "gRPC.CallOption".
+// Some options are exposed to "clientv3.Config".
+// Defaults will be overridden by the settings in "clientv3.Config".
+var defaultCallOpts = []grpc.CallOption{
+	defaultWaitForReady,
+	defaultMaxCallSendMsgSize,
+	defaultMaxCallRecvMsgSize,
+}
+
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
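A hedged sketch of overriding the defaults above per client through clientv3.Config; the endpoint address and the chosen sizes are placeholders, not recommendations.

```go
package example

import (
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func newClient() (*clientv3.Client, error) {
	// Config values, when set, override the package defaults listed above.
	return clientv3.New(clientv3.Config{
		Endpoints:          []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 2 * 1024 * 1024,  // same as defaultMaxCallSendMsgSize
		MaxCallRecvMsgSize: 16 * 1024 * 1024, // tighter than the math.MaxInt32 default
	})
}
```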
diff --git a/vendor/go.etcd.io/etcd/client/v3/retry.go b/vendor/go.etcd.io/etcd/client/v3/retry.go
new file mode 100644
index 0000000..9152c53
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/retry.go
@@ -0,0 +1,309 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+type retryPolicy uint8
+
+const (
+	repeatable retryPolicy = iota
+	nonRepeatable
+)
+
+func (rp retryPolicy) String() string {
+	switch rp {
+	case repeatable:
+		return "repeatable"
+	case nonRepeatable:
+		return "nonRepeatable"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
+//
+// immutable requests (e.g. Get) should be retried unless it's
+// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+//
+// Returning "false" means retry should stop, since client cannot
+// handle itself even with retries.
+func isSafeRetryImmutableRPC(err error) bool {
+	eErr := rpctypes.Error(err)
+	var serverErr rpctypes.EtcdError
+	if errors.As(eErr, &serverErr) && serverErr.Code() != codes.Unavailable {
+		// interrupted by non-transient server-side or gRPC-side error
+		// client cannot handle itself (e.g. rpctypes.ErrCompacted)
+		return false
+	}
+	// only retry if unavailable
+	ev, ok := status.FromError(err)
+	if !ok {
+		// all errors from RPC are typed "grpc/status.(*statusError)"
+		// (ref. https://github.com/grpc/grpc-go/pull/1782)
+		//
+		// if the error type is not "grpc/status.(*statusError)",
+		// it could be from "Dial"
+		// TODO: do not retry for now
+		// ref. https://github.com/grpc/grpc-go/issues/1581
+		return false
+	}
+	return ev.Code() == codes.Unavailable
+}
+
+// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
+//
+// mutable requests (e.g. Put, Delete, Txn) should be retried only
+// when the status code is codes.Unavailable and the initial connection
+// has not yet been established (no endpoint is up).
+//
+// Returning "false" means retry should stop; retrying otherwise would
+// violate the write-at-most-once semantics.
+func isSafeRetryMutableRPC(err error) bool {
+	if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+		// not safe for mutable RPCs
+		// e.g. interrupted by non-transient error that client cannot handle itself,
+		// or transient error while the connection has already been established
+		return false
+	}
+	desc := rpctypes.ErrorDesc(err)
+	return desc == "there is no address available" || desc == "there is no connection available"
+}
+
+type retryKVClient struct {
+	kc pb.KVClient
+}
+
+// RetryKVClient implements a KVClient.
+func RetryKVClient(c *Client) pb.KVClient {
+	return &retryKVClient{
+		kc: pb.NewKVClient(c.conn),
+	}
+}
+
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+	return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+	return rkv.kc.Put(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+	return rkv.kc.DeleteRange(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+	return rkv.kc.Txn(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+	return rkv.kc.Compact(ctx, in, opts...)
+}
+
+type retryLeaseClient struct {
+	lc pb.LeaseClient
+}
+
+// RetryLeaseClient implements a LeaseClient.
+func RetryLeaseClient(c *Client) pb.LeaseClient {
+	return &retryLeaseClient{
+		lc: pb.NewLeaseClient(c.conn),
+	}
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
+	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
+	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
+	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...)
+}
+
+type retryClusterClient struct {
+	cc pb.ClusterClient
+}
+
+// RetryClusterClient implements a ClusterClient.
+func RetryClusterClient(c *Client) pb.ClusterClient {
+	return &retryClusterClient{
+		cc: pb.NewClusterClient(c.conn),
+	}
+}
+
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
+	return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+	return rcc.cc.MemberAdd(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+	return rcc.cc.MemberRemove(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+	return rcc.cc.MemberUpdate(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
+	return rcc.cc.MemberPromote(ctx, in, opts...)
+}
+
+type retryMaintenanceClient struct {
+	mc pb.MaintenanceClient
+}
+
+// RetryMaintenanceClient implements a MaintenanceClient.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+	return &retryMaintenanceClient{
+		mc: pb.NewMaintenanceClient(conn),
+	}
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+	return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+	return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+	return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+	return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+	return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+	return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+	return rmc.mc.Defragment(ctx, in, opts...)
+}
+
+func (rmc *retryMaintenanceClient) Downgrade(ctx context.Context, in *pb.DowngradeRequest, opts ...grpc.CallOption) (resp *pb.DowngradeResponse, err error) {
+	return rmc.mc.Downgrade(ctx, in, opts...)
+}
+
+type retryAuthClient struct {
+	ac pb.AuthClient
+}
+
+// RetryAuthClient implements an AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient {
+	return &retryAuthClient{
+		ac: pb.NewAuthClient(c.conn),
+	}
+}
+
+func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
+	return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
+	return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
+	return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
+	return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
+	return rac.ac.AuthEnable(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
+	return rac.ac.AuthDisable(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) AuthStatus(ctx context.Context, in *pb.AuthStatusRequest, opts ...grpc.CallOption) (resp *pb.AuthStatusResponse, err error) {
+	return rac.ac.AuthStatus(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
+	return rac.ac.UserAdd(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
+	return rac.ac.UserDelete(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
+	return rac.ac.UserChangePassword(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
+	return rac.ac.UserGrantRole(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
+	return rac.ac.UserRevokeRole(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
+	return rac.ac.RoleAdd(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
+	return rac.ac.RoleDelete(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
+	return rac.ac.RoleGrantPermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+	return rac.ac.RoleRevokePermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+	return rac.ac.Authenticate(ctx, in, opts...)
+}
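The repeatable/non-repeatable split above boils down to a status-code check. A minimal, self-contained sketch of the read-side rule follows; the names are illustrative, not upstream API.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retriableRead mirrors the core of isSafeRetryImmutableRPC: a read may be
// retried only when the server reports itself as Unavailable.
func retriableRead(err error) bool {
	ev, ok := status.FromError(err)
	return ok && ev.Code() == codes.Unavailable
}

func main() {
	fmt.Println(retriableRead(status.Error(codes.Unavailable, "leader changed")))    // true
	fmt.Println(retriableRead(status.Error(codes.InvalidArgument, "key not given"))) // false
}
```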
diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
new file mode 100644
index 0000000..7703e67
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
@@ -0,0 +1,441 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
+// fine grained error checking required by write-at-most-once retry semantics of etcd.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"io"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	"go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+// unaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. withMax) on creation of the interceptor or per call (through grpc.CallOptions).
+func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		ctx = withVersion(ctx)
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short-circuit for simplicity and to avoid allocations.
+		if callOpts.max == 0 {
+			return invoker(ctx, method, req, reply, cc, grpcOpts...)
+		}
+		var lastErr error
+		for attempt := uint(0); attempt < callOpts.max; attempt++ {
+			if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
+				return err
+			}
+			c.GetLogger().Debug(
+				"retrying of unary invoker",
+				zap.String("target", cc.Target()),
+				zap.String("method", method),
+				zap.Uint("attempt", attempt),
+			)
+			lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
+			if lastErr == nil {
+				return nil
+			}
+			c.GetLogger().Warn(
+				"retrying of unary invoker failed",
+				zap.String("target", cc.Target()),
+				zap.String("method", method),
+				zap.Uint("attempt", attempt),
+				zap.Error(lastErr),
+			)
+			if isContextError(lastErr) {
+				if ctx.Err() != nil {
+					// it's the context deadline or cancellation.
+					return lastErr
+				}
+				// it's the callCtx deadline or cancellation, in which case try again.
+				continue
+			}
+			if c.shouldRefreshToken(lastErr, callOpts) {
+				gtErr := c.refreshToken(ctx)
+				if gtErr != nil {
+					c.GetLogger().Warn(
+						"retrying of unary invoker failed to fetch new auth token",
+						zap.String("target", cc.Target()),
+						zap.Error(gtErr),
+					)
+					return gtErr // lastErr must be invalid auth token
+				}
+				continue
+			}
+			if !isSafeRetry(c, lastErr, callOpts) {
+				return lastErr
+			}
+		}
+		return lastErr
+	}
+}
+
+// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. withMax) on creation of the interceptor or per call (through grpc.CallOptions).
+//
+// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
+// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
+// BidiStreams), the retry interceptor will fail the call.
+func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamClientInterceptor {
+	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		ctx = withVersion(ctx)
+		// Fetch the auth token automatically. Otherwise, the token may be invalid after a watch
+		// reconnection because it has expired
+		// (see https://github.com/etcd-io/etcd/issues/11954 for more).
+		err := c.getToken(ctx)
+		if err != nil {
+			c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
+			return nil, err
+		}
+		grpcOpts, retryOpts := filterCallOptions(opts)
+		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+		// short-circuit for simplicity and to avoid allocations.
+		if callOpts.max == 0 {
+			return streamer(ctx, desc, cc, method, grpcOpts...)
+		}
+		if desc.ClientStreams {
+			return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
+		}
+		newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
+		if err != nil {
+			c.GetLogger().Error("streamer failed to create ClientStream", zap.Error(err))
+			return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+		}
+		retryingStreamer := &serverStreamingRetryingStream{
+			client:       c,
+			ClientStream: newStreamer,
+			callOpts:     callOpts,
+			ctx:          ctx,
+			streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
+				return streamer(ctx, desc, cc, method, grpcOpts...)
+			},
+		}
+		return retryingStreamer, nil
+	}
+}
+
+// shouldRefreshToken checks whether the auth token needs to be refreshed, based on the error and the call options.
+func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
+	if errors.Is(rpctypes.Error(err), rpctypes.ErrUserEmpty) {
+		// refresh the token when username and password are present but the server returns ErrUserEmpty,
+		// which is possible when the client token is cleared somehow
+		return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
+	}
+
+	return callOpts.retryAuth &&
+		(errors.Is(rpctypes.Error(err), rpctypes.ErrInvalidAuthToken) || errors.Is(rpctypes.Error(err), rpctypes.ErrAuthOldRevision))
+}
+
+func (c *Client) refreshToken(ctx context.Context) error {
+	if c.authTokenBundle == nil {
+		// c.authTokenBundle will be initialized only when
+		// c.Username != "" && c.Password != "".
+		//
+		// When users use the TLS CommonName based authentication, the
+		// authTokenBundle is always nil. But it's possible for the clients
+		// to get `rpctypes.ErrAuthOldRevision` response when the clients
+		// concurrently modify auth data (e.g, addUser, deleteUser etc.).
+		// In this case, there is no need to refresh the token; instead the
+		// clients just need to retry the operations (e.g. Put, Delete etc).
+		return nil
+	}
+
+	return c.getToken(ctx)
+}
+
+// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+	grpc.ClientStream
+	client        *Client
+	bufferedSends []any // single message that the client can send
+	receivedGood  bool  // indicates whether any prior receives were successful
+	wasClosedSend bool  // indicates that CloseSend was called
+	ctx           context.Context
+	callOpts      *options
+	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
+	mu            sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+	s.mu.Lock()
+	s.ClientStream = clientStream
+	s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+	s.mu.RLock()
+	defer s.mu.RUnlock()
+	return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m any) error {
+	s.mu.Lock()
+	s.bufferedSends = append(s.bufferedSends, m)
+	s.mu.Unlock()
+	return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+	s.mu.Lock()
+	s.wasClosedSend = true
+	s.mu.Unlock()
+	return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+	return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+	return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m any) error {
+	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+	if !attemptRetry {
+		return lastErr // success or hard failure
+	}
+
+	// We start off from attempt 1, because the zeroth attempt was already made by the RecvMsg above.
+	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
+			return err
+		}
+		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
+		if err != nil {
+			s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
+			return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+		}
+		s.setStream(newStream)
+
+		s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
+		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+		if !attemptRetry {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) {
+	s.mu.RLock()
+	wasGood := s.receivedGood
+	s.mu.RUnlock()
+	err := s.getStream().RecvMsg(m)
+	if err == nil || errors.Is(err, io.EOF) {
+		s.mu.Lock()
+		s.receivedGood = true
+		s.mu.Unlock()
+		return false, err
+	} else if wasGood {
+		// previous RecvMsg in the stream succeeded, no retry logic should interfere
+		return false, err
+	}
+	if isContextError(err) {
+		if s.ctx.Err() != nil {
+			return false, err
+		}
+		// its the callCtx deadline or cancellation, in which case try again.
+		return true, err
+	}
+	if s.client.shouldRefreshToken(err, s.callOpts) {
+		gtErr := s.client.refreshToken(s.ctx)
+		if gtErr != nil {
+			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr))
+			return false, err // return the original error for simplicity
+		}
+		return true, err
+	}
+	return isSafeRetry(s.client, err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
+	s.mu.RLock()
+	bufferedSends := s.bufferedSends
+	s.mu.RUnlock()
+	newStream, err := s.streamerCall(callCtx)
+	if err != nil {
+		return nil, err
+	}
+	for _, msg := range bufferedSends {
+		if err := newStream.SendMsg(msg); err != nil {
+			return nil, err
+		}
+	}
+	if err := newStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return newStream, nil
+}
+
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+	waitTime := time.Duration(0)
+	if attempt > 0 {
+		waitTime = callOpts.backoffFunc(attempt)
+	}
+	if waitTime > 0 {
+		timer := time.NewTimer(waitTime)
+		select {
+		case <-ctx.Done():
+			timer.Stop()
+			return contextErrToGRPCErr(ctx.Err())
+		case <-timer.C:
+		}
+	}
+	return nil
+}
+
+// isSafeRetry returns "true", if request is safe for retry with the given error.
+func isSafeRetry(c *Client, err error, callOpts *options) bool {
+	if isContextError(err) {
+		return false
+	}
+
+	// A learner refusing an RPC it is not supposed to serve is, from the
+	// server's perspective, not retryable.
+	// But for backward-compatibility reasons we need to support the situation
+	// where the user provides a mix of learners (not yet voters) and voters,
+	// with the expectation that a voter is picked on the next attempt.
+	// TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability.
+	if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 {
+		return true
+	}
+
+	switch callOpts.retryPolicy {
+	case repeatable:
+		return isSafeRetryImmutableRPC(err)
+	case nonRepeatable:
+		return isSafeRetryMutableRPC(err)
+	default:
+		c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+		return false
+	}
+}
+
+func isContextError(err error) bool {
+	return status.Code(err) == codes.DeadlineExceeded || status.Code(err) == codes.Canceled
+}
+
+func contextErrToGRPCErr(err error) error {
+	switch {
+	case errors.Is(err, context.DeadlineExceeded):
+		return status.Error(codes.DeadlineExceeded, err.Error())
+	case errors.Is(err, context.Canceled):
+		return status.Error(codes.Canceled, err.Error())
+	default:
+		return status.Error(codes.Unknown, err.Error())
+	}
+}
+
+var defaultOptions = &options{
+	retryPolicy: nonRepeatable,
+	max:         0, // disable
+	backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+	retryAuth:   true,
+}
+
+// backoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with the number of the attempt, and should return the duration the client should
+// hold off for. If the returned duration is longer than the `context.Context.Deadline` of the request,
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type backoffFunc func(attempt uint) time.Duration
+
+// withRepeatablePolicy sets the retry policy of this call to "repeatable".
+func withRepeatablePolicy() retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.retryPolicy = repeatable
+	}}
+}
+
+// withMax sets the maximum number of retries on this call, or this interceptor.
+func withMax(maxRetries uint) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.max = maxRetries
+	}}
+}
+
+// withBackoff sets the `backoffFunc` used to control the time between retries.
+func withBackoff(bf backoffFunc) retryOption {
+	return retryOption{applyFunc: func(o *options) {
+		o.backoffFunc = bf
+	}}
+}
+
+type options struct {
+	retryPolicy retryPolicy
+	max         uint
+	backoffFunc backoffFunc
+	retryAuth   bool
+}
+
+// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
+type retryOption struct {
+	grpc.EmptyCallOption // make sure we implement the private after() and before() methods so we don't panic.
+	applyFunc            func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
+	if len(retryOptions) == 0 {
+		return opt
+	}
+	optCopy := &options{}
+	*optCopy = *opt
+	for _, f := range retryOptions {
+		f.applyFunc(optCopy)
+	}
+	return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
+	for _, opt := range callOptions {
+		if co, ok := opt.(retryOption); ok {
+			retryOptions = append(retryOptions, co)
+		} else {
+			grpcOptions = append(grpcOptions, opt)
+		}
+	}
+	return grpcOptions, retryOptions
+}
+
+// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+//
+// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+	return func(attempt uint) time.Duration {
+		return jitterUp(waitBetween, jitterFraction)
+	}
+}
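To make the control flow above concrete, here is a stripped-down sketch of a retrying unary interceptor with the same overall shape. The names are illustrative, and it deliberately omits the token-refresh and per-call option plumbing the real interceptor carries.

```go
package example

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retryingUnary retries an invocation up to maxAttempts times, sleeping
// according to backoff between attempts, and only for Unavailable errors.
func retryingUnary(maxAttempts uint, backoff func(attempt uint) time.Duration) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		var lastErr error
		for attempt := uint(0); attempt < maxAttempts; attempt++ {
			if attempt > 0 {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case <-time.After(backoff(attempt)):
				}
			}
			if lastErr = invoker(ctx, method, req, reply, cc, opts...); lastErr == nil {
				return nil
			}
			if status.Code(lastErr) != codes.Unavailable {
				return lastErr // not transparently safe to retry
			}
		}
		return lastErr
	}
}
```

Such an interceptor would be wired in at dial time (e.g. via grpc.WithChainUnaryInterceptor); the interceptor above additionally refreshes expired auth tokens and distinguishes mutable from immutable RPCs.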
diff --git a/vendor/go.etcd.io/etcd/client/v3/sort.go b/vendor/go.etcd.io/etcd/client/v3/sort.go
new file mode 100644
index 0000000..9918ea9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/sort.go
@@ -0,0 +1,39 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+type (
+	SortTarget int
+	SortOrder  int
+)
+
+const (
+	SortNone SortOrder = iota
+	SortAscend
+	SortDescend
+)
+
+const (
+	SortByKey SortTarget = iota
+	SortByVersion
+	SortByCreateRevision
+	SortByModRevision
+	SortByValue
+)
+
+type SortOption struct {
+	Target SortTarget
+	Order  SortOrder
+}
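The integer values of these enums line up with the protobuf RangeRequest sort enums, which is what IsSortOptionValid in op.go relies on. A tiny sketch, assuming the vendored package is importable:

```go
package main

import (
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	fmt.Println(clientv3.SortNone, clientv3.SortAscend, clientv3.SortDescend) // 0 1 2
	fmt.Println(clientv3.SortByKey, clientv3.SortByModRevision)               // 0 3
	opt := clientv3.SortOption{Target: clientv3.SortByCreateRevision, Order: clientv3.SortDescend}
	fmt.Printf("%+v\n", opt) // {Target:2 Order:2}
}
```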
diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go
new file mode 100644
index 0000000..0a57332
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/txn.go
@@ -0,0 +1,150 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+//	Txn(context.TODO()).If(
+//	 Compare(Value(k1), ">", v1),
+//	 Compare(Version(k1), "=", 2)
+//	).Then(
+//	 OpPut(k2,v2), OpPut(k3,v3)
+//	).Else(
+//	 OpPut(k4,v4), OpPut(k5,v5)
+//	).Commit()
+type Txn interface {
+	// If takes a list of comparisons. If all comparisons passed in succeed,
+	// the operations passed into Then() will be executed. Otherwise the operations
+	// passed into Else() will be executed.
+	If(cs ...Cmp) Txn
+
+	// Then takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() succeed.
+	Then(ops ...Op) Txn
+
+	// Else takes a list of operations. The Ops list will be executed if the
+	// comparisons passed in If() fail.
+	Else(ops ...Op) Txn
+
+	// Commit tries to commit the transaction.
+	Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+	kv  *kv
+	ctx context.Context
+
+	mu    sync.Mutex
+	cif   bool
+	cthen bool
+	celse bool
+
+	isWrite bool
+
+	cmps []*pb.Compare
+
+	sus []*pb.RequestOp
+	fas []*pb.RequestOp
+
+	callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cif {
+		panic("cannot call If twice!")
+	}
+
+	if txn.cthen {
+		panic("cannot call If after Then!")
+	}
+
+	if txn.celse {
+		panic("cannot call If after Else!")
+	}
+
+	txn.cif = true
+
+	for i := range cs {
+		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+	}
+
+	return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.cthen {
+		panic("cannot call Then twice!")
+	}
+	if txn.celse {
+		panic("cannot call Then after Else!")
+	}
+
+	txn.cthen = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.sus = append(txn.sus, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	if txn.celse {
+		panic("cannot call Else twice!")
+	}
+
+	txn.celse = true
+
+	for _, op := range ops {
+		txn.isWrite = txn.isWrite || op.isWrite()
+		txn.fas = append(txn.fas, op.toRequestOp())
+	}
+
+	return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+	txn.mu.Lock()
+	defer txn.mu.Unlock()
+
+	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+	var resp *pb.TxnResponse
+	var err error
+	resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+	if err != nil {
+		return nil, ContextError(txn.ctx, err)
+	}
+	return (*TxnResponse)(resp), nil
+}
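A hedged usage sketch of the If/Then/Else flow documented above, assuming an established *clientv3.Client named cli; the key and value are placeholders.

```go
package example

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// createIfAbsent writes "k1" only when it does not exist yet (version 0),
// and reads the current value otherwise.
func createIfAbsent(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Version("k1"), "=", 0)).
		Then(clientv3.OpPut("k1", "v1")).
		Else(clientv3.OpGet("k1")).
		Commit()
	if err != nil {
		return err
	}
	fmt.Println("took Then branch:", resp.Succeeded)
	return nil
}
```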
diff --git a/vendor/go.etcd.io/etcd/client/v3/utils.go b/vendor/go.etcd.io/etcd/client/v3/utils.go
new file mode 100644
index 0000000..8502758
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/utils.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"math/rand"
+	"time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example, for 10s and jitter 0.1, it will return a duration within [9s, 11s].
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+	multiplier := jitter * (rand.Float64()*2 - 1)
+	return time.Duration(float64(duration) * (1 + multiplier))
+}
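A quick check of the documented jitter bounds, using a local copy of the formula; this is an editorial sketch, not upstream code.

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jittered reproduces jitterUp's formula: scale the duration by a random
// factor drawn uniformly from [1-jitter, 1+jitter].
func jittered(d time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1)
	return time.Duration(float64(d) * (1 + multiplier))
}

func main() {
	for i := 0; i < 5; i++ {
		w := jittered(10*time.Second, 0.1)
		fmt.Println(w, w >= 9*time.Second && w <= 11*time.Second) // always true
	}
}
```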
diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go
new file mode 100644
index 0000000..a46f98b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/watch.go
@@ -0,0 +1,1043 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+
+	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+	"go.etcd.io/etcd/api/v3/mvccpb"
+	v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+const (
+	EventTypeDelete = mvccpb.DELETE
+	EventTypePut    = mvccpb.PUT
+
+	closeSendErrTimeout = 250 * time.Millisecond
+
+	// AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+	// user-provided ID is available. If passed, an ID is assigned automatically.
+	AutoWatchID = 0
+
+	// InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
+	InvalidWatchID = -1
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+	// Watch watches on a key or prefix. The watched events will be returned
+	// through the returned channel. If revisions waiting to be sent over the
+	// watch are compacted, then the watch will be canceled by the server, the
+	// client will post a compacted error watch response, and the channel will close.
+	// If the requested revision is 0 or unspecified, the returned channel will
+	// return watch events that happen after the server receives the watch request.
+	// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
+	// and "WatchResponse" from this closed channel has zero events and nil "Err()".
+	// The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
+	// to release the associated resources.
+	//
+	// If the context is "context.Background/TODO", returned "WatchChan" will
+	// not be closed and block until event is triggered, except when server
+	// returns a non-recoverable error (e.g. ErrCompacted).
+	// For example, when context passed with "WithRequireLeader" and the
+	// connected server has no leader (e.g. due to network partition),
+	// error "etcdserver: no leader" (ErrNoLeader) will be returned,
+	// and then "WatchChan" is closed with non-nil "Err()".
+	// In order to prevent a watch stream being stuck in a partitioned node,
+	// make sure to wrap context with "WithRequireLeader".
+	//
+	// Otherwise, as long as the context has not been canceled or timed out,
+	// watch will retry on other recoverable errors forever until reconnected.
+	//
+	// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
+	// Currently, client contexts are overwritten with "valCtx" that never closes.
+	// TODO(v3.4): configure watch retry policy, limit maximum retry number
+	// (see https://github.com/etcd-io/etcd/issues/8980)
+	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+
+	// RequestProgress requests a progress notify response be sent in all watch channels.
+	RequestProgress(ctx context.Context) error
+
+	// Close closes the watcher and cancels all watch requests.
+	Close() error
+}
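+
+// A typical watch loop (editorial sketch, not part of the upstream source),
+// assuming an established *clientv3.Client "cli":
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	for wresp := range cli.Watch(clientv3.WithRequireLeader(ctx), "foo", clientv3.WithPrefix()) {
+//		if err := wresp.Err(); err != nil {
+//			break // non-recoverable, e.g. ErrCompacted
+//		}
+//		for _, ev := range wresp.Events {
+//			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//		}
+//	}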
+
+type WatchResponse struct {
+	Header pb.ResponseHeader
+	Events []*Event
+
+	// CompactRevision is the minimum revision the watcher may receive.
+	CompactRevision int64
+
+	// Canceled is used to indicate watch failure.
+	// If the watch failed and the stream was about to close, before the channel is closed,
+	// the channel sends a final response that has Canceled set to true with a non-nil Err().
+	Canceled bool
+
+	// Created is used to indicate the creation of the watcher.
+	Created bool
+
+	closeErr error
+
+	// cancelReason is the reason for canceling the watch
+	cancelReason string
+}
+
+// IsCreate returns true if the event indicates that the key is newly created.
+func (e *Event) IsCreate() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event indicates that a new value was put on an existing key.
+func (e *Event) IsModify() bool {
+	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+	switch {
+	case wr.closeErr != nil:
+		return v3rpc.Error(wr.closeErr)
+	case wr.CompactRevision != 0:
+		return v3rpc.ErrCompacted
+	case wr.Canceled:
+		if len(wr.cancelReason) != 0 {
+			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+		}
+		return v3rpc.ErrFutureRev
+	}
+	return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is a progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
+
+// watcher implements the Watcher interface
+type watcher struct {
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// mu protects the grpc streams map
+	mu sync.Mutex
+
+	// streams holds all the active grpc streams keyed by ctx value.
+	streams map[string]*watchGRPCStream
+	lg      *zap.Logger
+}
+
+// watchGRPCStream tracks all watch resources attached to a single grpc stream.
+type watchGRPCStream struct {
+	owner    *watcher
+	remote   pb.WatchClient
+	callOpts []grpc.CallOption
+
+	// ctx controls internal remote.Watch requests
+	ctx context.Context
+	// ctxKey is the key used when looking up this stream's context
+	ctxKey string
+	cancel context.CancelFunc
+
+	// substreams holds all active watchers on this grpc stream
+	substreams map[int64]*watcherStream
+	// resuming holds all resuming watchers on this grpc stream
+	resuming []*watcherStream
+
+	// reqc sends a watch request from Watch() to the main goroutine
+	reqc chan watchStreamRequest
+	// respc receives data from the watch client
+	respc chan *pb.WatchResponse
+	// donec closes to broadcast shutdown
+	donec chan struct{}
+	// errc transmits errors from grpc Recv to the watch stream reconnect logic
+	errc chan error
+	// closingc gets the watcherStream of closing watchers
+	closingc chan *watcherStream
+	// wg is Done when all substream goroutines have exited
+	wg sync.WaitGroup
+
+	// resumec closes to signal that all substreams should begin resuming
+	resumec chan struct{}
+	// closeErr is the error that closed the watch stream
+	closeErr error
+
+	lg *zap.Logger
+}
+
+// watchStreamRequest is a union of the supported watch request operation types
+type watchStreamRequest interface {
+	toPB() *pb.WatchRequest
+}
+
+// watchRequest is issued by the subscriber to start a new watcher
+type watchRequest struct {
+	ctx context.Context
+	key string
+	end string
+	rev int64
+
+	// send created notification event if this field is true
+	createdNotify bool
+	// progressNotify is for progress updates
+	progressNotify bool
+	// fragment is disabled by default; if true, watch events are split
+	// when their total size exceeds the "--max-request-bytes" flag value
+	// plus a 512-byte gRPC overhead
+	fragment bool
+
+	// filters is the list of events to filter out
+	filters []pb.WatchCreateRequest_FilterType
+	// get the previous key-value pair before the event happens
+	prevKV bool
+	// retc receives a chan WatchResponse once the watcher is established
+	retc chan chan WatchResponse
+}
+
+// progressRequest is issued by the subscriber to request watch progress
+type progressRequest struct{}
+
+// watcherStream represents a registered watcher
+type watcherStream struct {
+	// initReq is the request that initiated this stream
+	initReq watchRequest
+
+	// outc publishes watch responses to subscriber
+	outc chan WatchResponse
+	// recvc buffers watch responses before publishing
+	recvc chan *WatchResponse
+	// donec closes when the watcherStream goroutine stops.
+	donec chan struct{}
+	// closing is set to true when the stream should be scheduled to shut down.
+	closing bool
+	// id is the registered watch id on the grpc stream
+	id int64
+
+	// buf holds all events received from etcd but not yet consumed by the client
+	buf []*WatchResponse
+}
+
+func NewWatcher(c *Client) Watcher {
+	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+	w := &watcher{
+		remote:  wc,
+		streams: make(map[string]*watchGRPCStream),
+	}
+	if c != nil {
+		w.callOpts = c.callOpts
+		w.lg = c.lg
+	}
+	return w
+}
+
+// never closes
+var (
+	valCtxCh = make(chan struct{})
+	zeroTime = time.Unix(0, 0)
+)
+
+// ctx with only the values; never Done
+type valCtx struct{ context.Context }
+
+func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
+func (vc *valCtx) Done() <-chan struct{}       { return valCtxCh }
+func (vc *valCtx) Err() error                  { return nil }
+
+func (w *watcher) newWatcherGRPCStream(inctx context.Context) *watchGRPCStream {
+	ctx, cancel := context.WithCancel(&valCtx{inctx})
+	wgs := &watchGRPCStream{
+		owner:      w,
+		remote:     w.remote,
+		callOpts:   w.callOpts,
+		ctx:        ctx,
+		ctxKey:     streamKeyFromCtx(inctx),
+		cancel:     cancel,
+		substreams: make(map[int64]*watcherStream),
+		respc:      make(chan *pb.WatchResponse),
+		reqc:       make(chan watchStreamRequest),
+		donec:      make(chan struct{}),
+		errc:       make(chan error, 1),
+		closingc:   make(chan *watcherStream),
+		resumec:    make(chan struct{}),
+		lg:         w.lg,
+	}
+	go wgs.run()
+	return wgs
+}
+
+// Watch posts a watch request to run() and waits for a new watcher channel
+func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
+	ow := opWatch(key, opts...)
+
+	var filters []pb.WatchCreateRequest_FilterType
+	if ow.filterPut {
+		filters = append(filters, pb.WatchCreateRequest_NOPUT)
+	}
+	if ow.filterDelete {
+		filters = append(filters, pb.WatchCreateRequest_NODELETE)
+	}
+
+	wr := &watchRequest{
+		ctx:            ctx,
+		createdNotify:  ow.createdNotify,
+		key:            string(ow.key),
+		end:            string(ow.end),
+		rev:            ow.rev,
+		progressNotify: ow.progressNotify,
+		fragment:       ow.fragment,
+		filters:        filters,
+		prevKV:         ow.prevKV,
+		retc:           make(chan chan WatchResponse, 1),
+	}
+
+	ok := false
+	ctxKey := streamKeyFromCtx(ctx)
+
+	var closeCh chan WatchResponse
+	for {
+		// find or allocate appropriate grpc watch stream
+		w.mu.Lock()
+		if w.streams == nil {
+			// closed
+			w.mu.Unlock()
+			ch := make(chan WatchResponse)
+			close(ch)
+			return ch
+		}
+		wgs := w.streams[ctxKey]
+		if wgs == nil {
+			wgs = w.newWatcherGRPCStream(ctx)
+			w.streams[ctxKey] = wgs
+		}
+		donec := wgs.donec
+		reqc := wgs.reqc
+		w.mu.Unlock()
+
+		// lazily allocate the channel that is returned closed on failure
+		if closeCh == nil {
+			closeCh = make(chan WatchResponse, 1)
+		}
+
+		// submit request
+		select {
+		case reqc <- wr:
+			ok = true
+		case <-wr.ctx.Done():
+			ok = false
+		case <-donec:
+			ok = false
+			if wgs.closeErr != nil {
+				closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+				break
+			}
+			// retry; may have dropped stream from no ctxs
+			continue
+		}
+
+		// receive channel
+		if ok {
+			select {
+			case ret := <-wr.retc:
+				return ret
+			case <-ctx.Done():
+			case <-donec:
+				if wgs.closeErr != nil {
+					closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+					break
+				}
+				// retry; may have dropped stream from no ctxs
+				continue
+			}
+		}
+		break
+	}
+
+	close(closeCh)
+	return closeCh
+}
+
+func (w *watcher) Close() (err error) {
+	w.mu.Lock()
+	streams := w.streams
+	w.streams = nil
+	w.mu.Unlock()
+	for _, wgs := range streams {
+		if werr := wgs.close(); werr != nil {
+			err = werr
+		}
+	}
+	// Consider context.Canceled as a successful close
+	if errors.Is(err, context.Canceled) {
+		err = nil
+	}
+	return err
+}
+
+// RequestProgress requests a progress notify response be sent in all watch channels.
+func (w *watcher) RequestProgress(ctx context.Context) (err error) {
+	ctxKey := streamKeyFromCtx(ctx)
+
+	w.mu.Lock()
+	if w.streams == nil {
+		w.mu.Unlock()
+		return errors.New("no stream found for context")
+	}
+	wgs := w.streams[ctxKey]
+	if wgs == nil {
+		wgs = w.newWatcherGRPCStream(ctx)
+		w.streams[ctxKey] = wgs
+	}
+	donec := wgs.donec
+	reqc := wgs.reqc
+	w.mu.Unlock()
+
+	pr := &progressRequest{}
+
+	select {
+	case reqc <- pr:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-donec:
+		if wgs.closeErr != nil {
+			return wgs.closeErr
+		}
+		// retry; may have dropped stream from no ctxs
+		return w.RequestProgress(ctx)
+	}
+}
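
Progress requests pair naturally with progress notifications on the consuming side: a notification carries no events but advances the revision the client knows it has observed. A sketch under the same module-path assumption as above (clientv3.WithProgressNotify asks the server to emit notifications periodically; cli.RequestProgress triggers one on demand):

package main

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// watchWithProgress consumes a watch that also receives progress notifications.
func watchWithProgress(ctx context.Context, cli *clientv3.Client) {
	wch := cli.Watch(ctx, "jobs/", clientv3.WithPrefix(), clientv3.WithProgressNotify())
	for wresp := range wch {
		if wresp.IsProgressNotify() {
			// No events; the store is known to be current up to this revision.
			fmt.Println("caught up to revision", wresp.Header.Revision)
			continue
		}
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q\n", ev.Type, ev.Kv.Key)
		}
	}
}

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()
	watchWithProgress(context.Background(), cli)
}
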
+
+func (w *watchGRPCStream) close() (err error) {
+	w.cancel()
+	<-w.donec
+	select {
+	case err = <-w.errc:
+	default:
+	}
+	return ContextError(w.ctx, err)
+}
+
+func (w *watcher) closeStream(wgs *watchGRPCStream) {
+	w.mu.Lock()
+	close(wgs.donec)
+	wgs.cancel()
+	if w.streams != nil {
+		delete(w.streams, wgs.ctxKey)
+	}
+	w.mu.Unlock()
+}
+
+func (w *watchGRPCStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+	// check watch ID for backward compatibility (<= v3.3)
+	if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
+		w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
+		// failed; no channel
+		close(ws.recvc)
+		return
+	}
+	ws.id = resp.WatchId
+	w.substreams[ws.id] = ws
+}
+
+func (w *watchGRPCStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+	select {
+	case ws.outc <- *resp:
+	case <-ws.initReq.ctx.Done():
+	case <-time.After(closeSendErrTimeout):
+	}
+	close(ws.outc)
+}
+
+func (w *watchGRPCStream) closeSubstream(ws *watcherStream) {
+	// send channel response in case stream was never established
+	select {
+	case ws.initReq.retc <- ws.outc:
+	default:
+	}
+	// close subscriber's channel
+	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
+		go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
+	} else if ws.outc != nil {
+		close(ws.outc)
+	}
+	if ws.id != InvalidWatchID {
+		delete(w.substreams, ws.id)
+		return
+	}
+	for i := range w.resuming {
+		if w.resuming[i] == ws {
+			w.resuming[i] = nil
+			return
+		}
+	}
+}
+
+// run is the root of the goroutines for managing a watcher client
+func (w *watchGRPCStream) run() {
+	var wc pb.Watch_WatchClient
+	var closeErr error
+
+	// substreams marked to close but goroutine still running; needed for
+	// avoiding double-closing recvc on grpc stream teardown
+	closing := make(map[*watcherStream]struct{})
+
+	defer func() {
+		w.closeErr = closeErr
+		// shutdown substreams and resuming substreams
+		for _, ws := range w.substreams {
+			if _, ok := closing[ws]; !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		for _, ws := range w.resuming {
+			if _, ok := closing[ws]; ws != nil && !ok {
+				close(ws.recvc)
+				closing[ws] = struct{}{}
+			}
+		}
+		w.joinSubstreams()
+		for range closing {
+			w.closeSubstream(<-w.closingc)
+		}
+		w.wg.Wait()
+		w.owner.closeStream(w)
+	}()
+
+	// start a stream with the etcd grpc server
+	if wc, closeErr = w.newWatchClient(); closeErr != nil {
+		return
+	}
+
+	cancelSet := make(map[int64]struct{})
+
+	var cur *pb.WatchResponse
+	backoff := time.Millisecond
+	for {
+		select {
+		// Watch() requested
+		case req := <-w.reqc:
+			switch wreq := req.(type) {
+			case *watchRequest:
+				outc := make(chan WatchResponse, 1)
+				// TODO: pass custom watch ID?
+				ws := &watcherStream{
+					initReq: *wreq,
+					id:      InvalidWatchID,
+					outc:    outc,
+					// unbuffered so resumes won't cause repeat events
+					recvc: make(chan *WatchResponse),
+				}
+
+				ws.donec = make(chan struct{})
+				w.wg.Add(1)
+				go w.serveSubstream(ws, w.resumec)
+
+				// queue up for watcher creation/resume
+				w.resuming = append(w.resuming, ws)
+				if len(w.resuming) == 1 {
+					// head of resume queue, can register a new watcher
+					if err := wc.Send(ws.initReq.toPB()); err != nil {
+						w.lg.Debug("error when sending request", zap.Error(err))
+					}
+				}
+			case *progressRequest:
+				if err := wc.Send(wreq.toPB()); err != nil {
+					w.lg.Debug("error when sending request", zap.Error(err))
+				}
+			}
+
+		// new events from the watch client
+		case pbresp := <-w.respc:
+			if cur == nil || pbresp.Created || pbresp.Canceled {
+				cur = pbresp
+			} else if cur != nil && cur.WatchId == pbresp.WatchId {
+				// merge new events
+				cur.Events = append(cur.Events, pbresp.Events...)
+				// update "Fragment" field; the last response of a fragmented sequence has "Fragment" == false
+				cur.Fragment = pbresp.Fragment
+			}
+
+			switch {
+			case pbresp.Created:
+				// response to head of queue creation
+				if len(w.resuming) != 0 {
+					if ws := w.resuming[0]; ws != nil {
+						w.addSubstream(pbresp, ws)
+						w.dispatchEvent(pbresp)
+						w.resuming[0] = nil
+					}
+				}
+
+				if ws := w.nextResume(); ws != nil {
+					if err := wc.Send(ws.initReq.toPB()); err != nil {
+						w.lg.Debug("error when sending request", zap.Error(err))
+					}
+				}
+
+				// reset for next iteration
+				cur = nil
+
+			case pbresp.Canceled && pbresp.CompactRevision == 0:
+				delete(cancelSet, pbresp.WatchId)
+				if ws, ok := w.substreams[pbresp.WatchId]; ok {
+					// signal to stream goroutine to update closingc
+					close(ws.recvc)
+					closing[ws] = struct{}{}
+				}
+
+				// reset for next iteration
+				cur = nil
+
+			case cur.Fragment:
+				// watch response events are still fragmented
+				// continue to fetch next fragmented event arrival
+				continue
+
+			default:
+				// dispatch to appropriate watch stream
+				ok := w.dispatchEvent(cur)
+
+				// reset for next iteration
+				cur = nil
+
+				if ok {
+					break
+				}
+
+				// watch response on unexpected watch id; cancel id
+				if _, ok := cancelSet[pbresp.WatchId]; ok {
+					break
+				}
+
+				cancelSet[pbresp.WatchId] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: pbresp.WatchId,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+				if err := wc.Send(req); err != nil {
+					w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+				}
+			}
+
+		// watch client failed on Recv; spawn another if possible
+		case err := <-w.errc:
+			if isHaltErr(w.ctx, err) || errors.Is(ContextError(w.ctx, err), v3rpc.ErrNoLeader) {
+				closeErr = err
+				return
+			}
+			backoff = w.backoffIfUnavailable(backoff, err)
+			if wc, closeErr = w.newWatchClient(); closeErr != nil {
+				return
+			}
+			if ws := w.nextResume(); ws != nil {
+				if err := wc.Send(ws.initReq.toPB()); err != nil {
+					w.lg.Debug("error when sending request", zap.Error(err))
+				}
+			}
+			cancelSet = make(map[int64]struct{})
+
+		case <-w.ctx.Done():
+			return
+
+		case ws := <-w.closingc:
+			w.closeSubstream(ws)
+			delete(closing, ws)
+			// no more watchers on this stream; shut down and skip cancellation
+			if len(w.substreams)+len(w.resuming) == 0 {
+				return
+			}
+			if ws.id != InvalidWatchID {
+				// client is closing an established watch; close it on the server proactively instead of waiting
+				// to close when the next message arrives
+				cancelSet[ws.id] = struct{}{}
+				cr := &pb.WatchRequest_CancelRequest{
+					CancelRequest: &pb.WatchCancelRequest{
+						WatchId: ws.id,
+					},
+				}
+				req := &pb.WatchRequest{RequestUnion: cr}
+				w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+				if err := wc.Send(req); err != nil {
+					w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+				}
+			}
+		}
+	}
+}
+
+// nextResume chooses the next resuming watcher stream to register with the grpc stream. Abandoned
+// streams are marked nil in the queue, since the head of the queue must wait for its in-flight registration.
+func (w *watchGRPCStream) nextResume() *watcherStream {
+	for len(w.resuming) != 0 {
+		if w.resuming[0] != nil {
+			return w.resuming[0]
+		}
+		w.resuming = w.resuming[1:]
+	}
+	return nil
+}
+
+// dispatchEvent sends a WatchResponse to the appropriate watcher stream
+func (w *watchGRPCStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+	events := make([]*Event, len(pbresp.Events))
+	for i, ev := range pbresp.Events {
+		events[i] = (*Event)(ev)
+	}
+	// TODO: return watch ID?
+	wr := &WatchResponse{
+		Header:          *pbresp.Header,
+		Events:          events,
+		CompactRevision: pbresp.CompactRevision,
+		Created:         pbresp.Created,
+		Canceled:        pbresp.Canceled,
+		cancelReason:    pbresp.CancelReason,
+	}
+
+	// watch IDs are zero-indexed, so a progress notify response requested for all watchers is
+	// assigned a watch ID of InvalidWatchID to indicate that it should be broadcast.
+	if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
+		return w.broadcastResponse(wr)
+	}
+
+	return w.unicastResponse(wr, pbresp.WatchId)
+}
+
+// broadcastResponse sends a watch response to all watch substreams.
+func (w *watchGRPCStream) broadcastResponse(wr *WatchResponse) bool {
+	for _, ws := range w.substreams {
+		select {
+		case ws.recvc <- wr:
+		case <-ws.donec:
+		}
+	}
+	return true
+}
+
+// unicastResponse sends a watch response to a specific watch substream.
+func (w *watchGRPCStream) unicastResponse(wr *WatchResponse, watchID int64) bool {
+	ws, ok := w.substreams[watchID]
+	if !ok {
+		return false
+	}
+	select {
+	case ws.recvc <- wr:
+	case <-ws.donec:
+		return false
+	}
+	return true
+}
+
+// serveWatchClient forwards messages from the grpc stream to run()
+func (w *watchGRPCStream) serveWatchClient(wc pb.Watch_WatchClient) {
+	for {
+		resp, err := wc.Recv()
+		if err != nil {
+			select {
+			case w.errc <- err:
+			case <-w.donec:
+			}
+			return
+		}
+		select {
+		case w.respc <- resp:
+		case <-w.donec:
+			return
+		}
+	}
+}
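
serveWatchClient is an instance of a common Go shape: one goroutine owns the blocking Recv and pumps results into channels, so the select loop in run() can multiplex stream traffic with requests and shutdown. A generic, etcd-free sketch of the pattern:

package main

import (
	"errors"
	"fmt"
)

// pump forwards values from recv (a blocking source) into out until the
// source fails or done closes; the error send is guarded by done so the
// pump can never block a shutdown.
func pump(recv func() (int, error), out chan<- int, errc chan<- error, done <-chan struct{}) {
	for {
		v, err := recv()
		if err != nil {
			select {
			case errc <- err:
			case <-done:
			}
			return
		}
		select {
		case out <- v:
		case <-done:
			return
		}
	}
}

func main() {
	vals, i := []int{1, 2, 3}, 0
	recv := func() (int, error) {
		if i == len(vals) {
			return 0, errors.New("stream closed")
		}
		i++
		return vals[i-1], nil
	}
	out, errc, done := make(chan int), make(chan error, 1), make(chan struct{})
	go pump(recv, out, errc, done)
	for {
		select {
		case v := <-out:
			fmt.Println("event:", v)
		case err := <-errc:
			fmt.Println("stopped:", err)
			return
		}
	}
}
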
+
+// serveSubstream forwards watch responses from run() to the subscriber
+func (w *watchGRPCStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+	if ws.closing {
+		panic("created substream goroutine but substream is closing")
+	}
+
+	// nextRev is the minimum expected next revision
+	nextRev := ws.initReq.rev
+	resuming := false
+	defer func() {
+		if !resuming {
+			ws.closing = true
+		}
+		close(ws.donec)
+		if !resuming {
+			w.closingc <- ws
+		}
+		w.wg.Done()
+	}()
+
+	emptyWr := &WatchResponse{}
+	for {
+		curWr := emptyWr
+		outc := ws.outc
+
+		if len(ws.buf) > 0 {
+			curWr = ws.buf[0]
+		} else {
+			outc = nil
+		}
+		select {
+		case outc <- *curWr:
+			if ws.buf[0].Err() != nil {
+				return
+			}
+			ws.buf[0] = nil
+			ws.buf = ws.buf[1:]
+		case wr, ok := <-ws.recvc:
+			if !ok {
+				// shutdown from closeSubstream
+				return
+			}
+
+			if wr.Created {
+				if ws.initReq.retc != nil {
+					ws.initReq.retc <- ws.outc
+					// to prevent next write from taking the slot in buffered channel
+					// and posting duplicate create events
+					ws.initReq.retc = nil
+
+					// send first creation event only if requested
+					if ws.initReq.createdNotify {
+						ws.outc <- *wr
+					}
+					// once the watch channel is returned, a current revision
+					// watch must resume at the store revision. This is necessary
+					// for the following case to work as expected:
+					//	wch := m1.Watch("a")
+					//	m2.Put("a", "b")
+					//	<-wch
+					// If the revision is only bound on the first observed event,
+					// if wch is disconnected before the Put is issued, then reconnects
+					// after it is committed, it'll miss the Put.
+					if ws.initReq.rev == 0 {
+						nextRev = wr.Header.Revision
+					}
+				}
+			} else {
+				// current progress of watch; <= store revision
+				nextRev = wr.Header.Revision + 1
+			}
+
+			if len(wr.Events) > 0 {
+				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
+			}
+
+			ws.initReq.rev = nextRev
+
+			// created event is already sent above,
+			// watcher should not post duplicate events
+			if wr.Created {
+				continue
+			}
+
+			// TODO pause channel if buffer gets too large
+			ws.buf = append(ws.buf, wr)
+		case <-w.ctx.Done():
+			return
+		case <-ws.initReq.ctx.Done():
+			return
+		case <-resumec:
+			resuming = true
+			return
+		}
+	}
+	// lazily send cancel message if events on missing id
+}
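
The loop above leans on the nil-channel idiom: when the buffer is empty, outc is set to nil, and because a send on a nil channel blocks forever, that select case is effectively disabled until there is something to deliver. A small self-contained demonstration of the idiom:

package main

import (
	"fmt"
	"sync"
)

func main() {
	in := make(chan string, 3)
	out := make(chan string)
	in <- "a"
	in <- "b"
	close(in)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for s := range out {
			fmt.Println("delivered", s)
		}
	}()

	var buf []string
	for {
		// Leaving sendc nil disables the send case; a nil-channel send
		// blocks forever, so select ignores it until buf is non-empty.
		var sendc chan string
		var next string
		if len(buf) > 0 {
			sendc, next = out, buf[0]
		}
		select {
		case s, ok := <-in:
			if !ok {
				for _, rest := range buf { // drain what is left, then stop
					out <- rest
				}
				close(out)
				wg.Wait()
				return
			}
			buf = append(buf, s)
		case sendc <- next:
			buf = buf[1:]
		}
	}
}
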
+
+func (w *watchGRPCStream) newWatchClient() (pb.Watch_WatchClient, error) {
+	// mark all substreams as resuming
+	close(w.resumec)
+	w.resumec = make(chan struct{})
+	w.joinSubstreams()
+	for _, ws := range w.substreams {
+		ws.id = InvalidWatchID
+		w.resuming = append(w.resuming, ws)
+	}
+	// strip out nils, if any
+	var resuming []*watcherStream
+	for _, ws := range w.resuming {
+		if ws != nil {
+			resuming = append(resuming, ws)
+		}
+	}
+	w.resuming = resuming
+	w.substreams = make(map[int64]*watcherStream)
+
+	// connect to grpc stream while accepting watcher cancelation
+	stopc := make(chan struct{})
+	donec := w.waitCancelSubstreams(stopc)
+	wc, err := w.openWatchClient()
+	close(stopc)
+	<-donec
+
+	// serve all non-closing streams, even if there's a client error
+	// so that the teardown path can shut down the streams as expected.
+	for _, ws := range w.resuming {
+		if ws.closing {
+			continue
+		}
+		ws.donec = make(chan struct{})
+		w.wg.Add(1)
+		go w.serveSubstream(ws, w.resumec)
+	}
+
+	if err != nil {
+		return nil, v3rpc.Error(err)
+	}
+
+	// receive data from new grpc stream
+	go w.serveWatchClient(wc)
+	return wc, nil
+}
+
+func (w *watchGRPCStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+	var wg sync.WaitGroup
+	wg.Add(len(w.resuming))
+	donec := make(chan struct{})
+	for i := range w.resuming {
+		go func(ws *watcherStream) {
+			defer wg.Done()
+			if ws.closing {
+				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+					close(ws.outc)
+					ws.outc = nil
+				}
+				return
+			}
+			select {
+			case <-ws.initReq.ctx.Done():
+				// closed ws will be removed from resuming
+				ws.closing = true
+				close(ws.outc)
+				ws.outc = nil
+				w.wg.Add(1)
+				go func() {
+					defer w.wg.Done()
+					w.closingc <- ws
+				}()
+			case <-stopc:
+			}
+		}(w.resuming[i])
+	}
+	go func() {
+		defer close(donec)
+		wg.Wait()
+	}()
+	return donec
+}
+
+// joinSubstreams waits for all substream goroutines to complete.
+func (w *watchGRPCStream) joinSubstreams() {
+	for _, ws := range w.substreams {
+		<-ws.donec
+	}
+	for _, ws := range w.resuming {
+		if ws != nil {
+			<-ws.donec
+		}
+	}
+}
+
+var maxBackoff = 100 * time.Millisecond
+
+func (w *watchGRPCStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
+	if isUnavailableErr(w.ctx, err) {
+		// retry, but backoff
+		if backoff < maxBackoff {
+			// 25% backoff factor
+			backoff = backoff + backoff/4
+			if backoff > maxBackoff {
+				backoff = maxBackoff
+			}
+		}
+		time.Sleep(backoff)
+	}
+	return backoff
+}
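
Each consecutive unavailable error therefore stretches the wait by 25% until it saturates at maxBackoff. The schedule is easy to tabulate; a sketch of the same arithmetic in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	const maxBackoff = 100 * time.Millisecond
	backoff := time.Millisecond
	for i := 0; i < 25; i++ {
		if backoff < maxBackoff {
			backoff += backoff / 4 // grow by 25%
			if backoff > maxBackoff {
				backoff = maxBackoff
			}
		}
		fmt.Printf("retry %2d: wait %v\n", i+1, backoff)
	}
	// 1ms grows to 1.25ms, ~1.56ms, ... and saturates at 100ms
	// after about 21 consecutive failures.
}
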
+
+// openWatchClient retries opening a watch client until success or halt.
+// It retries manually to also cover the case where "ws == nil && err == nil".
+// TODO: remove FailFast=false
+func (w *watchGRPCStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+	backoff := time.Millisecond
+	for {
+		select {
+		case <-w.ctx.Done():
+			if err == nil {
+				return nil, w.ctx.Err()
+			}
+			return nil, err
+		default:
+		}
+		if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
+			break
+		}
+		if isHaltErr(w.ctx, err) {
+			return nil, v3rpc.Error(err)
+		}
+		backoff = w.backoffIfUnavailable(backoff, err)
+	}
+	return ws, nil
+}
+
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
+func (wr *watchRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchCreateRequest{
+		StartRevision:  wr.rev,
+		Key:            []byte(wr.key),
+		RangeEnd:       []byte(wr.end),
+		ProgressNotify: wr.progressNotify,
+		Filters:        wr.filters,
+		PrevKv:         wr.prevKV,
+		Fragment:       wr.fragment,
+	}
+	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
+
+// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
+func (pr *progressRequest) toPB() *pb.WatchRequest {
+	req := &pb.WatchProgressRequest{}
+	cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
+	return &pb.WatchRequest{RequestUnion: cr}
+}
+
+func streamKeyFromCtx(ctx context.Context) string {
+	if md, ok := metadata.FromOutgoingContext(ctx); ok {
+		return fmt.Sprintf("%+v", map[string][]string(md))
+	}
+	return ""
+}
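
A consequence of streamKeyFromCtx is that watches created with different outgoing gRPC metadata land on different underlying streams, while plain contexts all share one. A sketch of how the key is derived, using the real google.golang.org/grpc/metadata package:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// streamKey mirrors streamKeyFromCtx above.
func streamKey(ctx context.Context) string {
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		return fmt.Sprintf("%+v", map[string][]string(md))
	}
	return ""
}

func main() {
	base := context.Background()
	a := metadata.AppendToOutgoingContext(base, "token", "alice")
	b := metadata.AppendToOutgoingContext(base, "token", "bob")

	fmt.Println(streamKey(base)) // "" — all plain contexts share one stream
	fmt.Println(streamKey(a))    // map[token:[alice]]
	fmt.Println(streamKey(b))    // map[token:[bob]] — a separate stream
}
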
diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go
deleted file mode 100644
index edccf1a..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/auth.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"fmt"
-	"strings"
-
-	"github.com/coreos/etcd/auth/authpb"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"google.golang.org/grpc"
-)
-
-type (
-	AuthEnableResponse               pb.AuthEnableResponse
-	AuthDisableResponse              pb.AuthDisableResponse
-	AuthenticateResponse             pb.AuthenticateResponse
-	AuthUserAddResponse              pb.AuthUserAddResponse
-	AuthUserDeleteResponse           pb.AuthUserDeleteResponse
-	AuthUserChangePasswordResponse   pb.AuthUserChangePasswordResponse
-	AuthUserGrantRoleResponse        pb.AuthUserGrantRoleResponse
-	AuthUserGetResponse              pb.AuthUserGetResponse
-	AuthUserRevokeRoleResponse       pb.AuthUserRevokeRoleResponse
-	AuthRoleAddResponse              pb.AuthRoleAddResponse
-	AuthRoleGrantPermissionResponse  pb.AuthRoleGrantPermissionResponse
-	AuthRoleGetResponse              pb.AuthRoleGetResponse
-	AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
-	AuthRoleDeleteResponse           pb.AuthRoleDeleteResponse
-	AuthUserListResponse             pb.AuthUserListResponse
-	AuthRoleListResponse             pb.AuthRoleListResponse
-
-	PermissionType authpb.Permission_Type
-	Permission     authpb.Permission
-)
-
-const (
-	PermRead      = authpb.READ
-	PermWrite     = authpb.WRITE
-	PermReadWrite = authpb.READWRITE
-)
-
-type Auth interface {
-	// AuthEnable enables auth of an etcd cluster.
-	AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
-
-	// AuthDisable disables auth of an etcd cluster.
-	AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
-
-	// UserAdd adds a new user to an etcd cluster.
-	UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
-
-	// UserDelete deletes a user from an etcd cluster.
-	UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
-
-	// UserChangePassword changes a password of a user.
-	UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
-
-	// UserGrantRole grants a role to a user.
-	UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
-
-	// UserGet gets a detailed information of a user.
-	UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
-
-	// UserList gets a list of all users.
-	UserList(ctx context.Context) (*AuthUserListResponse, error)
-
-	// UserRevokeRole revokes a role of a user.
-	UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
-
-	// RoleAdd adds a new role to an etcd cluster.
-	RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
-
-	// RoleGrantPermission grants a permission to a role.
-	RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
-
-	// RoleGet gets a detailed information of a role.
-	RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
-
-	// RoleList gets a list of all roles.
-	RoleList(ctx context.Context) (*AuthRoleListResponse, error)
-
-	// RoleRevokePermission revokes a permission from a role.
-	RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
-
-	// RoleDelete deletes a role.
-	RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
-}
-
-type auth struct {
-	remote   pb.AuthClient
-	callOpts []grpc.CallOption
-}
-
-func NewAuth(c *Client) Auth {
-	api := &auth{remote: RetryAuthClient(c)}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
-	resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
-	return (*AuthEnableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
-	resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
-	return (*AuthDisableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
-	resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
-	return (*AuthUserAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
-	resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
-	return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
-	resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
-	return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
-	resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
-	return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
-	resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
-	return (*AuthUserGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
-	resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
-	return (*AuthUserListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
-	resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
-	return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
-	resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
-	return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
-	perm := &authpb.Permission{
-		Key:      []byte(key),
-		RangeEnd: []byte(rangeEnd),
-		PermType: authpb.Permission_Type(permType),
-	}
-	resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
-	return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
-	resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
-	return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
-	resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
-	return (*AuthRoleListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
-	resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
-	return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
-	resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
-	return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func StrToPermissionType(s string) (PermissionType, error) {
-	val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
-	if ok {
-		return PermissionType(val), nil
-	}
-	return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
-}
-
-type authenticator struct {
-	conn     *grpc.ClientConn // conn in-use
-	remote   pb.AuthClient
-	callOpts []grpc.CallOption
-}
-
-func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
-	resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
-	return (*AuthenticateResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authenticator) close() {
-	auth.conn.Close()
-}
-
-func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
-	conn, err := grpc.DialContext(ctx, target, opts...)
-	if err != nil {
-		return nil, err
-	}
-
-	api := &authenticator{
-		conn:   conn,
-		remote: pb.NewAuthClient(conn),
-	}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api, nil
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go
deleted file mode 100644
index c49e4ba..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/client.go
+++ /dev/null
@@ -1,665 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net"
-	"os"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/coreos/etcd/clientv3/balancer"
-	"github.com/coreos/etcd/clientv3/balancer/picker"
-	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
-	"github.com/coreos/etcd/clientv3/credentials"
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	"github.com/coreos/etcd/pkg/logutil"
-	"github.com/coreos/pkg/capnslog"
-	"github.com/google/uuid"
-	"go.uber.org/zap"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	grpccredentials "google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/keepalive"
-	"google.golang.org/grpc/status"
-)
-
-var (
-	ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
-	ErrOldCluster           = errors.New("etcdclient: old cluster version")
-
-	roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
-)
-
-var (
-	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
-)
-
-func init() {
-	lg := zap.NewNop()
-	if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
-		lcfg := logutil.DefaultZapLoggerConfig
-		lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
-
-		var err error
-		lg, err = lcfg.Build() // info level logging
-		if err != nil {
-			panic(err)
-		}
-	}
-
-	// TODO: support custom balancer
-	balancer.RegisterBuilder(balancer.Config{
-		Policy: picker.RoundrobinBalanced,
-		Name:   roundRobinBalancerName,
-		Logger: lg,
-	})
-}
-
-// Client provides and manages an etcd v3 client session.
-type Client struct {
-	Cluster
-	KV
-	Lease
-	Watcher
-	Auth
-	Maintenance
-
-	conn *grpc.ClientConn
-
-	cfg           Config
-	creds         grpccredentials.TransportCredentials
-	resolverGroup *endpoint.ResolverGroup
-	mu            *sync.RWMutex
-
-	ctx    context.Context
-	cancel context.CancelFunc
-
-	// Username is a user name for authentication.
-	Username string
-	// Password is a password for authentication.
-	Password        string
-	authTokenBundle credentials.Bundle
-
-	callOpts []grpc.CallOption
-
-	lg *zap.Logger
-}
-
-// New creates a new etcdv3 client from a given configuration.
-func New(cfg Config) (*Client, error) {
-	if len(cfg.Endpoints) == 0 {
-		return nil, ErrNoAvailableEndpoints
-	}
-
-	return newClient(&cfg)
-}
-
-// NewCtxClient creates a client with a context but no underlying grpc
-// connection. This is useful for embedded cases that override the
-// service interface implementations and do not need connection management.
-func NewCtxClient(ctx context.Context) *Client {
-	cctx, cancel := context.WithCancel(ctx)
-	return &Client{ctx: cctx, cancel: cancel}
-}
-
-// NewFromURL creates a new etcdv3 client from a URL.
-func NewFromURL(url string) (*Client, error) {
-	return New(Config{Endpoints: []string{url}})
-}
-
-// NewFromURLs creates a new etcdv3 client from URLs.
-func NewFromURLs(urls []string) (*Client, error) {
-	return New(Config{Endpoints: urls})
-}
-
-// Close shuts down the client's etcd connections.
-func (c *Client) Close() error {
-	c.cancel()
-	c.Watcher.Close()
-	c.Lease.Close()
-	if c.resolverGroup != nil {
-		c.resolverGroup.Close()
-	}
-	if c.conn != nil {
-		return toErr(c.ctx, c.conn.Close())
-	}
-	return c.ctx.Err()
-}
-
-// Ctx is a context for "out of band" messages (e.g., for sending
-// "clean up" message when another context is canceled). It is
-// canceled on client Close().
-func (c *Client) Ctx() context.Context { return c.ctx }
-
-// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() []string {
-	// copy the slice; protect original endpoints from being changed
-	c.mu.RLock()
-	defer c.mu.RUnlock()
-	eps := make([]string, len(c.cfg.Endpoints))
-	copy(eps, c.cfg.Endpoints)
-	return eps
-}
-
-// SetEndpoints updates client's endpoints.
-func (c *Client) SetEndpoints(eps ...string) {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	c.cfg.Endpoints = eps
-	c.resolverGroup.SetEndpoints(eps)
-}
-
-// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
-func (c *Client) Sync(ctx context.Context) error {
-	mresp, err := c.MemberList(ctx)
-	if err != nil {
-		return err
-	}
-	var eps []string
-	for _, m := range mresp.Members {
-		eps = append(eps, m.ClientURLs...)
-	}
-	c.SetEndpoints(eps...)
-	return nil
-}
-
-func (c *Client) autoSync() {
-	if c.cfg.AutoSyncInterval == time.Duration(0) {
-		return
-	}
-
-	for {
-		select {
-		case <-c.ctx.Done():
-			return
-		case <-time.After(c.cfg.AutoSyncInterval):
-			ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
-			err := c.Sync(ctx)
-			cancel()
-			if err != nil && err != c.ctx.Err() {
-				lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
-			}
-		}
-	}
-}
-
-func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
-	creds = c.creds
-	switch scheme {
-	case "unix":
-	case "http":
-		creds = nil
-	case "https", "unixs":
-		if creds != nil {
-			break
-		}
-		creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
-	default:
-		creds = nil
-	}
-	return creds
-}
-
-// dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
-	if c.cfg.DialKeepAliveTime > 0 {
-		params := keepalive.ClientParameters{
-			Time:                c.cfg.DialKeepAliveTime,
-			Timeout:             c.cfg.DialKeepAliveTimeout,
-			PermitWithoutStream: c.cfg.PermitWithoutStream,
-		}
-		opts = append(opts, grpc.WithKeepaliveParams(params))
-	}
-	opts = append(opts, dopts...)
-
-	dialer := endpoint.Dialer
-	if creds != nil {
-		opts = append(opts, grpc.WithTransportCredentials(creds))
-		// gRPC load balancer workaround. See credentials.transportCredential for details.
-		if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
-			dialer = credsDialer.Dialer
-		}
-	} else {
-		opts = append(opts, grpc.WithInsecure())
-	}
-	opts = append(opts, grpc.WithContextDialer(dialer))
-
-	// Interceptor retry and backoff.
-	// TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
-	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
-	// once it is available.
-	rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
-	opts = append(opts,
-		// Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
-		// Streams that are safe to retry are enabled individually.
-		grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
-		grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
-	)
-
-	return opts, nil
-}
-
-// Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
-	creds, err := c.directDialCreds(ep)
-	if err != nil {
-		return nil, err
-	}
-	// Use the grpc passthrough resolver to directly dial a single endpoint.
-	// This resolver passes through the 'unix' and 'unixs' endpoint schemes used
-	// by etcd without modification, allowing us to dial endpoints directly,
-	// using the same dial functions that we use for load balancer dialing.
-	return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
-}
-
-func (c *Client) getToken(ctx context.Context) error {
-	var err error // return last error in a case of fail
-	var auth *authenticator
-
-	eps := c.Endpoints()
-	for _, ep := range eps {
-		// use dial options without dopts to avoid reusing the client balancer
-		var dOpts []grpc.DialOption
-		_, host, _ := endpoint.ParseEndpoint(ep)
-		target := c.resolverGroup.Target(host)
-		creds := c.dialWithBalancerCreds(ep)
-		dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
-		if err != nil {
-			err = fmt.Errorf("failed to configure auth dialer: %v", err)
-			continue
-		}
-		dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
-		auth, err = newAuthenticator(ctx, target, dOpts, c)
-		if err != nil {
-			continue
-		}
-		defer auth.close()
-
-		var resp *AuthenticateResponse
-		resp, err = auth.authenticate(ctx, c.Username, c.Password)
-		if err != nil {
-			// return err without retrying other endpoints
-			if err == rpctypes.ErrAuthNotEnabled {
-				return err
-			}
-			continue
-		}
-
-		c.authTokenBundle.UpdateAuthToken(resp.Token)
-		return nil
-	}
-
-	return err
-}
-
-// dialWithBalancer dials the client's current load balanced resolver group.  The scheme of the host
-// of the provided endpoint determines the scheme used for all endpoints of the client connection.
-func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
-	_, host, _ := endpoint.ParseEndpoint(ep)
-	target := c.resolverGroup.Target(host)
-	creds := c.dialWithBalancerCreds(ep)
-	return c.dial(target, creds, dopts...)
-}
-
-// dial configures and dials any grpc balancer target.
-func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
-	opts, err := c.dialSetupOpts(creds, dopts...)
-	if err != nil {
-		return nil, fmt.Errorf("failed to configure dialer: %v", err)
-	}
-
-	if c.Username != "" && c.Password != "" {
-		c.authTokenBundle = credentials.NewBundle(credentials.Config{})
-
-		ctx, cancel := c.ctx, func() {}
-		if c.cfg.DialTimeout > 0 {
-			ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
-		}
-
-		err = c.getToken(ctx)
-		if err != nil {
-			if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
-				if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
-					err = context.DeadlineExceeded
-				}
-				cancel()
-				return nil, err
-			}
-		} else {
-			opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
-		}
-		cancel()
-	}
-
-	opts = append(opts, c.cfg.DialOptions...)
-
-	dctx := c.ctx
-	if c.cfg.DialTimeout > 0 {
-		var cancel context.CancelFunc
-		dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
-		defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
-	}
-
-	conn, err := grpc.DialContext(dctx, target, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return conn, nil
-}
-
-func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
-	_, host, scheme := endpoint.ParseEndpoint(ep)
-	creds := c.creds
-	if len(scheme) != 0 {
-		creds = c.processCreds(scheme)
-		if creds != nil {
-			clone := creds.Clone()
-			// The server name must be set to the endpoint hostname without the port, since grpc
-			// otherwise attempts to check if the x509 cert is valid for the full endpoint
-			// including the scheme and port, which fails.
-			overrideServerName, _, err := net.SplitHostPort(host)
-			if err != nil {
-				// Either the host didn't have a port or the host could not be parsed. Either way, continue with the
-				// original host string.
-				overrideServerName = host
-			}
-			clone.OverrideServerName(overrideServerName)
-			creds = clone
-		}
-	}
-	return creds, nil
-}
-
-func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
-	_, _, scheme := endpoint.ParseEndpoint(ep)
-	creds := c.creds
-	if len(scheme) != 0 {
-		creds = c.processCreds(scheme)
-	}
-	return creds
-}
-
-func newClient(cfg *Config) (*Client, error) {
-	if cfg == nil {
-		cfg = &Config{}
-	}
-	var creds grpccredentials.TransportCredentials
-	if cfg.TLS != nil {
-		creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
-	}
-
-	// use a temporary skeleton client to bootstrap first connection
-	baseCtx := context.TODO()
-	if cfg.Context != nil {
-		baseCtx = cfg.Context
-	}
-
-	ctx, cancel := context.WithCancel(baseCtx)
-	client := &Client{
-		conn:     nil,
-		cfg:      *cfg,
-		creds:    creds,
-		ctx:      ctx,
-		cancel:   cancel,
-		mu:       new(sync.RWMutex),
-		callOpts: defaultCallOpts,
-	}
-
-	lcfg := logutil.DefaultZapLoggerConfig
-	if cfg.LogConfig != nil {
-		lcfg = *cfg.LogConfig
-	}
-	var err error
-	client.lg, err = lcfg.Build()
-	if err != nil {
-		return nil, err
-	}
-
-	if cfg.Username != "" && cfg.Password != "" {
-		client.Username = cfg.Username
-		client.Password = cfg.Password
-	}
-	if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
-		if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
-			return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
-		}
-		callOpts := []grpc.CallOption{
-			defaultFailFast,
-			defaultMaxCallSendMsgSize,
-			defaultMaxCallRecvMsgSize,
-		}
-		if cfg.MaxCallSendMsgSize > 0 {
-			callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
-		}
-		if cfg.MaxCallRecvMsgSize > 0 {
-			callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
-		}
-		client.callOpts = callOpts
-	}
-
-	// Prepare an 'endpoint://<unique-client-id>/' resolver for the client and create an endpoint target to pass
-	// to dial so the client knows to use this resolver.
-	client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
-	if err != nil {
-		client.cancel()
-		return nil, err
-	}
-	client.resolverGroup.SetEndpoints(cfg.Endpoints)
-
-	if len(cfg.Endpoints) < 1 {
-		return nil, fmt.Errorf("at least one Endpoint must is required in client config")
-	}
-	dialEndpoint := cfg.Endpoints[0]
-
-	// Use a provided endpoint target so that, for https:// endpoints without any tls config given,
-	// grpc will assume the certificate server name is the endpoint host.
-	conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
-	if err != nil {
-		client.cancel()
-		client.resolverGroup.Close()
-		return nil, err
-	}
-	// TODO: With the old grpc balancer interface, we waited until the dial timeout
-	// for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
-	client.conn = conn
-
-	client.Cluster = NewCluster(client)
-	client.KV = NewKV(client)
-	client.Lease = NewLease(client)
-	client.Watcher = NewWatcher(client)
-	client.Auth = NewAuth(client)
-	client.Maintenance = NewMaintenance(client)
-
-	if cfg.RejectOldCluster {
-		if err := client.checkVersion(); err != nil {
-			client.Close()
-			return nil, err
-		}
-	}
-
-	go client.autoSync()
-	return client, nil
-}
-
-// roundRobinQuorumBackoff retries against quorum between each backoff.
-// This is intended for use with a round robin load balancer.
-func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
-	return func(attempt uint) time.Duration {
-		// after each round robin across quorum, backoff for our wait between duration
-		n := uint(len(c.Endpoints()))
-		quorum := (n/2 + 1)
-		if attempt%quorum == 0 {
-			c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
-			return jitterUp(waitBetween, jitterFraction)
-		}
-		c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
-		return 0
-	}
-}
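
The quorum-aware backoff above waits only once per full round-robin pass across a quorum of endpoints, letting the balancer try the remaining members immediately. A standalone sketch of the schedule it produces, with jitter omitted:

package main

import (
	"fmt"
	"time"
)

func main() {
	endpoints := 5
	quorum := uint(endpoints/2 + 1) // 3 of 5
	waitBetween := 25 * time.Millisecond

	backoff := func(attempt uint) time.Duration {
		if attempt%quorum == 0 {
			return waitBetween // pause once per pass across a quorum
		}
		return 0 // otherwise move straight to the next endpoint
	}

	for attempt := uint(0); attempt < 7; attempt++ {
		fmt.Printf("attempt %d: wait %v\n", attempt, backoff(attempt))
	}
}
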
-
-func (c *Client) checkVersion() (err error) {
-	var wg sync.WaitGroup
-
-	eps := c.Endpoints()
-	errc := make(chan error, len(eps))
-	ctx, cancel := context.WithCancel(c.ctx)
-	if c.cfg.DialTimeout > 0 {
-		cancel()
-		ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
-	}
-
-	wg.Add(len(eps))
-	for _, ep := range eps {
-		// if cluster is current, any endpoint gives a recent version
-		go func(e string) {
-			defer wg.Done()
-			resp, rerr := c.Status(ctx, e)
-			if rerr != nil {
-				errc <- rerr
-				return
-			}
-			vs := strings.Split(resp.Version, ".")
-			maj, min := 0, 0
-			if len(vs) >= 2 {
-				var serr error
-				if maj, serr = strconv.Atoi(vs[0]); serr != nil {
-					errc <- serr
-					return
-				}
-				if min, serr = strconv.Atoi(vs[1]); serr != nil {
-					errc <- serr
-					return
-				}
-			}
-			if maj < 3 || (maj == 3 && min < 2) {
-				rerr = ErrOldCluster
-			}
-			errc <- rerr
-		}(ep)
-	}
-	// wait for success
-	for range eps {
-		if err = <-errc; err == nil {
-			break
-		}
-	}
-	cancel()
-	wg.Wait()
-	return err
-}
-
-// ActiveConnection returns the current in-use connection
-func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
-
-// isHaltErr returns true if the given error and context indicate no forward
-// progress can be made, even after reconnecting.
-func isHaltErr(ctx context.Context, err error) bool {
-	if ctx != nil && ctx.Err() != nil {
-		return true
-	}
-	if err == nil {
-		return false
-	}
-	ev, _ := status.FromError(err)
-	// Unavailable codes mean the system will be right back.
-	// (e.g., can't connect, lost leader)
-	// Treat Internal codes as if something failed, leaving the
-	// system in an inconsistent state, but retrying could make progress.
-	// (e.g., failed in middle of send, corrupted frame)
-	// TODO: are permanent Internal errors possible from grpc?
-	return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
-}
-
-// isUnavailableErr returns true if the given error is an unavailable error
-func isUnavailableErr(ctx context.Context, err error) bool {
-	if ctx != nil && ctx.Err() != nil {
-		return false
-	}
-	if err == nil {
-		return false
-	}
-	ev, ok := status.FromError(err)
-	if ok {
-		// Unavailable codes mean the system will be right back.
-		// (e.g., can't connect, lost leader)
-		return ev.Code() == codes.Unavailable
-	}
-	return false
-}
-
-func toErr(ctx context.Context, err error) error {
-	if err == nil {
-		return nil
-	}
-	err = rpctypes.Error(err)
-	if _, ok := err.(rpctypes.EtcdError); ok {
-		return err
-	}
-	if ev, ok := status.FromError(err); ok {
-		code := ev.Code()
-		switch code {
-		case codes.DeadlineExceeded:
-			fallthrough
-		case codes.Canceled:
-			if ctx.Err() != nil {
-				err = ctx.Err()
-			}
-		}
-	}
-	return err
-}
-
-func canceledByCaller(stopCtx context.Context, err error) bool {
-	if stopCtx.Err() == nil || err == nil {
-		return false
-	}
-
-	return err == context.Canceled || err == context.DeadlineExceeded
-}
-
-// IsConnCanceled returns true, if error is from a closed gRPC connection.
-// ref. https://github.com/grpc/grpc-go/pull/1854
-func IsConnCanceled(err error) bool {
-	if err == nil {
-		return false
-	}
-
-	// >= gRPC v1.23.x
-	s, ok := status.FromError(err)
-	if ok {
-		// connection is canceled or server has already closed the connection
-		return s.Code() == codes.Canceled || s.Message() == "transport is closing"
-	}
-
-	// >= gRPC v1.10.x
-	if err == context.Canceled {
-		return true
-	}
-
-	// <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
-	return strings.Contains(err.Error(), "grpc: the client connection is closing")
-}
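
IsConnCanceled has to recognize a deliberately closed connection across three generations of gRPC error reporting: a Canceled status, a bare context.Canceled, and a legacy error string. A self-contained sketch of the same checks (the strings are taken from the code above):

package main

import (
	"context"
	"errors"
	"fmt"
	"strings"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// connCanceled mirrors the three checks above.
func connCanceled(err error) bool {
	if err == nil {
		return false
	}
	if s, ok := status.FromError(err); ok {
		return s.Code() == codes.Canceled || s.Message() == "transport is closing"
	}
	return errors.Is(err, context.Canceled) ||
		strings.Contains(err.Error(), "grpc: the client connection is closing")
}

func main() {
	fmt.Println(connCanceled(status.Error(codes.Canceled, "canceled"))) // true
	fmt.Println(connCanceled(context.Canceled))                         // true
	fmt.Println(connCanceled(errors.New("boom")))                       // false
}
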
-
-// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
-type TransportCredentialsWithDialer interface {
-	grpccredentials.TransportCredentials
-	Dialer(ctx context.Context, dialEp string) (net.Conn, error)
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go
deleted file mode 100644
index 785672b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/cluster.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	"github.com/coreos/etcd/pkg/types"
-
-	"google.golang.org/grpc"
-)
-
-type (
-	Member               pb.Member
-	MemberListResponse   pb.MemberListResponse
-	MemberAddResponse    pb.MemberAddResponse
-	MemberRemoveResponse pb.MemberRemoveResponse
-	MemberUpdateResponse pb.MemberUpdateResponse
-)
-
-type Cluster interface {
-	// MemberList lists the current cluster membership.
-	MemberList(ctx context.Context) (*MemberListResponse, error)
-
-	// MemberAdd adds a new member into the cluster.
-	MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
-
-	// MemberRemove removes an existing member from the cluster.
-	MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
-
-	// MemberUpdate updates the peer addresses of the member.
-	MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
-}
-
-type cluster struct {
-	remote   pb.ClusterClient
-	callOpts []grpc.CallOption
-}
-
-func NewCluster(c *Client) Cluster {
-	api := &cluster{remote: RetryClusterClient(c)}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
-	api := &cluster{remote: remote}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
-	// fail-fast before panic in rafthttp
-	if _, err := types.NewURLs(peerAddrs); err != nil {
-		return nil, err
-	}
-
-	r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
-	resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	return (*MemberAddResponse)(resp), nil
-}
-
-func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
-	r := &pb.MemberRemoveRequest{ID: id}
-	resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	return (*MemberRemoveResponse)(resp), nil
-}
-
-func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
-	// fail-fast before panic in rafthttp
-	if _, err := types.NewURLs(peerAddrs); err != nil {
-		return nil, err
-	}
-
-	// it is safe to retry on update.
-	r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
-	resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
-	if err == nil {
-		return (*MemberUpdateResponse)(resp), nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
-	// it is safe to retry on list.
-	resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
-	if err == nil {
-		return (*MemberListResponse)(resp), nil
-	}
-	return nil, toErr(ctx, err)
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go
deleted file mode 100644
index 41e80c1..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/compact_op.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-// CompactOp represents a compact operation.
-type CompactOp struct {
-	revision int64
-	physical bool
-}
-
-// CompactOption configures compact operation.
-type CompactOption func(*CompactOp)
-
-func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
-	for _, opt := range opts {
-		opt(op)
-	}
-}
-
-// OpCompact wraps a slice of CompactOptions to create a CompactOp.
-func OpCompact(rev int64, opts ...CompactOption) CompactOp {
-	ret := CompactOp{revision: rev}
-	ret.applyCompactOpts(opts)
-	return ret
-}
-
-func (op CompactOp) toRequest() *pb.CompactionRequest {
-	return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
-}
-
-// WithCompactPhysical makes Compact wait until all compacted entries are
-// removed from the etcd server's storage.
-func WithCompactPhysical() CompactOption {
-	return func(op *CompactOp) { op.physical = true }
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go
deleted file mode 100644
index b5f0a25..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/compare.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type CompareTarget int
-type CompareResult int
-
-const (
-	CompareVersion CompareTarget = iota
-	CompareCreated
-	CompareModified
-	CompareValue
-)
-
-type Cmp pb.Compare
-
-func Compare(cmp Cmp, result string, v interface{}) Cmp {
-	var r pb.Compare_CompareResult
-
-	switch result {
-	case "=":
-		r = pb.Compare_EQUAL
-	case "!=":
-		r = pb.Compare_NOT_EQUAL
-	case ">":
-		r = pb.Compare_GREATER
-	case "<":
-		r = pb.Compare_LESS
-	default:
-		panic("Unknown result op")
-	}
-
-	cmp.Result = r
-	switch cmp.Target {
-	case pb.Compare_VALUE:
-		val, ok := v.(string)
-		if !ok {
-			panic("bad compare value")
-		}
-		cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
-	case pb.Compare_VERSION:
-		cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
-	case pb.Compare_CREATE:
-		cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
-	case pb.Compare_MOD:
-		cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
-	case pb.Compare_LEASE:
-		cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
-	default:
-		panic("Unknown compare type")
-	}
-	return cmp
-}
-
-func Value(key string) Cmp {
-	return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
-}
-
-func Version(key string) Cmp {
-	return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
-}
-
-func CreateRevision(key string) Cmp {
-	return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
-}
-
-func ModRevision(key string) Cmp {
-	return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
-}
-
-// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
-// LeaseID is 0, otherwise known as `NoLease`.
-func LeaseValue(key string) Cmp {
-	return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
-}
-
-// KeyBytes returns the byte slice holding the comparison key.
-func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
-
-// WithKeyBytes sets the byte slice for the comparison key.
-func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
-
-// ValueBytes returns the byte slice holding the comparison value, if any.
-func (cmp *Cmp) ValueBytes() []byte {
-	if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
-		return tu.Value
-	}
-	return nil
-}
-
-// WithValueBytes sets the byte slice for the comparison's value.
-func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
-
-// WithRange sets the comparison to scan the range [key, end).
-func (cmp Cmp) WithRange(end string) Cmp {
-	cmp.RangeEnd = []byte(end)
-	return cmp
-}
-
-// WithPrefix sets the comparison to scan all keys prefixed by the key.
-func (cmp Cmp) WithPrefix() Cmp {
-	cmp.RangeEnd = getPrefix(cmp.Key)
-	return cmp
-}
-
-// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
-func mustInt64(val interface{}) int64 {
-	if v, ok := val.(int64); ok {
-		return v
-	}
-	if v, ok := val.(int); ok {
-		return int64(v)
-	}
-	panic("bad value")
-}
-
-// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
-// int64 otherwise.
-func mustInt64orLeaseID(val interface{}) int64 {
-	if v, ok := val.(LeaseID); ok {
-		return int64(v)
-	}
-	return mustInt64(val)
-}
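
These comparison builders are consumed through transactions: a Cmp guards the Then/Else branches of a Txn. A compare-and-swap sketch against the clientv3 API, with the endpoint, key, and values as placeholders:

package main

import (
	"context"
	"fmt"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Compare-and-swap: only write the new value if the key still holds "old".
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("config/mode"), "=", "old")).
		Then(clientv3.OpPut("config/mode", "new")).
		Else(clientv3.OpGet("config/mode")).
		Commit()
	if err != nil {
		panic(err)
	}
	fmt.Println("swapped:", resp.Succeeded)
}
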
diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go
deleted file mode 100644
index 9c17fc2..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"crypto/tls"
-	"time"
-
-	"go.uber.org/zap"
-	"google.golang.org/grpc"
-)
-
-type Config struct {
-	// Endpoints is a list of URLs.
-	Endpoints []string `json:"endpoints"`
-
-	// AutoSyncInterval is the interval at which endpoints are updated with the cluster's latest members.
-	// 0 disables auto-sync. By default auto-sync is disabled.
-	AutoSyncInterval time.Duration `json:"auto-sync-interval"`
-
-	// DialTimeout is the timeout for failing to establish a connection.
-	DialTimeout time.Duration `json:"dial-timeout"`
-
-	// DialKeepAliveTime is the time after which client pings the server to see if
-	// transport is alive.
-	DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
-
-	// DialKeepAliveTimeout is the time that the client waits for a response for the
-	// keep-alive probe. If the response is not received in this time, the connection is closed.
-	DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
-
-	// MaxCallSendMsgSize is the client-side request send limit in bytes.
-	// If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
-	// Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
-	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
-	MaxCallSendMsgSize int
-
-	// MaxCallRecvMsgSize is the client-side response receive limit.
-	// If 0, it defaults to "math.MaxInt32", because range response can
-	// easily exceed request send limits.
-	// Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
-	// ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
-	MaxCallRecvMsgSize int
-
-	// TLS holds the client secure credentials, if any.
-	TLS *tls.Config
-
-	// Username is a user name for authentication.
-	Username string `json:"username"`
-
-	// Password is a password for authentication.
-	Password string `json:"password"`
-
-	// RejectOldCluster when set will refuse to create a client against an outdated cluster.
-	RejectOldCluster bool `json:"reject-old-cluster"`
-
-	// DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
-	// For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
-	// Without this, Dial returns immediately and connecting to the server happens in the background.
-	DialOptions []grpc.DialOption
-
-	// LogConfig configures client-side logger.
-	// If nil, use the default logger.
-	// TODO: configure gRPC logger
-	LogConfig *zap.Config
-
-	// Context is the default client context; it can be used to cancel grpc dial out and
-	// other operations that do not have an explicit context.
-	Context context.Context
-
-	// PermitWithoutStream when set will allow the client to send keepalive pings to the server without any active streams (RPCs).
-	PermitWithoutStream bool `json:"permit-without-stream"`
-}
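A minimal sketch of building a client from this Config; the endpoint address and timeout values below are illustrative assumptions:

	package main

	import (
		"log"
		"time"

		"go.etcd.io/etcd/clientv3"
	)

	func main() {
		cli, err := clientv3.New(clientv3.Config{
			Endpoints:            []string{"localhost:2379"},
			DialTimeout:          5 * time.Second,
			DialKeepAliveTime:    30 * time.Second,
			DialKeepAliveTimeout: 10 * time.Second,
		})
		if err != nil {
			log.Fatal(err)
		}
		defer cli.Close() // release watchers, leases, and the connection
	}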
diff --git a/vendor/go.etcd.io/etcd/clientv3/ctx.go b/vendor/go.etcd.io/etcd/clientv3/ctx.go
deleted file mode 100644
index da8297b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/ctx.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"strings"
-
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	"github.com/coreos/etcd/version"
-	"google.golang.org/grpc/metadata"
-)
-
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
-	md, ok := metadata.FromOutgoingContext(ctx)
-	if !ok { // no outgoing metadata ctx key, create one
-		md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-		return metadata.NewOutgoingContext(ctx, md)
-	}
-	copied := md.Copy() // avoid racy updates
-	// overwrite/add 'hasleader' key/value
-	metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
-	return metadata.NewOutgoingContext(ctx, copied)
-}
-
-// withVersion embeds the client version in the outgoing context metadata.
-func withVersion(ctx context.Context) context.Context {
-	md, ok := metadata.FromOutgoingContext(ctx)
-	if !ok { // no outgoing metadata ctx key, create one
-		md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
-		return metadata.NewOutgoingContext(ctx, md)
-	}
-	copied := md.Copy() // avoid racy updates
-	// overwrite/add version key/value
-	metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
-	return metadata.NewOutgoingContext(ctx, copied)
-}
-
-func metadataGet(md metadata.MD, k string) []string {
-	k = strings.ToLower(k)
-	return md[k]
-}
-
-func metadataSet(md metadata.MD, k string, vals ...string) {
-	if len(vals) == 0 {
-		return
-	}
-	k = strings.ToLower(k)
-	md[k] = vals
-}
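As a usage sketch, WithRequireLeader is applied per call so that a request fails fast on a leaderless cluster; `cli`, the key, and the imports ("context" and the clientv3 package) are assumed:

	func getWithLeader(ctx context.Context, cli *clientv3.Client) (*clientv3.GetResponse, error) {
		// Attaches the "hasleader" metadata key that the server checks.
		return cli.Get(clientv3.WithRequireLeader(ctx), "sample_key")
	}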
diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go
deleted file mode 100644
index 717fbe4..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/doc.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package clientv3 implements the official Go etcd client for v3.
-//
-// Create client using `clientv3.New`:
-//
-//	// expect dial time-out on ipv4 blackhole
-//	_, err := clientv3.New(clientv3.Config{
-//		Endpoints:   []string{"http://254.0.0.1:12345"},
-//		DialTimeout: 2 * time.Second,
-//	})
-//
-//	// etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
-//	if err == context.DeadlineExceeded {
-//		// handle errors
-//	}
-//
-//	// etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
-//	if err == grpc.ErrClientConnTimeout {
-//		// handle errors
-//	}
-//
-//	cli, err := clientv3.New(clientv3.Config{
-//		Endpoints:   []string{"localhost:2379", "localhost:22379", "localhost:32379"},
-//		DialTimeout: 5 * time.Second,
-//	})
-//	if err != nil {
-//		// handle error!
-//	}
-//	defer cli.Close()
-//
-// Make sure to close the client after using it. If the client is not closed, the
-// client will leak goroutines.
-//
-// To specify a client request timeout, wrap the context with context.WithTimeout:
-//
-//	ctx, cancel := context.WithTimeout(context.Background(), timeout)
-//	resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-//	cancel()
-//	if err != nil {
-//	    // handle error!
-//	}
-//	// use the response
-//
-// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
-// Clients are safe for concurrent use by multiple goroutines.
-//
-// etcd client returns 3 types of errors:
-//
-//  1. context error: canceled or deadline exceeded.
-//  2. gRPC status error: e.g. when the server-side clock drifts ahead before the client's context deadline is exceeded.
-//  3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
-//
-// Here is the example code to handle client errors:
-//
-//	resp, err := kvc.Put(ctx, "", "")
-//	if err != nil {
-//		if err == context.Canceled {
-//			// ctx is canceled by another routine
-//		} else if err == context.DeadlineExceeded {
-//			// ctx is attached with a deadline and it exceeded
-//		} else if ev, ok := status.FromError(err); ok {
-//			code := ev.Code()
-//			if code == codes.DeadlineExceeded {
-//				// server-side context might have timed-out first (due to clock skew)
-//				// while original client-side context is not timed-out yet
-//			}
-//		} else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
-//			// process (verr.Errors)
-//		} else {
-//			// bad cluster endpoints, which are not etcd servers
-//		}
-//	}
-//
-//	go func() { cli.Close() }()
-//	_, err := kvc.Get(ctx, "a")
-//	if err != nil {
-//		if err == context.Canceled {
-//			// grpc balancer calls 'Get' with an inflight client.Close
-//		} else if err == grpc.ErrClientConnClosing {
-//			// grpc balancer calls 'Get' after client.Close.
-//		}
-//	}
-//
-package clientv3
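The error taxonomy above can be folded into one hypothetical helper; the function name and labels are illustrative, and "context" plus "google.golang.org/grpc/status" are assumed imports:

	func classify(err error) string {
		switch {
		case err == nil:
			return "ok"
		case err == context.Canceled:
			return "canceled by the caller"
		case err == context.DeadlineExceeded:
			return "client-side deadline exceeded"
		default:
			if ev, ok := status.FromError(err); ok {
				// gRPC status error, e.g. codes.DeadlineExceeded from the server
				return "grpc status: " + ev.Code().String()
			}
			return "other client or endpoint error"
		}
	}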
diff --git a/vendor/go.etcd.io/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go
deleted file mode 100644
index 5a7469b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/kv.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"google.golang.org/grpc"
-)
-
-type (
-	CompactResponse pb.CompactionResponse
-	PutResponse     pb.PutResponse
-	GetResponse     pb.RangeResponse
-	DeleteResponse  pb.DeleteRangeResponse
-	TxnResponse     pb.TxnResponse
-)
-
-type KV interface {
-	// Put puts a key-value pair into etcd.
-	// Note that key and value can be plain byte arrays; a string is
-	// an immutable representation of such a byte array.
-	// To get a string of bytes, do string([]byte{0x10, 0x20}).
-	Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
-
-	// Get retrieves keys.
-	// By default, Get will return the value for "key", if any.
-	// When passed WithRange(end), Get will return the keys in the range [key, end).
-	// When passed WithFromKey(), Get returns keys greater than or equal to key.
-	// When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
-	// if the required revision is compacted, the request will fail with ErrCompacted.
-	// When passed WithLimit(limit), the number of returned keys is bounded by limit.
-	// When passed WithSort(), the keys will be sorted.
-	Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
-
-	// Delete deletes a key or, when given WithRange(end), the range [key, end).
-	Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
-
-	// Compact compacts etcd KV history before the given rev.
-	Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
-
-	// Do applies a single Op on KV without a transaction.
-	// Do is useful when creating arbitrary operations to be issued at a
-	// later time; the user can range over the operations, calling Do to
-	// execute them. Get/Put/Delete, on the other hand, are best suited
-	// for when the operation should be issued at the time of declaration.
-	Do(ctx context.Context, op Op) (OpResponse, error)
-
-	// Txn creates a transaction.
-	Txn(ctx context.Context) Txn
-}
-
-type OpResponse struct {
-	put *PutResponse
-	get *GetResponse
-	del *DeleteResponse
-	txn *TxnResponse
-}
-
-func (op OpResponse) Put() *PutResponse    { return op.put }
-func (op OpResponse) Get() *GetResponse    { return op.get }
-func (op OpResponse) Del() *DeleteResponse { return op.del }
-func (op OpResponse) Txn() *TxnResponse    { return op.txn }
-
-func (resp *PutResponse) OpResponse() OpResponse {
-	return OpResponse{put: resp}
-}
-func (resp *GetResponse) OpResponse() OpResponse {
-	return OpResponse{get: resp}
-}
-func (resp *DeleteResponse) OpResponse() OpResponse {
-	return OpResponse{del: resp}
-}
-func (resp *TxnResponse) OpResponse() OpResponse {
-	return OpResponse{txn: resp}
-}
-
-type kv struct {
-	remote   pb.KVClient
-	callOpts []grpc.CallOption
-}
-
-func NewKV(c *Client) KV {
-	api := &kv{remote: RetryKVClient(c)}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
-	api := &kv{remote: remote}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
-	r, err := kv.Do(ctx, OpPut(key, val, opts...))
-	return r.put, toErr(ctx, err)
-}
-
-func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
-	r, err := kv.Do(ctx, OpGet(key, opts...))
-	return r.get, toErr(ctx, err)
-}
-
-func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
-	r, err := kv.Do(ctx, OpDelete(key, opts...))
-	return r.del, toErr(ctx, err)
-}
-
-func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
-	resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	return (*CompactResponse)(resp), err
-}
-
-func (kv *kv) Txn(ctx context.Context) Txn {
-	return &txn{
-		kv:       kv,
-		ctx:      ctx,
-		callOpts: kv.callOpts,
-	}
-}
-
-func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
-	var err error
-	switch op.t {
-	case tRange:
-		var resp *pb.RangeResponse
-		resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
-		if err == nil {
-			return OpResponse{get: (*GetResponse)(resp)}, nil
-		}
-	case tPut:
-		var resp *pb.PutResponse
-		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
-		resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
-		if err == nil {
-			return OpResponse{put: (*PutResponse)(resp)}, nil
-		}
-	case tDeleteRange:
-		var resp *pb.DeleteRangeResponse
-		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
-		resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
-		if err == nil {
-			return OpResponse{del: (*DeleteResponse)(resp)}, nil
-		}
-	case tTxn:
-		var resp *pb.TxnResponse
-		resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
-		if err == nil {
-			return OpResponse{txn: (*TxnResponse)(resp)}, nil
-		}
-	default:
-		panic("Unknown op")
-	}
-	return OpResponse{}, toErr(ctx, err)
-}
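As the Do comment above suggests, Ops can be declared first and executed later; a minimal sketch with an assumed client `cli` and illustrative keys:

	func runQueued(ctx context.Context, cli *clientv3.Client) error {
		ops := []clientv3.Op{
			clientv3.OpPut("k1", "v1"),
			clientv3.OpGet("k1"),
			clientv3.OpDelete("k1"),
		}
		// Range over the queued operations, issuing each through Do.
		for _, op := range ops {
			if _, err := cli.Do(ctx, op); err != nil {
				return err
			}
		}
		return nil
	}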
diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go
deleted file mode 100644
index 3729cf3..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/lease.go
+++ /dev/null
@@ -1,588 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"sync"
-	"time"
-
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/metadata"
-)
-
-type (
-	LeaseRevokeResponse pb.LeaseRevokeResponse
-	LeaseID             int64
-)
-
-// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
-type LeaseGrantResponse struct {
-	*pb.ResponseHeader
-	ID    LeaseID
-	TTL   int64
-	Error string
-}
-
-// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
-type LeaseKeepAliveResponse struct {
-	*pb.ResponseHeader
-	ID  LeaseID
-	TTL int64
-}
-
-// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
-type LeaseTimeToLiveResponse struct {
-	*pb.ResponseHeader
-	ID LeaseID `json:"id"`
-
-	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease returns -1.
-	TTL int64 `json:"ttl"`
-
-	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
-	GrantedTTL int64 `json:"granted-ttl"`
-
-	// Keys is the list of keys attached to this lease.
-	Keys [][]byte `json:"keys"`
-}
-
-// LeaseStatus represents a lease status.
-type LeaseStatus struct {
-	ID LeaseID `json:"id"`
-	// TODO: TTL int64
-}
-
-// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
-type LeaseLeasesResponse struct {
-	*pb.ResponseHeader
-	Leases []LeaseStatus `json:"leases"`
-}
-
-const (
-	// defaultTTL is the assumed lease TTL used for the first keepalive
-	// deadline before the actual TTL is known to the client.
-	defaultTTL = 5 * time.Second
-	// NoLease is a lease ID for the absence of a lease.
-	NoLease LeaseID = 0
-
-	// retryConnWait is how long to wait before retrying a request after an error
-	retryConnWait = 500 * time.Millisecond
-)
-
-// LeaseResponseChSize is the size of the buffer that stores unsent lease responses.
-// WARNING: DO NOT UPDATE.
-// Only for testing purposes.
-var LeaseResponseChSize = 16
-
-// ErrKeepAliveHalted is returned if the client keep alive loop halts with an unexpected error.
-//
-// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
-type ErrKeepAliveHalted struct {
-	Reason error
-}
-
-func (e ErrKeepAliveHalted) Error() string {
-	s := "etcdclient: leases keep alive halted"
-	if e.Reason != nil {
-		s += ": " + e.Reason.Error()
-	}
-	return s
-}
-
-type Lease interface {
-	// Grant creates a new lease.
-	Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
-
-	// Revoke revokes the given lease.
-	Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
-
-	// TimeToLive retrieves the lease information of the given lease ID.
-	TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
-
-	// Leases retrieves all leases.
-	Leases(ctx context.Context) (*LeaseLeasesResponse, error)
-
-	// KeepAlive keeps the given lease alive forever. If the keepalive response
-	// posted to the channel is not consumed immediately, the lease client will
-	// continue sending keep alive requests to the etcd server at least every
-	// second until the latest response is consumed.
-	//
-	// The returned "LeaseKeepAliveResponse" channel closes if underlying keep
-	// alive stream is interrupted in some way the client cannot handle itself,
-	// or when the given context "ctx" is canceled or timed out. A
-	// "LeaseKeepAliveResponse" read from this closed channel is nil.
-	//
-	// If the client keep alive loop halts with an unexpected error (e.g. "etcdserver:
-	// no leader") or is canceled by the caller (e.g. context.Canceled), the error
-	// is returned. Otherwise, it retries.
-	//
-	// TODO(v4.0): post errors to last keep alive message before closing
-	// (see https://github.com/coreos/etcd/pull/7866)
-	KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
-
-	// KeepAliveOnce renews the lease once. The response corresponds to the
-	// first message from calling KeepAlive. If the response has a recoverable
-	// error, KeepAliveOnce will retry the RPC with a new keep alive message.
-	//
-	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
-	KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
-
-	// Close releases all resources Lease keeps for efficient communication
-	// with the etcd server.
-	Close() error
-}
-
-type lessor struct {
-	mu sync.Mutex // guards all fields
-
-	// donec is closed and loopErr is set when recvKeepAliveLoop stops
-	donec   chan struct{}
-	loopErr error
-
-	remote pb.LeaseClient
-
-	stream       pb.Lease_LeaseKeepAliveClient
-	streamCancel context.CancelFunc
-
-	stopCtx    context.Context
-	stopCancel context.CancelFunc
-
-	keepAlives map[LeaseID]*keepAlive
-
-	// firstKeepAliveTimeout is the timeout for the first keepalive request
-	// before the actual TTL is known to the lease client
-	firstKeepAliveTimeout time.Duration
-
-	// firstKeepAliveOnce ensures the stream starts after the first KeepAlive call.
-	firstKeepAliveOnce sync.Once
-
-	callOpts []grpc.CallOption
-}
-
-// keepAlive multiplexes a keepalive for a lease over multiple channels
-type keepAlive struct {
-	chs  []chan<- *LeaseKeepAliveResponse
-	ctxs []context.Context
-	// deadline is the time the keep alive channels close if no response arrives
-	deadline time.Time
-	// nextKeepAlive is when to send the next keep alive message
-	nextKeepAlive time.Time
-	// donec is closed on lease revoke, expiration, or cancel.
-	donec chan struct{}
-}
-
-func NewLease(c *Client) Lease {
-	return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
-}
-
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
-	l := &lessor{
-		donec:                 make(chan struct{}),
-		keepAlives:            make(map[LeaseID]*keepAlive),
-		remote:                remote,
-		firstKeepAliveTimeout: keepAliveTimeout,
-	}
-	if l.firstKeepAliveTimeout == time.Second {
-		l.firstKeepAliveTimeout = defaultTTL
-	}
-	if c != nil {
-		l.callOpts = c.callOpts
-	}
-	reqLeaderCtx := WithRequireLeader(context.Background())
-	l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
-	return l
-}
-
-func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
-	r := &pb.LeaseGrantRequest{TTL: ttl}
-	resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
-	if err == nil {
-		gresp := &LeaseGrantResponse{
-			ResponseHeader: resp.GetHeader(),
-			ID:             LeaseID(resp.ID),
-			TTL:            resp.TTL,
-			Error:          resp.Error,
-		}
-		return gresp, nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
-	r := &pb.LeaseRevokeRequest{ID: int64(id)}
-	resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
-	if err == nil {
-		return (*LeaseRevokeResponse)(resp), nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
-	r := toLeaseTimeToLiveRequest(id, opts...)
-	resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
-	if err == nil {
-		gresp := &LeaseTimeToLiveResponse{
-			ResponseHeader: resp.GetHeader(),
-			ID:             LeaseID(resp.ID),
-			TTL:            resp.TTL,
-			GrantedTTL:     resp.GrantedTTL,
-			Keys:           resp.Keys,
-		}
-		return gresp, nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
-	resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
-	if err == nil {
-		leases := make([]LeaseStatus, len(resp.Leases))
-		for i := range resp.Leases {
-			leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
-		}
-		return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
-	ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
-
-	l.mu.Lock()
-	// ensure that recvKeepAliveLoop is still running
-	select {
-	case <-l.donec:
-		err := l.loopErr
-		l.mu.Unlock()
-		close(ch)
-		return ch, ErrKeepAliveHalted{Reason: err}
-	default:
-	}
-	ka, ok := l.keepAlives[id]
-	if !ok {
-		// create fresh keep alive
-		ka = &keepAlive{
-			chs:           []chan<- *LeaseKeepAliveResponse{ch},
-			ctxs:          []context.Context{ctx},
-			deadline:      time.Now().Add(l.firstKeepAliveTimeout),
-			nextKeepAlive: time.Now(),
-			donec:         make(chan struct{}),
-		}
-		l.keepAlives[id] = ka
-	} else {
-		// add channel and context to existing keep alive
-		ka.ctxs = append(ka.ctxs, ctx)
-		ka.chs = append(ka.chs, ch)
-	}
-	l.mu.Unlock()
-
-	go l.keepAliveCtxCloser(id, ctx, ka.donec)
-	l.firstKeepAliveOnce.Do(func() {
-		go l.recvKeepAliveLoop()
-		go l.deadlineLoop()
-	})
-
-	return ch, nil
-}
-
-func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
-	for {
-		resp, err := l.keepAliveOnce(ctx, id)
-		if err == nil {
-			if resp.TTL <= 0 {
-				err = rpctypes.ErrLeaseNotFound
-			}
-			return resp, err
-		}
-		if isHaltErr(ctx, err) {
-			return nil, toErr(ctx, err)
-		}
-	}
-}
-
-func (l *lessor) Close() error {
-	l.stopCancel()
-	// close for synchronous teardown if stream goroutines never launched
-	l.firstKeepAliveOnce.Do(func() { close(l.donec) })
-	<-l.donec
-	return nil
-}
-
-func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
-	select {
-	case <-donec:
-		return
-	case <-l.donec:
-		return
-	case <-ctx.Done():
-	}
-
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	ka, ok := l.keepAlives[id]
-	if !ok {
-		return
-	}
-
-	// close channel and remove context if still associated with keep alive
-	for i, c := range ka.ctxs {
-		if c == ctx {
-			close(ka.chs[i])
-			ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
-			ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
-			break
-		}
-	}
-	// remove the keepalive if there are no more listeners
-	if len(ka.chs) == 0 {
-		delete(l.keepAlives, id)
-	}
-}
-
-// closeRequireLeader scans keepAlives for contexts that require a leader
-// and closes the associated channels.
-func (l *lessor) closeRequireLeader() {
-	l.mu.Lock()
-	defer l.mu.Unlock()
-	for _, ka := range l.keepAlives {
-		reqIdxs := 0
-		// find all require-leader channels, close them, and mark them nil
-		for i, ctx := range ka.ctxs {
-			md, ok := metadata.FromOutgoingContext(ctx)
-			if !ok {
-				continue
-			}
-			ks := md[rpctypes.MetadataRequireLeaderKey]
-			if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
-				continue
-			}
-			close(ka.chs[i])
-			ka.chs[i] = nil
-			reqIdxs++
-		}
-		if reqIdxs == 0 {
-			continue
-		}
-		// remove all channels that required a leader from keepalive
-		newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
-		newCtxs := make([]context.Context, len(newChs))
-		newIdx := 0
-		for i := range ka.chs {
-			if ka.chs[i] == nil {
-				continue
-			}
-			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
-			newIdx++
-		}
-		ka.chs, ka.ctxs = newChs, newCtxs
-	}
-}
-
-func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
-	cctx, cancel := context.WithCancel(ctx)
-	defer cancel()
-
-	stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-
-	err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-
-	resp, rerr := stream.Recv()
-	if rerr != nil {
-		return nil, toErr(ctx, rerr)
-	}
-
-	karesp := &LeaseKeepAliveResponse{
-		ResponseHeader: resp.GetHeader(),
-		ID:             LeaseID(resp.ID),
-		TTL:            resp.TTL,
-	}
-	return karesp, nil
-}
-
-func (l *lessor) recvKeepAliveLoop() (gerr error) {
-	defer func() {
-		l.mu.Lock()
-		close(l.donec)
-		l.loopErr = gerr
-		for _, ka := range l.keepAlives {
-			ka.close()
-		}
-		l.keepAlives = make(map[LeaseID]*keepAlive)
-		l.mu.Unlock()
-	}()
-
-	for {
-		stream, err := l.resetRecv()
-		if err != nil {
-			if canceledByCaller(l.stopCtx, err) {
-				return err
-			}
-		} else {
-			for {
-				resp, err := stream.Recv()
-				if err != nil {
-					if canceledByCaller(l.stopCtx, err) {
-						return err
-					}
-
-					if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
-						l.closeRequireLeader()
-					}
-					break
-				}
-
-				l.recvKeepAlive(resp)
-			}
-		}
-
-		select {
-		case <-time.After(retryConnWait):
-			continue
-		case <-l.stopCtx.Done():
-			return l.stopCtx.Err()
-		}
-	}
-}
-
-// resetRecv opens a new lease stream and starts sending keep alive requests.
-func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
-	sctx, cancel := context.WithCancel(l.stopCtx)
-	stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
-	if err != nil {
-		cancel()
-		return nil, err
-	}
-
-	l.mu.Lock()
-	defer l.mu.Unlock()
-	if l.stream != nil && l.streamCancel != nil {
-		l.streamCancel()
-	}
-
-	l.streamCancel = cancel
-	l.stream = stream
-
-	go l.sendKeepAliveLoop(stream)
-	return stream, nil
-}
-
-// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
-func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
-	karesp := &LeaseKeepAliveResponse{
-		ResponseHeader: resp.GetHeader(),
-		ID:             LeaseID(resp.ID),
-		TTL:            resp.TTL,
-	}
-
-	l.mu.Lock()
-	defer l.mu.Unlock()
-
-	ka, ok := l.keepAlives[karesp.ID]
-	if !ok {
-		return
-	}
-
-	if karesp.TTL <= 0 {
-		// lease expired; close all keep alive channels
-		delete(l.keepAlives, karesp.ID)
-		ka.close()
-		return
-	}
-
-	// send update to all channels
-	nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
-	ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
-	for _, ch := range ka.chs {
-		select {
-		case ch <- karesp:
-		default:
-		}
-		// still advance in order to rate-limit keep-alive sends
-		ka.nextKeepAlive = nextKeepAlive
-	}
-}
-
-// deadlineLoop reaps any keep alive channels that have not received a response
-// within the lease TTL
-func (l *lessor) deadlineLoop() {
-	for {
-		select {
-		case <-time.After(time.Second):
-		case <-l.donec:
-			return
-		}
-		now := time.Now()
-		l.mu.Lock()
-		for id, ka := range l.keepAlives {
-			if ka.deadline.Before(now) {
-				// waited too long for response; lease may be expired
-				ka.close()
-				delete(l.keepAlives, id)
-			}
-		}
-		l.mu.Unlock()
-	}
-}
-
-// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
-	for {
-		var tosend []LeaseID
-
-		now := time.Now()
-		l.mu.Lock()
-		for id, ka := range l.keepAlives {
-			if ka.nextKeepAlive.Before(now) {
-				tosend = append(tosend, id)
-			}
-		}
-		l.mu.Unlock()
-
-		for _, id := range tosend {
-			r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
-			if err := stream.Send(r); err != nil {
-				// TODO do something with this error?
-				return
-			}
-		}
-
-		select {
-		case <-time.After(500 * time.Millisecond):
-		case <-stream.Context().Done():
-			return
-		case <-l.donec:
-			return
-		case <-l.stopCtx.Done():
-			return
-		}
-	}
-}
-
-func (ka *keepAlive) close() {
-	close(ka.donec)
-	for _, ch := range ka.chs {
-		close(ch)
-	}
-}
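A minimal keepalive sketch that drains the response channel, as the KeepAlive contract above requires; the client `cli`, key, value, and TTL are assumptions:

	func keepKeyAlive(ctx context.Context, cli *clientv3.Client) error {
		grant, err := cli.Grant(ctx, 10) // 10-second TTL
		if err != nil {
			return err
		}
		if _, err := cli.Put(ctx, "svc/instance", "addr", clientv3.WithLease(grant.ID)); err != nil {
			return err
		}
		ch, err := cli.KeepAlive(ctx, grant.ID)
		if err != nil {
			return err
		}
		// Consume responses promptly; the channel closes on ctx cancellation,
		// lease expiry, or client close.
		for range ch {
		}
		return nil
	}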
diff --git a/vendor/go.etcd.io/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go
deleted file mode 100644
index 3276372..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/logger.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"io/ioutil"
-	"sync"
-
-	"github.com/coreos/etcd/pkg/logutil"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-var (
-	lgMu sync.RWMutex
-	lg   logutil.Logger
-)
-
-type settableLogger struct {
-	l  grpclog.LoggerV2
-	mu sync.RWMutex
-}
-
-func init() {
-	// disable client side logs by default
-	lg = &settableLogger{}
-	SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
-}
-
-// SetLogger sets client-side Logger.
-func SetLogger(l grpclog.LoggerV2) {
-	lgMu.Lock()
-	lg = logutil.NewLogger(l)
-	// override grpclog so that any changes happen with locking
-	grpclog.SetLoggerV2(lg)
-	lgMu.Unlock()
-}
-
-// GetLogger returns the current logutil.Logger.
-func GetLogger() logutil.Logger {
-	lgMu.RLock()
-	l := lg
-	lgMu.RUnlock()
-	return l
-}
-
-// NewLogger wraps a grpclog.LoggerV2 in a new logutil.Logger.
-func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
-	return &settableLogger{l: gl}
-}
-
-func (s *settableLogger) get() grpclog.LoggerV2 {
-	s.mu.RLock()
-	l := s.l
-	s.mu.RUnlock()
-	return l
-}
-
-// implement the grpclog.LoggerV2 interface
-
-func (s *settableLogger) Info(args ...interface{})                 { s.get().Info(args...) }
-func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Infoln(args ...interface{})               { s.get().Infoln(args...) }
-func (s *settableLogger) Warning(args ...interface{})              { s.get().Warning(args...) }
-func (s *settableLogger) Warningf(format string, args ...interface{}) {
-	s.get().Warningf(format, args...)
-}
-func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
-func (s *settableLogger) Error(args ...interface{})     { s.get().Error(args...) }
-func (s *settableLogger) Errorf(format string, args ...interface{}) {
-	s.get().Errorf(format, args...)
-}
-func (s *settableLogger) Errorln(args ...interface{})               { s.get().Errorln(args...) }
-func (s *settableLogger) Fatal(args ...interface{})                 { s.get().Fatal(args...) }
-func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
-func (s *settableLogger) Fatalln(args ...interface{})               { s.get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{})                 { s.get().Info(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Println(args ...interface{})               { s.get().Infoln(args...) }
-func (s *settableLogger) V(l int) bool                              { return s.get().V(l) }
-func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
-	s.mu.RLock()
-	l := s.l
-	s.mu.RUnlock()
-	if l.V(lvl) {
-		return s
-	}
-	return logutil.NewDiscardLogger()
-}
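A sketch of re-enabling client-side logs, which the init function above discards by default; writing to os.Stderr is an assumption:

	func enableClientLogs() {
		clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
	}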
diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
deleted file mode 100644
index 5e87cf8..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/maintenance.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"google.golang.org/grpc"
-)
-
-type (
-	DefragmentResponse pb.DefragmentResponse
-	AlarmResponse      pb.AlarmResponse
-	AlarmMember        pb.AlarmMember
-	StatusResponse     pb.StatusResponse
-	HashKVResponse     pb.HashKVResponse
-	MoveLeaderResponse pb.MoveLeaderResponse
-)
-
-type Maintenance interface {
-	// AlarmList gets all active alarms.
-	AlarmList(ctx context.Context) (*AlarmResponse, error)
-
-	// AlarmDisarm disarms a given alarm.
-	AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
-
-	// Defragment releases wasted space from internal fragmentation on a given etcd member.
-	// Defragment is only needed after deleting a large number of keys, to reclaim
-	// the resources.
-	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
-	// at the same time.
-	// To defragment multiple members in the cluster, call Defragment multiple
-	// times with different endpoints.
-	Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
-
-	// Status gets the status of the endpoint.
-	Status(ctx context.Context, endpoint string) (*StatusResponse, error)
-
-	// HashKV returns a hash of the KV state at the time of the RPC.
-	// If revision is zero, the hash is computed on all keys. If the revision
-	// is non-zero, the hash is computed on all keys at or below the given revision.
-	HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
-
-	// Snapshot provides a reader for a point-in-time snapshot of etcd.
-	// If the context "ctx" is canceled or timed out, reading from returned
-	// "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
-	Snapshot(ctx context.Context) (io.ReadCloser, error)
-
-	// MoveLeader requests current leader to transfer its leadership to the transferee.
-	// Request must be made to the leader.
-	MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
-}
-
-type maintenance struct {
-	dial     func(endpoint string) (pb.MaintenanceClient, func(), error)
-	remote   pb.MaintenanceClient
-	callOpts []grpc.CallOption
-}
-
-func NewMaintenance(c *Client) Maintenance {
-	api := &maintenance{
-		dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
-			conn, err := c.Dial(endpoint)
-			if err != nil {
-				return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
-			}
-			cancel := func() { conn.Close() }
-			return RetryMaintenanceClient(c, conn), cancel, nil
-		},
-		remote: RetryMaintenanceClient(c, c.conn),
-	}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
-	api := &maintenance{
-		dial: func(string) (pb.MaintenanceClient, func(), error) {
-			return remote, func() {}, nil
-		},
-		remote: remote,
-	}
-	if c != nil {
-		api.callOpts = c.callOpts
-	}
-	return api
-}
-
-func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
-	req := &pb.AlarmRequest{
-		Action:   pb.AlarmRequest_GET,
-		MemberID: 0,                 // all
-		Alarm:    pb.AlarmType_NONE, // all
-	}
-	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
-	if err == nil {
-		return (*AlarmResponse)(resp), nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
-	req := &pb.AlarmRequest{
-		Action:   pb.AlarmRequest_DEACTIVATE,
-		MemberID: am.MemberID,
-		Alarm:    am.Alarm,
-	}
-
-	if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
-		ar, err := m.AlarmList(ctx)
-		if err != nil {
-			return nil, toErr(ctx, err)
-		}
-		ret := AlarmResponse{}
-		for _, am := range ar.Alarms {
-			dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
-			if derr != nil {
-				return nil, toErr(ctx, derr)
-			}
-			ret.Alarms = append(ret.Alarms, dresp.Alarms...)
-		}
-		return &ret, nil
-	}
-
-	resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
-	if err == nil {
-		return (*AlarmResponse)(resp), nil
-	}
-	return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
-	remote, cancel, err := m.dial(endpoint)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	defer cancel()
-	resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	return (*DefragmentResponse)(resp), nil
-}
-
-func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
-	remote, cancel, err := m.dial(endpoint)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	defer cancel()
-	resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	return (*StatusResponse)(resp), nil
-}
-
-func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
-	remote, cancel, err := m.dial(endpoint)
-	if err != nil {
-
-		return nil, toErr(ctx, err)
-	}
-	defer cancel()
-	resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-	return (*HashKVResponse)(resp), nil
-}
-
-func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
-	ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
-	if err != nil {
-		return nil, toErr(ctx, err)
-	}
-
-	plog.Info("opened snapshot stream; downloading")
-	pr, pw := io.Pipe()
-	go func() {
-		for {
-			resp, err := ss.Recv()
-			if err != nil {
-				switch err {
-				case io.EOF:
-					plog.Info("completed snapshot read; closing")
-				default:
-					plog.Warningf("failed to receive from snapshot stream; closing (%v)", err)
-				}
-				pw.CloseWithError(err)
-				return
-			}
-
-			// can "resp == nil && err == nil"
-			// before we receive snapshot SHA digest?
-			// No, server sends EOF with an empty response
-			// after it sends SHA digest at the end
-
-			if _, werr := pw.Write(resp.Blob); werr != nil {
-				pw.CloseWithError(werr)
-				return
-			}
-		}
-	}()
-	return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
-}
-
-type snapshotReadCloser struct {
-	ctx context.Context
-	io.ReadCloser
-}
-
-func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
-	n, err = rc.ReadCloser.Read(p)
-	return n, toErr(rc.ctx, err)
-}
-
-func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
-	resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
-	return (*MoveLeaderResponse)(resp), toErr(ctx, err)
-}
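Per the Defragment warning above, members should be defragmented one endpoint at a time; a minimal sketch with an assumed client and endpoint list:

	func defragAll(ctx context.Context, cli *clientv3.Client, endpoints []string) error {
		for _, ep := range endpoints {
			// Check the member before the expensive defragment call.
			if _, err := cli.Status(ctx, ep); err != nil {
				return err
			}
			if _, err := cli.Defragment(ctx, ep); err != nil {
				return err
			}
		}
		return nil
	}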
diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go
deleted file mode 100644
index 3dca41b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/op.go
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-type opType int
-
-const (
-	// A default Op has opType 0, which is invalid.
-	tRange opType = iota + 1
-	tPut
-	tDeleteRange
-	tTxn
-)
-
-var (
-	noPrefixEnd = []byte{0}
-)
-
-// Op represents an Operation that kv can execute.
-type Op struct {
-	t   opType
-	key []byte
-	end []byte
-
-	// for range
-	limit        int64
-	sort         *SortOption
-	serializable bool
-	keysOnly     bool
-	countOnly    bool
-	minModRev    int64
-	maxModRev    int64
-	minCreateRev int64
-	maxCreateRev int64
-
-	// for range, watch
-	rev int64
-
-	// for watch, put, delete
-	prevKV bool
-
-	// for watch
-	// fragmentation is disabled by default;
-	// if true, watch events are split when the total size exceeds the
-	// "--max-request-bytes" flag value plus a 512-byte overhead
-	fragment bool
-
-	// for put
-	ignoreValue bool
-	ignoreLease bool
-
-	// progressNotify is for progress updates.
-	progressNotify bool
-	// createdNotify is for created event
-	createdNotify bool
-	// filters for watchers
-	filterPut    bool
-	filterDelete bool
-
-	// for put
-	val     []byte
-	leaseID LeaseID
-
-	// txn
-	cmps    []Cmp
-	thenOps []Op
-	elseOps []Op
-}
-
-// accessors / mutators
-
-func (op Op) IsTxn() bool              { return op.t == tTxn }
-func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
-
-// KeyBytes returns the byte slice holding the Op's key.
-func (op Op) KeyBytes() []byte { return op.key }
-
-// WithKeyBytes sets the byte slice for the Op's key.
-func (op *Op) WithKeyBytes(key []byte) { op.key = key }
-
-// RangeBytes returns the byte slice holding the Op's range end, if any.
-func (op Op) RangeBytes() []byte { return op.end }
-
-// Rev returns the requested revision, if any.
-func (op Op) Rev() int64 { return op.rev }
-
-// IsPut returns true iff the operation is a Put.
-func (op Op) IsPut() bool { return op.t == tPut }
-
-// IsGet returns true iff the operation is a Get.
-func (op Op) IsGet() bool { return op.t == tRange }
-
-// IsDelete returns true iff the operation is a Delete.
-func (op Op) IsDelete() bool { return op.t == tDeleteRange }
-
-// IsSerializable returns true if the serializable field is true.
-func (op Op) IsSerializable() bool { return op.serializable }
-
-// IsKeysOnly returns whether keysOnly is set.
-func (op Op) IsKeysOnly() bool { return op.keysOnly }
-
-// IsCountOnly returns whether countOnly is set.
-func (op Op) IsCountOnly() bool { return op.countOnly }
-
-// MinModRev returns the operation's minimum modify revision.
-func (op Op) MinModRev() int64 { return op.minModRev }
-
-// MaxModRev returns the operation's maximum modify revision.
-func (op Op) MaxModRev() int64 { return op.maxModRev }
-
-// MinCreateRev returns the operation's minimum create revision.
-func (op Op) MinCreateRev() int64 { return op.minCreateRev }
-
-// MaxCreateRev returns the operation's maximum create revision.
-func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
-
-// WithRangeBytes sets the byte slice for the Op's range end.
-func (op *Op) WithRangeBytes(end []byte) { op.end = end }
-
-// ValueBytes returns the byte slice holding the Op's value, if any.
-func (op Op) ValueBytes() []byte { return op.val }
-
-// WithValueBytes sets the byte slice for the Op's value.
-func (op *Op) WithValueBytes(v []byte) { op.val = v }
-
-func (op Op) toRangeRequest() *pb.RangeRequest {
-	if op.t != tRange {
-		panic("op.t != tRange")
-	}
-	r := &pb.RangeRequest{
-		Key:               op.key,
-		RangeEnd:          op.end,
-		Limit:             op.limit,
-		Revision:          op.rev,
-		Serializable:      op.serializable,
-		KeysOnly:          op.keysOnly,
-		CountOnly:         op.countOnly,
-		MinModRevision:    op.minModRev,
-		MaxModRevision:    op.maxModRev,
-		MinCreateRevision: op.minCreateRev,
-		MaxCreateRevision: op.maxCreateRev,
-	}
-	if op.sort != nil {
-		r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
-		r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
-	}
-	return r
-}
-
-func (op Op) toTxnRequest() *pb.TxnRequest {
-	thenOps := make([]*pb.RequestOp, len(op.thenOps))
-	for i, tOp := range op.thenOps {
-		thenOps[i] = tOp.toRequestOp()
-	}
-	elseOps := make([]*pb.RequestOp, len(op.elseOps))
-	for i, eOp := range op.elseOps {
-		elseOps[i] = eOp.toRequestOp()
-	}
-	cmps := make([]*pb.Compare, len(op.cmps))
-	for i := range op.cmps {
-		cmps[i] = (*pb.Compare)(&op.cmps[i])
-	}
-	return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
-}
-
-func (op Op) toRequestOp() *pb.RequestOp {
-	switch op.t {
-	case tRange:
-		return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
-	case tPut:
-		r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
-		return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
-	case tDeleteRange:
-		r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
-		return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
-	case tTxn:
-		return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
-	default:
-		panic("Unknown Op")
-	}
-}
-
-func (op Op) isWrite() bool {
-	if op.t == tTxn {
-		for _, tOp := range op.thenOps {
-			if tOp.isWrite() {
-				return true
-			}
-		}
-		for _, tOp := range op.elseOps {
-			if tOp.isWrite() {
-				return true
-			}
-		}
-		return false
-	}
-	return op.t != tRange
-}
-
-func OpGet(key string, opts ...OpOption) Op {
-	ret := Op{t: tRange, key: []byte(key)}
-	ret.applyOpts(opts)
-	return ret
-}
-
-func OpDelete(key string, opts ...OpOption) Op {
-	ret := Op{t: tDeleteRange, key: []byte(key)}
-	ret.applyOpts(opts)
-	switch {
-	case ret.leaseID != 0:
-		panic("unexpected lease in delete")
-	case ret.limit != 0:
-		panic("unexpected limit in delete")
-	case ret.rev != 0:
-		panic("unexpected revision in delete")
-	case ret.sort != nil:
-		panic("unexpected sort in delete")
-	case ret.serializable:
-		panic("unexpected serializable in delete")
-	case ret.countOnly:
-		panic("unexpected countOnly in delete")
-	case ret.minModRev != 0, ret.maxModRev != 0:
-		panic("unexpected mod revision filter in delete")
-	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
-		panic("unexpected create revision filter in delete")
-	case ret.filterDelete, ret.filterPut:
-		panic("unexpected filter in delete")
-	case ret.createdNotify:
-		panic("unexpected createdNotify in delete")
-	}
-	return ret
-}
-
-func OpPut(key, val string, opts ...OpOption) Op {
-	ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
-	ret.applyOpts(opts)
-	switch {
-	case ret.end != nil:
-		panic("unexpected range in put")
-	case ret.limit != 0:
-		panic("unexpected limit in put")
-	case ret.rev != 0:
-		panic("unexpected revision in put")
-	case ret.sort != nil:
-		panic("unexpected sort in put")
-	case ret.serializable:
-		panic("unexpected serializable in put")
-	case ret.countOnly:
-		panic("unexpected countOnly in put")
-	case ret.minModRev != 0, ret.maxModRev != 0:
-		panic("unexpected mod revision filter in put")
-	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
-		panic("unexpected create revision filter in put")
-	case ret.filterDelete, ret.filterPut:
-		panic("unexpected filter in put")
-	case ret.createdNotify:
-		panic("unexpected createdNotify in put")
-	}
-	return ret
-}
-
-func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
-	return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
-}
-
-func opWatch(key string, opts ...OpOption) Op {
-	ret := Op{t: tRange, key: []byte(key)}
-	ret.applyOpts(opts)
-	switch {
-	case ret.leaseID != 0:
-		panic("unexpected lease in watch")
-	case ret.limit != 0:
-		panic("unexpected limit in watch")
-	case ret.sort != nil:
-		panic("unexpected sort in watch")
-	case ret.serializable:
-		panic("unexpected serializable in watch")
-	case ret.countOnly:
-		panic("unexpected countOnly in watch")
-	case ret.minModRev != 0, ret.maxModRev != 0:
-		panic("unexpected mod revision filter in watch")
-	case ret.minCreateRev != 0, ret.maxCreateRev != 0:
-		panic("unexpected create revision filter in watch")
-	}
-	return ret
-}
-
-func (op *Op) applyOpts(opts []OpOption) {
-	for _, opt := range opts {
-		opt(op)
-	}
-}
-
-// OpOption configures Operations like Get, Put, Delete.
-type OpOption func(*Op)
-
-// WithLease attaches a lease ID to a key in a 'Put' request.
-func WithLease(leaseID LeaseID) OpOption {
-	return func(op *Op) { op.leaseID = leaseID }
-}
-
-// WithLimit limits the number of results to return from a 'Get' request.
-// If WithLimit is given a 0 limit, it is treated as no limit.
-func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
-
-// WithRev specifies the store revision for a 'Get' request,
-// or the start revision of a 'Watch' request.
-func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
-
-// WithSort specifies the ordering in 'Get' request. It requires
-// 'WithRange' and/or 'WithPrefix' to be specified too.
-// 'target' specifies the target to sort by: key, version, revisions, value.
-// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
-func WithSort(target SortTarget, order SortOrder) OpOption {
-	return func(op *Op) {
-		if target == SortByKey && order == SortAscend {
-			// If order != SortNone, server fetches the entire key-space,
-			// and then applies the sort and limit, if provided.
-			// Since by default the server returns results sorted by keys
-			// in lexicographically ascending order, the client should ignore
-			// SortOrder if the target is SortByKey.
-			order = SortNone
-		}
-		op.sort = &SortOption{target, order}
-	}
-}
-
-// GetPrefixRangeEnd gets the range end of the prefix.
-// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
-func GetPrefixRangeEnd(prefix string) string {
-	return string(getPrefix([]byte(prefix)))
-}
-
-func getPrefix(key []byte) []byte {
-	end := make([]byte, len(key))
-	copy(end, key)
-	for i := len(end) - 1; i >= 0; i-- {
-		if end[i] < 0xff {
-			end[i] = end[i] + 1
-			end = end[:i+1]
-			return end
-		}
-	}
-	// next prefix does not exist (e.g., 0xffff);
-	// default to WithFromKey policy
-	return noPrefixEnd
-}
-
-// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
-// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
-// can return 'foo1', 'foo2', and so on.
-func WithPrefix() OpOption {
-	return func(op *Op) {
-		if len(op.key) == 0 {
-			op.key, op.end = []byte{0}, []byte{0}
-			return
-		}
-		op.end = getPrefix(op.key)
-	}
-}
-
-// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
-// For example, 'Get' requests with 'WithRange(end)' returns
-// the keys in the range [key, end).
-// endKey must be lexicographically greater than the start key.
-func WithRange(endKey string) OpOption {
-	return func(op *Op) { op.end = []byte(endKey) }
-}
-
-// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
-// to be equal to or greater than the key in the argument.
-func WithFromKey() OpOption { return WithRange("\x00") }
-
-// WithSerializable makes the 'Get' request serializable. By default,
-// it's linearizable. Serializable requests are better when lower latency
-// is required.
-func WithSerializable() OpOption {
-	return func(op *Op) { op.serializable = true }
-}
-
-// WithKeysOnly makes the 'Get' request return only the keys; the corresponding
-// values are omitted.
-func WithKeysOnly() OpOption {
-	return func(op *Op) { op.keysOnly = true }
-}
-
-// WithCountOnly makes the 'Get' request return only the count of keys.
-func WithCountOnly() OpOption {
-	return func(op *Op) { op.countOnly = true }
-}
-
-// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
-func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
-
-// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
-func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
-
-// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
-func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
-
-// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
-func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
-
-// WithFirstCreate gets the key with the oldest creation revision in the request range.
-func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
-
-// WithLastCreate gets the key with the latest creation revision in the request range.
-func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
-
-// WithFirstKey gets the lexically first key in the request range.
-func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
-
-// WithLastKey gets the lexically last key in the request range.
-func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
-
-// WithFirstRev gets the key with the oldest modification revision in the request range.
-func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
-
-// WithLastRev gets the key with the latest modification revision in the request range.
-func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
-
-// withTop gets the first key over the request's prefix given a sort order
-func withTop(target SortTarget, order SortOrder) []OpOption {
-	return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
-}
-
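-// Usage sketch (illustrative only; "cli" is an assumed *Client value): the
-// WithFirst*/WithLast* helpers return []OpOption, so they are expanded with
-// "..." at the call site.
-//
-//	resp, err := cli.Get(ctx, "foo", WithLastCreate()...)
-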
-// WithProgressNotify makes the watch server send periodic progress updates
-// every 10 minutes when there are no incoming events.
-// Progress updates have zero events in WatchResponse.
-func WithProgressNotify() OpOption {
-	return func(op *Op) {
-		op.progressNotify = true
-	}
-}
-
-// WithCreatedNotify makes the watch server send the created event.
-func WithCreatedNotify() OpOption {
-	return func(op *Op) {
-		op.createdNotify = true
-	}
-}
-
-// WithFilterPut discards PUT events from the watcher.
-func WithFilterPut() OpOption {
-	return func(op *Op) { op.filterPut = true }
-}
-
-// WithFilterDelete discards DELETE events from the watcher.
-func WithFilterDelete() OpOption {
-	return func(op *Op) { op.filterDelete = true }
-}
-
-// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
-// nothing will be returned.
-func WithPrevKV() OpOption {
-	return func(op *Op) {
-		op.prevKV = true
-	}
-}
-
-// WithIgnoreValue updates the key using its current value.
-// This option cannot be combined with non-empty values.
-// Returns an error if the key does not exist.
-func WithIgnoreValue() OpOption {
-	return func(op *Op) {
-		op.ignoreValue = true
-	}
-}
-
-// WithIgnoreLease updates the key using its current lease.
-// This option cannot be combined with WithLease.
-// Returns an error if the key does not exist.
-func WithIgnoreLease() OpOption {
-	return func(op *Op) {
-		op.ignoreLease = true
-	}
-}
-
-// LeaseOp represents an operation that a lease can execute.
-type LeaseOp struct {
-	id LeaseID
-
-	// for TimeToLive
-	attachedKeys bool
-}
-
-// LeaseOption configures lease operations.
-type LeaseOption func(*LeaseOp)
-
-func (op *LeaseOp) applyOpts(opts []LeaseOption) {
-	for _, opt := range opts {
-		opt(op)
-	}
-}
-
-// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
-func WithAttachedKeys() LeaseOption {
-	return func(op *LeaseOp) { op.attachedKeys = true }
-}
-
-func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
-	ret := &LeaseOp{id: id}
-	ret.applyOpts(opts)
-	return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
-}
-
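-// Usage sketch (illustrative only; "cli" and "leaseID" are assumed values):
-// querying a lease's remaining TTL together with its attached keys.
-//
-//	ttlResp, err := cli.TimeToLive(ctx, leaseID, WithAttachedKeys())
-//	if err == nil {
-//		fmt.Printf("TTL=%ds, keys=%d\n", ttlResp.TTL, len(ttlResp.Keys))
-//	}
-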
-// WithFragment enables receiving raw (fragmented) watch responses.
-// Fragmentation is disabled by default. If fragmentation is enabled,
-// the etcd watch server will split a watch response before sending it to
-// clients when the total size of watch events exceeds the server-side
-// request limit. The default server-side request limit is 1.5 MiB, computed
-// as the "--max-request-bytes" flag value plus a 512-byte gRPC overhead.
-// See "etcdserver/api/v3rpc/watch.go" for more details.
-func WithFragment() OpOption {
-	return func(op *Op) { op.fragment = true }
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go
deleted file mode 100644
index 700714c..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/options.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"math"
-	"time"
-
-	"google.golang.org/grpc"
-)
-
-var (
-	// client-side handling of retries for request failures where data was not written to the wire or
-	// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
-	// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
-	// transient failures.
-	defaultFailFast = grpc.FailFast(false)
-
-	// client-side request send limit, gRPC default is math.MaxInt32
-	// Make sure that "client-side send limit < server-side default send/recv limit"
-	// Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
-	defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
-
-	// client-side response receive limit, gRPC default is 4MB
-	// Make sure that "client-side receive limit >= server-side default send/recv limit"
-	// because range response can easily exceed request send limits
-	// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
-	defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
-
-	// client-side non-streaming retry limit, only applied to requests where server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
-	// If set to 0, retry is disabled.
-	defaultUnaryMaxRetries uint = 100
-
-	// client-side streaming retry limit, only applied to requests where server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
-	// If set to 0, retry is disabled.
-	defaultStreamMaxRetries = ^uint(0) // max uint
-
-	// client-side retry backoff wait between requests.
-	defaultBackoffWaitBetween = 25 * time.Millisecond
-
-	// client-side retry backoff default jitter fraction.
-	defaultBackoffJitterFraction = 0.10
-)
-
-// defaultCallOpts defines a list of default "gRPC.CallOption".
-// Some options are exposed to "clientv3.Config".
-// Defaults will be overridden by the settings in "clientv3.Config".
-var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
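-
-// Illustrative sketch (not part of the original file): the message-size
-// defaults above can be overridden per client through clientv3.Config;
-// the field names below are assumed from this package's Config struct.
-//
-//	cfg := Config{
-//		Endpoints:          []string{"localhost:2379"},
-//		MaxCallSendMsgSize: 4 * 1024 * 1024,
-//		MaxCallRecvMsgSize: math.MaxInt32,
-//	}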
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go
deleted file mode 100644
index 6baa52e..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/retry.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-type retryPolicy uint8
-
-const (
-	repeatable retryPolicy = iota
-	nonRepeatable
-)
-
-func (rp retryPolicy) String() string {
-	switch rp {
-	case repeatable:
-		return "repeatable"
-	case nonRepeatable:
-		return "nonRepeatable"
-	default:
-		return "UNKNOWN"
-	}
-}
-
-// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
-//
-// immutable requests (e.g. Get) should be retried unless it's
-// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
-//
-// Returning "false" means retry should stop, since client cannot
-// handle itself even with retries.
-func isSafeRetryImmutableRPC(err error) bool {
-	eErr := rpctypes.Error(err)
-	if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
-		// interrupted by non-transient server-side or gRPC-side error
-		// client cannot handle itself (e.g. rpctypes.ErrCompacted)
-		return false
-	}
-	// only retry if unavailable
-	ev, ok := status.FromError(err)
-	if !ok {
-		// all errors from RPCs are typed "grpc/status.(*statusError)"
-		// (ref. https://github.com/grpc/grpc-go/pull/1782)
-		//
-		// if the error type is not "grpc/status.(*statusError)",
-		// it could be from "Dial"
-		// TODO: do not retry for now
-		// ref. https://github.com/grpc/grpc-go/issues/1581
-		return false
-	}
-	return ev.Code() == codes.Unavailable
-}
-
-// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
-//
-// mutable requests (e.g. Put, Delete, Txn) should only be retried
-// when the status code is codes.Unavailable and the initial connection
-// has not yet been established (no endpoint is up).
-//
-// Returning "false" means retry should stop, otherwise it violates
-// write-at-most-once semantics.
-func isSafeRetryMutableRPC(err error) bool {
-	if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
-		// not safe for mutable RPCs
-		// e.g. interrupted by non-transient error that client cannot handle itself,
-		// or transient error while the connection has already been established
-		return false
-	}
-	desc := rpctypes.ErrorDesc(err)
-	return desc == "there is no address available" || desc == "there is no connection available"
-}
-
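-// Illustrative summary (not part of the original file): read-only RPCs such
-// as Range are wrapped with the "repeatable" policy and retried whenever the
-// error is codes.Unavailable, while mutable RPCs (Put, DeleteRange, Txn) stay
-// "nonRepeatable" and are retried only before a connection is established.
-// "lg" is an assumed *zap.Logger:
-//
-//	isSafeRetry(lg, err, &options{retryPolicy: repeatable})    // Get/Range path
-//	isSafeRetry(lg, err, &options{retryPolicy: nonRepeatable}) // Put/Txn path
-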
-type retryKVClient struct {
-	kc pb.KVClient
-}
-
-// RetryKVClient implements a KVClient.
-func RetryKVClient(c *Client) pb.KVClient {
-	return &retryKVClient{
-		kc: pb.NewKVClient(c.conn),
-	}
-}
-
-func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
-	return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
-	return rkv.kc.Put(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
-	return rkv.kc.DeleteRange(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
-	return rkv.kc.Txn(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
-	return rkv.kc.Compact(ctx, in, opts...)
-}
-
-type retryLeaseClient struct {
-	lc pb.LeaseClient
-}
-
-// RetryLeaseClient implements a LeaseClient.
-func RetryLeaseClient(c *Client) pb.LeaseClient {
-	return &retryLeaseClient{
-		lc: pb.NewLeaseClient(c.conn),
-	}
-}
-
-func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
-	return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
-	return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
-	return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
-	return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
-	return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
-}
-
-type retryClusterClient struct {
-	cc pb.ClusterClient
-}
-
-// RetryClusterClient implements a ClusterClient.
-func RetryClusterClient(c *Client) pb.ClusterClient {
-	return &retryClusterClient{
-		cc: pb.NewClusterClient(c.conn),
-	}
-}
-
-func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
-	return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
-	return rcc.cc.MemberAdd(ctx, in, opts...)
-}
-
-func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
-	return rcc.cc.MemberRemove(ctx, in, opts...)
-}
-
-func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
-	return rcc.cc.MemberUpdate(ctx, in, opts...)
-}
-
-type retryMaintenanceClient struct {
-	mc pb.MaintenanceClient
-}
-
-// RetryMaintenanceClient implements a MaintenanceClient.
-func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
-	return &retryMaintenanceClient{
-		mc: pb.NewMaintenanceClient(conn),
-	}
-}
-
-func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
-	return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
-	return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
-	return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
-	return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
-	return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
-	return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
-	return rmc.mc.Defragment(ctx, in, opts...)
-}
-
-type retryAuthClient struct {
-	ac pb.AuthClient
-}
-
-// RetryAuthClient implements an AuthClient.
-func RetryAuthClient(c *Client) pb.AuthClient {
-	return &retryAuthClient{
-		ac: pb.NewAuthClient(c.conn),
-	}
-}
-
-func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
-	return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
-	return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
-	return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
-	return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
-	return rac.ac.AuthEnable(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
-	return rac.ac.AuthDisable(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
-	return rac.ac.UserAdd(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
-	return rac.ac.UserDelete(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
-	return rac.ac.UserChangePassword(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
-	return rac.ac.UserGrantRole(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
-	return rac.ac.UserRevokeRole(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
-	return rac.ac.RoleAdd(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
-	return rac.ac.RoleDelete(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
-	return rac.ac.RoleGrantPermission(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
-	return rac.ac.RoleRevokePermission(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
-	return rac.ac.Authenticate(ctx, in, opts...)
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
deleted file mode 100644
index f3c5057..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
-// fine grained error checking required by write-at-most-once retry semantics of etcd.
-
-package clientv3
-
-import (
-	"context"
-	"io"
-	"sync"
-	"time"
-
-	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	"go.uber.org/zap"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/status"
-)
-
-// unaryClientInterceptor returns a new retrying unary client interceptor.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
-func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
-	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
-	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
-		ctx = withVersion(ctx)
-		grpcOpts, retryOpts := filterCallOptions(opts)
-		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
-		// short circuit for simplicity, and avoiding allocations.
-		if callOpts.max == 0 {
-			return invoker(ctx, method, req, reply, cc, grpcOpts...)
-		}
-		var lastErr error
-		for attempt := uint(0); attempt < callOpts.max; attempt++ {
-			if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
-				return err
-			}
-			logger.Debug(
-				"retrying of unary invoker",
-				zap.String("target", cc.Target()),
-				zap.Uint("attempt", attempt),
-			)
-			lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
-			if lastErr == nil {
-				return nil
-			}
-			logger.Warn(
-				"retrying of unary invoker failed",
-				zap.String("target", cc.Target()),
-				zap.Uint("attempt", attempt),
-				zap.Error(lastErr),
-			)
-			if isContextError(lastErr) {
-				if ctx.Err() != nil {
-					// it's the context deadline or cancellation.
-					return lastErr
-				}
-				// it's the callCtx deadline or cancellation, in which case try again.
-				continue
-			}
-			if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
-				gterr := c.getToken(ctx)
-				if gterr != nil {
-					logger.Warn(
-						"retrying of unary invoker failed to fetch new auth token",
-						zap.String("target", cc.Target()),
-						zap.Error(gterr),
-					)
-					return gterr // lastErr must be invalid auth token
-				}
-				continue
-			}
-			if !isSafeRetry(c.lg, lastErr, callOpts) {
-				return lastErr
-			}
-		}
-		return lastErr
-	}
-}
-
-// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
-//
-// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
-// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
-// BidiStreams), the retry interceptor will fail the call.
-func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
-	intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
-	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
-		ctx = withVersion(ctx)
-		grpcOpts, retryOpts := filterCallOptions(opts)
-		callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
-		// short circuit for simplicity, and avoiding allocations.
-		if callOpts.max == 0 {
-			return streamer(ctx, desc, cc, method, grpcOpts...)
-		}
-		if desc.ClientStreams {
-			return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
-		}
-		newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
-		if err != nil {
-			logger.Error("streamer failed to create ClientStream", zap.Error(err))
-			return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
-		}
-		retryingStreamer := &serverStreamingRetryingStream{
-			client:       c,
-			ClientStream: newStreamer,
-			callOpts:     callOpts,
-			ctx:          ctx,
-			streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
-				return streamer(ctx, desc, cc, method, grpcOpts...)
-			},
-		}
-		return retryingStreamer, nil
-	}
-}
-
-// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
-// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
-// a new ClientStream according to the retry policy.
-type serverStreamingRetryingStream struct {
-	grpc.ClientStream
-	client        *Client
-	bufferedSends []interface{} // single message that the client can send
-	receivedGood  bool          // indicates whether any prior receives were successful
-	wasClosedSend bool          // indicates that CloseSend was called
-	ctx           context.Context
-	callOpts      *options
-	streamerCall  func(ctx context.Context) (grpc.ClientStream, error)
-	mu            sync.RWMutex
-}
-
-func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
-	s.mu.Lock()
-	s.ClientStream = clientStream
-	s.mu.Unlock()
-}
-
-func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
-	s.mu.RLock()
-	defer s.mu.RUnlock()
-	return s.ClientStream
-}
-
-func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
-	s.mu.Lock()
-	s.bufferedSends = append(s.bufferedSends, m)
-	s.mu.Unlock()
-	return s.getStream().SendMsg(m)
-}
-
-func (s *serverStreamingRetryingStream) CloseSend() error {
-	s.mu.Lock()
-	s.wasClosedSend = true
-	s.mu.Unlock()
-	return s.getStream().CloseSend()
-}
-
-func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
-	return s.getStream().Header()
-}
-
-func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
-	return s.getStream().Trailer()
-}
-
-func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
-	attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
-	if !attemptRetry {
-		return lastErr // success or hard failure
-	}
-
-	// We start off from attempt 1, because zeroth was already made on normal SendMsg().
-	for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
-		if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
-			return err
-		}
-		newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
-		if err != nil {
-			s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
-			return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
-		}
-		s.setStream(newStream)
-
-		s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
-		attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
-		if !attemptRetry {
-			return lastErr
-		}
-	}
-	return lastErr
-}
-
-func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
-	s.mu.RLock()
-	wasGood := s.receivedGood
-	s.mu.RUnlock()
-	err := s.getStream().RecvMsg(m)
-	if err == nil || err == io.EOF {
-		s.mu.Lock()
-		s.receivedGood = true
-		s.mu.Unlock()
-		return false, err
-	} else if wasGood {
-		// previous RecvMsg in the stream succeeded, no retry logic should interfere
-		return false, err
-	}
-	if isContextError(err) {
-		if s.ctx.Err() != nil {
-			return false, err
-		}
-		// it's the callCtx deadline or cancellation, in which case try again.
-		return true, err
-	}
-	if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
-		gterr := s.client.getToken(s.ctx)
-		if gterr != nil {
-			s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
-			return false, err // return the original error for simplicity
-		}
-		return true, err
-	}
-	return isSafeRetry(s.client.lg, err, s.callOpts), err
-}
-
-func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
-	s.mu.RLock()
-	bufferedSends := s.bufferedSends
-	s.mu.RUnlock()
-	newStream, err := s.streamerCall(callCtx)
-	if err != nil {
-		return nil, err
-	}
-	for _, msg := range bufferedSends {
-		if err := newStream.SendMsg(msg); err != nil {
-			return nil, err
-		}
-	}
-	if err := newStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return newStream, nil
-}
-
-func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
-	waitTime := time.Duration(0)
-	if attempt > 0 {
-		waitTime = callOpts.backoffFunc(attempt)
-	}
-	if waitTime > 0 {
-		timer := time.NewTimer(waitTime)
-		select {
-		case <-ctx.Done():
-			timer.Stop()
-			return contextErrToGrpcErr(ctx.Err())
-		case <-timer.C:
-		}
-	}
-	return nil
-}
-
-// isSafeRetry returns "true" if the request is safe to retry with the given error.
-func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
-	if isContextError(err) {
-		return false
-	}
-	switch callOpts.retryPolicy {
-	case repeatable:
-		return isSafeRetryImmutableRPC(err)
-	case nonRepeatable:
-		return isSafeRetryMutableRPC(err)
-	default:
-		lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
-		return false
-	}
-}
-
-func isContextError(err error) bool {
-	return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
-}
-
-func contextErrToGrpcErr(err error) error {
-	switch err {
-	case context.DeadlineExceeded:
-		return status.Errorf(codes.DeadlineExceeded, err.Error())
-	case context.Canceled:
-		return status.Errorf(codes.Canceled, err.Error())
-	default:
-		return status.Errorf(codes.Unknown, err.Error())
-	}
-}
-
-var (
-	defaultOptions = &options{
-		retryPolicy: nonRepeatable,
-		max:         0, // disable
-		backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
-		retryAuth:   true,
-	}
-)
-
-// backoffFunc denotes a family of functions that control the backoff duration between call retries.
-//
-// They are called with an identifier of the attempt, and should return the time the client should
-// hold off for. If the returned time is longer than the `context.Context.Deadline` of the request,
-// the deadline of the request takes precedence and the wait will be interrupted before proceeding
-// with the next iteration.
-type backoffFunc func(attempt uint) time.Duration
-
-// withRetryPolicy sets the retry policy of this call.
-func withRetryPolicy(rp retryPolicy) retryOption {
-	return retryOption{applyFunc: func(o *options) {
-		o.retryPolicy = rp
-	}}
-}
-
-// withMax sets the maximum number of retries on this call, or this interceptor.
-func withMax(maxRetries uint) retryOption {
-	return retryOption{applyFunc: func(o *options) {
-		o.max = maxRetries
-	}}
-}
-
-// withBackoff sets the `backoffFunc` used to control the time between retries.
-func withBackoff(bf backoffFunc) retryOption {
-	return retryOption{applyFunc: func(o *options) {
-		o.backoffFunc = bf
-	}}
-}
-
-type options struct {
-	retryPolicy retryPolicy
-	max         uint
-	backoffFunc backoffFunc
-	retryAuth   bool
-}
-
-// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
-type retryOption struct {
-	grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
-	applyFunc            func(opt *options)
-}
-
-func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
-	if len(retryOptions) == 0 {
-		return opt
-	}
-	optCopy := &options{}
-	*optCopy = *opt
-	for _, f := range retryOptions {
-		f.applyFunc(optCopy)
-	}
-	return optCopy
-}
-
-func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
-	for _, opt := range callOptions {
-		if co, ok := opt.(retryOption); ok {
-			retryOptions = append(retryOptions, co)
-		} else {
-			grpcOptions = append(grpcOptions, opt)
-		}
-	}
-	return grpcOptions, retryOptions
-}
-
-// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
-//
-// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
-func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
-	return func(attempt uint) time.Duration {
-		return jitterUp(waitBetween, jitterFraction)
-	}
-}
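-
-// Illustrative sketch (not part of the original file): how the retry options
-// compose, with assumed values; reuseOrNewWithCallOptions copies the defaults
-// before applying per-call overrides.
-//
-//	opt := reuseOrNewWithCallOptions(defaultOptions, []retryOption{
-//		withMax(5),
-//		withRetryPolicy(repeatable),
-//		withBackoff(backoffLinearWithJitter(25*time.Millisecond, 0.10)),
-//	})
-//	wait := opt.backoffFunc(3) // ~25ms +/- 10%, independent of the attempt number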
diff --git a/vendor/go.etcd.io/etcd/clientv3/sort.go b/vendor/go.etcd.io/etcd/clientv3/sort.go
deleted file mode 100644
index 2bb9d9a..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/sort.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-type SortTarget int
-type SortOrder int
-
-const (
-	SortNone SortOrder = iota
-	SortAscend
-	SortDescend
-)
-
-const (
-	SortByKey SortTarget = iota
-	SortByVersion
-	SortByCreateRevision
-	SortByModRevision
-	SortByValue
-)
-
-type SortOption struct {
-	Target SortTarget
-	Order  SortOrder
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go
deleted file mode 100644
index c3c2d24..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/txn.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"sync"
-
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-	"google.golang.org/grpc"
-)
-
-// Txn is the interface that wraps mini-transactions.
-//
-//	 Txn(context.TODO()).If(
-//	  Compare(Value(k1), ">", v1),
-//	  Compare(Version(k1), "=", 2)
-//	 ).Then(
-//	  OpPut(k2,v2), OpPut(k3,v3)
-//	 ).Else(
-//	  OpPut(k4,v4), OpPut(k5,v5)
-//	 ).Commit()
-//
-type Txn interface {
-	// If takes a list of comparisons. If all comparisons passed in succeed,
-	// the operations passed into Then() will be executed; otherwise the
-	// operations passed into Else() will be executed.
-	If(cs ...Cmp) Txn
-
-	// Then takes a list of operations. The Ops list will be executed, if the
-	// comparisons passed in If() succeed.
-	Then(ops ...Op) Txn
-
-	// Else takes a list of operations. The Ops list will be executed, if the
-	// comparisons passed in If() fail.
-	Else(ops ...Op) Txn
-
-	// Commit tries to commit the transaction.
-	Commit() (*TxnResponse, error)
-}
-
-type txn struct {
-	kv  *kv
-	ctx context.Context
-
-	mu    sync.Mutex
-	cif   bool
-	cthen bool
-	celse bool
-
-	isWrite bool
-
-	cmps []*pb.Compare
-
-	sus []*pb.RequestOp
-	fas []*pb.RequestOp
-
-	callOpts []grpc.CallOption
-}
-
-func (txn *txn) If(cs ...Cmp) Txn {
-	txn.mu.Lock()
-	defer txn.mu.Unlock()
-
-	if txn.cif {
-		panic("cannot call If twice!")
-	}
-
-	if txn.cthen {
-		panic("cannot call If after Then!")
-	}
-
-	if txn.celse {
-		panic("cannot call If after Else!")
-	}
-
-	txn.cif = true
-
-	for i := range cs {
-		txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
-	}
-
-	return txn
-}
-
-func (txn *txn) Then(ops ...Op) Txn {
-	txn.mu.Lock()
-	defer txn.mu.Unlock()
-
-	if txn.cthen {
-		panic("cannot call Then twice!")
-	}
-	if txn.celse {
-		panic("cannot call Then after Else!")
-	}
-
-	txn.cthen = true
-
-	for _, op := range ops {
-		txn.isWrite = txn.isWrite || op.isWrite()
-		txn.sus = append(txn.sus, op.toRequestOp())
-	}
-
-	return txn
-}
-
-func (txn *txn) Else(ops ...Op) Txn {
-	txn.mu.Lock()
-	defer txn.mu.Unlock()
-
-	if txn.celse {
-		panic("cannot call Else twice!")
-	}
-
-	txn.celse = true
-
-	for _, op := range ops {
-		txn.isWrite = txn.isWrite || op.isWrite()
-		txn.fas = append(txn.fas, op.toRequestOp())
-	}
-
-	return txn
-}
-
-func (txn *txn) Commit() (*TxnResponse, error) {
-	txn.mu.Lock()
-	defer txn.mu.Unlock()
-
-	r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-
-	var resp *pb.TxnResponse
-	var err error
-	resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
-	if err != nil {
-		return nil, toErr(txn.ctx, err)
-	}
-	return (*TxnResponse)(resp), nil
-}
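-
-// Usage sketch (illustrative only; "cli" is an assumed *Client value):
-// checking which branch of the transaction ran after Commit.
-//
-//	resp, err := cli.Txn(ctx).
-//		If(Compare(Version("k1"), "=", 2)).
-//		Then(OpPut("k2", "v2")).
-//		Else(OpPut("k4", "v4")).
-//		Commit()
-//	if err == nil && resp.Succeeded {
-//		// the If() comparisons held, so the Then() ops were applied
-//	}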
diff --git a/vendor/go.etcd.io/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go
deleted file mode 100644
index b998c41..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/utils.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"math/rand"
-	"reflect"
-	"runtime"
-	"strings"
-	"time"
-)
-
-// jitterUp adds random jitter to the duration.
-//
-// This adds or subtracts time from the duration within a given jitter fraction.
-// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
-//
-// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
-func jitterUp(duration time.Duration, jitter float64) time.Duration {
-	multiplier := jitter * (rand.Float64()*2 - 1)
-	return time.Duration(float64(duration) * (1 + multiplier))
-}
-
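-// Worked example (illustrative only): with duration=10s and jitter=0.1 the
-// multiplier ranges over (-0.1, +0.1), so the result lies in (9s, 11s).
-//
-//	d := jitterUp(10*time.Second, 0.1) // e.g. ~10.4s on one draw
-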
-// isOpFuncCalled checks whether the provided function is called in the op options.
-func isOpFuncCalled(op string, opts []OpOption) bool {
-	for _, opt := range opts {
-		v := reflect.ValueOf(opt)
-		if v.Kind() == reflect.Func {
-			if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
-				if strings.Contains(opFunc.Name(), op) {
-					return true
-				}
-			}
-		}
-	}
-	return false
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go
deleted file mode 100644
index 4a3b8cc..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/watch.go
+++ /dev/null
@@ -1,987 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
-	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/metadata"
-	"google.golang.org/grpc/status"
-)
-
-const (
-	EventTypeDelete = mvccpb.DELETE
-	EventTypePut    = mvccpb.PUT
-
-	closeSendErrTimeout = 250 * time.Millisecond
-)
-
-type Event mvccpb.Event
-
-type WatchChan <-chan WatchResponse
-
-type Watcher interface {
-	// Watch watches on a key or prefix. The watched events will be returned
-	// through the returned channel. If revisions waiting to be sent over the
-	// watch are compacted, then the watch will be canceled by the server, the
-	// client will post a compacted error watch response, and the channel will close.
-	// If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
-	// and "WatchResponse" from this closed channel has zero events and nil "Err()".
-	// The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
-	// to release the associated resources.
-	//
-	// If the context is "context.Background/TODO", returned "WatchChan" will
-	// not be closed and block until event is triggered, except when server
-	// returns a non-recoverable error (e.g. ErrCompacted).
-	// For example, when context passed with "WithRequireLeader" and the
-	// connected server has no leader (e.g. due to network partition),
-	// error "etcdserver: no leader" (ErrNoLeader) will be returned,
-	// and then "WatchChan" is closed with non-nil "Err()".
-	// In order to prevent a watch stream being stuck in a partitioned node,
-	// make sure to wrap context with "WithRequireLeader".
-	//
-	// Otherwise, as long as the context has not been canceled or timed out,
-	// watch will retry on other recoverable errors forever until reconnected.
-	//
-	// TODO: explicitly set context error in the last "WatchResponse" message and close channel?
-	// Currently, client contexts are overwritten with "valCtx" that never closes.
-	// TODO(v3.4): configure watch retry policy, limit maximum retry number
-	// (see https://github.com/etcd-io/etcd/issues/8980)
-	Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
-
-	// RequestProgress requests a progress notify response be sent in all watch channels.
-	RequestProgress(ctx context.Context) error
-
-	// Close closes the watcher and cancels all watch requests.
-	Close() error
-}
-
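-// Usage sketch (illustrative only; "cli" is an assumed *Client value): a
-// typical watch loop over a prefix, wrapping the context with
-// WithRequireLeader so the stream cannot get stuck on a partitioned member.
-//
-//	wch := cli.Watch(WithRequireLeader(ctx), "foo", WithPrefix())
-//	for wresp := range wch {
-//		if err := wresp.Err(); err != nil {
-//			break // compacted, canceled, or no leader
-//		}
-//		for _, ev := range wresp.Events {
-//			fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
-//		}
-//	}
-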
-type WatchResponse struct {
-	Header pb.ResponseHeader
-	Events []*Event
-
-	// CompactRevision is the minimum revision the watcher may receive.
-	CompactRevision int64
-
-	// Canceled is used to indicate watch failure.
-	// If the watch failed and the stream was about to close, before the channel is closed,
-	// the channel sends a final response that has Canceled set to true with a non-nil Err().
-	Canceled bool
-
-	// Created is used to indicate the creation of the watcher.
-	Created bool
-
-	closeErr error
-
-	// cancelReason is the reason for canceling the watch
-	cancelReason string
-}
-
-// IsCreate returns true if the event indicates that the key was newly created.
-func (e *Event) IsCreate() bool {
-	return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
-}
-
-// IsModify returns true if the event indicates that a new value was put on an existing key.
-func (e *Event) IsModify() bool {
-	return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
-}
-
-// Err is the error value if this WatchResponse holds an error.
-func (wr *WatchResponse) Err() error {
-	switch {
-	case wr.closeErr != nil:
-		return v3rpc.Error(wr.closeErr)
-	case wr.CompactRevision != 0:
-		return v3rpc.ErrCompacted
-	case wr.Canceled:
-		if len(wr.cancelReason) != 0 {
-			return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
-		}
-		return v3rpc.ErrFutureRev
-	}
-	return nil
-}
-
-// IsProgressNotify returns true if the WatchResponse is a progress notification.
-func (wr *WatchResponse) IsProgressNotify() bool {
-	return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
-}
-
-// watcher implements the Watcher interface
-type watcher struct {
-	remote   pb.WatchClient
-	callOpts []grpc.CallOption
-
-	// mu protects the grpc streams map
-	mu sync.RWMutex
-
-	// streams holds all the active grpc streams keyed by ctx value.
-	streams map[string]*watchGrpcStream
-}
-
-// watchGrpcStream tracks all watch resources attached to a single grpc stream.
-type watchGrpcStream struct {
-	owner    *watcher
-	remote   pb.WatchClient
-	callOpts []grpc.CallOption
-
-	// ctx controls internal remote.Watch requests
-	ctx context.Context
-	// ctxKey is the key used when looking up this stream's context
-	ctxKey string
-	cancel context.CancelFunc
-
-	// substreams holds all active watchers on this grpc stream
-	substreams map[int64]*watcherStream
-	// resuming holds all resuming watchers on this grpc stream
-	resuming []*watcherStream
-
-	// reqc sends a watch request from Watch() to the main goroutine
-	reqc chan watchStreamRequest
-	// respc receives data from the watch client
-	respc chan *pb.WatchResponse
-	// donec closes to broadcast shutdown
-	donec chan struct{}
-	// errc transmits errors from grpc Recv to the watch stream reconnect logic
-	errc chan error
-	// closingc gets the watcherStream of closing watchers
-	closingc chan *watcherStream
-	// wg is Done when all substream goroutines have exited
-	wg sync.WaitGroup
-
-	// resumec closes to signal that all substreams should begin resuming
-	resumec chan struct{}
-	// closeErr is the error that closed the watch stream
-	closeErr error
-}
-
-// watchStreamRequest is a union of the supported watch request operation types
-type watchStreamRequest interface {
-	toPB() *pb.WatchRequest
-}
-
-// watchRequest is issued by the subscriber to start a new watcher
-type watchRequest struct {
-	ctx context.Context
-	key string
-	end string
-	rev int64
-
-	// send created notification event if this field is true
-	createdNotify bool
-	// progressNotify is for progress updates
-	progressNotify bool
-	// fragmentation should be disabled by default
-	// if true, split watch events when total exceeds
-	// "--max-request-bytes" flag value + 512-byte
-	fragment bool
-
-	// filters is the list of events to filter out
-	filters []pb.WatchCreateRequest_FilterType
-	// get the previous key-value pair before the event happens
-	prevKV bool
-	// retc receives a chan WatchResponse once the watcher is established
-	retc chan chan WatchResponse
-}
-
-// progressRequest is issued by the subscriber to request watch progress
-type progressRequest struct {
-}
-
-// watcherStream represents a registered watcher
-type watcherStream struct {
-	// initReq is the request that initiated this stream
-	initReq watchRequest
-
-	// outc publishes watch responses to subscriber
-	outc chan WatchResponse
-	// recvc buffers watch responses before publishing
-	recvc chan *WatchResponse
-	// donec closes when the watcherStream goroutine stops.
-	donec chan struct{}
-	// closing is set to true when stream should be scheduled to shutdown.
-	closing bool
-	// id is the registered watch id on the grpc stream
-	id int64
-
-	// buf holds all events received from etcd but not yet consumed by the client
-	buf []*WatchResponse
-}
-
-func NewWatcher(c *Client) Watcher {
-	return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
-}
-
-func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
-	w := &watcher{
-		remote:  wc,
-		streams: make(map[string]*watchGrpcStream),
-	}
-	if c != nil {
-		w.callOpts = c.callOpts
-	}
-	return w
-}
-
-// never closes
-var valCtxCh = make(chan struct{})
-var zeroTime = time.Unix(0, 0)
-
-// ctx with only the values; never Done
-type valCtx struct{ context.Context }
-
-func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
-func (vc *valCtx) Done() <-chan struct{}       { return valCtxCh }
-func (vc *valCtx) Err() error                  { return nil }
-
-func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
-	ctx, cancel := context.WithCancel(&valCtx{inctx})
-	wgs := &watchGrpcStream{
-		owner:      w,
-		remote:     w.remote,
-		callOpts:   w.callOpts,
-		ctx:        ctx,
-		ctxKey:     streamKeyFromCtx(inctx),
-		cancel:     cancel,
-		substreams: make(map[int64]*watcherStream),
-		respc:      make(chan *pb.WatchResponse),
-		reqc:       make(chan watchStreamRequest),
-		donec:      make(chan struct{}),
-		errc:       make(chan error, 1),
-		closingc:   make(chan *watcherStream),
-		resumec:    make(chan struct{}),
-	}
-	go wgs.run()
-	return wgs
-}
-
-// Watch posts a watch request to run() and waits for a new watcher channel
-func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
-	ow := opWatch(key, opts...)
-
-	var filters []pb.WatchCreateRequest_FilterType
-	if ow.filterPut {
-		filters = append(filters, pb.WatchCreateRequest_NOPUT)
-	}
-	if ow.filterDelete {
-		filters = append(filters, pb.WatchCreateRequest_NODELETE)
-	}
-
-	wr := &watchRequest{
-		ctx:            ctx,
-		createdNotify:  ow.createdNotify,
-		key:            string(ow.key),
-		end:            string(ow.end),
-		rev:            ow.rev,
-		progressNotify: ow.progressNotify,
-		fragment:       ow.fragment,
-		filters:        filters,
-		prevKV:         ow.prevKV,
-		retc:           make(chan chan WatchResponse, 1),
-	}
-
-	ok := false
-	ctxKey := streamKeyFromCtx(ctx)
-
-	// find or allocate appropriate grpc watch stream
-	w.mu.Lock()
-	if w.streams == nil {
-		// closed
-		w.mu.Unlock()
-		ch := make(chan WatchResponse)
-		close(ch)
-		return ch
-	}
-	wgs := w.streams[ctxKey]
-	if wgs == nil {
-		wgs = w.newWatcherGrpcStream(ctx)
-		w.streams[ctxKey] = wgs
-	}
-	donec := wgs.donec
-	reqc := wgs.reqc
-	w.mu.Unlock()
-
-	// couldn't create channel; return closed channel
-	closeCh := make(chan WatchResponse, 1)
-
-	// submit request
-	select {
-	case reqc <- wr:
-		ok = true
-	case <-wr.ctx.Done():
-	case <-donec:
-		if wgs.closeErr != nil {
-			closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
-			break
-		}
-		// retry; may have dropped stream from no ctxs
-		return w.Watch(ctx, key, opts...)
-	}
-
-	// receive channel
-	if ok {
-		select {
-		case ret := <-wr.retc:
-			return ret
-		case <-ctx.Done():
-		case <-donec:
-			if wgs.closeErr != nil {
-				closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
-				break
-			}
-			// retry; may have dropped stream from no ctxs
-			return w.Watch(ctx, key, opts...)
-		}
-	}
-
-	close(closeCh)
-	return closeCh
-}
-
-func (w *watcher) Close() (err error) {
-	w.mu.Lock()
-	streams := w.streams
-	w.streams = nil
-	w.mu.Unlock()
-	for _, wgs := range streams {
-		if werr := wgs.close(); werr != nil {
-			err = werr
-		}
-	}
-	// Consider context.Canceled as a successful close
-	if err == context.Canceled {
-		err = nil
-	}
-	return err
-}
-
-// RequestProgress requests a progress notify response be sent in all watch channels.
-func (w *watcher) RequestProgress(ctx context.Context) (err error) {
-	ctxKey := streamKeyFromCtx(ctx)
-
-	w.mu.Lock()
-	if w.streams == nil {
-		w.mu.Unlock()
-		return fmt.Errorf("no stream found for context")
-	}
-	wgs := w.streams[ctxKey]
-	if wgs == nil {
-		wgs = w.newWatcherGrpcStream(ctx)
-		w.streams[ctxKey] = wgs
-	}
-	donec := wgs.donec
-	reqc := wgs.reqc
-	w.mu.Unlock()
-
-	pr := &progressRequest{}
-
-	select {
-	case reqc <- pr:
-		return nil
-	case <-ctx.Done():
-		if err == nil {
-			return ctx.Err()
-		}
-		return err
-	case <-donec:
-		if wgs.closeErr != nil {
-			return wgs.closeErr
-		}
-		// retry; may have dropped stream from no ctxs
-		return w.RequestProgress(ctx)
-	}
-}
-
-func (w *watchGrpcStream) close() (err error) {
-	w.cancel()
-	<-w.donec
-	select {
-	case err = <-w.errc:
-	default:
-	}
-	return toErr(w.ctx, err)
-}
-
-func (w *watcher) closeStream(wgs *watchGrpcStream) {
-	w.mu.Lock()
-	close(wgs.donec)
-	wgs.cancel()
-	if w.streams != nil {
-		delete(w.streams, wgs.ctxKey)
-	}
-	w.mu.Unlock()
-}
-
-func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
-	// check watch ID for backward compatibility (<= v3.3)
-	if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
-		w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
-		// failed; no channel
-		close(ws.recvc)
-		return
-	}
-	ws.id = resp.WatchId
-	w.substreams[ws.id] = ws
-}
-
-func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
-	select {
-	case ws.outc <- *resp:
-	case <-ws.initReq.ctx.Done():
-	case <-time.After(closeSendErrTimeout):
-	}
-	close(ws.outc)
-}
-
-func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
-	// send channel response in case stream was never established
-	select {
-	case ws.initReq.retc <- ws.outc:
-	default:
-	}
-	// close subscriber's channel
-	if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
-		go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
-	} else if ws.outc != nil {
-		close(ws.outc)
-	}
-	if ws.id != -1 {
-		delete(w.substreams, ws.id)
-		return
-	}
-	for i := range w.resuming {
-		if w.resuming[i] == ws {
-			w.resuming[i] = nil
-			return
-		}
-	}
-}
-
-// run is the root of the goroutines for managing a watcher client
-func (w *watchGrpcStream) run() {
-	var wc pb.Watch_WatchClient
-	var closeErr error
-
-	// substreams marked to close but goroutine still running; needed for
-	// avoiding double-closing recvc on grpc stream teardown
-	closing := make(map[*watcherStream]struct{})
-
-	defer func() {
-		w.closeErr = closeErr
-		// shutdown substreams and resuming substreams
-		for _, ws := range w.substreams {
-			if _, ok := closing[ws]; !ok {
-				close(ws.recvc)
-				closing[ws] = struct{}{}
-			}
-		}
-		for _, ws := range w.resuming {
-			if _, ok := closing[ws]; ws != nil && !ok {
-				close(ws.recvc)
-				closing[ws] = struct{}{}
-			}
-		}
-		w.joinSubstreams()
-		for range closing {
-			w.closeSubstream(<-w.closingc)
-		}
-		w.wg.Wait()
-		w.owner.closeStream(w)
-	}()
-
-	// start a stream with the etcd grpc server
-	if wc, closeErr = w.newWatchClient(); closeErr != nil {
-		return
-	}
-
-	cancelSet := make(map[int64]struct{})
-
-	var cur *pb.WatchResponse
-	for {
-		select {
-		// Watch() requested
-		case req := <-w.reqc:
-			switch wreq := req.(type) {
-			case *watchRequest:
-				outc := make(chan WatchResponse, 1)
-				// TODO: pass custom watch ID?
-				ws := &watcherStream{
-					initReq: *wreq,
-					id:      -1,
-					outc:    outc,
-					// unbuffered so resumes won't cause repeat events
-					recvc: make(chan *WatchResponse),
-				}
-
-				ws.donec = make(chan struct{})
-				w.wg.Add(1)
-				go w.serveSubstream(ws, w.resumec)
-
-				// queue up for watcher creation/resume
-				w.resuming = append(w.resuming, ws)
-				if len(w.resuming) == 1 {
-					// head of resume queue, can register a new watcher
-					wc.Send(ws.initReq.toPB())
-				}
-			case *progressRequest:
-				wc.Send(wreq.toPB())
-			}
-
-		// new events from the watch client
-		case pbresp := <-w.respc:
-			if cur == nil || pbresp.Created || pbresp.Canceled {
-				cur = pbresp
-			} else if cur != nil && cur.WatchId == pbresp.WatchId {
-				// merge new events
-				cur.Events = append(cur.Events, pbresp.Events...)
-				// update "Fragment" field; last response with "Fragment" == false
-				cur.Fragment = pbresp.Fragment
-			}
-
-			switch {
-			case pbresp.Created:
-				// response to head of queue creation
-				if ws := w.resuming[0]; ws != nil {
-					w.addSubstream(pbresp, ws)
-					w.dispatchEvent(pbresp)
-					w.resuming[0] = nil
-				}
-
-				if ws := w.nextResume(); ws != nil {
-					wc.Send(ws.initReq.toPB())
-				}
-
-				// reset for next iteration
-				cur = nil
-
-			case pbresp.Canceled && pbresp.CompactRevision == 0:
-				delete(cancelSet, pbresp.WatchId)
-				if ws, ok := w.substreams[pbresp.WatchId]; ok {
-					// signal to stream goroutine to update closingc
-					close(ws.recvc)
-					closing[ws] = struct{}{}
-				}
-
-				// reset for next iteration
-				cur = nil
-
-			case cur.Fragment:
-				// watch response events are still fragmented
-				// continue to fetch next fragmented event arrival
-				continue
-
-			default:
-				// dispatch to appropriate watch stream
-				ok := w.dispatchEvent(cur)
-
-				// reset for next iteration
-				cur = nil
-
-				if ok {
-					break
-				}
-
-				// watch response on unexpected watch id; cancel id
-				if _, ok := cancelSet[pbresp.WatchId]; ok {
-					break
-				}
-
-				cancelSet[pbresp.WatchId] = struct{}{}
-				cr := &pb.WatchRequest_CancelRequest{
-					CancelRequest: &pb.WatchCancelRequest{
-						WatchId: pbresp.WatchId,
-					},
-				}
-				req := &pb.WatchRequest{RequestUnion: cr}
-				wc.Send(req)
-			}
-
-		// watch client failed on Recv; spawn another if possible
-		case err := <-w.errc:
-			if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
-				closeErr = err
-				return
-			}
-			if wc, closeErr = w.newWatchClient(); closeErr != nil {
-				return
-			}
-			if ws := w.nextResume(); ws != nil {
-				wc.Send(ws.initReq.toPB())
-			}
-			cancelSet = make(map[int64]struct{})
-
-		case <-w.ctx.Done():
-			return
-
-		case ws := <-w.closingc:
-			w.closeSubstream(ws)
-			delete(closing, ws)
-			// no more watchers on this stream, shutdown
-			if len(w.substreams)+len(w.resuming) == 0 {
-				return
-			}
-		}
-	}
-}
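
The respc branch of run() above coalesces fragmented watch responses: partial responses for the same watch ID are merged into `cur` until one arrives with `Fragment == false`, and only then is the merged response dispatched. A standalone sketch of that accumulation, with an illustrative `resp` struct in place of `pb.WatchResponse`:

```go
package main

import "fmt"

type resp struct {
	watchID  int64
	events   []string
	fragment bool // true while more fragments of this response follow
}

func main() {
	parts := []resp{
		{watchID: 7, events: []string{"put a"}, fragment: true},
		{watchID: 7, events: []string{"put b"}, fragment: true},
		{watchID: 7, events: []string{"del a"}, fragment: false},
	}
	var cur *resp
	for i := range parts {
		p := parts[i]
		if cur == nil {
			cur = &p
		} else if cur.watchID == p.watchID {
			// merge new events; the last fragment flips fragment to false
			cur.events = append(cur.events, p.events...)
			cur.fragment = p.fragment
		}
		if cur.fragment {
			continue // wait for the rest of the fragments
		}
		fmt.Println("dispatch:", cur.events)
		cur = nil // reset for the next response
	}
}
```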
-
-// nextResume chooses the next resuming watcher to register with the grpc stream. Abandoned
-// streams are marked as nil in the queue since the head must wait for its inflight registration.
-func (w *watchGrpcStream) nextResume() *watcherStream {
-	for len(w.resuming) != 0 {
-		if w.resuming[0] != nil {
-			return w.resuming[0]
-		}
-		w.resuming = w.resuming[1:]
-	}
-	return nil
-}
-
-// dispatchEvent sends a WatchResponse to the appropriate watcher stream
-func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
-	events := make([]*Event, len(pbresp.Events))
-	for i, ev := range pbresp.Events {
-		events[i] = (*Event)(ev)
-	}
-	// TODO: return watch ID?
-	wr := &WatchResponse{
-		Header:          *pbresp.Header,
-		Events:          events,
-		CompactRevision: pbresp.CompactRevision,
-		Created:         pbresp.Created,
-		Canceled:        pbresp.Canceled,
-		cancelReason:    pbresp.CancelReason,
-	}
-
-	// watch IDs are zero-indexed, so request notify watch responses are assigned a watch ID of -1 to
-	// indicate they should be broadcast.
-	if wr.IsProgressNotify() && pbresp.WatchId == -1 {
-		return w.broadcastResponse(wr)
-	}
-
-	return w.unicastResponse(wr, pbresp.WatchId)
-}
-
-// broadcastResponse sends a watch response to all watch substreams.
-func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
-	for _, ws := range w.substreams {
-		select {
-		case ws.recvc <- wr:
-		case <-ws.donec:
-		}
-	}
-	return true
-}
-
-// unicastResponse sends a watch response to a specific watch substream.
-func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
-	ws, ok := w.substreams[watchId]
-	if !ok {
-		return false
-	}
-	select {
-	case ws.recvc <- wr:
-	case <-ws.donec:
-		return false
-	}
-	return true
-}
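
Both dispatch paths above guard every send with the substream's `donec` channel, so a delivery to a dead subscriber is abandoned instead of blocking the whole event loop. A reduced sketch of that guarded-send idiom (the `sub` type and `deliver` helper are illustrative, not etcd API):

```go
package main

import "fmt"

type sub struct {
	recvc chan string
	donec chan struct{}
}

// deliver sends to one subscriber but never blocks on a dead one: if the
// subscriber's goroutine has exited (donec closed), the send is abandoned.
func deliver(s *sub, msg string) bool {
	select {
	case s.recvc <- msg:
		return true
	case <-s.donec:
		return false
	}
}

func main() {
	s := &sub{recvc: make(chan string, 1), donec: make(chan struct{})}
	fmt.Println(deliver(s, "event")) // true: the buffered send succeeds
	close(s.donec)                   // subscriber goroutine exits
	fmt.Println(deliver(s, "event")) // false: buffer full, donec closed
}
```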
-
-// serveWatchClient forwards messages from the grpc stream to run()
-func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
-	for {
-		resp, err := wc.Recv()
-		if err != nil {
-			select {
-			case w.errc <- err:
-			case <-w.donec:
-			}
-			return
-		}
-		select {
-		case w.respc <- resp:
-		case <-w.donec:
-			return
-		}
-	}
-}
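
serveWatchClient is a pump: one goroutine owns the blocking Recv loop and forwards each result (or the terminal error) over channels, and every send is guarded by a done channel so the pump cannot leak if the coordinator exits first. A self-contained sketch under that shape, with a plain `recvFunc` standing in for `wc.Recv`:

```go
package main

import (
	"errors"
	"fmt"
)

// pump forwards results from recvFunc until it fails, then reports the error.
// Every send is guarded by donec so pump exits if the coordinator is gone.
func pump(recvFunc func() (string, error), respc chan<- string, errc chan<- error, donec <-chan struct{}) {
	for {
		resp, err := recvFunc()
		if err != nil {
			select {
			case errc <- err:
			case <-donec:
			}
			return
		}
		select {
		case respc <- resp:
		case <-donec:
			return
		}
	}
}

func main() {
	msgs, i := []string{"a", "b"}, 0
	recv := func() (string, error) {
		if i == len(msgs) {
			return "", errors.New("stream closed")
		}
		i++
		return msgs[i-1], nil
	}
	respc, errc, donec := make(chan string), make(chan error), make(chan struct{})
	go pump(recv, respc, errc, donec)
	for {
		select {
		case r := <-respc:
			fmt.Println("resp:", r)
		case err := <-errc:
			fmt.Println("err:", err)
			return
		}
	}
}
```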
-
-// serveSubstream forwards watch responses from run() to the subscriber
-func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
-	if ws.closing {
-		panic("created substream goroutine but substream is closing")
-	}
-
-	// nextRev is the minimum expected next revision
-	nextRev := ws.initReq.rev
-	resuming := false
-	defer func() {
-		if !resuming {
-			ws.closing = true
-		}
-		close(ws.donec)
-		if !resuming {
-			w.closingc <- ws
-		}
-		w.wg.Done()
-	}()
-
-	emptyWr := &WatchResponse{}
-	for {
-		curWr := emptyWr
-		outc := ws.outc
-
-		if len(ws.buf) > 0 {
-			curWr = ws.buf[0]
-		} else {
-			outc = nil
-		}
-		select {
-		case outc <- *curWr:
-			if ws.buf[0].Err() != nil {
-				return
-			}
-			ws.buf[0] = nil
-			ws.buf = ws.buf[1:]
-		case wr, ok := <-ws.recvc:
-			if !ok {
-				// shutdown from closeSubstream
-				return
-			}
-
-			if wr.Created {
-				if ws.initReq.retc != nil {
-					ws.initReq.retc <- ws.outc
-					// to prevent next write from taking the slot in buffered channel
-					// and posting duplicate create events
-					ws.initReq.retc = nil
-
-					// send first creation event only if requested
-					if ws.initReq.createdNotify {
-						ws.outc <- *wr
-					}
-					// once the watch channel is returned, a current revision
-					// watch must resume at the store revision. This is necessary
-					// for the following case to work as expected:
-					//	wch := m1.Watch("a")
-					//	m2.Put("a", "b")
-					//	<-wch
-					// If the revision were only bound to the first observed event and
-					// wch disconnected before the Put was issued, then reconnected
-					// after it was committed, it would miss the Put.
-					if ws.initReq.rev == 0 {
-						nextRev = wr.Header.Revision
-					}
-				}
-			} else {
-				// current progress of watch; <= store revision
-				nextRev = wr.Header.Revision
-			}
-
-			if len(wr.Events) > 0 {
-				nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
-			}
-			ws.initReq.rev = nextRev
-
-			// created event is already sent above,
-			// watcher should not post duplicate events
-			if wr.Created {
-				continue
-			}
-
-			// TODO pause channel if buffer gets too large
-			ws.buf = append(ws.buf, wr)
-		case <-w.ctx.Done():
-			return
-		case <-ws.initReq.ctx.Done():
-			return
-		case <-resumec:
-			resuming = true
-			return
-		}
-	}
-	// run() lazily sends a cancel message if events arrive on a missing watch id
-}
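
The buffer drain at the top of serveSubstream's loop (`outc = nil` when the buffer is empty) relies on a classic Go trick: a send on a nil channel blocks forever, so assigning nil to the select's send channel disables that case without restructuring the loop. A minimal, runnable illustration of the same technique, independent of etcd:

```go
package main

import "fmt"

func main() {
	events := make(chan int)
	out := make(chan int)
	done := make(chan struct{})
	var buf []int

	go func() { // producer: three events, then end of stream
		for i := 1; i <= 3; i++ {
			events <- i
		}
		close(events)
	}()
	go func() { // consumer
		for v := range out {
			fmt.Println("out:", v)
		}
		close(done)
	}()

	for events != nil || len(buf) > 0 {
		// A send on a nil channel blocks forever, so leaving sendc nil
		// while the buffer is empty disables that select case entirely.
		var sendc chan int
		var head int
		if len(buf) > 0 {
			sendc = out
			head = buf[0]
		}
		select {
		case sendc <- head:
			buf = buf[1:]
		case v, ok := <-events:
			if !ok {
				events = nil // likewise disable the receive case
				continue
			}
			buf = append(buf, v)
		}
	}
	close(out)
	<-done
}
```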
-
-func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
-	// mark all substreams as resuming
-	close(w.resumec)
-	w.resumec = make(chan struct{})
-	w.joinSubstreams()
-	for _, ws := range w.substreams {
-		ws.id = -1
-		w.resuming = append(w.resuming, ws)
-	}
-	// strip out nils, if any
-	var resuming []*watcherStream
-	for _, ws := range w.resuming {
-		if ws != nil {
-			resuming = append(resuming, ws)
-		}
-	}
-	w.resuming = resuming
-	w.substreams = make(map[int64]*watcherStream)
-
-	// connect to grpc stream while accepting watcher cancellation
-	stopc := make(chan struct{})
-	donec := w.waitCancelSubstreams(stopc)
-	wc, err := w.openWatchClient()
-	close(stopc)
-	<-donec
-
-	// serve all non-closing streams, even if there's a client error
-	// so that the teardown path can shut down the streams as expected.
-	for _, ws := range w.resuming {
-		if ws.closing {
-			continue
-		}
-		ws.donec = make(chan struct{})
-		w.wg.Add(1)
-		go w.serveSubstream(ws, w.resumec)
-	}
-
-	if err != nil {
-		return nil, v3rpc.Error(err)
-	}
-
-	// receive data from new grpc stream
-	go w.serveWatchClient(wc)
-	return wc, nil
-}
-
-func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
-	var wg sync.WaitGroup
-	wg.Add(len(w.resuming))
-	donec := make(chan struct{})
-	for i := range w.resuming {
-		go func(ws *watcherStream) {
-			defer wg.Done()
-			if ws.closing {
-				if ws.initReq.ctx.Err() != nil && ws.outc != nil {
-					close(ws.outc)
-					ws.outc = nil
-				}
-				return
-			}
-			select {
-			case <-ws.initReq.ctx.Done():
-				// closed ws will be removed from resuming
-				ws.closing = true
-				close(ws.outc)
-				ws.outc = nil
-				w.wg.Add(1)
-				go func() {
-					defer w.wg.Done()
-					w.closingc <- ws
-				}()
-			case <-stopc:
-			}
-		}(w.resuming[i])
-	}
-	go func() {
-		defer close(donec)
-		wg.Wait()
-	}()
-	return donec
-}
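
waitCancelSubstreams closes over a common fan-in idiom: wait on a sync.WaitGroup in a helper goroutine and close a channel when it finishes, so the caller can select on (or simply receive from) overall completion. A minimal sketch of that idiom on its own — `waitAll` is an illustrative name, not etcd API:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// waitAll runs work on n goroutines and returns a channel that is closed
// once every goroutine has finished, so callers can select on completion.
func waitAll(n int, work func(int)) <-chan struct{} {
	var wg sync.WaitGroup
	wg.Add(n)
	donec := make(chan struct{})
	for i := 0; i < n; i++ {
		go func(i int) {
			defer wg.Done()
			work(i)
		}(i)
	}
	go func() {
		defer close(donec)
		wg.Wait()
	}()
	return donec
}

func main() {
	donec := waitAll(3, func(i int) { time.Sleep(time.Duration(i) * 10 * time.Millisecond) })
	<-donec
	fmt.Println("all workers finished")
}
```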
-
-// joinSubstreams waits for all substream goroutines to complete.
-func (w *watchGrpcStream) joinSubstreams() {
-	for _, ws := range w.substreams {
-		<-ws.donec
-	}
-	for _, ws := range w.resuming {
-		if ws != nil {
-			<-ws.donec
-		}
-	}
-}
-
-var maxBackoff = 100 * time.Millisecond
-
-// openWatchClient retries opening a watch client until it succeeds or hits a halting error.
-// It retries manually to cover the "ws == nil && err == nil" case.
-// TODO: remove FailFast=false
-func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
-	backoff := time.Millisecond
-	for {
-		select {
-		case <-w.ctx.Done():
-			if err == nil {
-				return nil, w.ctx.Err()
-			}
-			return nil, err
-		default:
-		}
-		if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
-			break
-		}
-		if isHaltErr(w.ctx, err) {
-			return nil, v3rpc.Error(err)
-		}
-		if isUnavailableErr(w.ctx, err) {
-			// retry, but backoff
-			if backoff < maxBackoff {
-				// 25% backoff factor
-				backoff = backoff + backoff/4
-				if backoff > maxBackoff {
-					backoff = maxBackoff
-				}
-			}
-			time.Sleep(backoff)
-		}
-	}
-	return ws, nil
-}
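
The retry loop above grows the delay by 25% per attempt until it saturates at `maxBackoff` (100ms). A standalone sketch of that capped-growth backoff, with a fake `dial` that fails a few times before succeeding (names other than `maxBackoff` are illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

func main() {
	const maxBackoff = 100 * time.Millisecond
	backoff := time.Millisecond
	attempts := 0
	dial := func() error { // fake dial: fails three times, then succeeds
		attempts++
		if attempts < 4 {
			return errors.New("unavailable")
		}
		return nil
	}
	for {
		if err := dial(); err == nil {
			break
		}
		if backoff < maxBackoff {
			backoff += backoff / 4 // 25% growth factor
			if backoff > maxBackoff {
				backoff = maxBackoff // cap the delay
			}
		}
		fmt.Println("retrying after", backoff)
		time.Sleep(backoff)
	}
	fmt.Println("connected after", attempts, "attempts")
}
```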
-
-// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
-func (wr *watchRequest) toPB() *pb.WatchRequest {
-	req := &pb.WatchCreateRequest{
-		StartRevision:  wr.rev,
-		Key:            []byte(wr.key),
-		RangeEnd:       []byte(wr.end),
-		ProgressNotify: wr.progressNotify,
-		Filters:        wr.filters,
-		PrevKv:         wr.prevKV,
-		Fragment:       wr.fragment,
-	}
-	cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
-	return &pb.WatchRequest{RequestUnion: cr}
-}
-
-// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
-func (pr *progressRequest) toPB() *pb.WatchRequest {
-	req := &pb.WatchProgressRequest{}
-	cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
-	return &pb.WatchRequest{RequestUnion: cr}
-}
-
-func streamKeyFromCtx(ctx context.Context) string {
-	if md, ok := metadata.FromOutgoingContext(ctx); ok {
-		return fmt.Sprintf("%+v", md)
-	}
-	return ""
-}
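
streamKeyFromCtx derives a cache key from the outgoing gRPC metadata, so watches created under different metadata (e.g. different auth tokens) map to distinct grpc streams. A standalone sketch of the same derivation; it requires the real `google.golang.org/grpc/metadata` package, and `streamKey` is an illustrative local name:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// streamKey renders the outgoing metadata as a string; contexts with
// different metadata therefore yield different keys.
func streamKey(ctx context.Context) string {
	if md, ok := metadata.FromOutgoingContext(ctx); ok {
		return fmt.Sprintf("%+v", md)
	}
	return ""
}

func main() {
	plain := context.Background()
	authed := metadata.NewOutgoingContext(plain, metadata.Pairs("token", "abc"))
	fmt.Printf("plain:  %q\n", streamKey(plain))
	fmt.Printf("authed: %q\n", streamKey(authed))
}
```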
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
deleted file mode 100644
index bc1ad7b..0000000
--- a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-)
-
-// server-side error
-var (
-	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
-	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
-	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
-	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
-	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
-	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
-	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
-	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
-	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
-
-	ErrGRPCLeaseNotFound    = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
-	ErrGRPCLeaseExist       = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
-	ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
-
-	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
-	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
-	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
-	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
-	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
-
-	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
-	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
-
-	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
-	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
-	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
-	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
-	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
-	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
-	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
-	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
-	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
-	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
-	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
-	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
-	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
-	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
-
-	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
-	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
-	ErrGRPCLeaderChanged              = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
-	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
-	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
-	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
-	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
-	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
-	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
-	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
-
-	errStringToError = map[string]error{
-		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
-		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
-		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
-		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
-
-		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
-		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
-		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
-		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
-		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
-
-		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
-		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
-		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
-
-		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
-		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
-		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
-		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
-		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
-
-		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
-		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
-
-		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
-		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
-		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
-		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
-		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
-		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
-		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
-		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
-		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
-		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
-		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
-		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
-		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
-		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
-
-		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
-		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
-		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
-		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
-		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
-		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
-		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
-		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
-		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
-	}
-)
-
-// client-side error
-var (
-	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
-	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
-	ErrValueProvided = Error(ErrGRPCValueProvided)
-	ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
-	ErrTooManyOps    = Error(ErrGRPCTooManyOps)
-	ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
-	ErrCompacted     = Error(ErrGRPCCompacted)
-	ErrFutureRev     = Error(ErrGRPCFutureRev)
-	ErrNoSpace       = Error(ErrGRPCNoSpace)
-
-	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
-	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
-	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
-
-	ErrMemberExist            = Error(ErrGRPCMemberExist)
-	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
-	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
-	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
-	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
-
-	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
-	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
-
-	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
-	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
-	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
-	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
-	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
-	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
-	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
-	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
-	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
-	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
-	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
-	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
-	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
-	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
-
-	ErrNoLeader                   = Error(ErrGRPCNoLeader)
-	ErrNotLeader                  = Error(ErrGRPCNotLeader)
-	ErrLeaderChanged              = Error(ErrGRPCLeaderChanged)
-	ErrNotCapable                 = Error(ErrGRPCNotCapable)
-	ErrStopped                    = Error(ErrGRPCStopped)
-	ErrTimeout                    = Error(ErrGRPCTimeout)
-	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
-	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
-	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
-	ErrCorrupt                    = Error(ErrGRPCCorrupt)
-)
-
-// EtcdError defines gRPC server errors.
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
-type EtcdError struct {
-	code codes.Code
-	desc string
-}
-
-// Code returns grpc/codes.Code.
-// TODO: define clientv3/codes.Code.
-func (e EtcdError) Code() codes.Code {
-	return e.code
-}
-
-func (e EtcdError) Error() string {
-	return e.desc
-}
-
-func Error(err error) error {
-	if err == nil {
-		return nil
-	}
-	verr, ok := errStringToError[ErrorDesc(err)]
-	if !ok { // not gRPC error
-		return err
-	}
-	ev, ok := status.FromError(verr)
-	var desc string
-	if ok {
-		desc = ev.Message()
-	} else {
-		desc = verr.Error()
-	}
-	return EtcdError{code: ev.Code(), desc: desc}
-}
-
-func ErrorDesc(err error) string {
-	if s, ok := status.FromError(err); ok {
-		return s.Message()
-	}
-	return err.Error()
-}
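
The `errStringToError` table and the `Error`/`ErrorDesc` pair above let clients turn wire-level gRPC errors back into sentinel values comparable with `==`. A reduced sketch of that normalization for a single error, using only the real `google.golang.org/grpc` status API (`errCompacted`, `byDesc`, and `normalize` are illustrative names):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()

// byDesc maps a status message back to the local sentinel error.
var byDesc = map[string]error{
	status.Convert(errCompacted).Message(): errCompacted,
}

// normalize maps an arbitrary error received from the wire onto the local
// sentinel with the same status message, so callers can compare with ==.
func normalize(err error) error {
	if s, ok := status.FromError(err); ok {
		if known, hit := byDesc[s.Message()]; hit {
			return known
		}
	}
	return err
}

func main() {
	wire := status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
	fmt.Println(normalize(wire) == errCompacted) // true
}
```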
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
deleted file mode 100644
index 8f8ac60..0000000
--- a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
-	TokenFieldNameGRPC    = "token"
-	TokenFieldNameSwagger = "authorization"
-)
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
deleted file mode 100644
index cbef197..0000000
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ /dev/null
@@ -1,22 +0,0 @@
-.DS_Store
-Thumbs.db
-
-.tools/
-.idea/
-.vscode/
-*.iml
-*.so
-coverage.*
-
-gen/
-
-/example/basic/basic
-/example/grpc/client/client
-/example/grpc/server/server
-/example/http/client/client
-/example/http/server/server
-/example/jaeger/jaeger
-/example/namedtracer/namedtracer
-/example/prometheus/prometheus
-/example/zipkin/zipkin
-/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
deleted file mode 100644
index 38a1f56..0000000
--- a/vendor/go.opentelemetry.io/otel/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "opentelemetry-proto"]
-	path = exporters/otlp/internal/opentelemetry-proto
-	url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
deleted file mode 100644
index 2ef1681..0000000
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ /dev/null
@@ -1,32 +0,0 @@
-# See https://github.com/golangci/golangci-lint#config-file
-run:
-  issues-exit-code: 1 #Default
-  tests: true #Default
-
-linters:
-  enable:
-    - misspell
-    - goimports
-    - golint
-    - gofmt
-
-issues:
-  exclude-rules:
-    # helpers in tests often (rightfully) pass a *testing.T as their first argument
-    - path: _test\.go
-      text: "context.Context should be the first parameter of a function"
-      linters:
-        - golint
-    # Yes, they are, but it's okay in a test
-    - path: _test\.go
-      text: "exported func.*returns unexported type.*which can be annoying to use"
-      linters:
-        - golint
-
-linters-settings:
-  misspell:
-    locale: US
-    ignore-words:
-      - cancelled
-  goimports:
-    local-prefixes: go.opentelemetry.io
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
deleted file mode 100644
index 1b77504..0000000
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ /dev/null
@@ -1,922 +0,0 @@
-# Changelog
-
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
-
-This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
-## [Unreleased]
-
-## [0.13.0] - 2020-10-08
-
-### Added
-
-- OTLP Metric exporter supports Histogram aggregation. (#1209)
-- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
-- A Baggage API to implement the OpenTelemetry specification. (#1217)
-
-### Changed
-
-- Set default propagator to no-op propagator. (#1184)
-- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
-- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
-- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
-   They now are `Unset`, `Error`, and `Ok`.
-   They no longer track the gRPC codes. (#1214)
-- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
-- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/propagators`. (#1217)
-
-### Fixed
-
-- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226)
-
-### Removed
-
-- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212)
-- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification.
-   The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
-- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216)
-- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217)
-- Remove duplicate hostname key `HostHostNameKey` in Resource semantic conventions. (#1219)
-- Nested array/slice support has been removed. (#1226)
-
-## [0.12.0] - 2020-09-24
-
-### Added
-
-- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108)
-- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s.
-   This addition was made to conform with our project option conventions. (#1155)
-- Instrumentation library information was added to the Zipkin exporter. (#1119)
-- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166)
-- More semantic conventions for k8s as resource attributes. (#1167)
-
-### Changed
-
-- Add reconnecting UDP connection type to Jaeger exporter.
-   This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record.
-   It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063)
-- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`.
-   This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108)
-- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks`.
-   This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108)
-- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109)
-- Move the `B3` and `TraceContext` from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package.
-    This removal of the propagators is reflective of the OpenTelemetry specification for these propagators as well as cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118)
-- Rename Jaeger tags used for instrumentation library information to reflect changes in OpenTelemetry specification. (#1119)
-- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115)
-- Move `tools` package under `internal`. (#1141)
-- Move `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142)
-   The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`.  Other exported functions and types are unchanged.
-- Rename `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on parent span. (#1153)
-- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155)
-- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
-- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to
-   recommend the use of `newConfig()` instead of `configure()`. (#1163)
-- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163)
-- Ensure exported interface types include parameter names and update the
-   Style Guide to reflect this styling rule. (#1172)
-- Don't consider unset environment variable for resource detection to be an error. (#1170)
-- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and
-  `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`.
-- ValueObserver instruments use LastValue aggregator by default. (#1165)
-- OTLP Metric exporter supports LastValue aggregation. (#1165)
-- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190)
-- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190)
-- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190)
-- Rename `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190)
-- Rename `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190)
-- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190)
-- Renamed `SamplingDecision` values to comply with OpenTelemetry specification change. (#1192)
-- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201)
-- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195)
-- Add test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203)
-
-### Removed
-
-- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`. It is now located in the
-   `go.opentelemetry.io/contrib/propagators/` module. (#1191)
-- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey` from package `go.opentelemetry.io/otel/semconv`. (#1194)
-
-### Fixed
-
-- Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171)
-- Fix missing shutdown processor in otel-collector example. (#1186)
-- Fix missing shutdown processor in basic and namedtracer examples. (#1197)
-
-## [0.11.0] - 2020-08-24
-
-### Added
-
-- Support for exporting array-valued attributes via OTLP. (#992)
-- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994)
-- Support for filtering metric label sets. (#1047)
-- A dimensionality-reducing metric Processor. (#1057)
-- Integration tests for more OTel Collector Attribute types. (#1062)
-- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078)
-
-### Changed
-
-- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049)
-- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049)
-- Rename `api/testharness` to `api/apitest`. (#1049)
-- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049)
-- Change Metric Processor to merge multiple observations. (#1024)
-- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module.
-   This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038)
-- Renamed `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016)
-- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES` (#1042)
-- Replace `WithSyncer` with `WithBatcher` in examples. (#1044)
-- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046)
-- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` into the new `go.opentelemetry.io/otel/label` package. (#1060)
-- Unify Callback Function Naming.
-   Rename `*Callback` with `*Func`. (#1061)
-- CI builds validate against last two versions of Go, dropping 1.13 and adding 1.15. (#1064)
-- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification compliant `Exporter` interface.
-   This interface still supports the export of `SpanData`, but only as a slice.
-   Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as to implement a `Shutdown` method for exporter clean-up. (#1078)
-- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error.
-   If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078)
-- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead only a `*Provider`.
-   This change is related to `NewBatchSpanProcessor` not returning an error which was the only error this function would return. (#1078)
-
-### Removed
-
-- Duplicate, unused API sampler interface. (#999)
-   Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead.
-- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository.
-   This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` as well. (#1027)
-- The `WithSpan` method of the `Tracer` interface.
-   The functionality this method provided was limited compared to what a user can provide themselves.
-   It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043)
-- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions.
-   These were holdovers from an approach prior to the TracerProvider design. They were not used anymore. (#1077)
-- The `oterror` package. (#1026)
-- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032)
-
-### Fixed
-
-- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031)
-- Correct instrumentation version tag in Jaeger exporter. (#1037)
-- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043)
-- Move internally generated protobuf code from the `go.opentelemetry.io/otel` to the OTLP exporter to reduce dependency overhead. (#1050)
-- The `otel-collector` example referenced outdated collector processors. (#1006)
-
-## [0.10.0] - 2020-07-29
-
-This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages.
-
-### Added
-
-- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern.
-    These functions build a new exporter with default SDK options and register the exporter with the `global` package respectively. (#944)
-- Add propagator option for gRPC instrumentation. (#986)
-- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987)
-
-### Changed
-
-- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function.
-   This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944)
-- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`.
-   This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963)
-- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962)
-- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968)
-  - `value.Bool` was replaced with `kv.BoolValue`.
-  - `value.Int64` was replaced with `kv.Int64Value`.
-  - `value.Uint64` was replaced with `kv.Uint64Value`.
-  - `value.Float64` was replaced with `kv.Float64Value`.
-  - `value.Int32` was replaced with `kv.Int32Value`.
-  - `value.Uint32` was replaced with `kv.Uint32Value`.
-  - `value.Float32` was replaced with `kv.Float32Value`.
-  - `value.String` was replaced with `kv.StringValue`.
-  - `value.Int` was replaced with `kv.IntValue`.
-  - `value.Uint` was replaced with `kv.UintValue`.
-  - `value.Array` was replaced with `kv.ArrayValue`.
-- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972)
-- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979)
-- Rename `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980)
-- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985)
-- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989)
-
-### Removed
-
-- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel` which it was synonymous with. (#970)
-
-### Fixed
-
-- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953)
-- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957)
-- Use `global.Handle` for span export errors in the OTLP exporter. (#946)
-- Correct Go language formatting in the README documentation. (#961)
-- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977)
-- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983)
-- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984)
-
-## [0.9.0] - 2020-07-20
-
-### Added
-
-- A new Resource Detector interface is included to allow resources to be automatically detected and included. (#939)
-- A Detector to automatically detect resources from an environment variable. (#939)
-- Github action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938)
-- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`.
-   References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942)
-
-### Changed
-
-- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948)
-
-### Removed
-
-- Removed dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943)
-
-## [0.8.0] - 2020-07-09
-
-### Added
-
-- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject.
-   Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882)
-- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882)
-- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882)
-- Add `peer.service` semantic attribute. (#898)
-- Add database-specific semantic attributes. (#899)
-- Add semantic convention for `faas.coldstart` and `container.id`. (#909)
-- Add http content size semantic conventions. (#905)
-- Include `http.request_content_length` in HTTP request basic attributes. (#905)
-- Add semantic conventions for operating system process resource attribute keys. (#919)
-- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
-
-### Changed
-
-- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879)
-- Use lowercase header names for B3 Multiple Headers. (#881)
-- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding`.
-   This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings.
-   If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882)
-- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header.
-   Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid.
-   This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882)
-- Extend semantic conventions for RPC. (#900)
-- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920)
-  - `"api/standard".FaaSName` -> `FaaSNameKey`
-  - `"api/standard".FaaSID` -> `FaaSIDKey`
-  - `"api/standard".FaaSVersion` -> `FaaSVersionKey`
-  - `"api/standard".FaaSInstance` -> `FaaSInstanceKey`
-
-### Removed
-
-- The `FlagsUnused` trace flag is removed.
-   The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is now used directly instead. (#882)
-- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed.
-   If B3 header keys are needed [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882)
-
-### Fixed
-
-- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881)
-- The B3 propagator now correctly supports sampling only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882)
-- The B3 propagator now propagates the debug flag.
-   This removes the behavior of changing the debug flag into a set sampling bit.
-   Instead, this now follows the B3 specification and omits the `X-B3-Sampling` header. (#882)
-- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampling` header when injecting. (#882)
-- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883)
-- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885)
-- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896)
-- Ensure span status is not set to `Unknown` when no HTTP status code is provided as it is assumed to be `200 OK`. (#908)
-- Ensure `httptrace.clientTracer` closes `http.headers` span. (#912)
-- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
-- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
-- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-collector example to use the v0.5.0 collector. (#915)
-- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
-- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
-- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
-   This is in accordance with OpenTelemetry semantic conventions. (#922)
-- Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923)
-- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925)
-- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926)
-- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930)
-
-## [0.7.0] - 2020-06-26
-
-This release implements the v0.5.0 version of the OpenTelemetry specification.
-
-### Added
-
-- The othttp instrumentation now includes default metrics. (#861)
-- This CHANGELOG file to track all changes in the project going forward.
-- Support for array type attributes. (#798)
-- Apply transitive dependabot go.mod dependency updates as part of a new automatic Github workflow. (#844)
-- Timestamps are now passed to exporters for each export. (#835)
-- Add new `Accumulation` type to metric SDK to transport telemetry from `Accumulator`s to `Processor`s.
-   This replaces the prior `Record` `struct` use for this purpose. (#835)
-- New dependabot integration to automate package upgrades. (#814)
-- `Meter` and `Tracer` implementations accept the instrumentation version as an optional argument.
-   This instrumentation version is passed on to exporters. (#811, #805, #802)
-- The OTLP exporter includes the instrumentation version in telemetry it exports. (#811)
-- Environment variables for Jaeger exporter are supported. (#796)
-- New `aggregation.Kind` in the export metric API. (#808)
-- New example that uses OTLP and the collector. (#790)
-- Handle errors in the span `SetName` during span initialization. (#791)
-- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777)
-- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778)
-- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`.
-   There is also functionality for the user to register their `Handler`, as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778)
-- Options to specify propagators for httptrace and grpctrace instrumentation. (#784)
-- The required `application/json` header for the Zipkin exporter is included in all exports. (#774)
-- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769)
-
-### Changed
-
-- Rename `Integrator` to `Processor` in the metric SDK. (#863)
-- Rename `AggregationSelector` to `AggregatorSelector`. (#859)
-- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858)
-- Rename `simple` integrator to `basic` integrator. (#857)
-- Merge otlp collector examples. (#841)
-- Change the metric SDK to support cumulative, delta, and pass-through exporters directly.
-   With these changes, cumulative and delta specific exporters are able to request the correct kind of aggregation from the SDK. (#840)
-- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812)
-- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
-   All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`.
-   Some of the `Aggregators` used unnecessary locking and that has been cleaned up. (#812)
-- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812)
-- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810)
-- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808)
-- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806)
-- Update error handling for any one off error handlers, replacing, instead, with the `global.Handle` function. (#791)
-- Rename `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779)
-- Makes the argument order to Histogram and DDSketch `New()` consistent. (#781)
-
-### Removed
-
-- `Uint64NumberKind` and related functions from the API. (#864)
-- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process` as they were unused. (#803)
-- `SpanID` is no longer included in parameters for sampling decision to match the OpenTelemetry specification. (#775)
-
-### Fixed
-
-- Upgrade OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866)
-- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824)
-- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867)
-- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853)
-- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854)
-- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2. (#848)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp. (#817)
-- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools. (#828)
-- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus. (#838)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger. (#829)
-- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3. (#815)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin. (#823)
-- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools. (#830)
-- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus. (#822)
-- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin. (#820)
-- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger. (#831)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0. (#836)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger. (#837)
-- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp. (#839)
-- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger. (#843)
-- Set span status from HTTP status code in the othttp instrumentation. (#832)
-- Fixed typo in push controller comment. (#834)
-- The `Aggregator` testing has been updated and cleaned. (#812)
-- `metric.Number(0)` expressions are replaced by `0` where possible. (#812)
-- Fixed `global` `handler_test.go` test failure. (#804)
-- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed. (#766)
-- Fixed OTLP example's accidental early close of exporter. (#807)
-- Ensure zipkin exporter reads and closes response body. (#788)
-- Update instrumentation to use `api/standard` keys instead of custom keys. (#782)
-- Clean up tools and RELEASING documentation. (#762)
-
-## [0.6.0] - 2020-05-21
-
-### Added
-
-- Support for `Resource`s in the prometheus exporter. (#757)
-- New pull controller. (#751)
-- New `UpDownSumObserver` instrument. (#750)
-- OpenTelemetry collector demo. (#711)
-- New `SumObserver` instrument. (#747)
-- New `UpDownCounter` instrument. (#745)
-- New timeout `Option` and configuration function `WithTimeout` to the push controller. (#742)
-- New `api/standards` package to implement semantic conventions and standard key-value generation. (#731)
-
-### Changed
-
-- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761)
-- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758)
-- Change OTLP example to use exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756)
-- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754)
-- The prometheus exporter now uses the new pull controller. (#751)
-- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752)
-- Support use of synchronous instruments in asynchronous callbacks. (#725)
-- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739)
-- Rename `Observer` instrument to `ValueObserver`. (#734)
-- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
-- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
-- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
-
-### Fixed
-
-- Ensure gRPC `ClientStream` override methods do not panic in grpctrace package. (#755)
-- Disable parts of `BatchSpanProcessor` test until a fix is found. (#743)
-- Fix `string` case in `kv` `Infer` function. (#746)
-- Fix panic in grpctrace client interceptors. (#740)
-- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737)
-- Rewrite span batch process queue batching logic. (#719)
-- Remove the push controller named Meter map. (#738)
-- Fix Histogram aggregator initial state (fix #735). (#736)
-- Ensure golang alpine image is running `golang-1.14` for examples. (#733)
-- Added test for grpctrace `UnaryInterceptorClient`. (#695)
-- Rearrange `api/metric` code layout. (#724)
-
-## [0.5.0] - 2020-05-13
-
-### Added
-
-- Batch `Observer` callback support. (#717)
-- Alias `api` types to root package of project. (#696)
-- Create basic `othttp.Transport` for simple client instrumentation. (#678)
-- `SetAttribute(string, interface{})` to the trace API. (#674)
-- Jaeger exporter option that allows the user to specify a custom HTTP client. (#671)
-- `Stringer` and `Infer` methods to `key`s. (#662)
-
-### Changed
-
-- Rename `NewKey` in the `kv` package to just `Key`. (#721)
-- Move `core` and `key` to `kv` package. (#720)
-- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709)
-- Rename SDK `Batcher` to `Integrator` to match draft OpenTelemetry SDK specification. (#710)
-- Rename SDK `Ungrouped` integrator to `simple.Integrator` to match draft OpenTelemetry SDK specification. (#710)
-- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710)
-- Move `Number` from `core` to `api/metric` package. (#706)
-- Move `SpanContext` from `core` to `trace` package. (#692)
-- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681)
-
-### Fixed
-
-- Update tooling to run generators in all submodules. (#705)
-- gRPC interceptor regexp to match methods without a service name. (#683)
-- Use a `const` for padding 64-bit B3 trace IDs. (#701)
-- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700)
-- Left-pad 64-bit B3 trace IDs with zero. (#698)
-- Propagate at least the first W3C tracestate header. (#694)
-- Remove internal `StateLocker` implementation. (#688)
-- Increase instance size CI system uses. (#690)
-- Add a `key` benchmark and use reflection in `key.Infer()`. (#679)
-- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680)
-- Reimplement histogram using mutex instead of `StateLocker`. (#669)
-- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667)
-- Update documentation to not include any references to `WithKeys`. (#672)
-- Correct misspelling. (#668)
-- Fix clobbering of the span context if extraction fails. (#656)
-- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670)
-
-## [0.4.3] - 2020-04-24
-
-### Added
-
-- `Dockerfile` and `docker-compose.yml` to run example code. (#635)
-- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621)
-- New `api/label` package, providing common label set implementation. (#651)
-- Support for JSON marshaling of `Resources`. (#654)
-- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642)
-- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627)
-- `WithSpanFormatter` option to the othttp plugin. (#617)
-- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612)
-- The prometheus exporter now supports exporting histograms. (#601)
-- A `String` method to the `Resource` to return a hashable identifier for a now unique resource. (#613)
-- An `Iter` method to the `Resource` to return an `AttributeIterator`. (#613)
-- An `Equal` method to the `Resource` to test the equivalence of resources. (#613)
-- An iterable structure (`AttributeIterator`) for `Resource` attributes.
-
-### Changed
-
-- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644)
-- Pass `Resources` through the metrics export pipeline. (#659)
-
-### Removed
-
-- `WithKeys` option from the metric API. (#639)
-
-### Fixed
-
-- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658)
-- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653)
-- Use type names for return values in jaeger exporter. (#648)
-- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650)
-- `Checkpoint` only after `Update`; Keep records in the `sync.Map` longer. (#647)
-- Do not cache `reflect.ValueOf()` in metric Labels. (#649)
-- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626)
-- Add error wrapping to the prometheus exporter. (#631)
-- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623)
-- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614)
-- Update `Resource` internal representation to uniquely and reliably identify resources. (#613)
-- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622)
-- Ensure spans created by httptrace client tracer reflect operation structure. (#618)
-- Create a new recorder rather than reuse when multiple observations in same epoch for asynchronous instruments. (#610)
-- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
-
-
-## [0.4.2] - 2020-03-31
-
-### Fixed
-
-- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607)
-- Fix time conversion from internal to OTLP in OTLP exporter. (#606)
-
-## [0.4.1] - 2020-03-31
-
-### Fixed
-
-- Update `tag.sh` to create signed tags. (#604)
-
-## [0.4.0] - 2020-03-30
-
-### Added
-
-- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580)
-- Script to verify examples after a new release. (#579)
-
-### Removed
-
-- The dogstatsd exporter due to lack of support.
-   This additionally removes support for statsd. (#591)
-- `LabelSet` from the metric API.
-   This is replaced by a `[]core.KeyValue` slice. (#595)
-- `Labels` from the metric API's `Meter` interface. (#595)
-
-### Changed
-
-- The metric `export.Labels` became an interface which the SDK implements, and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574)
-- Renamed `internal/metric.Meter` to `MeterImpl`. (#580)
-- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580)
-
-### Fixed
-
-- Corrected missing return in mock span. (#582)
-- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596)
-- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588)
-- Update pre-release script to be compatible between GNU and BSD based systems. (#592)
-- Add a `RecordBatch` benchmark. (#594)
-- Moved span transforms of the OTLP exporter to the internal package. (#593)
-- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569)
-- Removed unneeded allocation on empty labels in OTLP exporter. (#597)
-- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599)
-- Update project documentation godoc.org links to pkg.go.dev. (#602)
-
-## [0.3.0] - 2020-03-21
-
-This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality.
-There is still a possibility of breaking changes.
-
-### Added
-
-- Add `Observer` metric instrument. (#474)
-- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494)
-- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
-- The zipkin trace exporter. (#495)
-- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
-- The `StatusMessage` field was added to the trace `Span`. (#524)
-- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
-- The `Resource` type was added to the SDK. (#528)
-- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
-- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction.
-   Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560)
-- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560)
-- Scripts to better automate the release process. (#576)
-
-### Changed
-
-- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506)
-- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511)
-- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511)
-- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524)
-- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531)
-- Rename metric API `Options` to `Config`. (#541)
-- Rename metric `Counter` aggregator to be `Sum`. (#541)
-- Unify metric options into `Option` from instrument specific options. (#541)
-- The trace API's `TraceProvider` now supports `Resource`s. (#545)
-- Correct error in zipkin module name. (#548)
-- The jaeger trace exporter now supports `Resource`s. (#551)
-- Metric SDK now supports `Resource`s.
-   The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552)
-- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557)
-- The stdout trace exporter now supports `Resource`s. (#558)
-- The metric `Descriptor` is now included at the API instead of the SDK. (#560)
-- Replace `Ordered` with an iterator in `export.Labels`. (#567)
-
-### Removed
-
-- The vendor specific Stackdriver exporter. It is now hosted on 3rd party vendor infrastructure. (#452)
-- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560)
-- `GetDescriptor` from the metric SDK. (#575)
-- The `Gauge` instrument from the metric API. (#537)
-
-### Fixed
-
-- Make histogram aggregator checkpoint consistent. (#438)
-- Update README with import instructions and how to build and test. (#505)
-- The default label encoding was updated to be unique. (#508)
-- Use `NewRoot` in the othttp plugin for public endpoints. (#513)
-- Fix data race in `BatchedSpanProcessor`. (#518)
-- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521)
-- Use a variable-size array to represent ordered labels in maps. (#523)
-- Update the OTLP protobuf and update changed import path. (#532)
-- Use `StateLocker` implementation in `MinMaxSumCount`. (#546)
-- Eliminate goroutine leak in histogram stress test. (#547)
-- Update OTLP exporter with latest protobuf. (#550)
-- Add filters to the othttp plugin. (#556)
-- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565)
-- Encode labels once during checkpoint.
-   The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter.
-   This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572)
-- Run coverage over all packages in `COVERAGE_MOD_DIR`. (#573)
-
-## [0.2.3] - 2020-03-04
-
-### Added
-
-- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473)
-- Configurable push frequency for exporters setup pipeline. (#504)
-
-### Changed
-
-- Rename the `exporter` directory to `exporters`.
-   The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`.
-   This resulted in all subsequent releases not becoming the default latest.
-   A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages.
-   Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags.
-   Consequentially, this action also renames *all* exporter packages. (#502)
-
-### Removed
-
-- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503)
-
-## [0.2.2] - 2020-02-27
-
-### Added
-
-- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467)
-- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467)
-- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467)
-- `Config` and configuring `Option` to the propagator API. (#467)
-- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467)
-- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467)
-- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467)
-- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467)
-- Histogram aggregator. (#433)
-- `DefaultPropagator` function that returns `trace.TraceContext` as the default context propagator. (#456)
-- `AlwaysParentSample` sampler to the trace API. (#455)
-- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
-
-
-### Changed
-
-- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
-- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
-- Move correlation context propagation to correlation package. (#479)
-- Do not default to putting remote span context into links. (#480)
-- Propagators extrac
-- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
-- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
-- Renamed the `export` package to `metric` to match directory structure. (#432)
-- Rename the `api/distributedcontext` package to `api/correlation`. (#444)
-- Rename the `api/propagators` package to `api/propagation`. (#444)
-- Move the propagators from the `propagators` package into the `trace` API package. (#444)
-- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462)
-- Moved all dependencies of tools package to a tools directory. (#466)
-
-### Removed
-
-- Binary propagators. (#467)
-- NOOP propagator. (#467)
-
-### Fixed
-
-- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`. (#492)
-- Fix a possible nil-dereference crash. (#478)
-- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483)
-- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484)
-- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482)
-- Initialize `onError` based on `Config` in prometheus exporter. (#486)
-- Correct module name in prometheus exporter README. (#475)
-- Removed tracer name prefix from span names. (#430)
-- Fix `aggregator_test.go` import package comment. (#431)
-- Improved detail in stdout exporter. (#436)
-- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442)
-- Reorder the Makefile targets within the `precommit` target so we generate files and build the code before linting, which gives much nicer compiler errors about syntax. (#442)
-- Reword function documentation in gRPC plugin. (#446)
-- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441)
-- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if existing instead of appending to it. (#441)
-- Upgraded to Go 1.13 in CI. (#465)
-- Correct opentelemetry.io URL in trace SDK documentation. (#464)
-- Refactored reference counting logic in SDK determination of stale records. (#468)
-- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469)
-
-## [0.2.1.1] - 2020-01-13
-
-### Fixed
-
-- Use stateful batcher on Prometheus exporter, fixing the regression introduced in #395. (#428)
-
-## [0.2.1] - 2020-01-08
-
-### Added
-
-- Global meter forwarding implementation.
-   This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392)
-- Global trace forwarding implementation.
-   This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406)
-- Standardize export pipeline creation in all exporters. (#395)
-- Testing, organization, and comments for 64-bit field alignment. (#418)
-- Script to tag all modules in the project. (#414)
-
-### Changed
-
-- Renamed `propagation` package to `propagators`. (#362)
-- Renamed `B3Propagator` propagator to `B3`. (#362)
-- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362)
-- Renamed `BinaryPropagator` propagator to `Binary`. (#362)
-- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362)
-- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362)
-- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362)
-- Renamed `SpanOption` to `StartOption` in the trace API. (#369)
-- Renamed `StartOptions` to `StartConfig` in the trace API. (#369)
-- Renamed `EndOptions` to `EndConfig` in the trace API. (#369)
-- `Number` now has a pointer receiver for its methods. (#375)
-- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379)
-- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379)
-- Renamed `Message` in Event to `Name` in the trace API. (#389)
-- Prometheus exporter no longer aggregates metrics; instead it only exports them. (#385)
-- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400)
-- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400)
-- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400)
-- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API. (#400)
-- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400)
-- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400)
-- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400)
-- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400)
-- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400)
-- Renamed the `File` option in the stdout exporter to `Writer`. (#404)
-- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case.
-
-### Fixed
-
-- Aggregator import path corrected. (#421)
-- Correct links in README. (#368)
-- The README was updated to match latest code changes in its examples. (#374)
-- Don't capitalize error statements. (#375)
-- Fix ignored errors. (#375)
-- Fix ambiguous variable naming. (#375)
-- Removed unnecessary type casting. (#375)
-- Use named parameters. (#375)
-- Updated release schedule. (#378)
-- Correct http-stackdriver example module name. (#394)
-- Removed the `http.request` span in `httptrace` package. (#397)
-- Add comments in the metrics SDK. (#399)
-- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403)
-- Add documentation of compatible exporters in the README. (#405)
-- Typo fix. (#408)
-- Simplify span check logic in SDK tracer implementation. (#419)
-
-## [0.2.0] - 2019-12-03
-
-### Added
-
-- Unary gRPC tracing example. (#351)
-- Prometheus exporter. (#334)
-- Dogstatsd metrics exporter. (#326)
-
-### Changed
-
-- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352)
-- Rename `GetMeter` to `Meter`. (#357)
-- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
-- Rename `HTTPB3Propagator` to `B3Propagator`. (#355)
-- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355)
-- Move `/global` package to `/api/global`. (#356)
-- Rename `GetTracer` to `Tracer`. (#347)
-
-### Removed
-
-- `SetAttribute` from the `Span` interface in the trace API. (#361)
-- `AddLink` from the `Span` interface in the trace API. (#349)
-- `Link` from the `Span` interface in the trace API. (#349)
-
-### Fixed
-
-- Exclude example directories from coverage report. (#365)
-- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360)
-- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project specified minimum version and this environment variable is not needed for that version of Go. (#359)
-- Run the race checker for all tests. (#354)
-- Redundant commands in the Makefile are removed. (#354)
-- Split the `generate` and `lint` targets of the Makefile. (#354)
-- Renames `circle-ci` target to more generic `ci` in Makefile. (#354)
-- Add example Prometheus binary to gitignore. (#358)
-- Support negative numbers with the `MaxSumCount`. (#335)
-- Resolve race conditions in `push_test.go` identified in #339. (#340)
-- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336)
-- Trace benchmark now tests both `AlwaysSample` and `NeverSample`.
-   Previously it was testing `AlwaysSample` twice. (#325)
-- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test. (#325)
-- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325)
-- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint.
-   This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
-   This was corrected. (#333)
-
-
-## [0.1.2] - 2019-11-18
-
-### Fixed
-
-- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328)
-- Removed unnecessary unslicing of parameters that are already a slice. (#324)
-
-## [0.1.1] - 2019-11-18
-
-This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauge, array, maxsumcount, and ddsketch.
-
-### Added
-
-- Metrics stdout export pipeline. (#265)
-- Array aggregation for raw measure metrics. (#282)
-- The `core.Value` now has a `MarshalJSON` method. (#281)
-
-### Removed
-
-- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314)
-- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292)
-
-### Changed
-
-- Allocation in LabelSet construction to reduce GC overhead. (#318)
-- `trace.WithAttributes` to append values instead of replacing. (#315)
-- Use a formula for tolerance in sampling tests. (#298)
-- Move export types into trace and metric-specific sub-directories. (#289)
-- `SpanKind` back to being based on an `int` type. (#288)
-
-### Fixed
-
-- URL to OpenTelemetry website in README. (#323)
-- Name of othttp default tracer. (#321)
-- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294)
-- CI modules cache to correctly restore/save from/to the cache. (#316)
-- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293)
-- README now reflects the new code structure introduced with these changes. (#291)
-- Make the basic example work. (#279)
-
-## [0.1.0] - 2019-11-04
-
-This is the first release of the OpenTelemetry Go library.
-It contains the API and SDK for tracing and metrics.
-
-### Added
-
-- Initial OpenTelemetry trace and metric API prototypes.
-- Initial OpenTelemetry trace, metric, and export SDK packages.
-- A wireframe bridge to support compatibility with OpenTracing.
-- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup.
-- Exporters for Jaeger, Stackdriver, and stdout.
-- Propagators for binary, B3, and trace-context protocols.
-- Project information and guidelines in the form of a README and CONTRIBUTING.
-- Tools to build the project and a Makefile to automate the process.
-- Apache-2.0 license.
-- CircleCI build CI manifest files.
-- CODEOWNERS file to track owners of this project.
-
-
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.13.0...HEAD
-[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
-[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
-[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
-[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0
-[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0
-[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0
-[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0
-[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0
-[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0
-[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3
-[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2
-[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1
-[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0
-[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0
-[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3
-[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2
-[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1
-[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1
-[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0
-[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
-[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
-[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
deleted file mode 100644
index b2e99fc..0000000
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ /dev/null
@@ -1,17 +0,0 @@
-#####################################################
-#
-# List of approvers for this repository
-#
-#####################################################
-#
-# Learn about membership in OpenTelemetry community:
-#  https://github.com/open-telemetry/community/blob/master/community-membership.md
-#
-#
-# Learn about CODEOWNERS file format:
-#  https://help.github.com/en/articles/about-code-owners
-#
-
-* @jmacd @lizthegrey @MrAlias @Aneurysm9 @evantorrie @XSAM
-
-CODEOWNERS @MrAlias @Aneurysm9
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
deleted file mode 100644
index 85d9a09..0000000
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ /dev/null
@@ -1,373 +0,0 @@
-# Contributing to opentelemetry-go
-
-The Go special interest group (SIG) meets regularly. See the
-OpenTelemetry
-[community](https://github.com/open-telemetry/community#golang-sdk)
-repo for information on this and other language SIGs.
-
-See the [public meeting
-notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
-for a summary description of past meetings. To request edit access,
-join the meeting or get in touch on
-[Gitter](https://gitter.im/open-telemetry/opentelemetry-go).
-
-## Development
-
-You can view and edit the source code by cloning this repository:
-
-```bash
-git clone https://github.com/open-telemetry/opentelemetry-go.git
-```
-
-Run `make test` to run the tests instead of `go test`. 
-
-There are some generated files checked into the repo. To make sure
-that the generated files are up-to-date, run `make` (or `make
-precommit` - the `precommit` target is the default).
-
-The `precommit` target also fixes the formatting of the code and
-checks the status of the go module files.
-
-If after running `make precommit` the output of `git status` contains
-`nothing to commit, working tree clean` then it means that everything
-is up-to-date and properly formatted.
-
-## Pull Requests
-
-### How to Send Pull Requests
-
-Everyone is welcome to contribute code to `opentelemetry-go` via
-GitHub pull requests (PRs).
-
-To create a new PR, fork the project in GitHub and clone the upstream
-repo:
-
-```sh
-$ go get -d go.opentelemetry.io/otel
-```
-
-(This may print some warning about "build constraints exclude all Go
-files"; just ignore it.)
-
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
-can alternatively use `git` directly with:
-
-```sh
-$ git clone https://github.com/open-telemetry/opentelemetry-go
-```
-
-(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
-that name is a kind of a redirector to GitHub that `go get` can
-understand, but `git` does not.)
-
-This would put the project in the `opentelemetry-go` directory in the
-current working directory.
-
-Enter the newly created directory and add your fork as a new remote:
-
-```sh
-$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
-```
-
-Check out a new branch, make modifications, run linters and tests, update
-`CHANGELOG.md`, and push the branch to your fork:
-
-```sh
-$ git checkout -b <YOUR_BRANCH_NAME>
-# edit files
-# update changelog
-$ make precommit
-$ git add -p
-$ git commit
-$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
-```
-
-Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
-request ID to the entry you added to `CHANGELOG.md`.
-
-### How to Receive Comments
-
-* If the PR is not ready for review, please put `[WIP]` in the title,
-  tag it as `work-in-progress`, or mark it as
-  [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
-* Make sure the CLA is signed and CI is passing.
-
-### How to Get PRs Merged
-
-A PR is considered to be **ready to merge** when:
-
-* It has received two approvals from Collaborators/Maintainers (at
-  different companies). This is not enforced through technical means
-  and a PR may be **ready to merge** with a single approval if the change
-  and its approach have been discussed and consensus reached.
-* Major feedback is resolved.
-* It has been open for review for at least one working day. This gives
-  people reasonable time to review.
-* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
-  one day and may be merged with a single Maintainer's approval.
-* `CHANGELOG.md` has been updated to reflect what has been
-  added, changed, removed, or fixed.
-* An urgent fix can take exception as long as it has been actively
-  communicated.
-
-Any Maintainer can merge the PR once it is **ready to merge**.
-
-## Design Choices
-
-As with other OpenTelemetry clients, opentelemetry-go follows the
-[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
-
-It's especially valuable to read through the [library
-guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md).
-
-### Focus on Capabilities, Not Structure Compliance
-
-OpenTelemetry is an evolving specification, one where the desires and
-use cases are clear, but the methods to satisfy those use cases are
-not.
-
-As such, contributions should provide functionality and behavior that
-conforms to the specification, but the interface and structure are
-flexible.
-
-It is preferable to have contributions follow the idioms of the
-language rather than conform to specific API names or argument
-patterns in the spec.
-
-For a deeper discussion, see:
-https://github.com/open-telemetry/opentelemetry-specification/issues/165
-
-## Style Guide
-
-One of the primary goals of this project is that it is actually used by
-developers. With this goal in mind the project strives to build
-user-friendly and idiomatic Go code adhering to the Go community's best
-practices.
-
-For a non-comprehensive but foundational overview of these best practices
-the [Effective Go](https://golang.org/doc/effective_go.html) documentation
-is an excellent starting place.
-
-As a convenience for developers building this project, the `make precommit`
-target will format, lint, validate, and in some cases fix the changes you
-plan to submit. This check will need to pass for your changes to be merged.
-
-In addition to idiomatic Go, the project has adopted certain standards for
-implementations of common patterns. These standards should be followed as a
-default, and if they are not followed, documentation explaining the reasons
-why must be included.
-
-### Configuration
-
-When creating an instantiation function for a complex `struct` it is useful
-to allow a variable number of options to be applied. However, the strong type
-system of Go restricts the function design options. There are a few ways to
-solve this problem, but we have landed on the following design.
-
-#### `config`
-
-Configuration should be held in a `struct` named `config`, or prefixed with
-the specific type name this configuration applies to if there are multiple
-`config`s in the package. This `struct` must contain configuration options.
-
-```go
-// config contains configuration options for a thing.
-type config struct {
-    // options ...
-}
-```
-
-In general the `config` `struct` will not need to be used externally to the
-package and should be unexported. If, however, it is expected that the user
-will likely want to build custom options for the configuration, the `config`
-should be exported. Please include in the documentation for the `config`
-how the user can extend the configuration.
-
-It is important that `config`s are not shared across package boundaries,
-meaning a `config` from one package should not be directly used by another.
-
-Optionally, it is common to include a `newConfig` function (with the same
-naming scheme). This function sets any defaults and loops over all options
-to create a configured `config`.
-
-```go
-// newConfig returns an appropriately configured config.
-func newConfig(options []Option) config {
-    // Set default values for config.
-    config := config{/* […] */}
-    for _, option := range options {
-        option.Apply(&config)
-    }
-    // Perform any validation here.
-    return config
-}
-```
-
-If validation of the `config` options is also performed, this function can
-also return an error, which is expected to be handled by the instantiation
-function or propagated to the user.
-
-Given the design goal of not having the user need to work with the `config`,
-the `newConfig` function should also be unexported.
-
-#### `Option`
-
-To set the value of the options a `config` contains, a corresponding
-`Option` interface type should be used.
-
-```go
-type Option interface {
-  Apply(*config)
-}
-```
-
-The name of the interface should be prefixed in the same way the
-corresponding `config` is (if at all).
-
-#### Options
-
-All user configurable options for a `config` must have a related unexported
-implementation of the `Option` interface and an exported configuration
-function that wraps this implementation.
-
-The wrapping function name should be prefixed with `With*` (or, in the
-special case of a boolean option, `Without*`) and should have the following
-function signature.
-
-```go
-func With*(…) Option { … }
-```
-
-##### `bool` Options
-
-```go
-type defaultFalseOption bool
-
-func (o defaultFalseOption) Apply(c *config) {
-    c.Bool = bool(o)
-}
-
-// WithOption sets a T* to have the Bool option included.
-func WithOption() Option {
-    return defaultFalseOption(true)
-}
-```
-
-```go
-type defaultTrueOption bool
-
-func (o defaultTrueOption) Apply(c *config) {
-    c.Bool = bool(o)
-}
-
-// WithoutOption sets a T* to have the Bool option excluded.
-func WithoutOption() Option {
-    return defaultTrueOption(false)
-}
-```
-
-##### Declared Type Options
-
-```go
-type myTypeOption struct {
-    MyType MyType
-}
-
-func (o myTypeOption) Apply(c *config) {
-    c.MyType = o.MyType
-}
-
-// WithMyType sets T* to include MyType.
-func WithMyType(t MyType) Option {
-    return myTypeOption{t}
-}
-```
-
-#### Instantiation
-
-Use this configuration pattern to configure instantiation with a `New*`
-function.
-
-```go
-func NewT*(options ...Option) T* {…}
-```
-
-Any required parameters can be declared before the variadic `options`.
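-
-As a hedged sketch of how this fits together (the `Thing` type here is
-hypothetical, used only to illustrate the pattern):
-
-```go
-// Thing is a hypothetical type used only to illustrate instantiation.
-type Thing struct {
-	name   string
-	config config
-}
-
-// NewThing builds a Thing from a required name parameter followed by a
-// variadic set of options, resolving the options with newConfig.
-func NewThing(name string, options ...Option) *Thing {
-	return &Thing{name: name, config: newConfig(options)}
-}
-```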
-
-#### Dealing with Overlap
-
-Sometimes there are multiple complex `struct`s that share common
-configuration and also have distinct configuration. To avoid repeated
-portions of `config`s, a common `config` can be used with the union of
-options being handled with the `Option` interface.
-
-For example:
-
-```go
-// config holds options for all animals.
-type config struct {
-	Weight      float64
-	Color       string
-	MaxAltitude float64
-}
-
-// DogOption applies Dog specific options.
-type DogOption interface {
-	ApplyDog(*config)
-}
-
-// BirdOption applies Bird specific options.
-type BirdOption interface {
-	ApplyBird(*config)
-}
-
-// Option applies options for all animals.
-type Option interface {
-	BirdOption
-	DogOption
-}
-
-type weightOption float64
-func (o weightOption) ApplyDog(c *config)  { c.Weight = float64(o) }
-func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
-func WithWeight(w float64) Option          { return weightOption(w) }
-
-type furColorOption string
-func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
-func WithFurColor(c string) DogOption       { return furColorOption(c) }
-
-type maxAltitudeOption float64
-func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
-func WithMaxAltitude(a float64) BirdOption      { return maxAltitudeOption(a) }
-
-func NewDog(name string, o ...DogOption) Dog    {…}
-func NewBird(name string, o ...BirdOption) Bird {…}
-```
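-
-Callers can then mix the shared and type-specific options; for instance,
-using the sketch above:
-
-```go
-dog := NewDog("rex", WithWeight(20), WithFurColor("brown"))
-bird := NewBird("robin", WithWeight(0.1), WithMaxAltitude(1000))
-```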
-
-### Interface Type
-
-To allow other developers to better comprehend the code, it is important
-to ensure it is sufficiently documented. One simple measure that contributes
-to this aim is self-documenting code, achieved by naming method parameters.
-Therefore,
-where appropriate, methods of every exported interface type should have
-their parameters appropriately named.
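-
-For example, a hypothetical exported interface (illustrative only, not from
-this repository) would read:
-
-```go
-// Store is a hypothetical example. Both parameters of Set are named, so
-// the method's contract is clear without extra documentation.
-type Store interface {
-	Set(key string, value []byte) error
-}
-```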
-
-## Approvers and Maintainers
-
-Approvers:
-
-- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
-- [Evan Torrie](https://github.com/evantorrie), Verizon Media
-- [Josh MacDonald](https://github.com/jmacd), LightStep
-- [Sam Xie](https://github.com/XSAM)
-
-Maintainers:
-
-- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
-- [Tyler Yahn](https://github.com/MrAlias), New Relic
-
-### Become an Approver or a Maintainer
-
-See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/master/community-membership.md).
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
deleted file mode 100644
index 85506a3..0000000
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ /dev/null
@@ -1,177 +0,0 @@
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
-TOOLS_MOD_DIR := ./internal/tools
-
-# All source code and documents. Used in spell check.
-ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
-# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
-ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort))
-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
-
-# Mac OS Catalina 10.15.x doesn't support 386, hence skip the 386 test
-SKIP_386_TEST = false
-UNAME_S := $(shell uname -s)
-ifeq ($(UNAME_S),Darwin)
-	SW_VERS := $(shell sw_vers -productVersion)
-	ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
-		SKIP_386_TEST = true
-	endif
-endif
-
-GOTEST_MIN = go test -timeout 30s
-GOTEST = $(GOTEST_MIN) -race
-GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
-
-.DEFAULT_GOAL := precommit
-
-.PHONY: precommit
-
-TOOLS_DIR := $(abspath ./.tools)
-
-$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
-	cd $(TOOLS_MOD_DIR) && \
-	go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
-
-$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
-	cd $(TOOLS_MOD_DIR) && \
-	go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
-
-$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
-	cd $(TOOLS_MOD_DIR) && \
-	go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
-
-$(TOOLS_DIR)/gojq: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
-	cd $(TOOLS_MOD_DIR) && \
-	go build -o $(TOOLS_DIR)/gojq github.com/itchyny/gojq/cmd/gojq
-
-precommit: dependabot-check license-check generate build lint examples test-benchmarks test
-
-.PHONY: test-with-coverage
-test-with-coverage:
-	set -e; \
-	printf "" > coverage.txt; \
-	for dir in $(ALL_COVERAGE_MOD_DIRS); do \
-	  echo "go test ./... + coverage in $${dir}"; \
-	  (cd "$${dir}" && \
-	 	$(GOTEST_WITH_COVERAGE) ./... && \
-		go tool cover -html=coverage.out -o coverage.html); \
-      [ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
-	done; \
-	sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
-
-
-.PHONY: ci
-ci: precommit check-clean-work-tree test-with-coverage test-386
-
-.PHONY: check-clean-work-tree
-check-clean-work-tree:
-	@if ! git diff --quiet; then \
-	  echo; \
-	  echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
-	  echo; \
-	  git status; \
-	  exit 1; \
-	fi
-
-.PHONY: build
-build:
-	# TODO: Fix this on windows.
-	set -e; for dir in $(ALL_GO_MOD_DIRS); do \
-	  echo "compiling all packages in $${dir}"; \
-	  (cd "$${dir}" && \
-	    go build ./... && \
-	    go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
-	done
-
-.PHONY: test
-test:
-	set -e; for dir in $(ALL_GO_MOD_DIRS); do \
-	  echo "go test ./... + race in $${dir}"; \
-	  (cd "$${dir}" && \
-	    $(GOTEST) ./...); \
-	done
-
-.PHONY: test-386
-test-386:
-	if [ $(SKIP_386_TEST) = true ] ; then \
-	  echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
-	else \
-	  set -e; for dir in $(ALL_GO_MOD_DIRS); do \
-	    echo "go test ./... GOARCH 386 in $${dir}"; \
-	    (cd "$${dir}" && \
-	      GOARCH=386 $(GOTEST_MIN) ./...); \
-	  done; \
-	fi
-
-.PHONY: examples
-examples:
-	@set -e; for ex in $(EXAMPLES); do \
-	  echo "Building $${ex}"; \
-	  (cd "$${ex}" && \
-	   go build .); \
-	done
-
-.PHONY: test-benchmarks
-test-benchmarks:
-	@set -e; for dir in $(ALL_GO_MOD_DIRS); do \
-	  echo "test benchmarks in $${dir}"; \
-	  (cd "$${dir}" && go test -test.benchtime=1ms -run=NONE -bench=. ./...) > /dev/null; \
-	done
-
-.PHONY: lint
-lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
-	set -e; for dir in $(ALL_GO_MOD_DIRS); do \
-	  echo "golangci-lint in $${dir}"; \
-	  (cd "$${dir}" && \
-	    $(TOOLS_DIR)/golangci-lint run --fix && \
-	    $(TOOLS_DIR)/golangci-lint run); \
-	done
-	$(TOOLS_DIR)/misspell -w $(ALL_DOCS)
-	set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
-	  echo "go mod tidy in $${dir}"; \
-	  (cd "$${dir}" && \
-	    go mod tidy); \
-	done
-
-generate: $(TOOLS_DIR)/stringer
-	set -e; for dir in $(ALL_GO_MOD_DIRS); do \
-	  echo "running generators in $${dir}"; \
-	  (cd "$${dir}" && \
-	    PATH="$(TOOLS_DIR):$${PATH}" go generate ./...); \
-	done
-
-.PHONY: license-check
-license-check:
-	@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
-	           awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
-	   done); \
-	   if [ -n "$${licRes}" ]; then \
-	           echo "license header checking failed:"; echo "$${licRes}"; \
-	           exit 1; \
-	   fi
-
-.PHONY: dependabot-check
-dependabot-check:
-	@result=$$( \
-		for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
-			do grep -q "$$f" .github/dependabot.yml \
-			|| echo "$$f"; \
-		done; \
-	); \
-	if [ -n "$$result" ]; then \
-		echo "missing go.mod dependabot check:"; echo "$$result"; \
-		exit 1; \
-	fi
diff --git a/vendor/go.opentelemetry.io/otel/Makefile.proto b/vendor/go.opentelemetry.io/otel/Makefile.proto
deleted file mode 100644
index 417c3b3..0000000
--- a/vendor/go.opentelemetry.io/otel/Makefile.proto
+++ /dev/null
@@ -1,72 +0,0 @@
-#   -*- mode: makefile; -*-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This Makefile.proto has rules to generate *.pb.go files in
-# `exporters/otlp/internal/opentelemetry-proto-gen` from the .proto files in
-# `exporters/otlp/internal/opentelemetry-proto` using protoc with a go plugin.
-#
-# The protoc binary and other tools are sourced from a docker image
-# `PROTOC_IMAGE`.
-#
-# Prereqs: The archiving utility `pax` is installed.
-
-PROTOC_IMAGE          := namely/protoc-all:1.29_2
-PROTOBUF_VERSION      := v1
-OTEL_PROTO_SUBMODULE  := exporters/otlp/internal/opentelemetry-proto
-PROTOBUF_GEN_DIR      := exporters/otlp/internal/opentelemetry-proto-gen
-PROTOBUF_TEMP_DIR     := gen/pb-go
-PROTO_SOURCE_DIR      := gen/proto
-SUBMODULE_PROTO_FILES := $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/*/$(PROTOBUF_VERSION)/*.proto \
-                           $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/collector/*/$(PROTOBUF_VERSION)/*.proto)
-SOURCE_PROTO_FILES    := $(subst $(OTEL_PROTO_SUBMODULE),$(PROTO_SOURCE_DIR),$(SUBMODULE_PROTO_FILES))
-
-default: protobuf
-
-.PHONY: protobuf protobuf-source gen-protobuf copy-protobufs
-protobuf: protobuf-source gen-protobuf copy-protobufs
-
-protobuf-source: $(SOURCE_PROTO_FILES) | $(PROTO_SOURCE_DIR)/
-
-# Changes go_package in .proto file to point to repo-local location
-define exec-replace-pkgname
-sed  's,go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go,go_package = "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen,' < $(1) > $(2)
-
-endef
-
-# replace opentelemetry-proto package name by go.opentelemetry.io/otel specific version
-$(SOURCE_PROTO_FILES): $(PROTO_SOURCE_DIR)/%.proto: $(OTEL_PROTO_SUBMODULE)/%.proto
-	@mkdir -p $(@D)
-	$(call exec-replace-pkgname,$<,$@)
-
-# Command to run protoc using docker image
-define exec-protoc-all
-docker run -v `pwd`:/defs $(PROTOC_IMAGE) $(1)
-
-endef
-
-gen-protobuf: $(SOURCE_PROTO_FILES) | $(PROTOBUF_GEN_DIR)/
-	$(foreach file,$(subst ${PROTO_SOURCE_DIR}/,,$(SOURCE_PROTO_FILES)),$(call exec-protoc-all, -i $(PROTO_SOURCE_DIR) -f ${file} -l gogo -o ${PROTOBUF_TEMP_DIR}))
-
-# requires `pax` to be installed, as it has consistent options for both BSD (Darwin) and Linux
-copy-protobufs: | $(PROTOBUF_GEN_DIR)/
-	find ./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR) -type f -print0 | \
-      pax -0 -s ',^./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR),,' -rw ./$(PROTOBUF_GEN_DIR)
-
-$(PROTO_SOURCE_DIR)/ $(PROTOBUF_GEN_DIR)/:
-	mkdir -p $@
-
-.PHONY: clean
-clean:
-	rm -rf ./gen
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
deleted file mode 100644
index e45f05e..0000000
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ /dev/null
@@ -1,57 +0,0 @@
-# OpenTelemetry-Go
-
-[![Circle CI](https://circleci.com/gh/open-telemetry/opentelemetry-go.svg?style=svg)](https://circleci.com/gh/open-telemetry/opentelemetry-go)
-[![Docs](https://godoc.org/go.opentelemetry.io/otel?status.svg)](https://pkg.go.dev/go.opentelemetry.io/otel)
-[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
-[![Gitter](https://badges.gitter.im/open-telemetry/opentelemetry-go.svg)](https://gitter.im/open-telemetry/opentelemetry-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
-
-The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
-
-## Getting Started
-
-OpenTelemetry's goal is to provide a single set of APIs to capture distributed
-traces and metrics from your application and send them to an observability
-platform. This project allows you to do just that for applications written in
-Go. There are two steps to this process: instrument your application, and
-configure an exporter.
-
-### Instrumentation
-
-To start capturing distributed traces and metric events from your application
-it first needs to be instrumented. The easiest way to do this is by using an
-instrumentation library for your code. Be sure to check out [the officially
-supported instrumentation
-libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation).
-
-If you need to extend the telemetry an instrumentation library provides or want
-to build your own instrumentation for your application directly, you will need
-to use the
-[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api)
-package. The included [examples](./example/) are a good way to see some
-practical uses of this process.
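-
-As a minimal sketch, assuming the v0.x `api/global` package layout of this
-release, manual instrumentation looks roughly like:
-
-```go
-package main
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/api/global"
-)
-
-func main() {
-	// Named tracers come from the global provider once an SDK is installed.
-	tracer := global.Tracer("example.com/app")
-	ctx, span := tracer.Start(context.Background(), "operation")
-	defer span.End()
-	_ = ctx // propagate ctx so child spans nest under "operation"
-}
-```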
-
-### Export
-
-Now that your application is instrumented to collect telemetry, it needs an
-export pipeline to send that telemetry to an observability platform.
-
-You can find officially supported exporters [here](./exporters/) and in the
-companion [contrib
-repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/exporters/metric).
-Additionally, there are many vendor specific or 3rd party exporters for
-OpenTelemetry. These exporters are broken down by
-[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
-and
-[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
-support.
-
-## Project Status
-
-[Project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
-and [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones)
-can be found at the respective links. We try to keep these accurate; they
-should be the best place to go for answers on project status.
-
-## Contributing
-
-See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
deleted file mode 100644
index 0fba7f1..0000000
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Release Process
-
-## Pre-Release
-
-Update go.mod for submodules to depend on the new release, which will happen in the next step.
-
-1. Run the pre-release script. It creates a branch `pre_release_<new tag>` that will contain all release changes.
-
-    ```
-    ./pre_release.sh -t <new tag>
-    ```
-
-2. Verify the changes.
-
-    ```
-    git diff master
-    ```
-
-    This should have changed the version for all modules to be `<new tag>`.
-
-3. Update the [Changelog](./CHANGELOG.md).
-   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
-       To verify this, you can look directly at the commits since the `<last tag>`.
-
-       ```
-       git --no-pager log --pretty=oneline "<last tag>..HEAD"
-       ```
-
-   - Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
-   - Update all the appropriate links at the bottom.
-
-4. Push the changes to upstream and create a Pull Request on GitHub.
-    Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
-
-
-## Tag
-
-Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
-
-***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
-Failure to do so will leave things in a broken state.
-
-***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
-It is critical you make sure the version you push upstream is correct.
-[Failure to do so will lead to minor emergencies and problems that are tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
-
-1. Run the tag.sh script using the `<commit-hash>` of the commit on the master branch for the merged Pull Request.
-
-    ```
-    ./tag.sh <new tag> <commit-hash>
-    ```
-
-2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
-    Make sure you push all sub-modules as well.
-
-    ```
-    git push upstream <new tag>
-    git push upstream <submodules-path/new tag>
-    ...
-    ```
-
-## Release
-
-Finally create a Release for the new `<new tag>` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
-Additionally, the `tag.sh` script generates commit logs since the last release which can be used to supplement the release notes.
-
-## Verify Examples
-
-After releasing, verify that examples build outside of the repository.
-
-```
-./verify_examples.sh
-```
-
-The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
-This ensures they build with the published release, not the local copy.
-
-## Contrib Repository
-
-Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/master/RELEASING.md) that uses this release.
diff --git a/vendor/go.opentelemetry.io/otel/api/global/doc.go b/vendor/go.opentelemetry.io/otel/api/global/doc.go
deleted file mode 100644
index fce69e9..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package global provides global providers, propagators and more.
-package global // import "go.opentelemetry.io/otel/api/global"
diff --git a/vendor/go.opentelemetry.io/otel/api/global/handler.go b/vendor/go.opentelemetry.io/otel/api/global/handler.go
deleted file mode 100644
index 83f3e52..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/handler.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
-	"log"
-	"os"
-	"sync"
-	"sync/atomic"
-
-	"go.opentelemetry.io/otel"
-)
-
-var (
-	// globalErrorHandler provides an ErrorHandler that can be used
-	// throughout an OpenTelemetry instrumented project. When a
-	// user-specified ErrorHandler is registered (`SetErrorHandler`), all
-	// calls to `Handle` will be delegated to the registered ErrorHandler.
-	globalErrorHandler = &loggingErrorHandler{
-		l: log.New(os.Stderr, "", log.LstdFlags),
-	}
-
-	// delegateErrorHandlerOnce ensures that a user provided ErrorHandler is
-	// only ever registered once.
-	delegateErrorHandlerOnce sync.Once
-
-	// Compile-time check that loggingErrorHandler implements ErrorHandler.
-	_ otel.ErrorHandler = (*loggingErrorHandler)(nil)
-)
-
-// loggingErrorHandler logs all errors to STDERR.
-type loggingErrorHandler struct {
-	delegate atomic.Value
-
-	l *log.Logger
-}
-
-// setDelegate sets the ErrorHandler delegate if one is not already set.
-func (h *loggingErrorHandler) setDelegate(d otel.ErrorHandler) {
-	if h.delegate.Load() != nil {
-		// Delegate already registered
-		return
-	}
-	h.delegate.Store(d)
-}
-
-// Handle implements otel.ErrorHandler.
-func (h *loggingErrorHandler) Handle(err error) {
-	if d := h.delegate.Load(); d != nil {
-		d.(otel.ErrorHandler).Handle(err)
-		return
-	}
-	h.l.Print(err)
-}
-
-// ErrorHandler returns the global ErrorHandler instance. If no ErrorHandler
-// instance has been set (`SetErrorHandler`), the default ErrorHandler which
-// logs errors to STDERR is returned.
-func ErrorHandler() otel.ErrorHandler {
-	return globalErrorHandler
-}
-
-// SetErrorHandler sets the global ErrorHandler to be h.
-func SetErrorHandler(h otel.ErrorHandler) {
-	delegateErrorHandlerOnce.Do(func() {
-		current := ErrorHandler()
-		if current == h {
-			return
-		}
-		if internalHandler, ok := current.(*loggingErrorHandler); ok {
-			internalHandler.setDelegate(h)
-		}
-	})
-}
-
-// Handle is a convenience function for ErrorHandler().Handle(err).
-func Handle(err error) {
-	ErrorHandler().Handle(err)
-}
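The delegation flow above is easiest to see from the caller's side. Below is a minimal usage sketch, assuming the otel version these files belong to; `customHandler` is a hypothetical `otel.ErrorHandler` implementation, not part of the package:

```go
package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/api/global"
)

// customHandler is a hypothetical otel.ErrorHandler implementation.
type customHandler struct{}

// Handle satisfies otel.ErrorHandler.
func (customHandler) Handle(err error) {
	fmt.Println("otel error:", err)
}

func main() {
	// Before registration, Handle logs to STDERR via the default
	// loggingErrorHandler.
	global.Handle(errors.New("goes to the default STDERR logger"))

	// Registers the delegate once; delegateErrorHandlerOnce makes any
	// later SetErrorHandler call a no-op.
	global.SetErrorHandler(customHandler{})

	// Now delegated to customHandler.
	global.Handle(errors.New("goes to customHandler"))
}
```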
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go b/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go
deleted file mode 100644
index b45f4c9..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
-	"context"
-	"sync"
-	"sync/atomic"
-	"unsafe"
-
-	"go.opentelemetry.io/otel/api/metric"
-	"go.opentelemetry.io/otel/api/metric/registry"
-	"go.opentelemetry.io/otel/label"
-)
-
-// This file contains the forwarding implementation of MeterProvider used as
-// the default global instance.  Metric events using instruments provided by
-// this implementation are no-ops until the first Meter implementation is set
-// as the global provider.
-//
-// The implementation here uses Mutexes to maintain a list of active Meters in
-// the MeterProvider and Instruments in each Meter, under the assumption that
-// these interfaces are not performance-critical.
-//
-// We have the invariant that setDelegate() will be called before a new
-// MeterProvider implementation is registered as the global provider.  Mutexes
-// in the MeterProvider and Meters ensure that each instrument has a delegate
-// before the global provider is set.
-//
-// Bound instrument operations are implemented by delegating to the
-// instrument after it is registered, with a sync.Once initializer to
-// protect against races with Release().
-//
-// Metric uniqueness checking is implemented by calling the exported
-// methods of the api/metric/registry package.
-
-type meterKey struct {
-	Name, Version string
-}
-
-type meterProvider struct {
-	delegate metric.MeterProvider
-
-	// lock protects `delegate` and `meters`.
-	lock sync.Mutex
-
-	// meters maintains a unique entry for every named Meter
-	// that has been registered through the global instance.
-	meters map[meterKey]*meterEntry
-}
-
-type meterImpl struct {
-	delegate unsafe.Pointer // (*metric.MeterImpl)
-
-	lock       sync.Mutex
-	syncInsts  []*syncImpl
-	asyncInsts []*asyncImpl
-}
-
-type meterEntry struct {
-	unique metric.MeterImpl
-	impl   meterImpl
-}
-
-type instrument struct {
-	descriptor metric.Descriptor
-}
-
-type syncImpl struct {
-	delegate unsafe.Pointer // (*metric.SyncImpl)
-
-	instrument
-}
-
-type asyncImpl struct {
-	delegate unsafe.Pointer // (*metric.AsyncImpl)
-
-	instrument
-
-	runner metric.AsyncRunner
-}
-
-// SyncImpler is implemented by all of the sync metric
-// instruments.
-type SyncImpler interface {
-	SyncImpl() metric.SyncImpl
-}
-
-// AsyncImpler is implemented by all of the async
-// metric instruments.
-type AsyncImpler interface {
-	AsyncImpl() metric.AsyncImpl
-}
-
-type syncHandle struct {
-	delegate unsafe.Pointer // (*metric.HandleImpl)
-
-	inst   *syncImpl
-	labels []label.KeyValue
-
-	initialize sync.Once
-}
-
-var _ metric.MeterProvider = &meterProvider{}
-var _ metric.MeterImpl = &meterImpl{}
-var _ metric.InstrumentImpl = &syncImpl{}
-var _ metric.BoundSyncImpl = &syncHandle{}
-var _ metric.AsyncImpl = &asyncImpl{}
-
-func (inst *instrument) Descriptor() metric.Descriptor {
-	return inst.descriptor
-}
-
-// MeterProvider interface and delegation
-
-func newMeterProvider() *meterProvider {
-	return &meterProvider{
-		meters: map[meterKey]*meterEntry{},
-	}
-}
-
-func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	p.delegate = provider
-	for key, entry := range p.meters {
-		entry.impl.setDelegate(key.Name, key.Version, provider)
-	}
-	p.meters = nil
-}
-
-func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
-	p.lock.Lock()
-	defer p.lock.Unlock()
-
-	if p.delegate != nil {
-		return p.delegate.Meter(instrumentationName, opts...)
-	}
-
-	key := meterKey{
-		Name:    instrumentationName,
-		Version: metric.NewMeterConfig(opts...).InstrumentationVersion,
-	}
-	entry, ok := p.meters[key]
-	if !ok {
-		entry = &meterEntry{}
-		entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
-		p.meters[key] = entry
-
-	}
-	return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
-}
-
-// Meter interface and delegation
-
-func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-
-	d := new(metric.MeterImpl)
-	*d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
-	m.delegate = unsafe.Pointer(d)
-
-	for _, inst := range m.syncInsts {
-		inst.setDelegate(*d)
-	}
-	m.syncInsts = nil
-	for _, obs := range m.asyncInsts {
-		obs.setDelegate(*d)
-	}
-	m.asyncInsts = nil
-}
-
-func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
-	m.lock.Lock()
-	defer m.lock.Unlock()
-
-	if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
-		return (*meterPtr).NewSyncInstrument(desc)
-	}
-
-	inst := &syncImpl{
-		instrument: instrument{
-			descriptor: desc,
-		},
-	}
-	m.syncInsts = append(m.syncInsts, inst)
-	return inst, nil
-}
-
-// Synchronous delegation
-
-func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
-	implPtr := new(metric.SyncImpl)
-
-	var err error
-	*implPtr, err = d.NewSyncInstrument(inst.descriptor)
-
-	if err != nil {
-		// TODO: There is no standard way to deliver this error to the user.
-		// See https://github.com/open-telemetry/opentelemetry-go/issues/514
-		// Note that the default SDK will not generate any errors yet; this is
-		// only for added safety.
-		panic(err)
-	}
-
-	atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
-}
-
-func (inst *syncImpl) Implementation() interface{} {
-	if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
-		return (*implPtr).Implementation()
-	}
-	return inst
-}
-
-func (inst *syncImpl) Bind(labels []label.KeyValue) metric.BoundSyncImpl {
-	if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
-		return (*implPtr).Bind(labels)
-	}
-	return &syncHandle{
-		inst:   inst,
-		labels: labels,
-	}
-}
-
-func (bound *syncHandle) Unbind() {
-	bound.initialize.Do(func() {})
-
-	implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
-
-	if implPtr == nil {
-		return
-	}
-
-	(*implPtr).Unbind()
-}
-
-// Async delegation
-
-func (m *meterImpl) NewAsyncInstrument(
-	desc metric.Descriptor,
-	runner metric.AsyncRunner,
-) (metric.AsyncImpl, error) {
-
-	m.lock.Lock()
-	defer m.lock.Unlock()
-
-	if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
-		return (*meterPtr).NewAsyncInstrument(desc, runner)
-	}
-
-	inst := &asyncImpl{
-		instrument: instrument{
-			descriptor: desc,
-		},
-		runner: runner,
-	}
-	m.asyncInsts = append(m.asyncInsts, inst)
-	return inst, nil
-}
-
-func (obs *asyncImpl) Implementation() interface{} {
-	if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
-		return (*implPtr).Implementation()
-	}
-	return obs
-}
-
-func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
-	implPtr := new(metric.AsyncImpl)
-
-	var err error
-	*implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
-
-	if err != nil {
-		// TODO: There is no standard way to deliver this error to the user.
-		// See https://github.com/open-telemetry/opentelemetry-go/issues/514
-		// Note that the default SDK will not generate any errors yet; this is
-		// only for added safety.
-		panic(err)
-	}
-
-	atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
-}
-
-// Metric updates
-
-func (m *meterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, measurements ...metric.Measurement) {
-	if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
-		(*delegatePtr).RecordBatch(ctx, labels, measurements...)
-	}
-}
-
-func (inst *syncImpl) RecordOne(ctx context.Context, number metric.Number, labels []label.KeyValue) {
-	if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
-		(*instPtr).RecordOne(ctx, number, labels)
-	}
-}
-
-// Bound instrument initialization
-
-func (bound *syncHandle) RecordOne(ctx context.Context, number metric.Number) {
-	instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
-	if instPtr == nil {
-		return
-	}
-	var implPtr *metric.BoundSyncImpl
-	bound.initialize.Do(func() {
-		implPtr = new(metric.BoundSyncImpl)
-		*implPtr = (*instPtr).Bind(bound.labels)
-		atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
-	})
-	if implPtr == nil {
-		implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
-	}
-	// This may still be nil if the instrument was created and bound
-	// before a delegate was set, and the handle was then unbound after
-	// the delegate arrived.
-	if implPtr == nil {
-		return
-	}
-	(*implPtr).RecordOne(ctx, number)
-}
-
-func AtomicFieldOffsets() map[string]uintptr {
-	return map[string]uintptr{
-		"meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
-		"meterImpl.delegate":     unsafe.Offsetof(meterImpl{}.delegate),
-		"syncImpl.delegate":      unsafe.Offsetof(syncImpl{}.delegate),
-		"asyncImpl.delegate":     unsafe.Offsetof(asyncImpl{}.delegate),
-		"syncHandle.delegate":    unsafe.Offsetof(syncHandle{}.delegate),
-	}
-}
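The forwarding behavior described in the file's header comment is observable through the public `global` API: instruments created before an SDK registers are recorded against this no-op forwarder and are swapped for real delegates when `SetMeterProvider` runs. A minimal sketch, assuming the otel version these files belong to:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/api/global"
	"go.opentelemetry.io/otel/label"
)

func main() {
	// Backed by the forwarding meterProvider above until an SDK calls
	// global.SetMeterProvider; until then every record is a no-op.
	meter := global.Meter("example.com/app")

	counter, err := meter.NewInt64Counter("requests")
	if err != nil {
		panic(err)
	}

	// Safe before SDK initialization; once a delegate is installed, the
	// same counter value starts recording for real.
	counter.Add(context.Background(), 1, label.String("route", "/"))
}
```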
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/state.go b/vendor/go.opentelemetry.io/otel/api/global/internal/state.go
deleted file mode 100644
index ecdfd75..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/internal/state.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
-	"sync"
-	"sync/atomic"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/api/metric"
-	"go.opentelemetry.io/otel/api/trace"
-)
-
-type (
-	tracerProviderHolder struct {
-		tp trace.TracerProvider
-	}
-
-	meterProviderHolder struct {
-		mp metric.MeterProvider
-	}
-
-	propagatorsHolder struct {
-		tm otel.TextMapPropagator
-	}
-)
-
-var (
-	globalTracer      = defaultTracerValue()
-	globalMeter       = defaultMeterValue()
-	globalPropagators = defaultPropagatorsValue()
-
-	delegateMeterOnce sync.Once
-	delegateTraceOnce sync.Once
-)
-
-// TracerProvider is the internal implementation for global.TracerProvider.
-func TracerProvider() trace.TracerProvider {
-	return globalTracer.Load().(tracerProviderHolder).tp
-}
-
-// SetTracerProvider is the internal implementation for global.SetTracerProvider.
-func SetTracerProvider(tp trace.TracerProvider) {
-	delegateTraceOnce.Do(func() {
-		current := TracerProvider()
-		if current == tp {
-			// Setting the provider to the prior default is nonsense, panic.
-			// Panic is acceptable because we are likely still early in the
-			// process lifetime.
-			panic("invalid TracerProvider, the global instance cannot be reinstalled")
-		} else if def, ok := current.(*tracerProvider); ok {
-			def.setDelegate(tp)
-		}
-
-	})
-	globalTracer.Store(tracerProviderHolder{tp: tp})
-}
-
-// MeterProvider is the internal implementation for global.MeterProvider.
-func MeterProvider() metric.MeterProvider {
-	return globalMeter.Load().(meterProviderHolder).mp
-}
-
-// SetMeterProvider is the internal implementation for global.SetMeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
-	delegateMeterOnce.Do(func() {
-		current := MeterProvider()
-
-		if current == mp {
-			// Setting the provider to the prior default is nonsense, panic.
-			// Panic is acceptable because we are likely still early in the
-			// process lifetime.
-			panic("invalid MeterProvider, the global instance cannot be reinstalled")
-		} else if def, ok := current.(*meterProvider); ok {
-			def.setDelegate(mp)
-		}
-	})
-	globalMeter.Store(meterProviderHolder{mp: mp})
-}
-
-// TextMapPropagator is the internal implementation for global.TextMapPropagator.
-func TextMapPropagator() otel.TextMapPropagator {
-	return globalPropagators.Load().(propagatorsHolder).tm
-}
-
-// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
-func SetTextMapPropagator(p otel.TextMapPropagator) {
-	globalPropagators.Store(propagatorsHolder{tm: p})
-}
-
-func defaultTracerValue() *atomic.Value {
-	v := &atomic.Value{}
-	v.Store(tracerProviderHolder{tp: &tracerProvider{}})
-	return v
-}
-
-func defaultMeterValue() *atomic.Value {
-	v := &atomic.Value{}
-	v.Store(meterProviderHolder{mp: newMeterProvider()})
-	return v
-}
-
-func defaultPropagatorsValue() *atomic.Value {
-	v := &atomic.Value{}
-	v.Store(propagatorsHolder{tm: getDefaultTextMapPropagator()})
-	return v
-}
-
-// getDefaultTextMapPropagator returns the default TextMapPropagator,
-// which here is an empty (no-op) composite propagator.
-func getDefaultTextMapPropagator() otel.TextMapPropagator {
-	return otel.NewCompositeTextMapPropagator()
-}
-
-// ResetForTest restores the initial global state, for testing purposes.
-func ResetForTest() {
-	globalTracer = defaultTracerValue()
-	globalMeter = defaultMeterValue()
-	globalPropagators = defaultPropagatorsValue()
-	delegateMeterOnce = sync.Once{}
-	delegateTraceOnce = sync.Once{}
-}
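The reinstall guard in `SetTracerProvider` (and, identically, `SetMeterProvider`) can be exercised directly: passing the current default provider back in trips the panic. A small sketch, assuming the otel version these files belong to:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/global"
)

func main() {
	defer func() {
		// Recovers the "invalid TracerProvider, the global instance
		// cannot be reinstalled" panic raised by the guard above.
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()

	// Feeding the prior default back in is rejected.
	global.SetTracerProvider(global.TracerProvider())
}
```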
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go b/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go
deleted file mode 100644
index bcd1461..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-/*
-This file contains the forwarding implementation of the TracerProvider used as
-the default global instance. Prior to initialization of an SDK, Tracers
-returned by the global TracerProvider will provide no-op functionality. This
-means that all Spans created prior to initialization are no-op Spans.
-
-Once an SDK has been initialized, all provided no-op Tracers are swapped for
-Tracers provided by the SDK defined TracerProvider. However, any Span started
-prior to this initialization does not change its behavior. Meaning, the Span
-remains a no-op Span.
-
-The implementation to track and swap Tracers locks all new Tracer creation
-until the swap is complete. This assumes that this operation is not
-performance-critical. If that assumption is incorrect, be sure to configure an
-SDK prior to any Tracer creation.
-*/
-
-import (
-	"context"
-	"sync"
-
-	"go.opentelemetry.io/otel/api/trace"
-	"go.opentelemetry.io/otel/internal/trace/noop"
-)
-
-// tracerProvider is a placeholder for a configured SDK TracerProvider.
-//
-// All TracerProvider functionality is forwarded to a delegate once
-// configured.
-type tracerProvider struct {
-	mtx     sync.Mutex
-	tracers []*tracer
-
-	delegate trace.TracerProvider
-}
-
-// Compile-time guarantee that tracerProvider implements the TracerProvider
-// interface.
-var _ trace.TracerProvider = &tracerProvider{}
-
-// setDelegate configures p to delegate all TracerProvider functionality to
-// provider.
-//
-// All Tracers provided prior to this function call are switched out to be
-// Tracers provided by provider.
-//
-// Delegation only happens on the first call to this method. All subsequent
-// calls result in no delegation changes.
-func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
-	if p.delegate != nil {
-		return
-	}
-
-	p.mtx.Lock()
-	defer p.mtx.Unlock()
-
-	p.delegate = provider
-	for _, t := range p.tracers {
-		t.setDelegate(provider)
-	}
-
-	p.tracers = nil
-}
-
-// Tracer implements TracerProvider.
-func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
-	p.mtx.Lock()
-	defer p.mtx.Unlock()
-
-	if p.delegate != nil {
-		return p.delegate.Tracer(name, opts...)
-	}
-
-	t := &tracer{name: name, opts: opts}
-	p.tracers = append(p.tracers, t)
-	return t
-}
-
-// tracer is a placeholder for a trace.Tracer.
-//
-// All Tracer functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopTracer.
-type tracer struct {
-	once sync.Once
-	name string
-	opts []trace.TracerOption
-
-	delegate trace.Tracer
-}
-
-// Compile-time guarantee that tracer implements the trace.Tracer interface.
-var _ trace.Tracer = &tracer{}
-
-// setDelegate configures t to delegate all Tracer functionality to Tracers
-// created by provider.
-//
-// All subsequent calls to the Tracer methods will be passed to the delegate.
-//
-// Delegation only happens on the first call to this method. All subsequent
-// calls result in no delegation changes.
-func (t *tracer) setDelegate(provider trace.TracerProvider) {
-	t.once.Do(func() { t.delegate = provider.Tracer(t.name, t.opts...) })
-}
-
-// Start implements trace.Tracer by forwarding the call to t.delegate if
-// set, otherwise it forwards the call to a NoopTracer.
-func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) {
-	if t.delegate != nil {
-		return t.delegate.Start(ctx, name, opts...)
-	}
-	return noop.Tracer.Start(ctx, name, opts...)
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/metric.go b/vendor/go.opentelemetry.io/otel/api/global/metric.go
deleted file mode 100644
index f1695bb..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/metric.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
-	"go.opentelemetry.io/otel/api/global/internal"
-	"go.opentelemetry.io/otel/api/metric"
-)
-
-// Meter creates an implementation of the Meter interface from the global
-// MeterProvider. The instrumentationName must be the name of the library
-// providing instrumentation. This name may be the same as the instrumented
-// code only if that code provides built-in instrumentation. If the
-// instrumentationName is empty, then an implementation-defined default name
-// will be used instead.
-//
-// This is short for MeterProvider().Meter(name)
-func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
-	return MeterProvider().Meter(instrumentationName, opts...)
-}
-
-// MeterProvider returns the registered global meter provider.  If
-// none is registered then a default meter provider is returned that
-// forwards the Meter interface to the first registered MeterProvider.
-//
-// Use the meter provider to create a named meter. E.g.
-//     meter := global.MeterProvider().Meter("example.com/foo")
-// or
-//     meter := global.Meter("example.com/foo")
-func MeterProvider() metric.MeterProvider {
-	return internal.MeterProvider()
-}
-
-// SetMeterProvider registers `mp` as the global meter provider.
-func SetMeterProvider(mp metric.MeterProvider) {
-	internal.SetMeterProvider(mp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/propagation.go b/vendor/go.opentelemetry.io/otel/api/global/propagation.go
deleted file mode 100644
index c5f4c2b..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/propagation.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/api/global/internal"
-)
-
-// TextMapPropagator returns the global TextMapPropagator. If none has been
-// set, a No-Op TextMapPropagator is returned.
-func TextMapPropagator() otel.TextMapPropagator {
-	return internal.TextMapPropagator()
-}
-
-// SetTextMapPropagator sets propagator as the global TextMapPropagator.
-func SetTextMapPropagator(propagator otel.TextMapPropagator) {
-	internal.SetTextMapPropagator(propagator)
-}
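A usage sketch for this API, assuming the otel version these files belong to. Note that the default composite is empty, i.e. a no-op; a real setup would pass concrete propagators (such as W3C TraceContext and Baggage) to `NewCompositeTextMapPropagator`:

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/api/global"
)

func main() {
	// Replace the process-wide propagator. With no arguments the
	// composite is a no-op; SDK setups pass concrete propagators here.
	global.SetTextMapPropagator(otel.NewCompositeTextMapPropagator())

	// Instrumentation then injects/extracts via the global instance.
	_ = global.TextMapPropagator()
}
```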
diff --git a/vendor/go.opentelemetry.io/otel/api/global/trace.go b/vendor/go.opentelemetry.io/otel/api/global/trace.go
deleted file mode 100644
index 49a7543..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/trace.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
-	"go.opentelemetry.io/otel/api/global/internal"
-	"go.opentelemetry.io/otel/api/trace"
-)
-
-// Tracer creates a named tracer that implements Tracer interface.
-// If the name is an empty string, then the provider uses a default name.
-//
-// This is short for TracerProvider().Tracer(name)
-func Tracer(name string) trace.Tracer {
-	return TracerProvider().Tracer(name)
-}
-
-// TracerProvider returns the registered global trace provider.
-// If none is registered then an instance of NoopTracerProvider is returned.
-//
-// Use the trace provider to create a named tracer. E.g.
-//     tracer := global.TracerProvider().Tracer("example.com/foo")
-// or
-//     tracer := global.Tracer("example.com/foo")
-func TracerProvider() trace.TracerProvider {
-	return internal.TracerProvider()
-}
-
-// SetTracerProvider registers `tp` as the global trace provider.
-func SetTracerProvider(tp trace.TracerProvider) {
-	internal.SetTracerProvider(tp)
-}
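The doc comments above already show how a tracer is obtained; a complete, runnable sketch (assuming the otel version these files belong to) that is safe to call before any SDK is configured, since the returned Span is then a no-op:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/api/global"
)

func main() {
	tracer := global.Tracer("example.com/foo")

	// With no SDK registered this produces a no-op Span; after
	// global.SetTracerProvider it delegates to the real tracer.
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()
}
```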
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/async.go b/vendor/go.opentelemetry.io/otel/api/metric/async.go
deleted file mode 100644
index d0d488d..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/async.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// The file is organized as follows:
-//
-//  - Observation type
-//  - Three kinds of Observer callback (int64, float64, batch)
-//  - Three kinds of Observer result (int64, float64, batch)
-//  - Three kinds of Observe() function (int64, float64, batch)
-//  - Three kinds of AsyncRunner interface (abstract, single, batch)
-//  - Two kinds of Observer constructor (int64, float64)
-//  - Two kinds of Observation() function (int64, float64)
-//  - Various internals
-
-// Observation is used for reporting an asynchronous batch of metric
-// values. Instances of this type should be created by asynchronous
-// instruments (e.g., Int64ValueObserver.Observation()).
-type Observation struct {
-	// number needs to be aligned for 64-bit atomic operations.
-	number     Number
-	instrument AsyncImpl
-}
-
-// Int64ObserverFunc is a type of callback that integral
-// observers run.
-type Int64ObserverFunc func(context.Context, Int64ObserverResult)
-
-// Float64ObserverFunc is a type of callback that floating point
-// observers run.
-type Float64ObserverFunc func(context.Context, Float64ObserverResult)
-
-// BatchObserverFunc is a callback argument for use with any
-// Observer instrument that will be reported as a batch of
-// observations.
-type BatchObserverFunc func(context.Context, BatchObserverResult)
-
-// Int64ObserverResult is passed to an observer callback to capture
-// observations for one asynchronous integer metric instrument.
-type Int64ObserverResult struct {
-	instrument AsyncImpl
-	function   func([]label.KeyValue, ...Observation)
-}
-
-// Float64ObserverResult is passed to an observer callback to capture
-// observations for one asynchronous floating point metric instrument.
-type Float64ObserverResult struct {
-	instrument AsyncImpl
-	function   func([]label.KeyValue, ...Observation)
-}
-
-// BatchObserverResult is passed to a batch observer callback to
-// capture observations for multiple asynchronous instruments.
-type BatchObserverResult struct {
-	function func([]label.KeyValue, ...Observation)
-}
-
-// Observe captures a single integer value from the associated
-// instrument callback, with the given labels.
-func (ir Int64ObserverResult) Observe(value int64, labels ...label.KeyValue) {
-	ir.function(labels, Observation{
-		instrument: ir.instrument,
-		number:     NewInt64Number(value),
-	})
-}
-
-// Observe captures a single floating point value from the associated
-// instrument callback, with the given labels.
-func (fr Float64ObserverResult) Observe(value float64, labels ...label.KeyValue) {
-	fr.function(labels, Observation{
-		instrument: fr.instrument,
-		number:     NewFloat64Number(value),
-	})
-}
-
-// Observe captures multiple observations from the associated batch
-// instrument callback, with the given labels.
-func (br BatchObserverResult) Observe(labels []label.KeyValue, obs ...Observation) {
-	br.function(labels, obs...)
-}
-
-// AsyncRunner is expected to convert into an AsyncSingleRunner or an
-// AsyncBatchRunner.  SDKs will encounter an error if the AsyncRunner
-// does not satisfy one of these interfaces.
-type AsyncRunner interface {
-	// AnyRunner() is a non-exported method with no functional use
-	// other than to make this a non-empty interface.
-	AnyRunner()
-}
-
-// AsyncSingleRunner is an interface implemented by single-observer
-// callbacks.
-type AsyncSingleRunner interface {
-	// Run accepts a single instrument and function for capturing
-	// observations of that instrument.  Each call to the function
-	// receives one captured observation.  (The function accepts
-	// multiple observations so the same implementation can be
-	// used for batch runners.)
-	Run(ctx context.Context, single AsyncImpl, capture func([]label.KeyValue, ...Observation))
-
-	AsyncRunner
-}
-
-// AsyncBatchRunner is an interface implemented by batch-observer
-// callbacks.
-type AsyncBatchRunner interface {
-	// Run accepts a function for capturing observations of
-	// multiple instruments.
-	Run(ctx context.Context, capture func([]label.KeyValue, ...Observation))
-
-	AsyncRunner
-}
-
-var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil)
-var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil)
-var _ AsyncBatchRunner = (*BatchObserverFunc)(nil)
-
-// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments.
-func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner {
-	return &c
-}
-
-// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments.
-func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner {
-	return &c
-}
-
-// newBatchAsyncRunner returns a batch-observer callback for use with multiple Observer instruments.
-func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner {
-	return &c
-}
-
-// AnyRunner implements AsyncRunner.
-func (*Int64ObserverFunc) AnyRunner() {}
-
-// AnyRunner implements AsyncRunner.
-func (*Float64ObserverFunc) AnyRunner() {}
-
-// AnyRunner implements AsyncRunner.
-func (*BatchObserverFunc) AnyRunner() {}
-
-// Run implements AsyncSingleRunner.
-func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
-	(*i)(ctx, Int64ObserverResult{
-		instrument: impl,
-		function:   function,
-	})
-}
-
-// Run implements AsyncSingleRunner.
-func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
-	(*f)(ctx, Float64ObserverResult{
-		instrument: impl,
-		function:   function,
-	})
-}
-
-// Run implements AsyncBatchRunner.
-func (b *BatchObserverFunc) Run(ctx context.Context, function func([]label.KeyValue, ...Observation)) {
-	(*b)(ctx, BatchObserverResult{
-		function: function,
-	})
-}
-
-// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver.
-func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) {
-	common, err := checkNewAsync(asyncInst, err)
-	return Int64ValueObserver{asyncInstrument: common}, err
-}
-
-// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver.
-func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) {
-	common, err := checkNewAsync(asyncInst, err)
-	return Float64ValueObserver{asyncInstrument: common}, err
-}
-
-// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver.
-func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) {
-	common, err := checkNewAsync(asyncInst, err)
-	return Int64SumObserver{asyncInstrument: common}, err
-}
-
-// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver.
-func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) {
-	common, err := checkNewAsync(asyncInst, err)
-	return Float64SumObserver{asyncInstrument: common}, err
-}
-
-// wrapInt64UpDownSumObserverInstrument converts an AsyncImpl into Int64UpDownSumObserver.
-func wrapInt64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownSumObserver, error) {
-	common, err := checkNewAsync(asyncInst, err)
-	return Int64UpDownSumObserver{asyncInstrument: common}, err
-}
-
-// wrapFloat64UpDownSumObserverInstrument converts an AsyncImpl into Float64UpDownSumObserver.
-func wrapFloat64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownSumObserver, error) {
-	common, err := checkNewAsync(asyncInst, err)
-	return Float64UpDownSumObserver{asyncInstrument: common}, err
-}
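Asynchronous instruments are driven by callbacks that the SDK invokes at collection time. A minimal single-observer sketch, assuming the otel version these files belong to; the metric name is illustrative:

```go
package main

import (
	"context"
	"runtime"

	"go.opentelemetry.io/otel/api/global"
	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	meter := global.Meter("example.com/app")

	// The callback is wrapped by newInt64AsyncRunner above and runs
	// during SDK collection; with no SDK installed it is never invoked.
	_, err := meter.NewInt64ValueObserver(
		"process.goroutines",
		func(_ context.Context, result metric.Int64ObserverResult) {
			result.Observe(int64(runtime.NumGoroutine()))
		},
	)
	if err != nil {
		panic(err)
	}
}
```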
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/config.go b/vendor/go.opentelemetry.io/otel/api/metric/config.go
deleted file mode 100644
index 3cd8fe8..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/config.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import "go.opentelemetry.io/otel/unit"
-
-// InstrumentConfig contains options for instrument descriptors.
-type InstrumentConfig struct {
-	// Description describes the instrument in human-readable terms.
-	Description string
-	// Unit describes the measurement unit for an instrument.
-	Unit unit.Unit
-	// InstrumentationName is the name of the library providing
-	// instrumentation.
-	InstrumentationName string
-	// InstrumentationVersion is the version of the library providing
-	// instrumentation.
-	InstrumentationVersion string
-}
-
-// InstrumentOption is an interface for applying instrument options.
-type InstrumentOption interface {
-	// ApplyInstrument is used to set an InstrumentOption value of an
-	// InstrumentConfig.
-	ApplyInstrument(*InstrumentConfig)
-}
-
-// NewInstrumentConfig creates a new InstrumentConfig
-// and applies all the given options.
-func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig {
-	var config InstrumentConfig
-	for _, o := range opts {
-		o.ApplyInstrument(&config)
-	}
-	return config
-}
-
-// WithDescription applies provided description.
-func WithDescription(desc string) InstrumentOption {
-	return descriptionOption(desc)
-}
-
-type descriptionOption string
-
-func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) {
-	config.Description = string(d)
-}
-
-// WithUnit applies provided unit.
-func WithUnit(unit unit.Unit) InstrumentOption {
-	return unitOption(unit)
-}
-
-type unitOption unit.Unit
-
-func (u unitOption) ApplyInstrument(config *InstrumentConfig) {
-	config.Unit = unit.Unit(u)
-}
-
-// WithInstrumentationName sets the instrumentation name.
-func WithInstrumentationName(name string) InstrumentOption {
-	return instrumentationNameOption(name)
-}
-
-type instrumentationNameOption string
-
-func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) {
-	config.InstrumentationName = string(i)
-}
-
-// MeterConfig contains options for Meters.
-type MeterConfig struct {
-	// InstrumentationVersion is the version of the library providing
-	// instrumentation.
-	InstrumentationVersion string
-}
-
-// MeterOption is an interface for applying Meter options.
-type MeterOption interface {
-	// ApplyMeter is used to set a MeterOption value of a MeterConfig.
-	ApplyMeter(*MeterConfig)
-}
-
-// NewMeterConfig creates a new MeterConfig and applies
-// all the given options.
-func NewMeterConfig(opts ...MeterOption) MeterConfig {
-	var config MeterConfig
-	for _, o := range opts {
-		o.ApplyMeter(&config)
-	}
-	return config
-}
-
-// Option is an interface for applying Instrument or Meter options.
-type Option interface {
-	InstrumentOption
-	MeterOption
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) Option {
-	return instrumentationVersionOption(version)
-}
-
-type instrumentationVersionOption string
-
-func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) {
-	config.InstrumentationVersion = string(i)
-}
-
-func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) {
-	config.InstrumentationVersion = string(i)
-}
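The option pattern above composes per call. A short sketch of building configs, assuming the otel version these files belong to; note that `WithInstrumentationVersion` returns an `Option`, so it satisfies both the instrument and meter option interfaces:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/unit"
)

func main() {
	cfg := metric.NewInstrumentConfig(
		metric.WithDescription("request latency"),
		metric.WithUnit(unit.Milliseconds),
		// An Option value: also usable with NewMeterConfig below.
		metric.WithInstrumentationVersion("semver:1.2.3"),
	)
	fmt.Println(cfg.Description, cfg.Unit, cfg.InstrumentationVersion)

	mcfg := metric.NewMeterConfig(metric.WithInstrumentationVersion("semver:1.2.3"))
	fmt.Println(mcfg.InstrumentationVersion)
}
```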
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/counter.go b/vendor/go.opentelemetry.io/otel/api/metric/counter.go
deleted file mode 100644
index c03421d..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/counter.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// Float64Counter is a metric that accumulates float64 values.
-type Float64Counter struct {
-	syncInstrument
-}
-
-// Int64Counter is a metric that accumulates int64 values.
-type Int64Counter struct {
-	syncInstrument
-}
-
-// BoundFloat64Counter is a bound instrument for Float64Counter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64Counter struct {
-	syncBoundInstrument
-}
-
-// BoundInt64Counter is a bound instrument for Int64Counter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64Counter struct {
-	syncBoundInstrument
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Float64Counter) Bind(labels ...label.KeyValue) (h BoundFloat64Counter) {
-	h.syncBoundInstrument = c.bind(labels)
-	return
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Int64Counter) Bind(labels ...label.KeyValue) (h BoundInt64Counter) {
-	h.syncBoundInstrument = c.bind(labels)
-	return
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Float64Counter) Measurement(value float64) Measurement {
-	return c.float64Measurement(value)
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Int64Counter) Measurement(value int64) Measurement {
-	return c.int64Measurement(value)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Float64Counter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
-	c.directRecord(ctx, NewFloat64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Int64Counter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
-	c.directRecord(ctx, NewInt64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind().
-func (b BoundFloat64Counter) Add(ctx context.Context, value float64) {
-	b.directRecord(ctx, NewFloat64Number(value))
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind().
-func (b BoundInt64Counter) Add(ctx context.Context, value int64) {
-	b.directRecord(ctx, NewInt64Number(value))
-}
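Bound instruments pre-associate labels so hot paths skip label processing on every call. A usage sketch contrasting the two recording paths, assuming the otel version these files belong to:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/api/global"
	"go.opentelemetry.io/otel/label"
)

func main() {
	meter := global.Meter("example.com/app")
	counter, err := meter.NewInt64Counter("requests")
	if err != nil {
		panic(err)
	}

	// Direct recording: labels are passed on every Add call.
	counter.Add(context.Background(), 1, label.String("route", "/api"))

	// Bound recording: labels are fixed once and released with Unbind.
	bound := counter.Bind(label.String("route", "/api"))
	defer bound.Unbind()
	bound.Add(context.Background(), 1)
}
```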
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go b/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go
deleted file mode 100644
index 3af55e5..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import "go.opentelemetry.io/otel/unit"
-
-// Descriptor contains all the settings that describe an instrument,
-// including its name, metric kind, number kind, and the configurable
-// options.
-type Descriptor struct {
-	name       string
-	kind       Kind
-	numberKind NumberKind
-	config     InstrumentConfig
-}
-
-// NewDescriptor returns a Descriptor with the given contents.
-func NewDescriptor(name string, mkind Kind, nkind NumberKind, opts ...InstrumentOption) Descriptor {
-	return Descriptor{
-		name:       name,
-		kind:       mkind,
-		numberKind: nkind,
-		config:     NewInstrumentConfig(opts...),
-	}
-}
-
-// Name returns the metric instrument's name.
-func (d Descriptor) Name() string {
-	return d.name
-}
-
-// MetricKind returns the specific kind of instrument.
-func (d Descriptor) MetricKind() Kind {
-	return d.kind
-}
-
-// Description provides a human-readable description of the metric
-// instrument.
-func (d Descriptor) Description() string {
-	return d.config.Description
-}
-
-// Unit describes the units of the metric instrument.  Unitless
-// metrics return the empty string.
-func (d Descriptor) Unit() unit.Unit {
-	return d.config.Unit
-}
-
-// NumberKind returns the kind of number this instrument is declared
-// over: int64, float64, or uint64.
-func (d Descriptor) NumberKind() NumberKind {
-	return d.numberKind
-}
-
-// InstrumentationName returns the name of the library that provided
-// instrumentation for this instrument.
-func (d Descriptor) InstrumentationName() string {
-	return d.config.InstrumentationName
-}
-
-// InstrumentationVersion returns the version of the library that provided
-// instrumentation for this instrument.
-func (d Descriptor) InstrumentationVersion() string {
-	return d.config.InstrumentationVersion
-}
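A short sketch of constructing and reading back a Descriptor, assuming the otel version these files belong to; the instrument name and description are illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	desc := metric.NewDescriptor(
		"requests",
		metric.CounterKind,
		metric.Int64NumberKind,
		metric.WithDescription("number of requests served"),
	)

	// Accessors expose the name, kinds, and configured options.
	fmt.Println(desc.Name(), desc.MetricKind(), desc.NumberKind(), desc.Description())
}
```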
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/doc.go b/vendor/go.opentelemetry.io/otel/api/metric/doc.go
deleted file mode 100644
index 48a59c5..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/doc.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metric provides support for reporting measurements using instruments.
-//
-// Instruments are categorized as below:
-//
-// Synchronous instruments are called by the user with a Context.
-// Asynchronous instruments are called by the SDK during collection.
-//
-// Additive instruments are semantically intended for capturing a sum.
-// Non-additive instruments are intended for capturing a distribution.
-//
-// Additive instruments may be monotonic, in which case they are
-// non-decreasing and naturally define a rate.
-//
-// The synchronous instrument names are:
-//
-//   Counter:           additive, monotonic
-//   UpDownCounter:     additive
-//   ValueRecorder:     non-additive
-//
-// and the asynchronous instruments are:
-//
-//   SumObserver:       additive, monotonic
-//   UpDownSumObserver: additive
-//   ValueObserver:     non-additive
-//
-// All instruments are provided with support for either float64 or
-// int64 input values.
-//
-// The Meter interface supports allocating new instruments as well as
-// interfaces for recording batches of synchronous measurements or
-// asynchronous observations.  To obtain a Meter, use a MeterProvider.
-//
-// The MeterProvider interface supports obtaining a named Meter interface. To
-// obtain a MeterProvider implementation, initialize and configure any
-// compatible SDK.
-package metric // import "go.opentelemetry.io/otel/api/metric"
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/kind.go b/vendor/go.opentelemetry.io/otel/api/metric/kind.go
deleted file mode 100644
index 9d4b453..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/kind.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:generate stringer -type=Kind
-
-package metric
-
-// Kind describes the kind of instrument.
-type Kind int8
-
-const (
-	// ValueRecorderKind indicates a ValueRecorder instrument.
-	ValueRecorderKind Kind = iota
-	// ValueObserverKind indicates a ValueObserver instrument.
-	ValueObserverKind
-
-	// CounterKind indicates a Counter instrument.
-	CounterKind
-	// UpDownCounterKind indicates an UpDownCounter instrument.
-	UpDownCounterKind
-
-	// SumObserverKind indicates a SumObserver instrument.
-	SumObserverKind
-	// UpDownSumObserverKind indicates an UpDownSumObserver instrument.
-	UpDownSumObserverKind
-)
-
-// Synchronous returns whether this is a synchronous kind of instrument.
-func (k Kind) Synchronous() bool {
-	switch k {
-	case CounterKind, UpDownCounterKind, ValueRecorderKind:
-		return true
-	}
-	return false
-}
-
-// Asynchronous returns whether this is an asynchronous kind of instrument.
-func (k Kind) Asynchronous() bool {
-	return !k.Synchronous()
-}
-
-// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
-func (k Kind) Adding() bool {
-	switch k {
-	case CounterKind, UpDownCounterKind, SumObserverKind, UpDownSumObserverKind:
-		return true
-	}
-	return false
-}
-
-// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
-func (k Kind) Grouping() bool {
-	return !k.Adding()
-}
-
-// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
-func (k Kind) Monotonic() bool {
-	switch k {
-	case CounterKind, SumObserverKind:
-		return true
-	}
-	return false
-}
-
-// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
-func (k Kind) PrecomputedSum() bool {
-	return k.Adding() && k.Asynchronous()
-}
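The predicates above partition the six kinds along three axes (synchronous/asynchronous, adding/grouping, monotonic or not), and `PrecomputedSum` falls out of the first two. A quick sketch, assuming the otel version these files belong to:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	// CounterKind: synchronous, adding, monotonic; the SDK accumulates
	// the deltas itself, so it is not a precomputed sum.
	fmt.Println(metric.CounterKind.Synchronous())    // true
	fmt.Println(metric.CounterKind.Monotonic())      // true
	fmt.Println(metric.CounterKind.PrecomputedSum()) // false

	// SumObserverKind: asynchronous and adding, so its callbacks report
	// precomputed sums.
	fmt.Println(metric.SumObserverKind.PrecomputedSum()) // true
}
```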
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go b/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go
deleted file mode 100644
index eb1a0d5..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Code generated by "stringer -type=Kind"; DO NOT EDIT.
-
-package metric
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[ValueRecorderKind-0]
-	_ = x[ValueObserverKind-1]
-	_ = x[CounterKind-2]
-	_ = x[UpDownCounterKind-3]
-	_ = x[SumObserverKind-4]
-	_ = x[UpDownSumObserverKind-5]
-}
-
-const _Kind_name = "ValueRecorderKindValueObserverKindCounterKindUpDownCounterKindSumObserverKindUpDownSumObserverKind"
-
-var _Kind_index = [...]uint8{0, 17, 34, 45, 62, 77, 98}
-
-func (i Kind) String() string {
-	if i < 0 || i >= Kind(len(_Kind_index)-1) {
-		return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/meter.go b/vendor/go.opentelemetry.io/otel/api/metric/meter.go
deleted file mode 100644
index d174913..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/meter.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// The file is organized as follows:
-//
-//  - MeterProvider interface
-//  - Meter struct
-//  - RecordBatch
-//  - BatchObserver
-//  - Synchronous instrument constructors (2 x int64,float64)
-//  - Asynchronous instrument constructors (1 x int64,float64)
-//  - Batch asynchronous constructors (1 x int64,float64)
-//  - Internals
-
-// MeterProvider supports named Meter instances.
-type MeterProvider interface {
-	// Meter creates an implementation of the Meter interface.
-	// The instrumentationName must be the name of the library providing
-	// instrumentation. This name may be the same as the instrumented code
-	// only if that code provides built-in instrumentation. If the
-	// instrumentationName is empty, then a implementation defined default
-	// name will be used instead.
-	Meter(instrumentationName string, opts ...MeterOption) Meter
-}
-
-// Meter is the OpenTelemetry metric API, based on a `MeterImpl`
-// implementation and the `Meter` library name.
-//
-// An uninitialized Meter is a no-op implementation.
-type Meter struct {
-	impl          MeterImpl
-	name, version string
-}
-
-// RecordBatch atomically records a batch of measurements.
-func (m Meter) RecordBatch(ctx context.Context, ls []label.KeyValue, ms ...Measurement) {
-	if m.impl == nil {
-		return
-	}
-	m.impl.RecordBatch(ctx, ls, ms...)
-}
-
-// NewBatchObserver creates a new BatchObserver that supports
-// making batches of observations for multiple instruments.
-func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver {
-	return BatchObserver{
-		meter:  m,
-		runner: newBatchAsyncRunner(callback),
-	}
-}
-
-// NewInt64Counter creates a new integer Counter instrument with the
-// given name, customized with options.  May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) {
-	return wrapInt64CounterInstrument(
-		m.newSync(name, CounterKind, Int64NumberKind, options))
-}
-
-// NewFloat64Counter creates a new floating point Counter with the
-// given name, customized with options.  May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) {
-	return wrapFloat64CounterInstrument(
-		m.newSync(name, CounterKind, Float64NumberKind, options))
-}
-
-// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the
-// given name, customized with options.  May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) {
-	return wrapInt64UpDownCounterInstrument(
-		m.newSync(name, UpDownCounterKind, Int64NumberKind, options))
-}
-
-// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the
-// given name, customized with options.  May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) {
-	return wrapFloat64UpDownCounterInstrument(
-		m.newSync(name, UpDownCounterKind, Float64NumberKind, options))
-}
-
-// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
-// given name, customized with options.  May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewInt64ValueRecorder(name string, opts ...InstrumentOption) (Int64ValueRecorder, error) {
-	return wrapInt64ValueRecorderInstrument(
-		m.newSync(name, ValueRecorderKind, Int64NumberKind, opts))
-}
-
-// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
-// given name, customized with options.  May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewFloat64ValueRecorder(name string, opts ...InstrumentOption) (Float64ValueRecorder, error) {
-	return wrapFloat64ValueRecorderInstrument(
-		m.newSync(name, ValueRecorderKind, Float64NumberKind, opts))
-}
-
-// NewInt64ValueObserver creates a new integer ValueObserver instrument
-// with the given name, running a given callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewInt64ValueObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64ValueObserver, error) {
-	if callback == nil {
-		return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapInt64ValueObserverInstrument(
-		m.newAsync(name, ValueObserverKind, Int64NumberKind, opts,
-			newInt64AsyncRunner(callback)))
-}
-
-// NewFloat64ValueObserver creates a new floating point ValueObserver with
-// the given name, running a given callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64ValueObserver, error) {
-	if callback == nil {
-		return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapFloat64ValueObserverInstrument(
-		m.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
-			newFloat64AsyncRunner(callback)))
-}
-
-// NewInt64SumObserver creates a new integer SumObserver instrument
-// with the given name, running a given callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) {
-	if callback == nil {
-		return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapInt64SumObserverInstrument(
-		m.newAsync(name, SumObserverKind, Int64NumberKind, opts,
-			newInt64AsyncRunner(callback)))
-}
-
-// NewFloat64SumObserver creates a new floating point SumObserver with
-// the given name, running a given callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewFloat64SumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64SumObserver, error) {
-	if callback == nil {
-		return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapFloat64SumObserverInstrument(
-		m.newAsync(name, SumObserverKind, Float64NumberKind, opts,
-			newFloat64AsyncRunner(callback)))
-}
-
-// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
-// with the given name, running a given callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
-	if callback == nil {
-		return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapInt64UpDownSumObserverInstrument(
-		m.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts,
-			newInt64AsyncRunner(callback)))
-}
-
-// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
-// the given name, running a given callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
-	if callback == nil {
-		return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapFloat64UpDownSumObserverInstrument(
-		m.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
-			newFloat64AsyncRunner(callback)))
-}
-
-// NewInt64ValueObserver creates a new integer ValueObserver instrument
-// with the given name, running in a batch callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) {
-	if b.runner == nil {
-		return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapInt64ValueObserverInstrument(
-		b.meter.newAsync(name, ValueObserverKind, Int64NumberKind, opts, b.runner))
-}
-
-// NewFloat64ValueObserver creates a new floating point ValueObserver with
-// the given name, running in a batch callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewFloat64ValueObserver(name string, opts ...InstrumentOption) (Float64ValueObserver, error) {
-	if b.runner == nil {
-		return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapFloat64ValueObserverInstrument(
-		b.meter.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
-			b.runner))
-}
-
-// NewInt64SumObserver creates a new integer SumObserver instrument
-// with the given name, running in a batch callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) {
-	if b.runner == nil {
-		return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapInt64SumObserverInstrument(
-		b.meter.newAsync(name, SumObserverKind, Int64NumberKind, opts, b.runner))
-}
-
-// NewFloat64SumObserver creates a new floating point SumObserver with
-// the given name, running in a batch callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) {
-	if b.runner == nil {
-		return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapFloat64SumObserverInstrument(
-		b.meter.newAsync(name, SumObserverKind, Float64NumberKind, opts,
-			b.runner))
-}
-
-// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
-// with the given name, running in a batch callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
-	if b.runner == nil {
-		return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapInt64UpDownSumObserverInstrument(
-		b.meter.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts, b.runner))
-}
-
-// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
-// the given name, running in a batch callback, and customized with
-// options.  May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewFloat64UpDownSumObserver(name string, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
-	if b.runner == nil {
-		return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
-	}
-	return wrapFloat64UpDownSumObserverInstrument(
-		b.meter.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
-			b.runner))
-}
-
-// MeterImpl returns the underlying MeterImpl of this Meter.
-func (m Meter) MeterImpl() MeterImpl {
-	return m.impl
-}
-
-// newAsync constructs one new asynchronous instrument.
-func (m Meter) newAsync(
-	name string,
-	mkind Kind,
-	nkind NumberKind,
-	opts []InstrumentOption,
-	runner AsyncRunner,
-) (
-	AsyncImpl,
-	error,
-) {
-	if m.impl == nil {
-		return NoopAsync{}, nil
-	}
-	desc := NewDescriptor(name, mkind, nkind, opts...)
-	desc.config.InstrumentationName = m.name
-	desc.config.InstrumentationVersion = m.version
-	return m.impl.NewAsyncInstrument(desc, runner)
-}
-
-// newSync constructs one new synchronous instrument.
-func (m Meter) newSync(
-	name string,
-	metricKind Kind,
-	numberKind NumberKind,
-	opts []InstrumentOption,
-) (
-	SyncImpl,
-	error,
-) {
-	if m.impl == nil {
-		return NoopSync{}, nil
-	}
-	desc := NewDescriptor(name, metricKind, numberKind, opts...)
-	desc.config.InstrumentationName = m.name
-	desc.config.InstrumentationVersion = m.version
-	return m.impl.NewSyncInstrument(desc)
-}
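
For reference, typical usage of the Meter API deleted above, as a minimal, self-contained sketch. Counter.Add and label.String live in files outside this hunk, so their exact shapes here are assumptions from the same API version; a zero-value Meter is documented above as a no-op, which is why the sketch needs no SDK.

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

func main() {
	// A zero-value Meter is a no-op implementation; every call below
	// succeeds but records nothing.
	var meter metric.Meter

	// Instrument constructors return an error for invalid or
	// duplicate names.
	counter, err := meter.NewInt64Counter("requests.count")
	if err != nil {
		panic(err)
	}

	// Add records one measurement with the given labels (assumed
	// signature; Counter.Add is defined outside this hunk).
	counter.Add(context.Background(), 1, label.String("path", "/api"))
}
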
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/must.go b/vendor/go.opentelemetry.io/otel/api/metric/must.go
deleted file mode 100644
index c88e050..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/must.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-// MeterMust is a wrapper for Meter interfaces that panics when any
-// instrument constructor encounters an error.
-type MeterMust struct {
-	meter Meter
-}
-
-// BatchObserverMust is a wrapper for BatchObserver that panics when
-// any instrument constructor encounters an error.
-type BatchObserverMust struct {
-	batch BatchObserver
-}
-
-// Must constructs a MeterMust implementation from a Meter, allowing
-// the application to panic when any instrument constructor yields an
-// error.
-func Must(meter Meter) MeterMust {
-	return MeterMust{meter: meter}
-}
-
-// NewInt64Counter calls `Meter.NewInt64Counter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter {
-	if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter {
-	if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter {
-	if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter {
-	if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...InstrumentOption) Int64ValueRecorder {
-	if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...InstrumentOption) Float64ValueRecorder {
-	if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64ValueObserver calls `Meter.NewInt64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64ValueObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64ValueObserver {
-	if inst, err := mm.meter.NewInt64ValueObserver(name, callback, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64ValueObserver calls `Meter.NewFloat64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64ValueObserver {
-	if inst, err := mm.meter.NewFloat64ValueObserver(name, callback, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64SumObserver calls `Meter.NewInt64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64SumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64SumObserver {
-	if inst, err := mm.meter.NewInt64SumObserver(name, callback, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64SumObserver calls `Meter.NewFloat64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64SumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64SumObserver {
-	if inst, err := mm.meter.NewFloat64SumObserver(name, callback, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64UpDownSumObserver calls `Meter.NewInt64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownSumObserver {
-	if inst, err := mm.meter.NewInt64UpDownSumObserver(name, callback, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64UpDownSumObserver calls `Meter.NewFloat64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownSumObserver {
-	if inst, err := mm.meter.NewFloat64UpDownSumObserver(name, callback, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewBatchObserver returns a wrapper around BatchObserver that panics
-// when any instrument constructor returns an error.
-func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust {
-	return BatchObserverMust{
-		batch: mm.meter.NewBatchObserver(callback),
-	}
-}
-
-// NewInt64ValueObserver calls `BatchObserver.NewInt64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewInt64ValueObserver(name string, oos ...InstrumentOption) Int64ValueObserver {
-	if inst, err := bm.batch.NewInt64ValueObserver(name, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64ValueObserver calls `BatchObserver.NewFloat64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewFloat64ValueObserver(name string, oos ...InstrumentOption) Float64ValueObserver {
-	if inst, err := bm.batch.NewFloat64ValueObserver(name, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64SumObserver calls `BatchObserver.NewInt64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewInt64SumObserver(name string, oos ...InstrumentOption) Int64SumObserver {
-	if inst, err := bm.batch.NewInt64SumObserver(name, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64SumObserver calls `BatchObserver.NewFloat64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewFloat64SumObserver(name string, oos ...InstrumentOption) Float64SumObserver {
-	if inst, err := bm.batch.NewFloat64SumObserver(name, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewInt64UpDownSumObserver calls `BatchObserver.NewInt64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewInt64UpDownSumObserver(name string, oos ...InstrumentOption) Int64UpDownSumObserver {
-	if inst, err := bm.batch.NewInt64UpDownSumObserver(name, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
-
-// NewFloat64UpDownSumObserver calls `BatchObserver.NewFloat64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewFloat64UpDownSumObserver(name string, oos ...InstrumentOption) Float64UpDownSumObserver {
-	if inst, err := bm.batch.NewFloat64UpDownSumObserver(name, oos...); err != nil {
-		panic(err)
-	} else {
-		return inst
-	}
-}
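
must.go exists to trade those constructor error returns for panics at initialization time; the deleted pattern looked like the sketch below (illustrative only, with the same Counter.Add assumption as the previous sketch).

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	// Must converts constructor errors into panics, which suits
	// package-level instrument initialization.
	var meter metric.Meter // zero value: no-op
	counter := metric.Must(meter).NewInt64Counter("jobs.completed")

	// Counter.Add as assumed in the earlier sketch.
	counter.Add(context.Background(), 1)
}
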
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/noop.go b/vendor/go.opentelemetry.io/otel/api/metric/noop.go
deleted file mode 100644
index 97867d9..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/noop.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-type NoopMeterProvider struct{}
-
-type noopInstrument struct{}
-type noopBoundInstrument struct{}
-type NoopSync struct{ noopInstrument }
-type NoopAsync struct{ noopInstrument }
-
-var _ MeterProvider = NoopMeterProvider{}
-var _ SyncImpl = NoopSync{}
-var _ BoundSyncImpl = noopBoundInstrument{}
-var _ AsyncImpl = NoopAsync{}
-
-func (NoopMeterProvider) Meter(_ string, _ ...MeterOption) Meter {
-	return Meter{}
-}
-
-func (noopInstrument) Implementation() interface{} {
-	return nil
-}
-
-func (noopInstrument) Descriptor() Descriptor {
-	return Descriptor{}
-}
-
-func (noopBoundInstrument) RecordOne(context.Context, Number) {
-}
-
-func (noopBoundInstrument) Unbind() {
-}
-
-func (NoopSync) Bind([]label.KeyValue) BoundSyncImpl {
-	return noopBoundInstrument{}
-}
-
-func (NoopSync) RecordOne(context.Context, Number, []label.KeyValue) {
-}
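
These no-op types are what make an uninitialized Meter safe to use before an SDK is installed; a short sketch (Counter.Add again assumed from outside this hunk):

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	// NoopMeterProvider hands out zero-value Meters; instruments they
	// create accept measurements and silently discard them.
	var provider metric.MeterProvider = metric.NoopMeterProvider{}
	meter := provider.Meter("example/noop")

	counter, _ := meter.NewInt64Counter("discarded")
	counter.Add(context.Background(), 1)
}
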
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/number.go b/vendor/go.opentelemetry.io/otel/api/metric/number.go
deleted file mode 100644
index c3ca0ed..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/number.go
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-//go:generate stringer -type=NumberKind
-
-import (
-	"fmt"
-	"math"
-	"sync/atomic"
-
-	"go.opentelemetry.io/otel/internal"
-)
-
-// NumberKind describes the data type of the Number.
-type NumberKind int8
-
-const (
-	// Int64NumberKind means that the Number stores int64.
-	Int64NumberKind NumberKind = iota
-	// Float64NumberKind means that the Number stores float64.
-	Float64NumberKind
-)
-
-// Zero returns a zero value for a given NumberKind
-func (k NumberKind) Zero() Number {
-	switch k {
-	case Int64NumberKind:
-		return NewInt64Number(0)
-	case Float64NumberKind:
-		return NewFloat64Number(0.)
-	default:
-		return Number(0)
-	}
-}
-
-// Minimum returns the minimum representable value
-// for a given NumberKind
-func (k NumberKind) Minimum() Number {
-	switch k {
-	case Int64NumberKind:
-		return NewInt64Number(math.MinInt64)
-	case Float64NumberKind:
-		return NewFloat64Number(-1. * math.MaxFloat64)
-	default:
-		return Number(0)
-	}
-}
-
-// Maximum returns the maximum representable value
-// for a given NumberKind
-func (k NumberKind) Maximum() Number {
-	switch k {
-	case Int64NumberKind:
-		return NewInt64Number(math.MaxInt64)
-	case Float64NumberKind:
-		return NewFloat64Number(math.MaxFloat64)
-	default:
-		return Number(0)
-	}
-}
-
-// Number represents either an integral or a floating point value. It
-// needs to be accompanied with a source of NumberKind that describes
-// the actual type of the value stored within Number.
-type Number uint64
-
-// - constructors
-
-// NewNumberFromRaw creates a new Number from a raw value.
-func NewNumberFromRaw(r uint64) Number {
-	return Number(r)
-}
-
-// NewInt64Number creates an integral Number.
-func NewInt64Number(i int64) Number {
-	return NewNumberFromRaw(internal.Int64ToRaw(i))
-}
-
-// NewFloat64Number creates a floating point Number.
-func NewFloat64Number(f float64) Number {
-	return NewNumberFromRaw(internal.Float64ToRaw(f))
-}
-
-// NewNumberSignChange returns a number with the same magnitude and
-// the opposite sign.  `kind` must describe the kind of number in `nn`.
-//
-// Returns the value unchanged for any other (unrecognized) kind.
-func NewNumberSignChange(kind NumberKind, nn Number) Number {
-	switch kind {
-	case Int64NumberKind:
-		return NewInt64Number(-nn.AsInt64())
-	case Float64NumberKind:
-		return NewFloat64Number(-nn.AsFloat64())
-	}
-	return nn
-}
-
-// - as x
-
-// AsNumber gets the Number.
-func (n *Number) AsNumber() Number {
-	return *n
-}
-
-// AsRaw gets the uninterpreted raw value. Might be useful for some
-// atomic operations.
-func (n *Number) AsRaw() uint64 {
-	return uint64(*n)
-}
-
-// AsInt64 assumes that the value contains an int64 and returns it as
-// such.
-func (n *Number) AsInt64() int64 {
-	return internal.RawToInt64(n.AsRaw())
-}
-
-// AsFloat64 assumes that the measurement value contains a float64 and
-// returns it as such.
-func (n *Number) AsFloat64() float64 {
-	return internal.RawToFloat64(n.AsRaw())
-}
-
-// - as x atomic
-
-// AsNumberAtomic gets the Number atomically.
-func (n *Number) AsNumberAtomic() Number {
-	return NewNumberFromRaw(n.AsRawAtomic())
-}
-
-// AsRawAtomic gets the uninterpreted raw value atomically. Might be
-// useful for some atomic operations.
-func (n *Number) AsRawAtomic() uint64 {
-	return atomic.LoadUint64(n.AsRawPtr())
-}
-
-// AsInt64Atomic assumes that the number contains an int64 and returns
-// it as such atomically.
-func (n *Number) AsInt64Atomic() int64 {
-	return atomic.LoadInt64(n.AsInt64Ptr())
-}
-
-// AsFloat64Atomic assumes that the measurement value contains a
-// float64 and returns it as such atomically.
-func (n *Number) AsFloat64Atomic() float64 {
-	return internal.RawToFloat64(n.AsRawAtomic())
-}
-
-// - as x ptr
-
-// AsRawPtr gets the pointer to the raw, uninterpreted
-// value. Might be useful for some atomic operations.
-func (n *Number) AsRawPtr() *uint64 {
-	return (*uint64)(n)
-}
-
-// AsInt64Ptr assumes that the number contains an int64 and returns a
-// pointer to it.
-func (n *Number) AsInt64Ptr() *int64 {
-	return internal.RawPtrToInt64Ptr(n.AsRawPtr())
-}
-
-// AsFloat64Ptr assumes that the number contains a float64 and returns a
-// pointer to it.
-func (n *Number) AsFloat64Ptr() *float64 {
-	return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
-}
-
-// - coerce
-
-// CoerceToInt64 casts the number to int64. May result in
-// data/precision loss.
-func (n *Number) CoerceToInt64(kind NumberKind) int64 {
-	switch kind {
-	case Int64NumberKind:
-		return n.AsInt64()
-	case Float64NumberKind:
-		return int64(n.AsFloat64())
-	default:
-		// unrecognized kind; return 0
-		return 0
-	}
-}
-
-// CoerceToFloat64 casts the number to float64. May result in
-// data/precision loss.
-func (n *Number) CoerceToFloat64(kind NumberKind) float64 {
-	switch kind {
-	case Int64NumberKind:
-		return float64(n.AsInt64())
-	case Float64NumberKind:
-		return n.AsFloat64()
-	default:
-		// unrecognized kind; return 0
-		return 0
-	}
-}
-
-// - set
-
-// SetNumber sets the number to the passed number. Both should be of
-// the same kind.
-func (n *Number) SetNumber(nn Number) {
-	*n.AsRawPtr() = nn.AsRaw()
-}
-
-// SetRaw sets the number to the passed raw value. Both number and the
-// raw number should represent the same kind.
-func (n *Number) SetRaw(r uint64) {
-	*n.AsRawPtr() = r
-}
-
-// SetInt64 assumes that the number contains an int64 and sets it to
-// the passed value.
-func (n *Number) SetInt64(i int64) {
-	*n.AsInt64Ptr() = i
-}
-
-// SetFloat64 assumes that the number contains a float64 and sets it
-// to the passed value.
-func (n *Number) SetFloat64(f float64) {
-	*n.AsFloat64Ptr() = f
-}
-
-// - set atomic
-
-// SetNumberAtomic sets the number to the passed number
-// atomically. Both should be of the same kind.
-func (n *Number) SetNumberAtomic(nn Number) {
-	atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
-}
-
-// SetRawAtomic sets the number to the passed raw value
-// atomically. Both number and the raw number should represent the
-// same kind.
-func (n *Number) SetRawAtomic(r uint64) {
-	atomic.StoreUint64(n.AsRawPtr(), r)
-}
-
-// SetInt64Atomic assumes that the number contains an int64 and sets
-// it to the passed value atomically.
-func (n *Number) SetInt64Atomic(i int64) {
-	atomic.StoreInt64(n.AsInt64Ptr(), i)
-}
-
-// SetFloat64Atomic assumes that the number contains a float64 and
-// sets it to the passed value atomically.
-func (n *Number) SetFloat64Atomic(f float64) {
-	atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
-}
-
-// - swap
-
-// SwapNumber sets the number to the passed number and returns the old
-// number. Both this number and the passed number should be of the
-// same kind.
-func (n *Number) SwapNumber(nn Number) Number {
-	old := *n
-	n.SetNumber(nn)
-	return old
-}
-
-// SwapRaw sets the number to the passed raw value and returns the old
-// raw value. Both number and the raw number should represent the same
-// kind.
-func (n *Number) SwapRaw(r uint64) uint64 {
-	old := n.AsRaw()
-	n.SetRaw(r)
-	return old
-}
-
-// SwapInt64 assumes that the number contains an int64, sets it to the
-// passed value and returns the old int64 value.
-func (n *Number) SwapInt64(i int64) int64 {
-	old := n.AsInt64()
-	n.SetInt64(i)
-	return old
-}
-
-// SwapFloat64 assumes that the number contains a float64, sets it to
-// the passed value and returns the old float64 value.
-func (n *Number) SwapFloat64(f float64) float64 {
-	old := n.AsFloat64()
-	n.SetFloat64(f)
-	return old
-}
-
-// - swap atomic
-
-// SwapNumberAtomic sets the number to the passed number and returns
-// the old number atomically. Both this number and the passed number
-// should be of the same kind.
-func (n *Number) SwapNumberAtomic(nn Number) Number {
-	return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
-}
-
-// SwapRawAtomic sets the number to the passed raw value and returns
-// the old raw value atomically. Both number and the raw number should
-// represent the same kind.
-func (n *Number) SwapRawAtomic(r uint64) uint64 {
-	return atomic.SwapUint64(n.AsRawPtr(), r)
-}
-
-// SwapInt64Atomic assumes that the number contains an int64, sets it
-// to the passed value and returns the old int64 value atomically.
-func (n *Number) SwapInt64Atomic(i int64) int64 {
-	return atomic.SwapInt64(n.AsInt64Ptr(), i)
-}
-
-// SwapFloat64Atomic assumes that the number contains a float64, sets
-// it to the passed value and returns the old float64 value
-// atomically.
-func (n *Number) SwapFloat64Atomic(f float64) float64 {
-	return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
-}
-
-// - add
-
-// AddNumber assumes that this and the passed number are of the passed
-// kind and adds the passed number to this number.
-func (n *Number) AddNumber(kind NumberKind, nn Number) {
-	switch kind {
-	case Int64NumberKind:
-		n.AddInt64(nn.AsInt64())
-	case Float64NumberKind:
-		n.AddFloat64(nn.AsFloat64())
-	}
-}
-
-// AddRaw assumes that this number and the passed raw value are of the
-// passed kind and adds the passed raw value to this number.
-func (n *Number) AddRaw(kind NumberKind, r uint64) {
-	n.AddNumber(kind, NewNumberFromRaw(r))
-}
-
-// AddInt64 assumes that the number contains an int64 and adds the
-// passed int64 to it.
-func (n *Number) AddInt64(i int64) {
-	*n.AsInt64Ptr() += i
-}
-
-// AddFloat64 assumes that the number contains a float64 and adds the
-// passed float64 to it.
-func (n *Number) AddFloat64(f float64) {
-	*n.AsFloat64Ptr() += f
-}
-
-// - add atomic
-
-// AddNumberAtomic assumes that this and the passed number are of the
-// passed kind and adds the passed number to this number atomically.
-func (n *Number) AddNumberAtomic(kind NumberKind, nn Number) {
-	switch kind {
-	case Int64NumberKind:
-		n.AddInt64Atomic(nn.AsInt64())
-	case Float64NumberKind:
-		n.AddFloat64Atomic(nn.AsFloat64())
-	}
-}
-
-// AddRawAtomic assumes that this number and the passed raw value are
-// of the passed kind and adds the passed raw value to this number
-// atomically.
-func (n *Number) AddRawAtomic(kind NumberKind, r uint64) {
-	n.AddNumberAtomic(kind, NewNumberFromRaw(r))
-}
-
-// AddInt64Atomic assumes that the number contains an int64 and adds
-// the passed int64 to it atomically.
-func (n *Number) AddInt64Atomic(i int64) {
-	atomic.AddInt64(n.AsInt64Ptr(), i)
-}
-
-// AddFloat64Atomic assumes that the number contains a float64 and
-// adds the passed float64 to it atomically.
-func (n *Number) AddFloat64Atomic(f float64) {
-	for {
-		o := n.AsFloat64Atomic()
-		if n.CompareAndSwapFloat64(o, o+f) {
-			break
-		}
-	}
-}
-
-// - compare and swap (atomic only)
-
-// CompareAndSwapNumber does the atomic CAS operation on this
-// number. This number and passed old and new numbers should be of the
-// same kind.
-func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
-	return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
-}
-
-// CompareAndSwapRaw does the atomic CAS operation on this
-// number. This number and passed old and new raw values should be of
-// the same kind.
-func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
-	return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
-}
-
-// CompareAndSwapInt64 assumes that this number contains an int64 and
-// does the atomic CAS operation on it.
-func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
-	return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
-}
-
-// CompareAndSwapFloat64 assumes that this number contains a float64 and
-// does the atomic CAS operation on it.
-func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
-	return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
-}
-
-// - compare
-
-// CompareNumber compares two Numbers given their kind.  Both numbers
-// should have the same kind.  This returns:
-//    0 if the numbers are equal
-//    -1 if the subject `n` is less than the argument `nn`
-//    +1 if the subject `n` is greater than the argument `nn`
-func (n *Number) CompareNumber(kind NumberKind, nn Number) int {
-	switch kind {
-	case Int64NumberKind:
-		return n.CompareInt64(nn.AsInt64())
-	case Float64NumberKind:
-		return n.CompareFloat64(nn.AsFloat64())
-	default:
-		// unrecognized kind; return 0
-		return 0
-	}
-}
-
-// CompareRaw compares two numbers, where one is input as a raw
-// uint64, interpreting both values as a `kind` of number.
-func (n *Number) CompareRaw(kind NumberKind, r uint64) int {
-	return n.CompareNumber(kind, NewNumberFromRaw(r))
-}
-
-// CompareInt64 assumes that the Number contains an int64 and performs
-// a comparison between the value and the other value. It returns the
-// typical result of the compare function: -1 if the value is less
-// than the other, 0 if both are equal, 1 if the value is greater than
-// the other.
-func (n *Number) CompareInt64(i int64) int {
-	this := n.AsInt64()
-	if this < i {
-		return -1
-	} else if this > i {
-		return 1
-	}
-	return 0
-}
-
-// CompareFloat64 assumes that the Number contains a float64 and
-// performs a comparison between the value and the other value. It
-// returns the typical result of the compare function: -1 if the value
-// is less than the other, 0 if both are equal, 1 if the value is
-// greater than the other.
-//
-// Do not compare NaN values.
-func (n *Number) CompareFloat64(f float64) int {
-	this := n.AsFloat64()
-	if this < f {
-		return -1
-	} else if this > f {
-		return 1
-	}
-	return 0
-}
-
-// - relations to zero
-
-// IsPositive returns true if the actual value is greater than zero.
-func (n *Number) IsPositive(kind NumberKind) bool {
-	return n.compareWithZero(kind) > 0
-}
-
-// IsNegative returns true if the actual value is less than zero.
-func (n *Number) IsNegative(kind NumberKind) bool {
-	return n.compareWithZero(kind) < 0
-}
-
-// IsZero returns true if the actual value is equal to zero.
-func (n *Number) IsZero(kind NumberKind) bool {
-	return n.compareWithZero(kind) == 0
-}
-
-// - misc
-
-// Emit returns a string representation of the raw value of the
-// Number. A %d is used for integral values, %f for floating point
-// values.
-func (n *Number) Emit(kind NumberKind) string {
-	switch kind {
-	case Int64NumberKind:
-		return fmt.Sprintf("%d", n.AsInt64())
-	case Float64NumberKind:
-		return fmt.Sprintf("%f", n.AsFloat64())
-	default:
-		return ""
-	}
-}
-
-// AsInterface returns the number as an interface{}, typically used
-// for NumberKind-correct JSON conversion.
-func (n *Number) AsInterface(kind NumberKind) interface{} {
-	switch kind {
-	case Int64NumberKind:
-		return n.AsInt64()
-	case Float64NumberKind:
-		return n.AsFloat64()
-	default:
-		return math.NaN()
-	}
-}
-
-// - private stuff
-
-func (n *Number) compareWithZero(kind NumberKind) int {
-	switch kind {
-	case Int64NumberKind:
-		return n.CompareInt64(0)
-	case Float64NumberKind:
-		return n.CompareFloat64(0.)
-	default:
-		// unrecognized kind; return 0
-		return 0
-	}
-}
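
Number packs either value into a single uint64 slot, with the kind carried separately; a self-contained sketch using only APIs from this file (plus the generated String method from the next file):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	// The kind travels separately from the 64-bit value.
	n := metric.NewFloat64Number(1.5)
	kind := metric.Float64NumberKind

	// Float64 atomic adds are implemented with the CAS loop above.
	n.AddFloat64Atomic(2.5)

	fmt.Println(n.Emit(kind))          // 4.000000
	fmt.Println(n.CoerceToInt64(kind)) // 4 (documented precision loss)
	fmt.Println(kind.String())         // Float64NumberKind
}
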
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go b/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go
deleted file mode 100644
index e99a874..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Code generated by "stringer -type=NumberKind"; DO NOT EDIT.
-
-package metric
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[Int64NumberKind-0]
-	_ = x[Float64NumberKind-1]
-}
-
-const _NumberKind_name = "Int64NumberKindFloat64NumberKind"
-
-var _NumberKind_index = [...]uint8{0, 15, 32}
-
-func (i NumberKind) String() string {
-	if i < 0 || i >= NumberKind(len(_NumberKind_index)-1) {
-		return "NumberKind(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _NumberKind_name[_NumberKind_index[i]:_NumberKind_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/observer.go b/vendor/go.opentelemetry.io/otel/api/metric/observer.go
deleted file mode 100644
index c347da7..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/observer.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-// BatchObserver represents an Observer callback that can report
-// observations for multiple instruments.
-type BatchObserver struct {
-	meter  Meter
-	runner AsyncBatchRunner
-}
-
-// Int64ValueObserver is a metric that captures a set of int64 values at a
-// point in time.
-type Int64ValueObserver struct {
-	asyncInstrument
-}
-
-// Float64ValueObserver is a metric that captures a set of float64 values
-// at a point in time.
-type Float64ValueObserver struct {
-	asyncInstrument
-}
-
-// Int64SumObserver is a metric that captures a precomputed sum of
-// int64 values at a point in time.
-type Int64SumObserver struct {
-	asyncInstrument
-}
-
-// Float64SumObserver is a metric that captures a precomputed sum of
-// float64 values at a point in time.
-type Float64SumObserver struct {
-	asyncInstrument
-}
-
-// Int64UpDownSumObserver is a metric that captures a precomputed sum of
-// int64 values at a point in time.
-type Int64UpDownSumObserver struct {
-	asyncInstrument
-}
-
-// Float64UpDownSumObserver is a metric that captures a precomputed sum of
-// float64 values at a point in time.
-type Float64UpDownSumObserver struct {
-	asyncInstrument
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous integer instrument.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (i Int64ValueObserver) Observation(v int64) Observation {
-	return Observation{
-		number:     NewInt64Number(v),
-		instrument: i.instrument,
-	}
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous floating point instrument.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (f Float64ValueObserver) Observation(v float64) Observation {
-	return Observation{
-		number:     NewFloat64Number(v),
-		instrument: f.instrument,
-	}
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous integer instrument.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (i Int64SumObserver) Observation(v int64) Observation {
-	return Observation{
-		number:     NewInt64Number(v),
-		instrument: i.instrument,
-	}
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous floating point instrument.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (f Float64SumObserver) Observation(v float64) Observation {
-	return Observation{
-		number:     NewFloat64Number(v),
-		instrument: f.instrument,
-	}
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous integer instrument.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (i Int64UpDownSumObserver) Observation(v int64) Observation {
-	return Observation{
-		number:     NewInt64Number(v),
-		instrument: i.instrument,
-	}
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous floating point instrument.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (f Float64UpDownSumObserver) Observation(v float64) Observation {
-	return Observation{
-		number:     NewFloat64Number(v),
-		instrument: f.instrument,
-	}
-}
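
Asynchronous instruments invert control: the SDK drives the callback at collection time. A sketch of the deleted SumObserver pattern; Int64ObserverFunc and Int64ObserverResult are declared outside this hunk, so their shapes here are assumptions from the same API version.

package main

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
)

func main() {
	var meter metric.Meter // zero value: no-op, so the callback never runs

	// The callback reports the current value each time the SDK collects.
	_, err := meter.NewInt64SumObserver("queue.depth",
		func(ctx context.Context, result metric.Int64ObserverResult) {
			result.Observe(42) // assumed Int64ObserverResult.Observe
		})
	if err != nil {
		panic(err)
	}
}
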
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go
deleted file mode 100644
index ed9eccc..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package registry // import "go.opentelemetry.io/otel/api/metric/registry"
-
-import (
-	"context"
-	"fmt"
-	"sync"
-
-	"go.opentelemetry.io/otel/api/metric"
-	"go.opentelemetry.io/otel/label"
-)
-
-// MeterProvider is a standard MeterProvider for wrapping a `MeterImpl`.
-type MeterProvider struct {
-	impl metric.MeterImpl
-}
-
-var _ metric.MeterProvider = (*MeterProvider)(nil)
-
-// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
-// uniqueness checking for instrument descriptors.  Use NewUniqueInstrumentMeterImpl
-// to wrap an implementation with uniqueness checking.
-type uniqueInstrumentMeterImpl struct {
-	lock  sync.Mutex
-	impl  metric.MeterImpl
-	state map[key]metric.InstrumentImpl
-}
-
-var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
-
-type key struct {
-	instrumentName         string
-	instrumentationName    string
-	instrumentationVersion string
-}
-
-// NewMeterProvider returns a new provider that implements instrument
-// name-uniqueness checking.
-func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
-	return &MeterProvider{
-		impl: NewUniqueInstrumentMeterImpl(impl),
-	}
-}
-
-// Meter implements MeterProvider.
-func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
-	return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
-}
-
-// ErrMetricKindMismatch is the standard error for mismatched metric
-// instrument definitions.
-var ErrMetricKindMismatch = fmt.Errorf(
-	"A metric was already registered by this name with another kind or number type")
-
-// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
-// the addition of uniqueness checking.
-func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
-	return &uniqueInstrumentMeterImpl{
-		impl:  impl,
-		state: map[key]metric.InstrumentImpl{},
-	}
-}
-
-// RecordBatch implements metric.MeterImpl.
-func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, ms ...metric.Measurement) {
-	u.impl.RecordBatch(ctx, labels, ms...)
-}
-
-func keyOf(descriptor metric.Descriptor) key {
-	return key{
-		descriptor.Name(),
-		descriptor.InstrumentationName(),
-		descriptor.InstrumentationVersion(),
-	}
-}
-
-// NewMetricKindMismatchError formats an error that describes a
-// mismatched metric instrument definition.
-func NewMetricKindMismatchError(desc metric.Descriptor) error {
-	return fmt.Errorf("Metric was %s (%s %s)registered as a %s %s: %w",
-		desc.Name(),
-		desc.InstrumentationName(),
-		desc.InstrumentationVersion(),
-		desc.NumberKind(),
-		desc.MetricKind(),
-		ErrMetricKindMismatch)
-}
-
-// Compatible determines whether two metric.Descriptors are considered
-// the same for the purpose of uniqueness checking.
-func Compatible(candidate, existing metric.Descriptor) bool {
-	return candidate.MetricKind() == existing.MetricKind() &&
-		candidate.NumberKind() == existing.NumberKind()
-}
-
-// checkUniqueness returns an ErrMetricKindMismatch error if there is
-// a conflict between a descriptor that was already registered and the
-// `descriptor` argument.  If there is an existing compatible
-// registration, this returns the already-registered instrument.  If
-// there is no conflict and no prior registration, returns (nil, nil).
-func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
-	impl, ok := u.state[keyOf(descriptor)]
-	if !ok {
-		return nil, nil
-	}
-
-	if !Compatible(descriptor, impl.Descriptor()) {
-		return nil, NewMetricKindMismatchError(impl.Descriptor())
-	}
-
-	return impl, nil
-}
-
-// NewSyncInstrument implements metric.MeterImpl.
-func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
-	u.lock.Lock()
-	defer u.lock.Unlock()
-
-	impl, err := u.checkUniqueness(descriptor)
-
-	if err != nil {
-		return nil, err
-	} else if impl != nil {
-		return impl.(metric.SyncImpl), nil
-	}
-
-	syncInst, err := u.impl.NewSyncInstrument(descriptor)
-	if err != nil {
-		return nil, err
-	}
-	u.state[keyOf(descriptor)] = syncInst
-	return syncInst, nil
-}
-
-// NewAsyncInstrument implements metric.MeterImpl.
-func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
-	descriptor metric.Descriptor,
-	runner metric.AsyncRunner,
-) (metric.AsyncImpl, error) {
-	u.lock.Lock()
-	defer u.lock.Unlock()
-
-	impl, err := u.checkUniqueness(descriptor)
-
-	if err != nil {
-		return nil, err
-	} else if impl != nil {
-		return impl.(metric.AsyncImpl), nil
-	}
-
-	asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner)
-	if err != nil {
-		return nil, err
-	}
-	u.state[keyOf(descriptor)] = asyncInst
-	return asyncInst, nil
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go b/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go
deleted file mode 100644
index 122c9ba..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// MeterImpl is the interface an SDK must implement to supply a Meter
-// implementation.
-type MeterImpl interface {
-	// RecordBatch atomically records a batch of measurements.
-	RecordBatch(ctx context.Context, labels []label.KeyValue, measurement ...Measurement)
-
-	// NewSyncInstrument returns a newly constructed
-	// synchronous instrument implementation or an error, should
-	// one occur.
-	NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
-
-	// NewAsyncInstrument returns a newly constructed
-	// asynchronous instrument implementation or an error, should
-	// one occur.
-	NewAsyncInstrument(
-		descriptor Descriptor,
-		runner AsyncRunner,
-	) (AsyncImpl, error)
-}
-
-// InstrumentImpl is a common interface for synchronous and
-// asynchronous instruments.
-type InstrumentImpl interface {
-	// Implementation returns the underlying implementation of the
-	// instrument, which allows the implementation to gain access
-	// to its own representation especially from a `Measurement`.
-	Implementation() interface{}
-
-	// Descriptor returns a copy of the instrument's Descriptor.
-	Descriptor() Descriptor
-}
-
-// SyncImpl is the implementation-level interface to a generic
-// synchronous instrument (e.g., ValueRecorder and Counter instruments).
-type SyncImpl interface {
-	InstrumentImpl
-
-	// Bind creates an implementation-level bound instrument,
-	// binding a label set with this instrument implementation.
-	Bind(labels []label.KeyValue) BoundSyncImpl
-
-	// RecordOne captures a single synchronous metric event.
-	RecordOne(ctx context.Context, number Number, labels []label.KeyValue)
-}
-
-// BoundSyncImpl is the implementation-level interface to a
-// generic bound synchronous instrument.
-type BoundSyncImpl interface {
-
-	// RecordOne captures a single synchronous metric event.
-	RecordOne(ctx context.Context, number Number)
-
-	// Unbind frees the resources associated with this bound instrument. It
-	// does not affect the metric this bound instrument was created through.
-	Unbind()
-}
-
-// AsyncImpl is an implementation-level interface to an
-// asynchronous instrument (e.g., Observer instruments).
-type AsyncImpl interface {
-	InstrumentImpl
-}
-
-// WrapMeterImpl constructs a `Meter` implementation from a
-// `MeterImpl` implementation.
-func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
-	return Meter{
-		impl:    impl,
-		name:    instrumentationName,
-		version: NewMeterConfig(opts...).InstrumentationVersion,
-	}
-}
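
registry.go plus the MeterImpl contract above define the uniqueness checking an SDK inherits. Below, a hypothetical stub MeterImpl (built only from interfaces and types in this hunk) is wrapped by NewUniqueInstrumentMeterImpl to show the mismatch path:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/api/metric/registry"
	"go.opentelemetry.io/otel/label"
)

// stubImpl is a hypothetical MeterImpl that hands back no-op instruments.
type stubImpl struct{}

func (stubImpl) RecordBatch(context.Context, []label.KeyValue, ...metric.Measurement) {}

func (stubImpl) NewSyncInstrument(metric.Descriptor) (metric.SyncImpl, error) {
	return metric.NoopSync{}, nil
}

func (stubImpl) NewAsyncInstrument(metric.Descriptor, metric.AsyncRunner) (metric.AsyncImpl, error) {
	return metric.NoopAsync{}, nil
}

func main() {
	impl := registry.NewUniqueInstrumentMeterImpl(stubImpl{})
	meter := metric.WrapMeterImpl(impl, "example/stub")

	// First registration under this name succeeds and is remembered.
	if _, err := meter.NewInt64Counter("hits"); err != nil {
		panic(err)
	}

	// A second registration is checked against the stored instrument's
	// Descriptor(); NoopSync reports an empty descriptor, so the
	// registry flags a kind mismatch here.
	_, err := meter.NewFloat64Counter("hits")
	fmt.Println(err) // wraps registry.ErrMetricKindMismatch
}
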
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/sync.go b/vendor/go.opentelemetry.io/otel/api/metric/sync.go
deleted file mode 100644
index a08a65b..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/sync.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-	"errors"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil.
-var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation")
-
-// Measurement is used for reporting a synchronous batch of metric
-// values. Instances of this type should be created by synchronous
-// instruments (e.g., Int64Counter.Measurement()).
-type Measurement struct {
-	// number needs to be aligned for 64-bit atomic operations.
-	number     Number
-	instrument SyncImpl
-}
-
-// syncInstrument contains a SyncImpl.
-type syncInstrument struct {
-	instrument SyncImpl
-}
-
-// syncBoundInstrument contains a BoundSyncImpl.
-type syncBoundInstrument struct {
-	boundInstrument BoundSyncImpl
-}
-
-// asyncInstrument contains an AsyncImpl.
-type asyncInstrument struct {
-	instrument AsyncImpl
-}
-
-// SyncImpl returns the instrument that created this measurement.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (m Measurement) SyncImpl() SyncImpl {
-	return m.instrument
-}
-
-// Number returns a number recorded in this measurement.
-func (m Measurement) Number() Number {
-	return m.number
-}
-
-// AsyncImpl returns the instrument that created this observation.
-// This returns an implementation-level object for use by the SDK,
-// users should not refer to this.
-func (m Observation) AsyncImpl() AsyncImpl {
-	return m.instrument
-}
-
-// Number returns a number recorded in this observation.
-func (m Observation) Number() Number {
-	return m.number
-}
-
-// AsyncImpl returns the implementation object for asynchronous instruments.
-func (a asyncInstrument) AsyncImpl() AsyncImpl {
-	return a.instrument
-}
-
-// SyncImpl returns the implementation object for synchronous instruments.
-func (s syncInstrument) SyncImpl() SyncImpl {
-	return s.instrument
-}
-
-func (s syncInstrument) bind(labels []label.KeyValue) syncBoundInstrument {
-	return newSyncBoundInstrument(s.instrument.Bind(labels))
-}
-
-func (s syncInstrument) float64Measurement(value float64) Measurement {
-	return newMeasurement(s.instrument, NewFloat64Number(value))
-}
-
-func (s syncInstrument) int64Measurement(value int64) Measurement {
-	return newMeasurement(s.instrument, NewInt64Number(value))
-}
-
-func (s syncInstrument) directRecord(ctx context.Context, number Number, labels []label.KeyValue) {
-	s.instrument.RecordOne(ctx, number, labels)
-}
-
-func (h syncBoundInstrument) directRecord(ctx context.Context, number Number) {
-	h.boundInstrument.RecordOne(ctx, number)
-}
-
-// Unbind calls BoundSyncImpl.Unbind.
-func (h syncBoundInstrument) Unbind() {
-	h.boundInstrument.Unbind()
-}
-
-// checkNewAsync receives an AsyncImpl and a potential
-// error, and returns the same types, checking for and ensuring that
-// the returned interface is not nil.
-func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) {
-	if instrument == nil {
-		if err == nil {
-			err = ErrSDKReturnedNilImpl
-		}
-		instrument = NoopAsync{}
-	}
-	return asyncInstrument{
-		instrument: instrument,
-	}, err
-}
-
-// checkNewSync receives a SyncImpl and a potential
-// error, and returns the same types, checking for and ensuring that
-// the returned interface is not nil.
-func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) {
-	if instrument == nil {
-		if err == nil {
-			err = ErrSDKReturnedNilImpl
-		}
-		// Note: an alternate behavior would be to synthesize a new name
-		// or group all duplicately-named instruments of a certain type
-		// together and use a tag for the original name, e.g.,
-		//   name = 'invalid.counter.int64'
-		//   label = 'original-name=duplicate-counter-name'
-		instrument = NoopSync{}
-	}
-	return syncInstrument{
-		instrument: instrument,
-	}, err
-}
-
-func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument {
-	return syncBoundInstrument{
-		boundInstrument: boundInstrument,
-	}
-}
-
-func newMeasurement(instrument SyncImpl, number Number) Measurement {
-	return Measurement{
-		instrument: instrument,
-		number:     number,
-	}
-}
-
-// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
-func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) {
-	common, err := checkNewSync(syncInst, err)
-	return Int64Counter{syncInstrument: common}, err
-}
-
-// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
-func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) {
-	common, err := checkNewSync(syncInst, err)
-	return Float64Counter{syncInstrument: common}, err
-}
-
-// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
-func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) {
-	common, err := checkNewSync(syncInst, err)
-	return Int64UpDownCounter{syncInstrument: common}, err
-}
-
-// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
-func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) {
-	common, err := checkNewSync(syncInst, err)
-	return Float64UpDownCounter{syncInstrument: common}, err
-}
-
-// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder.
-func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
-	common, err := checkNewSync(syncInst, err)
-	return Int64ValueRecorder{syncInstrument: common}, err
-}
-
-// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder.
-func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
-	common, err := checkNewSync(syncInst, err)
-	return Float64ValueRecorder{syncInstrument: common}, err
-}
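For reference, the guard deleted above is what kept a misbehaving SDK from producing a panicking instrument: a nil SyncImpl is swapped for a noop and ErrSDKReturnedNilImpl is surfaced. A minimal sketch of that behavior, assumed to live inside this metric package (exampleNilGuard is hypothetical; Int64Counter.Add comes from the same package's counter definitions):

package metric

import "context"

func exampleNilGuard(ctx context.Context) {
	// A nil SyncImpl yields ErrSDKReturnedNilImpl and a NoopSync-backed
	// instrument, so the returned counter is always safe to call.
	counter, err := wrapInt64CounterInstrument(nil, nil)
	if err != nil {
		_ = err // ErrSDKReturnedNilImpl
	}
	counter.Add(ctx, 1) // records nothing: backed by NoopSync
}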
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go b/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go
deleted file mode 100644
index 1018246..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// Float64UpDownCounter is a metric instrument that sums floating
-// point values.
-type Float64UpDownCounter struct {
-	syncInstrument
-}
-
-// Int64UpDownCounter is a metric instrument that sums integer values.
-type Int64UpDownCounter struct {
-	syncInstrument
-}
-
-// BoundFloat64UpDownCounter is a bound instrument for Float64UpDownCounter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64UpDownCounter struct {
-	syncBoundInstrument
-}
-
-// BoundInt64UpDownCounter is a bound instrument for Int64UpDownCounter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64UpDownCounter struct {
-	syncBoundInstrument
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Add.
-func (c Float64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundFloat64UpDownCounter) {
-	h.syncBoundInstrument = c.bind(labels)
-	return
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Add.
-func (c Int64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundInt64UpDownCounter) {
-	h.syncBoundInstrument = c.bind(labels)
-	return
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Float64UpDownCounter) Measurement(value float64) Measurement {
-	return c.float64Measurement(value)
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Int64UpDownCounter) Measurement(value int64) Measurement {
-	return c.int64Measurement(value)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
-	c.directRecord(ctx, NewFloat64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
-	c.directRecord(ctx, NewInt64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind().
-func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) {
-	b.directRecord(ctx, NewFloat64Number(value))
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind().
-func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) {
-	b.directRecord(ctx, NewInt64Number(value))
-}
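The file removed above is the pre-1.0 up/down counter API. A short usage sketch under that API (useUpDownCounter and its package are hypothetical; label.String is from the vendored label package):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

func useUpDownCounter(ctx context.Context, c metric.Int64UpDownCounter) {
	// Direct recording: the labels travel with every call.
	c.Add(ctx, -2, label.String("queue", "inbound"))

	// Bound recording: bind the labels once, record cheaply, then Unbind.
	bound := c.Bind(label.String("queue", "inbound"))
	defer bound.Unbind()
	bound.Add(ctx, 5)
}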
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go b/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go
deleted file mode 100644
index fa7e2d4..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-// Float64ValueRecorder is a metric that records float64 values.
-type Float64ValueRecorder struct {
-	syncInstrument
-}
-
-// Int64ValueRecorder is a metric that records int64 values.
-type Int64ValueRecorder struct {
-	syncInstrument
-}
-
-// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64ValueRecorder struct {
-	syncBoundInstrument
-}
-
-// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64ValueRecorder struct {
-	syncBoundInstrument
-}
-
-// Bind creates a bound instrument for this ValueRecorder. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Float64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundFloat64ValueRecorder) {
-	h.syncBoundInstrument = c.bind(labels)
-	return
-}
-
-// Bind creates a bound instrument for this ValueRecorder. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Int64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundInt64ValueRecorder) {
-	h.syncBoundInstrument = c.bind(labels)
-	return
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Float64ValueRecorder) Measurement(value float64) Measurement {
-	return c.float64Measurement(value)
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Int64ValueRecorder) Measurement(value int64) Measurement {
-	return c.int64Measurement(value)
-}
-
-// Record adds a new value to the ValueRecorder's distribution. The
-// labels should contain the keys and values to be associated with
-// this value.
-func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...label.KeyValue) {
-	c.directRecord(ctx, NewFloat64Number(value), labels)
-}
-
-// Record adds a new value to the ValueRecorder's distribution. The
-// labels should contain the keys and values to be associated with
-// this value.
-func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...label.KeyValue) {
-	c.directRecord(ctx, NewInt64Number(value), labels)
-}
-
-// Record adds a new value to the ValueRecorder's distribution using the labels
-// previously bound to the ValueRecorder via Bind().
-func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
-	b.directRecord(ctx, NewFloat64Number(value))
-}
-
-// Record adds a new value to the ValueRecorder's distribution using the labels
-// previously bound to the ValueRecorder via Bind().
-func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
-	b.directRecord(ctx, NewInt64Number(value))
-}
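Likewise for the removed ValueRecorder API, including the Measurement hook for batch recording (useValueRecorder is hypothetical; the Meter type and its RecordBatch signature are assumed from the same pre-1.0 metric package):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/metric"
	"go.opentelemetry.io/otel/label"
)

func useValueRecorder(ctx context.Context, r metric.Float64ValueRecorder, m metric.Meter) {
	// Direct recording with labels.
	r.Record(ctx, 12.7, label.String("endpoint", "/users"))

	// Measurement objects exist to feed batch recording.
	m.RecordBatch(ctx,
		[]label.KeyValue{label.String("endpoint", "/users")},
		r.Measurement(12.7))
}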
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/api.go b/vendor/go.opentelemetry.io/otel/api/trace/api.go
deleted file mode 100644
index 3f15f48..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/api.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"context"
-	"time"
-
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/label"
-)
-
-// TracerProvider provides access to instrumentation Tracers.
-type TracerProvider interface {
-	// Tracer creates an implementation of the Tracer interface.
-	// The instrumentationName must be the name of the library providing
-	// instrumentation. This name may be the same as the instrumented code
-	// only if that code provides built-in instrumentation. If the
-	// instrumentationName is empty, then an implementation-defined default
-	// name will be used instead.
-	Tracer(instrumentationName string, opts ...TracerOption) Tracer
-}
-
-// TracerConfig is a group of options for a Tracer.
-//
-// Most users will use the tracer options instead.
-type TracerConfig struct {
-	// InstrumentationVersion is the version of the instrumentation library.
-	InstrumentationVersion string
-}
-
-// NewTracerConfig applies all the options to a returned TracerConfig.
-// The default values for all the fields of the returned TracerConfig are the
-// zero values of their types. Also, this does not perform any validation
-// on the returned TracerConfig (e.g. no uniqueness checking or bounding of
-// data), instead it is left to the implementations of the SDK to perform this
-// action.
-func NewTracerConfig(opts ...TracerOption) *TracerConfig {
-	config := new(TracerConfig)
-	for _, option := range opts {
-		option.Apply(config)
-	}
-	return config
-}
-
-// TracerOption applies an option to a TracerConfig.
-type TracerOption interface {
-	Apply(*TracerConfig)
-}
-
-type instVersionTracerOption string
-
-func (o instVersionTracerOption) Apply(c *TracerConfig) { c.InstrumentationVersion = string(o) }
-
-// WithInstrumentationVersion sets the instrumentation version for a Tracer.
-func WithInstrumentationVersion(version string) TracerOption {
-	return instVersionTracerOption(version)
-}
-
-type Tracer interface {
-	// Start a span.
-	Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span)
-}
-
-// ErrorConfig provides options to set properties of an error
-// event at the time it is recorded.
-//
-// Most users will use the error options instead.
-type ErrorConfig struct {
-	Timestamp  time.Time
-	StatusCode codes.Code
-}
-
-// ErrorOption applies changes to ErrorConfig that sets options when an error event is recorded.
-type ErrorOption func(*ErrorConfig)
-
-// WithErrorTime sets the time at which the error event should be recorded.
-func WithErrorTime(t time.Time) ErrorOption {
-	return func(c *ErrorConfig) {
-		c.Timestamp = t
-	}
-}
-
-// WithErrorStatus indicates the span status that should be set when recording an error event.
-func WithErrorStatus(s codes.Code) ErrorOption {
-	return func(c *ErrorConfig) {
-		c.StatusCode = s
-	}
-}
-
-type Span interface {
-	// Tracer returns the tracer used to create this span. The Tracer cannot be nil.
-	Tracer() Tracer
-
-	// End completes the span. No updates are allowed to the span after it
-	// ends. The only exception is setting the status of the span.
-	End(options ...SpanOption)
-
-	// AddEvent adds an event to the span.
-	AddEvent(ctx context.Context, name string, attrs ...label.KeyValue)
-	// AddEventWithTimestamp adds an event with a custom timestamp
-	// to the span.
-	AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue)
-
-	// IsRecording returns true if the span is active and recording events is enabled.
-	IsRecording() bool
-
-	// RecordError records an error as a span event.
-	RecordError(ctx context.Context, err error, opts ...ErrorOption)
-
-	// SpanContext returns the span context of the span. The returned
-	// SpanContext is usable even after the span ends.
-	SpanContext() SpanContext
-
-	// SetStatus sets the status of the span in the form of a code
-	// and a message.  SetStatus overrides the value of previous
-	// calls to SetStatus on the Span.
-	//
-	// The default span status is Unset, so it is not necessary to
-	// explicitly set an Ok status on successful Spans unless it
-	// is to add an Ok message or to override a previous status on the Span.
-	SetStatus(code codes.Code, msg string)
-
-	// SetName sets the name of the span.
-	SetName(name string)
-
-	// SetAttributes sets the attributes of the span.
-	SetAttributes(kv ...label.KeyValue)
-}
-
-// SpanConfig is a group of options for a Span.
-//
-// Most users will use span options instead.
-type SpanConfig struct {
-	// Attributes describe the associated qualities of a Span.
-	Attributes []label.KeyValue
-	// Timestamp is a time in a Span life-cycle.
-	Timestamp time.Time
-	// Links are the associations a Span has with other Spans.
-	Links []Link
-	// Record is the recording state of a Span.
-	Record bool
-	// NewRoot identifies a Span as the root Span for a new trace. This is
-	// commonly used when an existing trace crosses trust boundaries and the
-	// remote parent span context should be ignored for security.
-	NewRoot bool
-	// SpanKind is the role a Span has in a trace.
-	SpanKind SpanKind
-}
-
-// NewSpanConfig applies all the options to a returned SpanConfig.
-// The default values for all the fields of the returned SpanConfig are the
-// zero values of their types. Also, this does not perform any validation
-// on the returned SpanConfig (e.g. no uniqueness checking or bounding of
-// data). Instead, it is left to the implementations of the SDK to perform this
-// action.
-func NewSpanConfig(opts ...SpanOption) *SpanConfig {
-	c := new(SpanConfig)
-	for _, option := range opts {
-		option.Apply(c)
-	}
-	return c
-}
-
-// SpanOption applies an option to a SpanConfig.
-type SpanOption interface {
-	Apply(*SpanConfig)
-}
-
-type attributeSpanOption []label.KeyValue
-
-func (o attributeSpanOption) Apply(c *SpanConfig) {
-	c.Attributes = append(c.Attributes, []label.KeyValue(o)...)
-}
-
-// WithAttributes adds the attributes to a span. These attributes are meant to
-// provide additional information about the work the Span represents. The
-// attributes are added to the existing Span attributes, i.e. this does not
-// overwrite.
-func WithAttributes(attributes ...label.KeyValue) SpanOption {
-	return attributeSpanOption(attributes)
-}
-
-type timestampSpanOption time.Time
-
-func (o timestampSpanOption) Apply(c *SpanConfig) { c.Timestamp = time.Time(o) }
-
-// WithTimestamp sets the time of a Span life-cycle moment (e.g. started or
-// stopped).
-func WithTimestamp(t time.Time) SpanOption {
-	return timestampSpanOption(t)
-}
-
-type linksSpanOption []Link
-
-func (o linksSpanOption) Apply(c *SpanConfig) { c.Links = append(c.Links, []Link(o)...) }
-
-// WithLinks adds links to a Span. The links are added to the existing Span
-// links, i.e. this does not overwrite.
-func WithLinks(links ...Link) SpanOption {
-	return linksSpanOption(links)
-}
-
-type recordSpanOption bool
-
-func (o recordSpanOption) Apply(c *SpanConfig) { c.Record = bool(o) }
-
-// WithRecord specifies that the span should be recorded. It is important to
-// note that implementations may override this option, e.g., if the span is a
-// child of an unsampled trace.
-func WithRecord() SpanOption {
-	return recordSpanOption(true)
-}
-
-type newRootSpanOption bool
-
-func (o newRootSpanOption) Apply(c *SpanConfig) { c.NewRoot = bool(o) }
-
-// WithNewRoot specifies that the Span should be treated as a root Span. Any
-// existing parent span context will be ignored when defining the Span's trace
-// identifiers.
-func WithNewRoot() SpanOption {
-	return newRootSpanOption(true)
-}
-
-type spanKindSpanOption SpanKind
-
-func (o spanKindSpanOption) Apply(c *SpanConfig) { c.SpanKind = SpanKind(o) }
-
-// WithSpanKind sets the SpanKind of a Span.
-func WithSpanKind(kind SpanKind) SpanOption {
-	return spanKindSpanOption(kind)
-}
-
-// Link is used to establish a relationship between two spans within the same Trace or
-// across different Traces. A few examples of Link usage:
-//   1. Batch Processing: A batch of elements may contain elements associated with one
-//      or more traces/spans. Since there can only be one parent SpanContext, a Link is
-//      used to keep a reference to the SpanContext of every element in the batch.
-//   2. Public Endpoint: A SpanContext in an incoming client request on a public endpoint
-//      is untrusted from the service provider's perspective. In such a case it is
-//      advisable to start a new trace with an appropriate sampling decision. However,
-//      it is desirable to associate the incoming SpanContext with the new trace initiated
-//      on the service provider's side so the two traces (from the Client and from the
-//      Service Provider) can be correlated.
-type Link struct {
-	SpanContext
-	Attributes []label.KeyValue
-}
-
-// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span
-// will be processed and visualized by various backends.
-type SpanKind int
-
-const (
-	// As a convenience, these match the proto definition, see
-	// opentelemetry/proto/trace/v1/trace.proto
-	//
-	// The unspecified value is not a valid `SpanKind`.  Use
-	// `ValidateSpanKind()` to coerce a span kind to a valid
-	// value.
-	SpanKindUnspecified SpanKind = 0
-	SpanKindInternal    SpanKind = 1
-	SpanKindServer      SpanKind = 2
-	SpanKindClient      SpanKind = 3
-	SpanKindProducer    SpanKind = 4
-	SpanKindConsumer    SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value.  This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
-	switch spanKind {
-	case SpanKindInternal,
-		SpanKindServer,
-		SpanKindClient,
-		SpanKindProducer,
-		SpanKindConsumer:
-		// valid
-		return spanKind
-	default:
-		return SpanKindInternal
-	}
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
-	switch sk {
-	case SpanKindInternal:
-		return "internal"
-	case SpanKindServer:
-		return "server"
-	case SpanKindClient:
-		return "client"
-	case SpanKindProducer:
-		return "producer"
-	case SpanKindConsumer:
-		return "consumer"
-	default:
-		return "unspecified"
-	}
-}
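The API above implements the functional-options pattern: each With* helper is a small named type whose Apply method writes one field of the config. A sketch of composing the span options deleted above (buildSpanConfig and its package are hypothetical):

package example

import (
	"time"

	"go.opentelemetry.io/otel/api/trace"
	"go.opentelemetry.io/otel/label"
)

func buildSpanConfig() *trace.SpanConfig {
	// Options are applied in order onto a zero-valued SpanConfig.
	return trace.NewSpanConfig(
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(label.String("http.method", "GET")),
		trace.WithTimestamp(time.Now()),
		trace.WithNewRoot(),
	)
}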
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/context.go b/vendor/go.opentelemetry.io/otel/api/trace/context.go
deleted file mode 100644
index 0f330e3..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/context.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"context"
-)
-
-type traceContextKeyType int
-
-const (
-	currentSpanKey traceContextKeyType = iota
-	remoteContextKey
-)
-
-// ContextWithSpan creates a new context with a current span set to
-// the passed span.
-func ContextWithSpan(ctx context.Context, span Span) context.Context {
-	return context.WithValue(ctx, currentSpanKey, span)
-}
-
-// SpanFromContext returns the current span stored in the context.
-func SpanFromContext(ctx context.Context) Span {
-	if span, has := ctx.Value(currentSpanKey).(Span); has {
-		return span
-	}
-	return noopSpan{}
-}
-
-// ContextWithRemoteSpanContext creates a new context with a remote
-// span context set to the passed span context.
-func ContextWithRemoteSpanContext(ctx context.Context, sc SpanContext) context.Context {
-	return context.WithValue(ctx, remoteContextKey, sc)
-}
-
-// RemoteSpanContextFromContext returns the remote span context stored
-// in the context.
-func RemoteSpanContextFromContext(ctx context.Context) SpanContext {
-	if sc, ok := ctx.Value(remoteContextKey).(SpanContext); ok {
-		return sc
-	}
-	return EmptySpanContext()
-}
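A sketch of the context plumbing removed above; the noop fallback means SpanFromContext never returns nil, so callers skip the nil check (annotate is hypothetical):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/trace"
)

func annotate(ctx context.Context, span trace.Span) context.Context {
	ctx = trace.ContextWithSpan(ctx, span)
	// Safe even if no span was ever stored: a noopSpan comes back.
	trace.SpanFromContext(ctx).SetName("renamed")
	return ctx
}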
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/doc.go b/vendor/go.opentelemetry.io/otel/api/trace/doc.go
deleted file mode 100644
index 24f2dfb..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package trace provides tracing support.
-package trace // import "go.opentelemetry.io/otel/api/trace"
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go
deleted file mode 100644
index f014f21..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"context"
-	"time"
-
-	"go.opentelemetry.io/otel/codes"
-	"go.opentelemetry.io/otel/label"
-)
-
-type noopSpan struct {
-}
-
-var _ Span = noopSpan{}
-
-// SpanContext returns an invalid span context.
-func (noopSpan) SpanContext() SpanContext {
-	return EmptySpanContext()
-}
-
-// IsRecording always returns false for noopSpan.
-func (noopSpan) IsRecording() bool {
-	return false
-}
-
-// SetStatus does nothing.
-func (noopSpan) SetStatus(status codes.Code, msg string) {
-}
-
-// SetError does nothing.
-func (noopSpan) SetError(v bool) {
-}
-
-// SetAttributes does nothing.
-func (noopSpan) SetAttributes(attributes ...label.KeyValue) {
-}
-
-// End does nothing.
-func (noopSpan) End(options ...SpanOption) {
-}
-
-// RecordError does nothing.
-func (noopSpan) RecordError(ctx context.Context, err error, opts ...ErrorOption) {
-}
-
-// Tracer returns a noop implementation of Tracer.
-func (noopSpan) Tracer() Tracer {
-	return noopTracer{}
-}
-
-// AddEvent does nothing.
-func (noopSpan) AddEvent(ctx context.Context, name string, attrs ...label.KeyValue) {
-}
-
-// AddEventWithTimestamp does nothing.
-func (noopSpan) AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue) {
-}
-
-// SetName does nothing.
-func (noopSpan) SetName(name string) {
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go
deleted file mode 100644
index 954f9e8..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"context"
-)
-
-type noopTracer struct{}
-
-var _ Tracer = noopTracer{}
-
-// Start starts a noop span.
-func (noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
-	span := noopSpan{}
-	return ContextWithSpan(ctx, span), span
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go
deleted file mode 100644
index 414c8e3..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-type noopTracerProvider struct{}
-
-var _ TracerProvider = noopTracerProvider{}
-
-// Tracer returns a noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(_ string, _ ...TracerOption) Tracer {
-	return noopTracer{}
-}
-
-// NoopTracerProvider returns a noop implementation of TracerProvider. The
-// Tracer and Spans created from the noop provider will also be noop.
-func NoopTracerProvider() TracerProvider {
-	return noopTracerProvider{}
-}
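Usage sketch for the noop provider deleted above (quietTracer is hypothetical):

package example

import (
	"context"

	"go.opentelemetry.io/otel/api/trace"
)

func quietTracer(ctx context.Context) {
	// Every object in the chain is a noop; Start still returns a usable
	// (but non-recording) span and a derived context.
	tracer := trace.NoopTracerProvider().Tracer("example")
	ctx, span := tracer.Start(ctx, "op")
	defer span.End()
	_ = ctx
}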
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/span_context.go b/vendor/go.opentelemetry.io/otel/api/trace/span_context.go
deleted file mode 100644
index 914ce5f..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/span_context.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
-	"bytes"
-	"encoding/hex"
-	"encoding/json"
-)
-
-const (
-	// FlagsSampled is a bitmask with the sampled bit set. A SpanContext
-	// with the sampling bit set means the span is sampled.
-	FlagsSampled = byte(0x01)
-	// FlagsDeferred is a bitmask with the deferred bit set. A SpanContext
-	// with the deferred bit set means the sampling decision has been
-	// deferred to the receiver.
-	FlagsDeferred = byte(0x02)
-	// FlagsDebug is a bitmask with the debug bit set.
-	FlagsDebug = byte(0x04)
-
-	ErrInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
-
-	ErrInvalidTraceIDLength errorConst = "hex encoded trace-id must have a length of 32"
-	ErrNilTraceID           errorConst = "trace-id can't be all zero"
-
-	ErrInvalidSpanIDLength errorConst = "hex encoded span-id must have a length of 16"
-	ErrNilSpanID           errorConst = "span-id can't be all zero"
-)
-
-type errorConst string
-
-func (e errorConst) Error() string {
-	return string(e)
-}
-
-// ID is a unique identity of a trace.
-type ID [16]byte
-
-var nilTraceID ID
-var _ json.Marshaler = nilTraceID
-
-// IsValid checks whether the trace ID is valid. A valid trace ID does
-// not consist of zeros only.
-func (t ID) IsValid() bool {
-	return !bytes.Equal(t[:], nilTraceID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode TraceID
-// as a hex string.
-func (t ID) MarshalJSON() ([]byte, error) {
-	return json.Marshal(t.String())
-}
-
-// String returns the hex string representation of a TraceID.
-func (t ID) String() string {
-	return hex.EncodeToString(t[:])
-}
-
-// SpanID is a unique identity of a span in a trace.
-type SpanID [8]byte
-
-var nilSpanID SpanID
-var _ json.Marshaler = nilSpanID
-
-// IsValid checks whether the span ID is valid. A valid span ID does
-// not consist of zeros only.
-func (s SpanID) IsValid() bool {
-	return !bytes.Equal(s[:], nilSpanID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode SpanID
-// as a hex string.
-func (s SpanID) MarshalJSON() ([]byte, error) {
-	return json.Marshal(s.String())
-}
-
-// String returns the hex string representation of a SpanID.
-func (s SpanID) String() string {
-	return hex.EncodeToString(s[:])
-}
-
-// IDFromHex returns a TraceID from a hex string if it is compliant
-// with the w3c trace-context specification.
-// See more at https://www.w3.org/TR/trace-context/#trace-id
-func IDFromHex(h string) (ID, error) {
-	t := ID{}
-	if len(h) != 32 {
-		return t, ErrInvalidTraceIDLength
-	}
-
-	if err := decodeHex(h, t[:]); err != nil {
-		return t, err
-	}
-
-	if !t.IsValid() {
-		return t, ErrNilTraceID
-	}
-	return t, nil
-}
-
-// SpanIDFromHex returns a SpanID from a hex string if it is compliant
-// with the w3c trace-context specification.
-// See more at https://www.w3.org/TR/trace-context/#parent-id
-func SpanIDFromHex(h string) (SpanID, error) {
-	s := SpanID{}
-	if len(h) != 16 {
-		return s, ErrInvalidSpanIDLength
-	}
-
-	if err := decodeHex(h, s[:]); err != nil {
-		return s, err
-	}
-
-	if !s.IsValid() {
-		return s, ErrNilSpanID
-	}
-	return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
-	for _, r := range h {
-		switch {
-		case 'a' <= r && r <= 'f':
-			continue
-		case '0' <= r && r <= '9':
-			continue
-		default:
-			return ErrInvalidHexID
-		}
-	}
-
-	decoded, err := hex.DecodeString(h)
-	if err != nil {
-		return err
-	}
-
-	copy(b, decoded)
-	return nil
-}
-
-// SpanContext contains basic information about the span - its trace
-// ID, span ID and trace flags.
-type SpanContext struct {
-	TraceID    ID
-	SpanID     SpanID
-	TraceFlags byte
-}
-
-// EmptySpanContext is meant for internal use to return invalid span
-// context during error conditions.
-func EmptySpanContext() SpanContext {
-	return SpanContext{}
-}
-
-// IsValid checks if the span context is valid. A valid span context
-// has a valid trace ID and a valid span ID.
-func (sc SpanContext) IsValid() bool {
-	return sc.HasTraceID() && sc.HasSpanID()
-}
-
-// HasTraceID checks if the span context has a valid trace ID.
-func (sc SpanContext) HasTraceID() bool {
-	return sc.TraceID.IsValid()
-}
-
-// HasSpanID checks if the span context has a valid span ID.
-func (sc SpanContext) HasSpanID() bool {
-	return sc.SpanID.IsValid()
-}
-
-// IsDeferred returns whether the deferred bit is set in the trace flags.
-func (sc SpanContext) IsDeferred() bool {
-	return sc.TraceFlags&FlagsDeferred == FlagsDeferred
-}
-
-// IsDebug returns whether the debug bit is set in the trace flags.
-func (sc SpanContext) IsDebug() bool {
-	return sc.TraceFlags&FlagsDebug == FlagsDebug
-}
-
-// IsSampled returns whether the sampling bit is set in the trace flags.
-func (sc SpanContext) IsSampled() bool {
-	return sc.TraceFlags&FlagsSampled == FlagsSampled
-}
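A sketch of the removed span-context helpers, using the W3C trace-context specification's example IDs (parseW3CIDs is hypothetical):

package example

import "go.opentelemetry.io/otel/api/trace"

func parseW3CIDs() (trace.SpanContext, error) {
	// 32 lowercase hex characters for the trace-id, 16 for the span-id.
	tid, err := trace.IDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	if err != nil {
		return trace.EmptySpanContext(), err
	}
	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	if err != nil {
		return trace.EmptySpanContext(), err
	}
	sc := trace.SpanContext{TraceID: tid, SpanID: sid, TraceFlags: trace.FlagsSampled}
	_ = sc.IsSampled() // true: the sampled bit is set
	return sc, nil
}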
diff --git a/vendor/go.opentelemetry.io/otel/baggage.go b/vendor/go.opentelemetry.io/otel/baggage.go
deleted file mode 100644
index bf0f1f6..0000000
--- a/vendor/go.opentelemetry.io/otel/baggage.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/internal/baggage"
-	"go.opentelemetry.io/otel/label"
-)
-
-// Baggage returns a copy of the baggage in ctx.
-func Baggage(ctx context.Context) label.Set {
-	// TODO (MrAlias, #1222): The underlying storage, the Map, shares many of
-	// the functional elements of the label.Set. These should be unified so
-	// this conversion is unnecessary and there is no performance hit calling
-	// this.
-	m := baggage.MapFromContext(ctx)
-	values := make([]label.KeyValue, 0, m.Len())
-	m.Foreach(func(kv label.KeyValue) bool {
-		values = append(values, kv)
-		return true
-	})
-	return label.NewSet(values...)
-}
-
-// BaggageValue returns the value related to key in the baggage of ctx. If no
-// value is set, the returned label.Value will be an uninitialized zero-value
-// with type INVALID.
-func BaggageValue(ctx context.Context, key label.Key) label.Value {
-	v, _ := baggage.MapFromContext(ctx).Value(key)
-	return v
-}
-
-// ContextWithBaggageValues returns a copy of parent with pairs updated in the baggage.
-func ContextWithBaggageValues(parent context.Context, pairs ...label.KeyValue) context.Context {
-	m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
-		MultiKV: pairs,
-	})
-	return baggage.ContextWithMap(parent, m)
-}
-
-// ContextWithoutBaggageValues returns a copy of parent in which the values related
-// to keys have been removed from the baggage.
-func ContextWithoutBaggageValues(parent context.Context, keys ...label.Key) context.Context {
-	m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
-		DropMultiK: keys,
-	})
-	return baggage.ContextWithMap(parent, m)
-}
-
-// ContextWithoutBaggage returns a copy of parent without baggage.
-func ContextWithoutBaggage(parent context.Context) context.Context {
-	return baggage.ContextWithNoCorrelationData(parent)
-}
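A sketch of the removed baggage helpers (tagRequest is hypothetical; label.Key and Value.AsString are from the vendored label package):

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/label"
)

func tagRequest(ctx context.Context) context.Context {
	ctx = otel.ContextWithBaggageValues(ctx, label.String("tenant", "acme"))
	v := otel.BaggageValue(ctx, label.Key("tenant"))
	_ = v.AsString() // "acme"
	// Strip everything again when crossing a trust boundary.
	return otel.ContextWithoutBaggage(ctx)
}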
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
deleted file mode 100644
index 28393a5..0000000
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package codes defines the canonical error codes used by OpenTelemetry.
-//
-// It conforms to [the OpenTelemetry
-// specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#statuscanonicalcode).
-package codes // import "go.opentelemetry.io/otel/codes"
-
-import (
-	"fmt"
-	"strconv"
-)
-
-const (
-	// Unset is the default status code.
-	Unset Code = 0
-	// Error indicates the operation contains an error.
-	Error Code = 1
-	// Ok indicates the operation has been validated by an application
-	// developer or operator to have completed successfully, or to contain no error.
-	Ok Code = 2
-
-	maxCode = 3
-)
-
-// Code is a 32-bit representation of a status state.
-type Code uint32
-
-var codeToStr = map[Code]string{
-	Unset: "Unset",
-	Error: "Error",
-	Ok:    "Ok",
-}
-
-// Keys include the JSON quotes because UnmarshalJSON receives the raw bytes.
-var strToCode = map[string]Code{
-	`"Unset"`: Unset,
-	`"Error"`: Error,
-	`"Ok"`:    Ok,
-}
-
-// String returns the Code as a string.
-func (c Code) String() string {
-	return codeToStr[c]
-}
-
-// UnmarshalJSON unmarshals b into the Code.
-//
-// This is based on the functionality in the gRPC codes package:
-// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
-func (c *Code) UnmarshalJSON(b []byte) error {
-	// From json.Unmarshaler: By convention, to approximate the behavior of
-	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
-	// a no-op.
-	if string(b) == "null" {
-		return nil
-	}
-	if c == nil {
-		return fmt.Errorf("nil receiver passed to UnmarshalJSON")
-	}
-
-	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
-		if ci >= maxCode {
-			return fmt.Errorf("invalid code: %q", ci)
-		}
-
-		*c = Code(ci)
-		return nil
-	}
-
-	if jc, ok := strToCode[string(b)]; ok {
-		*c = jc
-		return nil
-	}
-	return fmt.Errorf("invalid code: %q", string(b))
-}
-
-// MarshalJSON returns c as the JSON encoding of c.
-func (c *Code) MarshalJSON() ([]byte, error) {
-	if c == nil {
-		return []byte("null"), nil
-	}
-	str, ok := codeToStr[*c]
-	if !ok {
-		return nil, fmt.Errorf("invalid code: %d", *c)
-	}
-	return []byte(fmt.Sprintf("%q", str)), nil
-}
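Both the quoted name and the bare number unmarshal to the same Code; a sketch of the round trip (roundTripCode is hypothetical):

package example

import (
	"encoding/json"

	"go.opentelemetry.io/otel/codes"
)

func roundTripCode() (codes.Code, error) {
	var byName, byNumber codes.Code
	if err := json.Unmarshal([]byte(`"Ok"`), &byName); err != nil {
		return 0, err
	}
	if err := json.Unmarshal([]byte(`2`), &byNumber); err != nil {
		return 0, err
	}
	_ = byName == byNumber // true: both are codes.Ok
	return byName, nil
}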
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
deleted file mode 100644
index de2f76c..0000000
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package otel contains OpenTelemetry Go packages.
-package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
deleted file mode 100644
index d1c892f..0000000
--- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-top_dir='.'
-if [[ $# -gt 0 ]]; then
-    top_dir="${1}"
-fi
-
-p=$(pwd)
-mod_dirs=()
-mapfile -t mod_dirs < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-for mod_dir in "${mod_dirs[@]}"; do
-    cd "${mod_dir}"
-    main_dirs=()
-    mapfile -t main_dirs < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
-    for main_dir in "${main_dirs[@]}"; do
-        echo ".${main_dir#${p}}"
-    done
-    cd "${p}"
-done
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
deleted file mode 100644
index 6b5c0c2..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package baggage provides types and functions to manage W3C Baggage.
-package baggage
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/label"
-)
-
-type rawMap map[label.Key]label.Value
-type keySet map[label.Key]struct{}
-
-// Map is an immutable storage for correlations.
-type Map struct {
-	m rawMap
-}
-
-// MapUpdate contains information about correlation changes to be
-// made.
-type MapUpdate struct {
-	// DropSingleK contains a single key to be dropped from
-	// correlations. Use this to avoid an overhead of a slice
-	// allocation if there is only one key to drop.
-	DropSingleK label.Key
-	// DropMultiK contains all the keys to be dropped from
-	// correlations.
-	DropMultiK []label.Key
-
-	// SingleKV contains a single key-value pair to be added to
-	// correlations. Use this to avoid an overhead of a slice
-	// allocation if there is only one key-value pair to add.
-	SingleKV label.KeyValue
-	// MultiKV contains all the key-value pairs to be added to
-	// correlations.
-	MultiKV []label.KeyValue
-}
-
-func newMap(raw rawMap) Map {
-	return Map{
-		m: raw,
-	}
-}
-
-// NewEmptyMap creates an empty correlations map.
-func NewEmptyMap() Map {
-	return newMap(nil)
-}
-
-// NewMap creates a map with the contents of the update applied. In
-// this function, having an update with DropSingleK or DropMultiK
-// makes no sense - those fields are effectively ignored.
-func NewMap(update MapUpdate) Map {
-	return NewEmptyMap().Apply(update)
-}
-
-// Apply creates a copy of the map with the contents of the update
-// applied. Apply will first drop the keys from DropSingleK and
-// DropMultiK, then add key-value pairs from SingleKV and MultiKV.
-func (m Map) Apply(update MapUpdate) Map {
-	delSet, addSet := getModificationSets(update)
-	mapSize := getNewMapSize(m.m, delSet, addSet)
-
-	r := make(rawMap, mapSize)
-	for k, v := range m.m {
-		// do not copy items we want to drop
-		if _, ok := delSet[k]; ok {
-			continue
-		}
-		// do not copy items we would overwrite
-		if _, ok := addSet[k]; ok {
-			continue
-		}
-		r[k] = v
-	}
-	if update.SingleKV.Key.Defined() {
-		r[update.SingleKV.Key] = update.SingleKV.Value
-	}
-	for _, kv := range update.MultiKV {
-		r[kv.Key] = kv.Value
-	}
-	if len(r) == 0 {
-		r = nil
-	}
-	return newMap(r)
-}
-
-func getModificationSets(update MapUpdate) (delSet, addSet keySet) {
-	deletionsCount := len(update.DropMultiK)
-	if update.DropSingleK.Defined() {
-		deletionsCount++
-	}
-	if deletionsCount > 0 {
-		delSet = make(map[label.Key]struct{}, deletionsCount)
-		for _, k := range update.DropMultiK {
-			delSet[k] = struct{}{}
-		}
-		if update.DropSingleK.Defined() {
-			delSet[update.DropSingleK] = struct{}{}
-		}
-	}
-
-	additionsCount := len(update.MultiKV)
-	if update.SingleKV.Key.Defined() {
-		additionsCount++
-	}
-	if additionsCount > 0 {
-		addSet = make(map[label.Key]struct{}, additionsCount)
-		for _, k := range update.MultiKV {
-			addSet[k.Key] = struct{}{}
-		}
-		if update.SingleKV.Key.Defined() {
-			addSet[update.SingleKV.Key] = struct{}{}
-		}
-	}
-
-	return
-}
-
-func getNewMapSize(m rawMap, delSet, addSet keySet) int {
-	mapSizeDiff := 0
-	for k := range addSet {
-		if _, ok := m[k]; !ok {
-			mapSizeDiff++
-		}
-	}
-	for k := range delSet {
-		if _, ok := m[k]; ok {
-			if _, inAddSet := addSet[k]; !inAddSet {
-				mapSizeDiff--
-			}
-		}
-	}
-	return len(m) + mapSizeDiff
-}
-
-// Value gets a value from the correlations map and returns a boolean
-// value indicating whether the key exists in the map.
-func (m Map) Value(k label.Key) (label.Value, bool) {
-	value, ok := m.m[k]
-	return value, ok
-}
-
-// HasValue returns a boolean value indicating whether the key exists
-// in the map.
-func (m Map) HasValue(k label.Key) bool {
-	_, has := m.Value(k)
-	return has
-}
-
-// Len returns the length of the map.
-func (m Map) Len() int {
-	return len(m.m)
-}
-
-// Foreach calls the passed callback once on each key-value pair until
-// all the key-value pairs of the map have been iterated or the callback
-// returns false, whichever happens first.
-func (m Map) Foreach(f func(label.KeyValue) bool) {
-	for k, v := range m.m {
-		if !f(label.KeyValue{
-			Key:   k,
-			Value: v,
-		}) {
-			return
-		}
-	}
-}
-
-type correlationsType struct{}
-
-// SetHookFunc describes a type of a callback that is called when
-// storing baggage in the context.
-type SetHookFunc func(context.Context) context.Context
-
-// GetHookFunc describes a type of a callback that is called when
-// getting baggage from the context.
-type GetHookFunc func(context.Context, Map) Map
-
-// value under this key is either of type Map or correlationsData
-var correlationsKey = &correlationsType{}
-
-type correlationsData struct {
-	m       Map
-	setHook SetHookFunc
-	getHook GetHookFunc
-}
-
-func (d correlationsData) isHookless() bool {
-	return d.setHook == nil && d.getHook == nil
-}
-
-type hookKind int
-
-const (
-	hookKindSet hookKind = iota
-	hookKindGet
-)
-
-func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) {
-	switch kind {
-	case hookKindSet:
-		d.setHook = setHook
-	case hookKindGet:
-		d.getHook = getHook
-	}
-}
-
-// ContextWithSetHook installs a hook function that will be invoked
-// every time ContextWithMap is called. To avoid unnecessary callback
-// invocations (recursive or not), the callback can temporarily clear
-// the hooks from the context with the ContextWithNoHooks function.
-//
-// Note that NewContext also calls ContextWithMap, so the hook will be
-// invoked.
-//
-// Passing nil SetHookFunc creates a context with no set hook to call.
-//
-// This function should not be used by applications or libraries. It
-// is mostly for interoperation with other observability APIs.
-func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context {
-	return contextWithHook(ctx, hookKindSet, hook, nil)
-}
-
-// ContextWithGetHook installs a hook function that will be invoked
-// every time MapFromContext is called. To avoid unnecessary callback
-// invocations (recursive or not), the callback can temporarily clear
-// the hooks from the context with the ContextWithNoHooks function.
-//
-// Note that NewContext also calls MapFromContext, so the hook will be
-// invoked.
-//
-// Passing nil GetHookFunc creates a context with no get hook to call.
-//
-// This function should not be used by applications or libraries. It
-// is mostly for interoperation with other observability APIs.
-func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context {
-	return contextWithHook(ctx, hookKindGet, nil, hook)
-}
-
-func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context {
-	switch v := ctx.Value(correlationsKey).(type) {
-	case correlationsData:
-		v.overrideHook(kind, setHook, getHook)
-		if v.isHookless() {
-			return context.WithValue(ctx, correlationsKey, v.m)
-		}
-		return context.WithValue(ctx, correlationsKey, v)
-	case Map:
-		return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v)
-	default:
-		m := NewEmptyMap()
-		return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m)
-	}
-}
-
-func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context {
-	d := correlationsData{m: m}
-	d.overrideHook(kind, setHook, getHook)
-	if d.isHookless() {
-		return ctx
-	}
-	return context.WithValue(ctx, correlationsKey, d)
-}
-
-// ContextWithNoHooks creates a context with all the hooks
-// disabled. Also returns old set and get hooks. This function can be
-// used to temporarily clear the context from hooks and then reinstate
-// them by calling ContextWithSetHook and ContextWithGetHook functions
-// passing the hooks returned by this function.
-//
-// This function should not be used by applications or libraries. It
-// is mostly for interoperation with other observability APIs.
-func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) {
-	switch v := ctx.Value(correlationsKey).(type) {
-	case correlationsData:
-		return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook
-	default:
-		return ctx, nil, nil
-	}
-}
-
-// ContextWithMap returns a context with the Map entered into it.
-func ContextWithMap(ctx context.Context, m Map) context.Context {
-	switch v := ctx.Value(correlationsKey).(type) {
-	case correlationsData:
-		v.m = m
-		ctx = context.WithValue(ctx, correlationsKey, v)
-		if v.setHook != nil {
-			ctx = v.setHook(ctx)
-		}
-		return ctx
-	default:
-		return context.WithValue(ctx, correlationsKey, m)
-	}
-}
-
-// ContextWithNoCorrelationData returns a context stripped of correlation
-// data.
-func ContextWithNoCorrelationData(ctx context.Context) context.Context {
-	return context.WithValue(ctx, correlationsKey, nil)
-}
-
-// NewContext returns a context with the map from the passed context
-// updated with the passed key-value pairs.
-func NewContext(ctx context.Context, keyvalues ...label.KeyValue) context.Context {
-	return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{
-		MultiKV: keyvalues,
-	}))
-}
-
-// MapFromContext gets the current Map from a Context.
-func MapFromContext(ctx context.Context) Map {
-	switch v := ctx.Value(correlationsKey).(type) {
-	case correlationsData:
-		if v.getHook != nil {
-			return v.getHook(ctx, v.m)
-		}
-		return v.m
-	case Map:
-		return v
-	default:
-		return NewEmptyMap()
-	}
-}
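A sketch of the immutable-update semantics removed above: drops are applied first, then adds overwrite existing keys, and the receiver is never mutated (updateMap is hypothetical; label.Int is from the vendored label package):

package example

import (
	"go.opentelemetry.io/otel/internal/baggage"
	"go.opentelemetry.io/otel/label"
)

func updateMap(m baggage.Map) baggage.Map {
	return m.Apply(baggage.MapUpdate{
		DropMultiK: []label.Key{"stale"},
		MultiKV: []label.KeyValue{
			label.String("user", "alice"),
			label.Int("retries", 2),
		},
	})
}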
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
deleted file mode 100644
index dae825e..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
-	"math"
-	"unsafe"
-)
-
-func BoolToRaw(b bool) uint64 {
-	if b {
-		return 1
-	}
-	return 0
-}
-
-func RawToBool(r uint64) bool {
-	return r != 0
-}
-
-func Int64ToRaw(i int64) uint64 {
-	return uint64(i)
-}
-
-func RawToInt64(r uint64) int64 {
-	return int64(r)
-}
-
-func Uint64ToRaw(u uint64) uint64 {
-	return u
-}
-
-func RawToUint64(r uint64) uint64 {
-	return r
-}
-
-func Float64ToRaw(f float64) uint64 {
-	return math.Float64bits(f)
-}
-
-func RawToFloat64(r uint64) float64 {
-	return math.Float64frombits(r)
-}
-
-func Int32ToRaw(i int32) uint64 {
-	return uint64(i)
-}
-
-func RawToInt32(r uint64) int32 {
-	return int32(r)
-}
-
-func Uint32ToRaw(u uint32) uint64 {
-	return uint64(u)
-}
-
-func RawToUint32(r uint64) uint32 {
-	return uint32(r)
-}
-
-func Float32ToRaw(f float32) uint64 {
-	return Uint32ToRaw(math.Float32bits(f))
-}
-
-func RawToFloat32(r uint64) float32 {
-	return math.Float32frombits(RawToUint32(r))
-}
-
-func RawPtrToFloat64Ptr(r *uint64) *float64 {
-	return (*float64)(unsafe.Pointer(r))
-}
-
-func RawPtrToInt64Ptr(r *uint64) *int64 {
-	return (*int64)(unsafe.Pointer(r))
-}
-
-func RawPtrToUint64Ptr(r *uint64) *uint64 {
-	return r
-}
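These helpers pack every supported numeric type into a single uint64; a sketch of the lossless round trip (rawRoundTrip is hypothetical):

package example

import "go.opentelemetry.io/otel/internal"

func rawRoundTrip() bool {
	// Float64ToRaw stores the IEEE 754 bit pattern, so the conversion
	// back recovers the exact value.
	r := internal.Float64ToRaw(3.14)
	return internal.RawToFloat64(r) == 3.14 // true
}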
diff --git a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go
deleted file mode 100644
index c09a737..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package noop provides noop tracing implementations for tracer and span.
-package noop
-
-import (
-	"context"
-
-	"go.opentelemetry.io/otel/api/trace"
-)
-
-var (
-	// Tracer is a noop tracer that starts noop spans.
-	Tracer trace.Tracer
-
-	// Span is a noop Span.
-	Span trace.Span
-)
-
-func init() {
-	Tracer = trace.NoopTracerProvider().Tracer("")
-	_, Span = Tracer.Start(context.Background(), "")
-}
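Usage sketch of the package-level noop singletons deleted above (discardSpan is hypothetical):

package example

import (
	"context"

	"go.opentelemetry.io/otel/internal/trace/noop"
)

func discardSpan(ctx context.Context) {
	// No construction at the call site: the package init wired these up.
	_, span := noop.Tracer.Start(ctx, "ignored")
	span.End()
}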
diff --git a/vendor/go.opentelemetry.io/otel/label/doc.go b/vendor/go.opentelemetry.io/otel/label/doc.go
deleted file mode 100644
index d631d23..0000000
--- a/vendor/go.opentelemetry.io/otel/label/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package label provides key and value labels.
-package label // import "go.opentelemetry.io/otel/label"
diff --git a/vendor/go.opentelemetry.io/otel/label/encoder.go b/vendor/go.opentelemetry.io/otel/label/encoder.go
deleted file mode 100644
index 6be7a3f..0000000
--- a/vendor/go.opentelemetry.io/otel/label/encoder.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
-	"bytes"
-	"sync"
-	"sync/atomic"
-)
-
-type (
-	// Encoder is a mechanism for serializing a label set into a
-	// specific string representation that supports caching, to
-	// avoid repeated serialization. An example could be an
-	// exporter encoding the label set into a wire representation.
-	Encoder interface {
-		// Encode returns the serialized encoding of the label
-		// set using its Iterator.  This result may be cached
-		// by a label.Set.
-		Encode(iterator Iterator) string
-
-		// ID returns a value that is unique for each class of
-		// label encoder.  Label encoders allocate these using
-		// `NewEncoderID`.
-		ID() EncoderID
-	}
-
-	// EncoderID is used to identify distinct Encoder
-	// implementations, for caching encoded results.
-	EncoderID struct {
-		value uint64
-	}
-
-	// defaultLabelEncoder uses a sync.Pool of buffers to reduce
-	// the number of allocations used in encoding labels.  This
-	// implementation encodes a comma-separated list of key=value,
-	// with '\'-escaping of '=', ',', and '\'.
-	defaultLabelEncoder struct {
-		// pool is a pool of labelset builders.  The buffers in this
-		// pool grow to a size such that most label encodings will not
-		// allocate new memory.
-		pool sync.Pool // *bytes.Buffer
-	}
-)
-
-// escapeChar is used to ensure uniqueness of the label encoding where
-// keys or values contain either '=' or ','.  Since there is no parser
-// needed for this encoding and its only requirement is to be unique,
-// this choice is arbitrary.  Users will see these in some exporters
-// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
-const escapeChar = '\\'
-
-var (
-	_ Encoder = &defaultLabelEncoder{}
-
-	// encoderIDCounter is for generating IDs for other label
-	// encoders.
-	encoderIDCounter uint64
-
-	defaultEncoderOnce     sync.Once
-	defaultEncoderID       = NewEncoderID()
-	defaultEncoderInstance *defaultLabelEncoder
-)
-
-// NewEncoderID returns a unique label encoder ID. It should be
-// called once for each type of label encoder, preferably in init() or
-// in a var definition.
-func NewEncoderID() EncoderID {
-	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
-}
-
-// DefaultEncoder returns a label encoder that encodes labels
-// in such a way that each escaped label's key is followed by an equal
-// sign and then by an escaped label's value. All key-value pairs are
-// separated by a comma.
-//
-// Escaping is done by prepending a backslash before either a
-// backslash, equal sign or a comma.
-func DefaultEncoder() Encoder {
-	defaultEncoderOnce.Do(func() {
-		defaultEncoderInstance = &defaultLabelEncoder{
-			pool: sync.Pool{
-				New: func() interface{} {
-					return &bytes.Buffer{}
-				},
-			},
-		}
-	})
-	return defaultEncoderInstance
-}
-
-// Encode is part of the implementation of the Encoder
-// interface.
-func (d *defaultLabelEncoder) Encode(iter Iterator) string {
-	buf := d.pool.Get().(*bytes.Buffer)
-	defer d.pool.Put(buf)
-	buf.Reset()
-
-	for iter.Next() {
-		i, keyValue := iter.IndexedLabel()
-		if i > 0 {
-			_, _ = buf.WriteRune(',')
-		}
-		copyAndEscape(buf, string(keyValue.Key))
-
-		_, _ = buf.WriteRune('=')
-
-		if keyValue.Value.Type() == STRING {
-			copyAndEscape(buf, keyValue.Value.AsString())
-		} else {
-			_, _ = buf.WriteString(keyValue.Value.Emit())
-		}
-	}
-	return buf.String()
-}
-
-// ID is part of the implementation of the Encoder interface.
-func (*defaultLabelEncoder) ID() EncoderID {
-	return defaultEncoderID
-}
-
-// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
-// making the default encoding unique.
-func copyAndEscape(buf *bytes.Buffer, val string) {
-	for _, ch := range val {
-		switch ch {
-		case '=', ',', escapeChar:
-			buf.WriteRune(escapeChar)
-		}
-		buf.WriteRune(ch)
-	}
-}
-
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`.  Invalid encoder IDs will not be cached.
-func (id EncoderID) Valid() bool {
-	return id.value != 0
-}
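
The default encoder above produces comma-separated key=value pairs with backslash escaping. A self-contained sketch of that escaping logic using only the standard library (the names here are illustrative, not part of the otel API):

```go
package main

import (
	"bytes"
	"fmt"
)

const escapeChar = '\\'

// escape writes val to buf, prefixing '=', ',' and '\' with a backslash,
// mirroring copyAndEscape in the deleted encoder.
func escape(buf *bytes.Buffer, val string) {
	for _, ch := range val {
		switch ch {
		case '=', ',', escapeChar:
			buf.WriteRune(escapeChar)
		}
		buf.WriteRune(ch)
	}
}

func main() {
	var buf bytes.Buffer
	pairs := [][2]string{{"service", "a=b"}, {"region", "us,east"}}
	for i, kv := range pairs {
		if i > 0 {
			buf.WriteRune(',')
		}
		escape(&buf, kv[0])
		buf.WriteRune('=')
		escape(&buf, kv[1])
	}
	fmt.Println(buf.String()) // service=a\=b,region=us\,east
}
```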
diff --git a/vendor/go.opentelemetry.io/otel/label/iterator.go b/vendor/go.opentelemetry.io/otel/label/iterator.go
deleted file mode 100644
index 9e72239..0000000
--- a/vendor/go.opentelemetry.io/otel/label/iterator.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-// Iterator allows iterating over the set of labels in order,
-// sorted by key.
-type Iterator struct {
-	storage *Set
-	idx     int
-}
-
-// MergeIterator supports iterating over two sets of labels while
-// eliminating duplicate values from the combined set.  The first
-// iterator value takes precedence.
-type MergeIterator struct {
-	one     oneIterator
-	two     oneIterator
-	current KeyValue
-}
-
-type oneIterator struct {
-	iter  Iterator
-	done  bool
-	label KeyValue
-}
-
-// Next moves the iterator to the next position. Returns false if there
-// are no more labels.
-func (i *Iterator) Next() bool {
-	i.idx++
-	return i.idx < i.Len()
-}
-
-// Label returns the current KeyValue. Must be called only after Next returns
-// true.
-func (i *Iterator) Label() KeyValue {
-	kv, _ := i.storage.Get(i.idx)
-	return kv
-}
-
-// Attribute is a synonym for Label().
-func (i *Iterator) Attribute() KeyValue {
-	return i.Label()
-}
-
-// IndexedLabel returns the current index and label. Must be called only
-// after Next returns true.
-func (i *Iterator) IndexedLabel() (int, KeyValue) {
-	return i.idx, i.Label()
-}
-
-// Len returns the number of labels in the iterator's `*Set`.
-func (i *Iterator) Len() int {
-	return i.storage.Len()
-}
-
-// ToSlice is a convenience function that creates a slice of labels
-// from the passed iterator. The iterator is set up to start from the
-// beginning before creating the slice.
-func (i *Iterator) ToSlice() []KeyValue {
-	l := i.Len()
-	if l == 0 {
-		return nil
-	}
-	i.idx = -1
-	slice := make([]KeyValue, 0, l)
-	for i.Next() {
-		slice = append(slice, i.Label())
-	}
-	return slice
-}
-
-// NewMergeIterator returns a MergeIterator for merging two label sets.
-// Duplicates are resolved by taking the value from the first set.
-func NewMergeIterator(s1, s2 *Set) MergeIterator {
-	mi := MergeIterator{
-		one: makeOne(s1.Iter()),
-		two: makeOne(s2.Iter()),
-	}
-	return mi
-}
-
-func makeOne(iter Iterator) oneIterator {
-	oi := oneIterator{
-		iter: iter,
-	}
-	oi.advance()
-	return oi
-}
-
-func (oi *oneIterator) advance() {
-	if oi.done = !oi.iter.Next(); !oi.done {
-		oi.label = oi.iter.Label()
-	}
-}
-
-// Next returns true if there is another label available.
-func (m *MergeIterator) Next() bool {
-	if m.one.done && m.two.done {
-		return false
-	}
-	if m.one.done {
-		m.current = m.two.label
-		m.two.advance()
-		return true
-	}
-	if m.two.done {
-		m.current = m.one.label
-		m.one.advance()
-		return true
-	}
-	if m.one.label.Key == m.two.label.Key {
-		m.current = m.one.label // first iterator label value wins
-		m.one.advance()
-		m.two.advance()
-		return true
-	}
-	if m.one.label.Key < m.two.label.Key {
-		m.current = m.one.label
-		m.one.advance()
-		return true
-	}
-	m.current = m.two.label
-	m.two.advance()
-	return true
-}
-
-// Label returns the current value after Next() returns true.
-func (m *MergeIterator) Label() KeyValue {
-	return m.current
-}
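
The merge iterator walks two key-sorted sets and gives the first set precedence on duplicate keys. The same semantics in a standalone sketch over plain slices (the `kv` type and `mergeFirstWins` helper are hypothetical):

```go
package main

import "fmt"

type kv struct{ Key, Value string }

// mergeFirstWins merges two slices sorted by Key; on a duplicate key the
// value from the first slice wins, matching the MergeIterator semantics.
func mergeFirstWins(one, two []kv) []kv {
	var out []kv
	i, j := 0, 0
	for i < len(one) && j < len(two) {
		switch {
		case one[i].Key == two[j].Key:
			out = append(out, one[i]) // first iterator label value wins
			i++
			j++
		case one[i].Key < two[j].Key:
			out = append(out, one[i])
			i++
		default:
			out = append(out, two[j])
			j++
		}
	}
	out = append(out, one[i:]...)
	return append(out, two[j:]...)
}

func main() {
	a := []kv{{"env", "prod"}, {"host", "a1"}}
	b := []kv{{"env", "dev"}, {"zone", "us"}}
	fmt.Println(mergeFirstWins(a, b)) // [{env prod} {host a1} {zone us}]
}
```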
diff --git a/vendor/go.opentelemetry.io/otel/label/key.go b/vendor/go.opentelemetry.io/otel/label/key.go
deleted file mode 100644
index 7d72378..0000000
--- a/vendor/go.opentelemetry.io/otel/label/key.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-// Key represents the key part in key-value pairs. It's a string. The
-// allowed character set in the key depends on the use of the key.
-type Key string
-
-// Bool creates a KeyValue instance with a BOOL Value.
-//
-// If creating both key and a bool value at the same time, then
-// instead of calling Key(name).Bool(value) consider using a
-// convenience function provided by the api/key package -
-// key.Bool(name, value).
-func (k Key) Bool(v bool) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: BoolValue(v),
-	}
-}
-
-// Int64 creates a KeyValue instance with an INT64 Value.
-//
-// If creating both key and an int64 value at the same time, then
-// instead of calling Key(name).Int64(value) consider using a
-// convenience function provided by the api/key package -
-// key.Int64(name, value).
-func (k Key) Int64(v int64) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: Int64Value(v),
-	}
-}
-
-// Uint64 creates a KeyValue instance with a UINT64 Value.
-//
-// If creating both key and a uint64 value at the same time, then
-// instead of calling Key(name).Uint64(value) consider using a
-// convenience function provided by the api/key package -
-// key.Uint64(name, value).
-func (k Key) Uint64(v uint64) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: Uint64Value(v),
-	}
-}
-
-// Float64 creates a KeyValue instance with a FLOAT64 Value.
-//
-// If creating both key and a float64 value at the same time, then
-// instead of calling Key(name).Float64(value) consider using a
-// convenience function provided by the api/key package -
-// key.Float64(name, value).
-func (k Key) Float64(v float64) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: Float64Value(v),
-	}
-}
-
-// Int32 creates a KeyValue instance with an INT32 Value.
-//
-// If creating both key and an int32 value at the same time, then
-// instead of calling Key(name).Int32(value) consider using a
-// convenience function provided by the api/key package -
-// key.Int32(name, value).
-func (k Key) Int32(v int32) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: Int32Value(v),
-	}
-}
-
-// Uint32 creates a KeyValue instance with a UINT32 Value.
-//
-// If creating both key and a uint32 value at the same time, then
-// instead of calling Key(name).Uint32(value) consider using a
-// convenience function provided by the api/key package -
-// key.Uint32(name, value).
-func (k Key) Uint32(v uint32) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: Uint32Value(v),
-	}
-}
-
-// Float32 creates a KeyValue instance with a FLOAT32 Value.
-//
-// If creating both key and a float32 value at the same time, then
-// instead of calling Key(name).Float32(value) consider using a
-// convenience function provided by the api/key package -
-// key.Float32(name, value).
-func (k Key) Float32(v float32) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: Float32Value(v),
-	}
-}
-
-// String creates a KeyValue instance with a STRING Value.
-//
-// If creating both key and a string value at the same time, then
-// instead of calling Key(name).String(value) consider using a
-// convenience function provided by the api/key package -
-// key.String(name, value).
-func (k Key) String(v string) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: StringValue(v),
-	}
-}
-
-// Int creates a KeyValue instance with either an INT32 or an INT64
-// Value, depending on whether the int type is 32 or 64 bits wide.
-//
-// If creating both key and an int value at the same time, then
-// instead of calling Key(name).Int(value) consider using a
-// convenience function provided by the api/key package -
-// key.Int(name, value).
-func (k Key) Int(v int) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: IntValue(v),
-	}
-}
-
-// Uint creates a KeyValue instance with either a UINT32 or a UINT64
-// Value, depending on whether the uint type is 32 or 64 bits wide.
-//
-// If creating both key and a uint value at the same time, then
-// instead of calling Key(name).Uint(value) consider using a
-// convenience function provided by the api/key package -
-// key.Uint(name, value).
-func (k Key) Uint(v uint) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: UintValue(v),
-	}
-}
-
-// Defined returns true for non-empty keys.
-func (k Key) Defined() bool {
-	return len(k) != 0
-}
-
-// Array creates a KeyValue instance with an ARRAY Value.
-//
-// If creating both key and an array value at the same time, then
-// instead of calling Key(name).Array(value) consider using a
-// convenience function provided by the api/key package -
-// key.Array(name, value).
-func (k Key) Array(v interface{}) KeyValue {
-	return KeyValue{
-		Key:   k,
-		Value: ArrayValue(v),
-	}
-}
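
The `Key` builder methods deleted here survive in the `attribute` package that replaced `label` in later otel releases. A short usage sketch, assuming `go.opentelemetry.io/otel/attribute` is available on the module path:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Building KeyValue pairs from a Key, as in the deleted label.Key API.
	k := attribute.Key("http.status_code")
	kv := k.Int64(200)
	fmt.Println(kv.Key, kv.Value.Emit()) // http.status_code 200

	// The package-level helpers are the shorter equivalent.
	fmt.Println(attribute.String("http.method", "GET").Value.AsString()) // GET
}
```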
diff --git a/vendor/go.opentelemetry.io/otel/label/kv.go b/vendor/go.opentelemetry.io/otel/label/kv.go
deleted file mode 100644
index 3e2764f..0000000
--- a/vendor/go.opentelemetry.io/otel/label/kv.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
-	"encoding/json"
-	"fmt"
-	"reflect"
-)
-
-// KeyValue holds a key and value pair.
-type KeyValue struct {
-	Key   Key
-	Value Value
-}
-
-// Bool creates a new key-value pair with a passed name and a bool
-// value.
-func Bool(k string, v bool) KeyValue {
-	return Key(k).Bool(v)
-}
-
-// Int64 creates a new key-value pair with a passed name and an int64
-// value.
-func Int64(k string, v int64) KeyValue {
-	return Key(k).Int64(v)
-}
-
-// Uint64 creates a new key-value pair with a passed name and a uint64
-// value.
-func Uint64(k string, v uint64) KeyValue {
-	return Key(k).Uint64(v)
-}
-
-// Float64 creates a new key-value pair with a passed name and a float64
-// value.
-func Float64(k string, v float64) KeyValue {
-	return Key(k).Float64(v)
-}
-
-// Int32 creates a new key-value pair with a passed name and an int32
-// value.
-func Int32(k string, v int32) KeyValue {
-	return Key(k).Int32(v)
-}
-
-// Uint32 creates a new key-value pair with a passed name and a uint32
-// value.
-func Uint32(k string, v uint32) KeyValue {
-	return Key(k).Uint32(v)
-}
-
-// Float32 creates a new key-value pair with a passed name and a float32
-// value.
-func Float32(k string, v float32) KeyValue {
-	return Key(k).Float32(v)
-}
-
-// String creates a new key-value pair with a passed name and a string
-// value.
-func String(k, v string) KeyValue {
-	return Key(k).String(v)
-}
-
-// Stringer creates a new key-value pair with a passed name and a string
-// value generated by the passed Stringer interface.
-func Stringer(k string, v fmt.Stringer) KeyValue {
-	return Key(k).String(v.String())
-}
-
-// Int creates a new key-value pair instance with a passed name and
-// either an int32 or an int64 value, depending on whether the int
-// type is 32 or 64 bits wide.
-func Int(k string, v int) KeyValue {
-	return Key(k).Int(v)
-}
-
-// Uint creates a new key-value pair instance with a passed name and
-// either a uint32 or a uint64 value, depending on whether the uint
-// type is 32 or 64 bits wide.
-func Uint(k string, v uint) KeyValue {
-	return Key(k).Uint(v)
-}
-
-// Array creates a new key-value pair with a passed name and an array.
-// Only arrays of primitive type are supported.
-func Array(k string, v interface{}) KeyValue {
-	return Key(k).Array(v)
-}
-
-// Any creates a new key-value pair instance with a passed name and
-// automatic type inference. This is slower, and not type-safe.
-func Any(k string, value interface{}) KeyValue {
-	if value == nil {
-		return String(k, "<nil>")
-	}
-
-	if stringer, ok := value.(fmt.Stringer); ok {
-		return String(k, stringer.String())
-	}
-
-	rv := reflect.ValueOf(value)
-
-	switch rv.Kind() {
-	case reflect.Array, reflect.Slice:
-		return Array(k, value)
-	case reflect.Bool:
-		return Bool(k, rv.Bool())
-	case reflect.Int, reflect.Int8, reflect.Int16:
-		return Int(k, int(rv.Int()))
-	case reflect.Int32:
-		return Int32(k, int32(rv.Int()))
-	case reflect.Int64:
-		return Int64(k, int64(rv.Int()))
-	case reflect.Uint, reflect.Uint8, reflect.Uint16:
-		return Uint(k, uint(rv.Uint()))
-	case reflect.Uint32:
-		return Uint32(k, uint32(rv.Uint()))
-	case reflect.Uint64, reflect.Uintptr:
-		return Uint64(k, rv.Uint())
-	case reflect.Float32:
-		return Float32(k, float32(rv.Float()))
-	case reflect.Float64:
-		return Float64(k, rv.Float())
-	case reflect.String:
-		return String(k, rv.String())
-	}
-	if b, err := json.Marshal(value); value != nil && err == nil {
-		return String(k, string(b))
-	}
-	return String(k, fmt.Sprint(value))
-}
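
`Any` dispatches on `reflect.Kind` to pick a typed constructor, falling back to JSON or `fmt.Sprint`. A minimal standalone sketch of that dispatch (`describe` is a hypothetical helper that only reports which constructor would be chosen):

```go
package main

import (
	"fmt"
	"reflect"
)

// describe mirrors the kind switch in Any: inspect a value with reflection
// and report which typed constructor would handle it.
func describe(v interface{}) string {
	if v == nil {
		return `String("<nil>")`
	}
	rv := reflect.ValueOf(v)
	switch rv.Kind() {
	case reflect.Bool:
		return fmt.Sprintf("Bool(%v)", rv.Bool())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return fmt.Sprintf("Int64(%d)", rv.Int())
	case reflect.Float32, reflect.Float64:
		return fmt.Sprintf("Float64(%g)", rv.Float())
	case reflect.String:
		return fmt.Sprintf("String(%q)", rv.String())
	case reflect.Array, reflect.Slice:
		return fmt.Sprintf("Array(len=%d)", rv.Len())
	}
	return "fallback: JSON or fmt.Sprint"
}

func main() {
	for _, v := range []interface{}{true, 42, 3.14, "hi", []int{1, 2}, nil} {
		fmt.Println(describe(v))
	}
}
```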
diff --git a/vendor/go.opentelemetry.io/otel/label/set.go b/vendor/go.opentelemetry.io/otel/label/set.go
deleted file mode 100644
index 3bd5263..0000000
--- a/vendor/go.opentelemetry.io/otel/label/set.go
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
-	"encoding/json"
-	"reflect"
-	"sort"
-	"sync"
-)
-
-type (
-	// Set is the representation for a distinct label set.  It
-	// manages an immutable set of labels, with an internal cache
-	// for storing label encodings.
-	//
-	// This type supports the `Equivalent` method of comparison
-	// using values of type `Distinct`.
-	//
-	// This type is used to implement:
-	// 1. Metric labels
-	// 2. Resource sets
-	// 3. Correlation map (TODO)
-	Set struct {
-		equivalent Distinct
-
-		lock     sync.Mutex
-		encoders [maxConcurrentEncoders]EncoderID
-		encoded  [maxConcurrentEncoders]string
-	}
-
-	// Distinct wraps a variable-size array of `KeyValue`,
-	// constructed with keys in sorted order.  This can be used as
-	// a map key or for equality checking between Sets.
-	Distinct struct {
-		iface interface{}
-	}
-
-	// Filter supports removing certain labels from label sets.
-	// When the filter returns true, the label will be kept in
-	// the filtered label set.  When the filter returns false, the
-	// label is excluded from the filtered label set, and
-	// instead appears in the returned slice of excluded labels.
-	Filter func(KeyValue) bool
-
-	// Sortable implements `sort.Interface`, used for sorting
-	// `KeyValue`.  This is an exported type to support a
-	// memory optimization.  A pointer to one of these is needed
-	// for the call to `sort.Stable()`, which the caller may
-	// provide in order to avoid an allocation.  See
-	// `NewSetWithSortable()`.
-	Sortable []KeyValue
-)
-
-var (
-	// keyValueType is used in `computeDistinctReflect`.
-	keyValueType = reflect.TypeOf(KeyValue{})
-
-	// emptySet is returned for empty label sets.
-	emptySet = &Set{
-		equivalent: Distinct{
-			iface: [0]KeyValue{},
-		},
-	}
-)
-
-const maxConcurrentEncoders = 3
-
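-// EmptySet returns a reference to a Set containing no labels.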
-func EmptySet() *Set {
-	return emptySet
-}
-
-// reflect abbreviates `reflect.ValueOf`.
-func (d Distinct) reflect() reflect.Value {
-	return reflect.ValueOf(d.iface)
-}
-
-// Valid returns true if this value refers to a valid `*Set`.
-func (d Distinct) Valid() bool {
-	return d.iface != nil
-}
-
-// Len returns the number of labels in this set.
-func (l *Set) Len() int {
-	if l == nil || !l.equivalent.Valid() {
-		return 0
-	}
-	return l.equivalent.reflect().Len()
-}
-
-// Get returns the KeyValue at ordered position `idx` in this set.
-func (l *Set) Get(idx int) (KeyValue, bool) {
-	if l == nil {
-		return KeyValue{}, false
-	}
-	value := l.equivalent.reflect()
-
-	if idx >= 0 && idx < value.Len() {
-		// Note: The Go compiler successfully avoids an allocation for
-		// the interface{} conversion here:
-		return value.Index(idx).Interface().(KeyValue), true
-	}
-
-	return KeyValue{}, false
-}
-
-// Value returns the value of a specified key in this set.
-func (l *Set) Value(k Key) (Value, bool) {
-	if l == nil {
-		return Value{}, false
-	}
-	rValue := l.equivalent.reflect()
-	vlen := rValue.Len()
-
-	idx := sort.Search(vlen, func(idx int) bool {
-		return rValue.Index(idx).Interface().(KeyValue).Key >= k
-	})
-	if idx >= vlen {
-		return Value{}, false
-	}
-	keyValue := rValue.Index(idx).Interface().(KeyValue)
-	if k == keyValue.Key {
-		return keyValue.Value, true
-	}
-	return Value{}, false
-}
-
-// HasValue tests whether a key is defined in this set.
-func (l *Set) HasValue(k Key) bool {
-	if l == nil {
-		return false
-	}
-	_, ok := l.Value(k)
-	return ok
-}
-
-// Iter returns an iterator for visiting the labels in this set.
-func (l *Set) Iter() Iterator {
-	return Iterator{
-		storage: l,
-		idx:     -1,
-	}
-}
-
-// ToSlice returns the set of labels belonging to this set, sorted,
-// where keys appear no more than once.
-func (l *Set) ToSlice() []KeyValue {
-	iter := l.Iter()
-	return iter.ToSlice()
-}
-
-// Equivalent returns a value that may be used as a map key.  The
-// Distinct type guarantees that the result will equal the equivalent
-// Distinct value of any label set with the same elements as this,
-// where sets are made unique by choosing the last value in the input
-// for any given key.
-func (l *Set) Equivalent() Distinct {
-	if l == nil || !l.equivalent.Valid() {
-		return emptySet.equivalent
-	}
-	return l.equivalent
-}
-
-// Equals returns true if the argument set is equivalent to this set.
-func (l *Set) Equals(o *Set) bool {
-	return l.Equivalent() == o.Equivalent()
-}
-
-// Encoded returns the encoded form of this set, according to
-// `encoder`.  The result will be cached in this `*Set`.
-func (l *Set) Encoded(encoder Encoder) string {
-	if l == nil || encoder == nil {
-		return ""
-	}
-
-	id := encoder.ID()
-	if !id.Valid() {
-		// Invalid IDs are not cached.
-		return encoder.Encode(l.Iter())
-	}
-
-	var lookup *string
-	l.lock.Lock()
-	for idx := 0; idx < maxConcurrentEncoders; idx++ {
-		if l.encoders[idx] == id {
-			lookup = &l.encoded[idx]
-			break
-		}
-	}
-	l.lock.Unlock()
-
-	if lookup != nil {
-		return *lookup
-	}
-
-	r := encoder.Encode(l.Iter())
-
-	l.lock.Lock()
-	defer l.lock.Unlock()
-
-	for idx := 0; idx < maxConcurrentEncoders; idx++ {
-		if l.encoders[idx] == id {
-			return l.encoded[idx]
-		}
-		if !l.encoders[idx].Valid() {
-			l.encoders[idx] = id
-			l.encoded[idx] = r
-			return r
-		}
-	}
-
-	// TODO: This is a performance cliff.  Find a way for this to
-	// generate a warning.
-	return r
-}
-
-func empty() Set {
-	return Set{
-		equivalent: emptySet.equivalent,
-	}
-}
-
-// NewSet returns a new `Set`.  See the documentation for
-// `NewSetWithSortableFiltered` for more details.
-//
-// Except for empty sets, this method adds an additional allocation
-// compared with calls that include a `*Sortable`.
-func NewSet(kvs ...KeyValue) Set {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty()
-	}
-	s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
-	return s //nolint
-}
-
-// NewSetWithSortable returns a new `Set`.  See the documentation for
-// `NewSetWithSortableFiltered` for more details.
-//
-// This call includes a `*Sortable` option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty()
-	}
-	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
-	return s //nolint
-}
-
-// NewSetWithFiltered returns a new `Set`.  See the documentation for
-// `NewSetWithSortableFiltered` for more details.
-//
-// This call includes a `Filter` to include/exclude label keys from
-// the return value.  Excluded labels are returned in the second
-// []KeyValue return value.
-func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty(), nil
-	}
-	return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
-}
-
-// NewSetWithSortableFiltered returns a new `Set`.
-//
-// Duplicate keys are eliminated by taking the last value.  This
-// re-orders the input slice so that unique last-values are contiguous
-// at the end of the slice.
-//
-// This ensures the following:
-//
-// - Last-value-wins semantics
-// - Caller sees the reordering, but doesn't lose values
-// - Repeated calls preserve last-value-wins semantics.
-//
-// Note that methods are defined on `*Set`, although this returns `Set`.
-// Callers can avoid memory allocations by:
-//
-// - allocating a `Sortable` for use as a temporary in this method
-// - allocating a `Set` for storing the return value of this
-//   constructor.
-//
-// The result maintains a cache of encoded labels, by label.EncoderID.
-// This value should not be copied after its first use.
-//
-// The second `[]KeyValue` return value is a list of labels that were
-// excluded by the Filter (if non-nil).
-func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
-	// Check for empty set.
-	if len(kvs) == 0 {
-		return empty(), nil
-	}
-
-	*tmp = kvs
-
-	// Stable sort so the following de-duplication can implement
-	// last-value-wins semantics.
-	sort.Stable(tmp)
-
-	*tmp = nil
-
-	position := len(kvs) - 1
-	offset := position - 1
-
-	// The requirements stated above require that the stable
-	// result be placed in the end of the input slice, while
-	// overwritten values are swapped to the beginning.
-	//
-	// De-duplicate with last-value-wins semantics.  Preserve
-	// duplicate values at the beginning of the input slice.
-	for ; offset >= 0; offset-- {
-		if kvs[offset].Key == kvs[position].Key {
-			continue
-		}
-		position--
-		kvs[offset], kvs[position] = kvs[position], kvs[offset]
-	}
-	if filter != nil {
-		return filterSet(kvs[position:], filter)
-	}
-	return Set{
-		equivalent: computeDistinct(kvs[position:]),
-	}, nil
-}
-
-// filterSet reorders `kvs` so that included keys are contiguous at
-// the end of the slice, while excluded keys precede the included keys.
-func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
-	var excluded []KeyValue
-
-	// Move labels that do not match the filter so
-	// they're adjacent before calling computeDistinct().
-	distinctPosition := len(kvs)
-
-	// Swap indistinct keys forward and distinct keys toward the
-	// end of the slice.
-	offset := len(kvs) - 1
-	for ; offset >= 0; offset-- {
-		if filter(kvs[offset]) {
-			distinctPosition--
-			kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
-			continue
-		}
-	}
-	excluded = kvs[:distinctPosition]
-
-	return Set{
-		equivalent: computeDistinct(kvs[distinctPosition:]),
-	}, excluded
-}
-
-// Filter returns a filtered copy of this `Set`.  See the
-// documentation for `NewSetWithSortableFiltered` for more details.
-func (l *Set) Filter(re Filter) (Set, []KeyValue) {
-	if re == nil {
-		return Set{
-			equivalent: l.equivalent,
-		}, nil
-	}
-
-	// Note: This could be refactored to avoid the temporary slice
-	// allocation, if it proves to be expensive.
-	return filterSet(l.ToSlice(), re)
-}
-
-// computeDistinct returns a `Distinct` using either the fixed- or
-// reflect-oriented code path, depending on the size of the input.
-// The input slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
-	iface := computeDistinctFixed(kvs)
-	if iface == nil {
-		iface = computeDistinctReflect(kvs)
-	}
-	return Distinct{
-		iface: iface,
-	}
-}
-
-// computeDistinctFixed computes a `Distinct` for small slices.  It
-// returns nil if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
-	switch len(kvs) {
-	case 1:
-		ptr := new([1]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 2:
-		ptr := new([2]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 3:
-		ptr := new([3]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 4:
-		ptr := new([4]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 5:
-		ptr := new([5]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 6:
-		ptr := new([6]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 7:
-		ptr := new([7]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 8:
-		ptr := new([8]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 9:
-		ptr := new([9]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	case 10:
-		ptr := new([10]KeyValue)
-		copy((*ptr)[:], kvs)
-		return *ptr
-	default:
-		return nil
-	}
-}
-
-// computeDistinctReflect computes a `Distinct` using reflection;
-// it works for input of any size.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
-	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
-	for i, keyValue := range kvs {
-		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
-	}
-	return at.Interface()
-}
-
-// MarshalJSON returns the JSON encoding of the `*Set`.
-func (l *Set) MarshalJSON() ([]byte, error) {
-	return json.Marshal(l.equivalent.iface)
-}
-
-// Len implements `sort.Interface`.
-func (l *Sortable) Len() int {
-	return len(*l)
-}
-
-// Swap implements `sort.Interface`.
-func (l *Sortable) Swap(i, j int) {
-	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
-}
-
-// Less implements `sort.Interface`.
-func (l *Sortable) Less(i, j int) bool {
-	return (*l)[i].Key < (*l)[j].Key
-}
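
`NewSetWithSortableFiltered` de-duplicates with last-value-wins semantics by stable-sorting and swapping overwritten values toward the front of the slice. The core of that algorithm in a standalone sketch (`kv` and `dedupLastWins` are illustrative names):

```go
package main

import (
	"fmt"
	"sort"
)

type kv struct {
	Key   string
	Value int
}

// dedupLastWins stable-sorts by key, then keeps the last value for each
// duplicate key; the unique survivors end up contiguous at the slice's end.
func dedupLastWins(kvs []kv) []kv {
	sort.SliceStable(kvs, func(i, j int) bool { return kvs[i].Key < kvs[j].Key })
	position := len(kvs) - 1
	for offset := position - 1; offset >= 0; offset-- {
		if kvs[offset].Key == kvs[position].Key {
			continue // overwritten duplicate stays before position
		}
		position--
		kvs[offset], kvs[position] = kvs[position], kvs[offset]
	}
	return kvs[position:]
}

func main() {
	in := []kv{{"a", 1}, {"b", 2}, {"a", 3}}
	fmt.Println(dedupLastWins(in)) // [{a 3} {b 2}]
}
```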
diff --git a/vendor/go.opentelemetry.io/otel/label/type_string.go b/vendor/go.opentelemetry.io/otel/label/type_string.go
deleted file mode 100644
index 62afeb6..0000000
--- a/vendor/go.opentelemetry.io/otel/label/type_string.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Code generated by "stringer -type=Type"; DO NOT EDIT.
-
-package label
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[INVALID-0]
-	_ = x[BOOL-1]
-	_ = x[INT32-2]
-	_ = x[INT64-3]
-	_ = x[UINT32-4]
-	_ = x[UINT64-5]
-	_ = x[FLOAT32-6]
-	_ = x[FLOAT64-7]
-	_ = x[STRING-8]
-	_ = x[ARRAY-9]
-}
-
-const _Type_name = "INVALIDBOOLINT32INT64UINT32UINT64FLOAT32FLOAT64STRINGARRAY"
-
-var _Type_index = [...]uint8{0, 7, 11, 16, 21, 27, 33, 40, 47, 53, 58}
-
-func (i Type) String() string {
-	if i < 0 || i >= Type(len(_Type_index)-1) {
-		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _Type_name[_Type_index[i]:_Type_index[i+1]]
-}
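
The generated `String` method packs all constant names into one string and slices it with a cumulative index table; name `i` lives at `[_Type_index[i]:_Type_index[i+1]]`. A worked example of that lookup:

```go
package main

import "fmt"

// Same data as the generated stringer above: one packed string plus a
// cumulative index table of name boundaries.
const typeName = "INVALIDBOOLINT32INT64UINT32UINT64FLOAT32FLOAT64STRINGARRAY"

var typeIndex = [...]uint8{0, 7, 11, 16, 21, 27, 33, 40, 47, 53, 58}

func main() {
	for i := 0; i < len(typeIndex)-1; i++ {
		fmt.Println(i, typeName[typeIndex[i]:typeIndex[i+1]])
	}
	// 0 INVALID, 1 BOOL, 2 INT32, ... 9 ARRAY
}
```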
diff --git a/vendor/go.opentelemetry.io/otel/label/value.go b/vendor/go.opentelemetry.io/otel/label/value.go
deleted file mode 100644
index 679009b..0000000
--- a/vendor/go.opentelemetry.io/otel/label/value.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"strconv"
-	"unsafe"
-
-	"go.opentelemetry.io/otel/internal"
-)
-
-//go:generate stringer -type=Type
-
-// Type describes the type of the data Value holds.
-type Type int
-
-// Value represents the value part in key-value pairs.
-type Value struct {
-	vtype    Type
-	numeric  uint64
-	stringly string
-	// TODO Lazy value type?
-
-	array interface{}
-}
-
-const (
-	INVALID Type = iota // No value.
-	BOOL                // Boolean value, use AsBool() to get it.
-	INT32               // 32 bit signed integral value, use AsInt32() to get it.
-	INT64               // 64 bit signed integral value, use AsInt64() to get it.
-	UINT32              // 32 bit unsigned integral value, use AsUint32() to get it.
-	UINT64              // 64 bit unsigned integral value, use AsUint64() to get it.
-	FLOAT32             // 32 bit floating point value, use AsFloat32() to get it.
-	FLOAT64             // 64 bit floating point value, use AsFloat64() to get it.
-	STRING              // String value, use AsString() to get it.
-	ARRAY               // Array value of arbitrary type, use AsArray() to get it.
-)
-
-// BoolValue creates a BOOL Value.
-func BoolValue(v bool) Value {
-	return Value{
-		vtype:   BOOL,
-		numeric: internal.BoolToRaw(v),
-	}
-}
-
-// Int64Value creates an INT64 Value.
-func Int64Value(v int64) Value {
-	return Value{
-		vtype:   INT64,
-		numeric: internal.Int64ToRaw(v),
-	}
-}
-
-// Uint64Value creates a UINT64 Value.
-func Uint64Value(v uint64) Value {
-	return Value{
-		vtype:   UINT64,
-		numeric: internal.Uint64ToRaw(v),
-	}
-}
-
-// Float64Value creates a FLOAT64 Value.
-func Float64Value(v float64) Value {
-	return Value{
-		vtype:   FLOAT64,
-		numeric: internal.Float64ToRaw(v),
-	}
-}
-
-// Int32Value creates an INT32 Value.
-func Int32Value(v int32) Value {
-	return Value{
-		vtype:   INT32,
-		numeric: internal.Int32ToRaw(v),
-	}
-}
-
-// Uint32Value creates a UINT32 Value.
-func Uint32Value(v uint32) Value {
-	return Value{
-		vtype:   UINT32,
-		numeric: internal.Uint32ToRaw(v),
-	}
-}
-
-// Float32Value creates a FLOAT32 Value.
-func Float32Value(v float32) Value {
-	return Value{
-		vtype:   FLOAT32,
-		numeric: internal.Float32ToRaw(v),
-	}
-}
-
-// StringValue creates a STRING Value.
-func StringValue(v string) Value {
-	return Value{
-		vtype:    STRING,
-		stringly: v,
-	}
-}
-
-// IntValue creates either an INT32 or an INT64 Value, depending on whether
-// the int type is 32 or 64 bits wide.
-func IntValue(v int) Value {
-	if unsafe.Sizeof(v) == 4 {
-		return Int32Value(int32(v))
-	}
-	return Int64Value(int64(v))
-}
-
-// UintValue creates either a UINT32 or a UINT64 Value, depending on whether
-// the uint type is 32 or 64 bits wide.
-func UintValue(v uint) Value {
-	if unsafe.Sizeof(v) == 4 {
-		return Uint32Value(uint32(v))
-	}
-	return Uint64Value(uint64(v))
-}
-
-// ArrayValue creates an ARRAY value from an array or slice.
-// Only arrays or slices of bool, int, int32, int64, uint, uint32, uint64,
-// float32, float64, or string types are allowed. Specifically, arrays
-// and slices cannot contain other arrays, slices, structs, or non-standard
-// types. If the passed value is not an array or slice of these types an
-// INVALID value is returned.
-func ArrayValue(v interface{}) Value {
-	switch reflect.TypeOf(v).Kind() {
-	case reflect.Array, reflect.Slice:
-		// get array type regardless of dimensions
-		typ := reflect.TypeOf(v).Elem()
-		kind := typ.Kind()
-		switch kind {
-		case reflect.Bool, reflect.Int, reflect.Int32, reflect.Int64,
-			reflect.Float32, reflect.Float64, reflect.String,
-			reflect.Uint, reflect.Uint32, reflect.Uint64:
-			val := reflect.ValueOf(v)
-			length := val.Len()
-			frozen := reflect.Indirect(reflect.New(reflect.ArrayOf(length, typ)))
-			reflect.Copy(frozen, val)
-			return Value{
-				vtype: ARRAY,
-				array: frozen.Interface(),
-			}
-		default:
-			return Value{vtype: INVALID}
-		}
-	}
-	return Value{vtype: INVALID}
-}
-
-// Type returns the type of the Value.
-func (v Value) Type() Type {
-	return v.vtype
-}
-
-// AsBool returns the bool value. Make sure that the Value's type is
-// BOOL.
-func (v Value) AsBool() bool {
-	return internal.RawToBool(v.numeric)
-}
-
-// AsInt32 returns the int32 value. Make sure that the Value's type is
-// INT32.
-func (v Value) AsInt32() int32 {
-	return internal.RawToInt32(v.numeric)
-}
-
-// AsInt64 returns the int64 value. Make sure that the Value's type is
-// INT64.
-func (v Value) AsInt64() int64 {
-	return internal.RawToInt64(v.numeric)
-}
-
-// AsUint32 returns the uint32 value. Make sure that the Value's type
-// is UINT32.
-func (v Value) AsUint32() uint32 {
-	return internal.RawToUint32(v.numeric)
-}
-
-// AsUint64 returns the uint64 value. Make sure that the Value's type is
-// UINT64.
-func (v Value) AsUint64() uint64 {
-	return internal.RawToUint64(v.numeric)
-}
-
-// AsFloat32 returns the float32 value. Make sure that the Value's
-// type is FLOAT32.
-func (v Value) AsFloat32() float32 {
-	return internal.RawToFloat32(v.numeric)
-}
-
-// AsFloat64 returns the float64 value. Make sure that the Value's
-// type is FLOAT64.
-func (v Value) AsFloat64() float64 {
-	return internal.RawToFloat64(v.numeric)
-}
-
-// AsString returns the string value. Make sure that the Value's type
-// is STRING.
-func (v Value) AsString() string {
-	return v.stringly
-}
-
-// AsArray returns the array Value as an interface{}.
-func (v Value) AsArray() interface{} {
-	return v.array
-}
-
-type unknownValueType struct{}
-
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
-	switch v.Type() {
-	case ARRAY:
-		return v.AsArray()
-	case BOOL:
-		return v.AsBool()
-	case INT32:
-		return v.AsInt32()
-	case INT64:
-		return v.AsInt64()
-	case UINT32:
-		return v.AsUint32()
-	case UINT64:
-		return v.AsUint64()
-	case FLOAT32:
-		return v.AsFloat32()
-	case FLOAT64:
-		return v.AsFloat64()
-	case STRING:
-		return v.stringly
-	}
-	return unknownValueType{}
-}
-
-// Emit returns a string representation of Value's data.
-func (v Value) Emit() string {
-	switch v.Type() {
-	case ARRAY:
-		return fmt.Sprint(v.array)
-	case BOOL:
-		return strconv.FormatBool(v.AsBool())
-	case INT32:
-		return strconv.FormatInt(int64(v.AsInt32()), 10)
-	case INT64:
-		return strconv.FormatInt(v.AsInt64(), 10)
-	case UINT32:
-		return strconv.FormatUint(uint64(v.AsUint32()), 10)
-	case UINT64:
-		return strconv.FormatUint(v.AsUint64(), 10)
-	case FLOAT32:
-		return fmt.Sprint(v.AsFloat32())
-	case FLOAT64:
-		return fmt.Sprint(v.AsFloat64())
-	case STRING:
-		return v.stringly
-	default:
-		return "unknown"
-	}
-}
-
-// MarshalJSON returns the JSON encoding of the Value.
-func (v Value) MarshalJSON() ([]byte, error) {
-	var jsonVal struct {
-		Type  string
-		Value interface{}
-	}
-	jsonVal.Type = v.Type().String()
-	jsonVal.Value = v.AsInterface()
-	return json.Marshal(jsonVal)
-}
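
`IntValue` and `UintValue` select a 32- or 64-bit representation from the platform width of `int`, which `unsafe.Sizeof` resolves at compile time. A tiny sketch:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// unsafe.Sizeof is a constant expression, so the compiler resolves
	// this branch; IntValue uses the same check to pick INT32 vs INT64.
	var v int
	if unsafe.Sizeof(v) == 4 {
		fmt.Println("int is 32 bits: IntValue would produce INT32")
	} else {
		fmt.Println("int is 64 bits: IntValue would produce INT64")
	}
}
```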
diff --git a/vendor/go.opentelemetry.io/otel/otel.go b/vendor/go.opentelemetry.io/otel/otel.go
deleted file mode 100644
index aaabac2..0000000
--- a/vendor/go.opentelemetry.io/otel/otel.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel
-
-import (
-	"go.opentelemetry.io/otel/api/metric"
-	"go.opentelemetry.io/otel/api/trace"
-)
-
-type Tracer = trace.Tracer
-
-type Meter = metric.Meter
-
-// ErrorHandler handles irremediable events.
-type ErrorHandler interface {
-	// Handle handles any error deemed irremediable by an OpenTelemetry
-	// component.
-	Handle(error)
-}
diff --git a/vendor/go.opentelemetry.io/otel/pre_release.sh b/vendor/go.opentelemetry.io/otel/pre_release.sh
deleted file mode 100644
index e09924b..0000000
--- a/vendor/go.opentelemetry.io/otel/pre_release.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-help()
-{
-   printf "\n"
-   printf "Usage: $0 -t tag\n"
-   printf "\t-t Unreleased tag. Update all go.mod with this tag.\n"
-   exit 1 # Exit script after printing help
-}
-
-while getopts "t:" opt
-do
-   case "$opt" in
-      t ) TAG="$OPTARG" ;;
-      ? ) help ;; # Print help
-   esac
-done
-
-# Print help in case parameters are empty
-if [ -z "$TAG" ]
-then
-   printf "Tag is missing\n";
-   help
-fi
-
-# Validate semver
-SEMVER_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
-if [[ "${TAG}" =~ ${SEMVER_REGEX} ]]; then
-	printf "${TAG} is valid semver tag.\n"
-else
-	printf "${TAG} is not a valid semver tag.\n"
-	exit -1
-fi
-
-TAG_FOUND=`git tag --list ${TAG}`
-if [[ ${TAG_FOUND} = ${TAG} ]] ; then
-        printf "Tag ${TAG} already exists\n"
-        exit -1
-fi
-
-# Get version for sdk/opentelemetry.go
-OTEL_VERSION=$(echo "${TAG}" | grep -o '^v[0-9]\+\.[0-9]\+\.[0-9]\+')
-# Strip leading v
-OTEL_VERSION="${OTEL_VERSION#v}"
-
-cd $(dirname $0)
-
-if ! git diff --quiet; then \
-	printf "Working tree is not clean, can't proceed with the release process\n"
-	git status
-	git diff
-	exit 1
-fi
-
-# Update sdk/opentelemetry.go
-cp ./sdk/opentelemetry.go ./sdk/opentelemetry.go.bak
-sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_VERSION}\"/" ./sdk/opentelemetry.go.bak >./sdk/opentelemetry.go
-rm -f ./sdk/opentelemetry.go.bak
-
-# Update go.mod
-git checkout -b pre_release_${TAG} master
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
-	cp "${dir}/go.mod" "${dir}/go.mod.bak"
-	sed "s/opentelemetry.io\/otel\([^ ]*\) v[0-9]*\.[0-9]*\.[0-9]/opentelemetry.io\/otel\1 ${TAG}/" "${dir}/go.mod.bak" >"${dir}/go.mod"
-	rm -f "${dir}/go.mod.bak"
-done
-
-# Run lint to update go.sum
-make lint
-
-# Add changes and commit.
-git add .
-make ci
-git commit -m "Prepare for releasing $TAG"
-
-printf "Now run following to verify the changes.\ngit diff master\n"
-printf "\nThen push the changes to upstream\n"
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
deleted file mode 100644
index 8e3bd8d..0000000
--- a/vendor/go.opentelemetry.io/otel/propagation.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel
-
-import "context"
-
-// TextMapCarrier is the storage medium used by a TextMapPropagator.
-type TextMapCarrier interface {
-	// Get returns the value associated with the passed key.
-	Get(key string) string
-	// Set stores the key-value pair.
-	Set(key string, value string)
-}
-
-// TextMapPropagator propagates cross-cutting concerns as key-value text
-// pairs within a carrier that travels in-band across process boundaries.
-type TextMapPropagator interface {
-	// Inject sets cross-cutting concerns from the Context into the carrier.
-	Inject(ctx context.Context, carrier TextMapCarrier)
-	// Extract reads cross-cutting concerns from the carrier into a Context.
-	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
-	// Fields returns the keys whose values are set with Inject.
-	Fields() []string
-}
-
-type compositeTextMapPropagator []TextMapPropagator
-
-func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
-	for _, i := range p {
-		i.Inject(ctx, carrier)
-	}
-}
-
-func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
-	for _, i := range p {
-		ctx = i.Extract(ctx, carrier)
-	}
-	return ctx
-}
-
-func (p compositeTextMapPropagator) Fields() []string {
-	unique := make(map[string]struct{})
-	for _, i := range p {
-		for _, k := range i.Fields() {
-			unique[k] = struct{}{}
-		}
-	}
-
-	fields := make([]string, 0, len(unique))
-	for k := range unique {
-		fields = append(fields, k)
-	}
-	return fields
-}
-
-// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
-// group of passed TextMapPropagators. This allows different cross-cutting
-// concerns to be propagated in a unified manner.
-//
-// The returned TextMapPropagator will inject and extract cross-cutting
-// concerns in the order the TextMapPropagators were provided. Additionally,
-// the Fields method will return a de-duplicated slice of the keys that are
-// set with the Inject method.
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
-	return compositeTextMapPropagator(p)
-}
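
A `TextMapCarrier` is typically a thin adapter over HTTP headers or a map, and the composite propagator simply runs each propagator in order. A self-contained sketch of both ideas (`mapCarrier` and `staticPropagator` are hypothetical, not otel types):

```go
package main

import (
	"context"
	"fmt"
)

// carrier matches the Get/Set shape of the TextMapCarrier interface above.
type carrier interface {
	Get(key string) string
	Set(key, value string)
}

// mapCarrier is a minimal map-backed carrier, the shape header adapters take.
type mapCarrier map[string]string

func (c mapCarrier) Get(key string) string { return c[key] }
func (c mapCarrier) Set(key, value string) { c[key] = value }

// staticPropagator injects one fixed pair; composing two of them shows the
// in-order injection performed by compositeTextMapPropagator.
type staticPropagator struct{ key, value string }

func (p staticPropagator) inject(_ context.Context, c carrier) { c.Set(p.key, p.value) }

func main() {
	c := mapCarrier{}
	composite := []staticPropagator{
		{"traceparent", "00-abc-def-01"},
		{"baggage", "k=v"},
	}
	for _, p := range composite { // Inject runs each propagator in order.
		p.inject(context.Background(), c)
	}
	fmt.Println(c) // map[baggage:k=v traceparent:00-abc-def-01]
}
```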
diff --git a/vendor/go.opentelemetry.io/otel/tag.sh b/vendor/go.opentelemetry.io/otel/tag.sh
deleted file mode 100644
index 70767c7..0000000
--- a/vendor/go.opentelemetry.io/otel/tag.sh
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-readonly PROGNAME=$(basename "$0")
-readonly PROGDIR=$(readlink -m "$(dirname "$0")")
-
-readonly EXCLUDE_PACKAGES="internal/tools"
-readonly SEMVER_REGEX="v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?"
-
-usage() {
-    cat <<- EOF
-Usage: $PROGNAME [OPTIONS] SEMVER_TAG COMMIT_HASH
-
-Creates git tag for all Go packages in project.
-
-OPTIONS:
-  -h --help        Show this help.
-
-ARGUMENTS:
-  SEMVER_TAG       Semantic version to tag with.
-  COMMIT_HASH      Git commit hash to tag.
-EOF
-}
-
-cmdline() {
-    local arg commit
-
-    for arg
-    do
-        local delim=""
-        case "$arg" in
-            # Translate long form options to short form.
-            --help)           args="${args}-h ";;
-            # Pass through for everything else.
-            *) [[ "${arg:0:1}" == "-" ]] || delim="\""
-                args="${args}${delim}${arg}${delim} ";;
-        esac
-    done
-
-    # Reset and process short form options.
-    eval set -- "$args"
-
-    while getopts "h" OPTION
-    do
-         case $OPTION in
-         h)
-             usage
-             exit 0
-             ;;
-         *)
-             echo "unknown option: $OPTION"
-             usage
-             exit 1
-             ;;
-        esac
-    done
-
-    # Positional arguments.
-    shift $((OPTIND-1))
-    readonly TAG="$1"
-    if [ -z "$TAG" ]
-    then
-        echo "missing SEMVER_TAG"
-        usage
-        exit 1
-    fi
-    if [[ ! "$TAG" =~ $SEMVER_REGEX ]]
-    then
-        printf "invalid semantic version: %s\n" "$TAG"
-        exit 2
-    fi
-    if [[ "$( git tag --list "$TAG" )" ]]
-    then
-        printf "tag already exists: %s\n" "$TAG"
-        exit 2
-    fi
-
-    shift
-    commit="$1"
-    if [ -z "$commit" ]
-    then
-        echo "missing COMMIT_HASH"
-        usage
-        exit 1
-    fi
-    # Verify rev is for a commit and unify hashes into a complete SHA1.
-    readonly SHA="$( git rev-parse --quiet --verify "${commit}^{commit}" )"
-    if [ -z "$SHA" ]
-    then
-        printf "invalid commit hash: %s\n" "$commit"
-        exit 2
-    fi
-    if [ "$( git merge-base "$SHA" HEAD )" != "$SHA" ]
-    then
-        printf "commit '%s' not found on this branch\n" "$commit"
-        exit 2
-    fi
-}
-
-package_dirs() {
-    # Return a list of package directories in the form:
-    #
-    #  package/directory/a
-    #  package/directory/b
-    #  deeper/package/directory/a
-    #  ...
-    #
-    # Making sure to exclude any packages in the EXCLUDE_PACKAGES regexp.
-    find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
-        | grep -E -v "$EXCLUDE_PACKAGES" \
-        | sed 's/^\.\///' \
-        | sort
-}
-
-git_tag() {
-    local tag="$1"
-    local commit="$2"
-
-    git tag -a "$tag" -s -m "Version $tag" "$commit"
-}
-
-previous_version() {
-    local current="$1"
-
-    # Requires git > 2.0
-    git tag -l --sort=v:refname \
-        | grep -E "^${SEMVER_REGEX}$" \
-        | grep -v "$current" \
-        | tail -1
-}
-
-print_changes() {
-    local tag="$1"
-    local previous
-
-    previous="$( previous_version "$tag" )"
-    if [ -n "$previous" ]
-    then
-        printf "\nRaw changes made between %s and %s\n" "$previous" "$tag"
-        printf "======================================\n"
-        git --no-pager log --pretty=oneline "${previous}..$tag"
-    fi
-}
-
-main() {
-    local dir
-
-    cmdline "$@"
-
-    cd "$PROGDIR" || exit 3
-
-    # Create tag for root package.
-    git_tag "$TAG" "$SHA"
-    printf "created tag: %s\n" "$TAG"
-
-    # Create tag for all sub-packages.
-    for dir in $( package_dirs )
-    do
-        git_tag "${dir}/$TAG" "$SHA"
-        printf "created tag: %s\n" "${dir}/$TAG"
-    done
-
-    print_changes "$TAG"
-}
-main "$@"
diff --git a/vendor/go.opentelemetry.io/otel/unit/doc.go b/vendor/go.opentelemetry.io/otel/unit/doc.go
deleted file mode 100644
index 310a7b2..0000000
--- a/vendor/go.opentelemetry.io/otel/unit/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package unit provides units.
-package unit // import "go.opentelemetry.io/otel/unit"
diff --git a/vendor/go.opentelemetry.io/otel/unit/unit.go b/vendor/go.opentelemetry.io/otel/unit/unit.go
deleted file mode 100644
index dcd39af..0000000
--- a/vendor/go.opentelemetry.io/otel/unit/unit.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package unit
-
-type Unit string
-
-const (
-	Dimensionless Unit = "1"
-	Bytes         Unit = "By"
-	Milliseconds  Unit = "ms"
-)
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
deleted file mode 100644
index dbb61a4..0000000
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
-	printf "GOPATH is not defined.\n"
-	exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
-	printf "GOPATH ${GOPATH} is invalid \n"
-	exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
-	git status
-	printf "\n\nError: working tree is not clean\n"
-	exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
-	printf "$(git log -1)"
-	printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
-	printf "  Update go.mod for $dir\n"
-	(cd "${DIR_TMP}/${dir}" && \
-	 # replaces is ("mod1" "mod2" …)
-	 replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
-	 # strip double quotes
-	 replaces=("${replaces[@]%\"}") && \
-	 replaces=("${replaces[@]#\"}") && \
-	 # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
-	 dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
-	 go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
-	 go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain a main package. These are not necessarily
-# the same directories that contain the go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
-	printf "  Build $ex in ${DIR_TMP}/${ex}\n"
-	(cd "${DIR_TMP}/${ex}" && \
-	 go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
deleted file mode 100644
index 8636ab4..0000000
--- a/vendor/go.uber.org/multierr/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/multierr
-
-env:
-  global:
-    - GO111MODULE=on
-
-go:
-  - oldstable
-  - stable
-
-before_install:
-- go version
-
-script:
-- |
-  set -e
-  make lint
-  make cover
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index 6f1db9e..f8177b9 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,41 @@
 Releases
 ========
 
+v1.11.0 (2023-03-28)
+====================
+-   `Errors` now supports any error that implements the multiple-error
+    interface.
+-   Add `Every` function to allow checking if all errors in the chain
+    satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+-   Comply with Go 1.20's multiple-error interface.
+-   Drop Go 1.18 support.
+    Per the support policy, only Go 1.19 and 1.20 are supported now.
+-   Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+-   Add `AppendFunc` that allows passing functions, similar to
+    `AppendInvoke`.
+
+-   Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+-   `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+-   Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
 v1.6.0 (2020-09-14)
 ===================
 
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
index 858e024..413e30f 100644
--- a/vendor/go.uber.org/multierr/LICENSE.txt
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Uber Technologies, Inc.
+Copyright (c) 2017-2021 Uber Technologies, Inc.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
index 3160044..dcb6fe7 100644
--- a/vendor/go.uber.org/multierr/Makefile
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -34,9 +34,5 @@
 
 .PHONY: cover
 cover:
-	go test -coverprofile=cover.out -coverpkg=./... -v ./...
+	go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
 	go tool cover -html=cover.out -o cover.html
-
-update-license:
-	@cd tools && go install go.uber.org/tools/update-license
-	@$(GOBIN)/update-license $(GO_FILES)
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 751bd65..5ab6ac4 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
 
 `multierr` allows combining one or more Go `error`s together.
 
+## Features
+
+- **Idiomatic**:
+  multierr follows best practices in Go, and keeps your code idiomatic.
+    - It keeps the underlying error type hidden,
+      allowing you to deal in `error` values exclusively.
+    - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+  multierr is optimized for performance:
+    - It avoids allocations where possible.
+    - It utilizes slice resizing semantics to optimize common cases
+      like appending into the same error object from a loop.
+- **Interoperable**:
+  multierr interoperates with the Go standard library's error APIs seamlessly:
+    - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+  multierr comes with virtually no dependencies.
+
 ## Installation
 
-    go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
 
 ## Status
 
@@ -15,9 +35,9 @@
 Released under the [MIT License].
 
 [MIT License]: LICENSE.txt
-[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
-[doc]: https://godoc.org/go.uber.org/multierr
-[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
 [cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
-[ci]: https://travis-ci.com/uber-go/multierr
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
 [cov]: https://codecov.io/gh/uber-go/multierr
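To ground the feature list in the README above, a minimal self-contained sketch of the core API (nothing here goes beyond what the package itself documents):

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	// Combine flattens any number of errors and skips nils.
	err := multierr.Combine(
		errors.New("close reader"),
		nil, // ignored
		errors.New("close writer"),
	)

	// %+v renders a readable multi-line message.
	fmt.Printf("%+v\n", err)

	// Errors recovers the underlying list; callers may modify the slice.
	for _, e := range multierr.Errors(err) {
		fmt.Println("-", e)
	}
}
```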
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index 5c9b67d..3a828b2 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -20,54 +20,109 @@
 
 // Package multierr allows combining one or more errors together.
 //
-// Overview
+// # Overview
 //
 // Errors can be combined with the use of the Combine function.
 //
-// 	multierr.Combine(
-// 		reader.Close(),
-// 		writer.Close(),
-// 		conn.Close(),
-// 	)
+//	multierr.Combine(
+//		reader.Close(),
+//		writer.Close(),
+//		conn.Close(),
+//	)
 //
 // If only two errors are being combined, the Append function may be used
 // instead.
 //
-// 	err = multierr.Append(reader.Close(), writer.Close())
-//
-// This makes it possible to record resource cleanup failures from deferred
-// blocks with the help of named return values.
-//
-// 	func sendRequest(req Request) (err error) {
-// 		conn, err := openConnection()
-// 		if err != nil {
-// 			return err
-// 		}
-// 		defer func() {
-// 			err = multierr.Append(err, conn.Close())
-// 		}()
-// 		// ...
-// 	}
+//	err = multierr.Append(reader.Close(), writer.Close())
 //
 // The underlying list of errors for a returned error object may be retrieved
 // with the Errors function.
 //
-// 	errors := multierr.Errors(err)
-// 	if len(errors) > 0 {
-// 		fmt.Println("The following errors occurred:", errors)
-// 	}
+//	errors := multierr.Errors(err)
+//	if len(errors) > 0 {
+//		fmt.Println("The following errors occurred:", errors)
+//	}
 //
-// Advanced Usage
+// # Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+//	var err error
+//	for _, item := range items {
+//		err = multierr.Append(err, process(item))
+//	}
+//
+// Cases like this may require knowledge of whether an individual instance
+// failed. This usually requires the introduction of a new variable.
+//
+//	var err error
+//	for _, item := range items {
+//		if perr := process(item); perr != nil {
+//			log.Warn("skipping item", item)
+//			err = multierr.Append(err, perr)
+//		}
+//	}
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+//	var err error
+//	for _, item := range items {
+//		if multierr.AppendInto(&err, process(item)) {
+//			log.Warn("skipping item", item)
+//		}
+//	}
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
+//
+// See [AppendInto] for more information.
+//
+// # Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
+//
+//	func sendRequest(req Request) (err error) {
+//		conn, err := openConnection()
+//		if err != nil {
+//			return err
+//		}
+//		defer func() {
+//			err = multierr.Append(err, conn.Close())
+//		}()
+//		// ...
+//	}
+//
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
+//
+//	func sendRequest(req Request) (err error) {
+//		conn, err := openConnection()
+//		if err != nil {
+//			return err
+//		}
+//		defer multierr.AppendInvoke(&err, multierr.Close(conn))
+//		// ...
+//	}
+//
+// See [AppendInvoke] and [Invoker] for more information.
+//
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
 //
 // Errors returned by Combine and Append MAY implement the following
 // interface.
 //
-// 	type errorGroup interface {
-// 		// Returns a slice containing the underlying list of errors.
-// 		//
-// 		// This slice MUST NOT be modified by the caller.
-// 		Errors() []error
-// 	}
+//	type errorGroup interface {
+//		// Returns a slice containing the underlying list of errors.
+//		//
+//		// This slice MUST NOT be modified by the caller.
+//		Errors() []error
+//	}
 //
 // Note that if you need access to list of errors behind a multierr error, you
 // should prefer using the Errors function. That said, if you need cheap
@@ -76,23 +131,23 @@
 // because errors returned by Combine and Append are not guaranteed to
 // implement this interface.
 //
-// 	var errors []error
-// 	group, ok := err.(errorGroup)
-// 	if ok {
-// 		errors = group.Errors()
-// 	} else {
-// 		errors = []error{err}
-// 	}
+//	var errors []error
+//	group, ok := err.(errorGroup)
+//	if ok {
+//		errors = group.Errors()
+//	} else {
+//		errors = []error{err}
+//	}
 package multierr // import "go.uber.org/multierr"
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"strings"
 	"sync"
-
-	"go.uber.org/atomic"
+	"sync/atomic"
 )
 
 var (
@@ -132,34 +187,15 @@
 // Errors returns a slice containing zero or more errors that the supplied
 // error is composed of. If the error is nil, a nil slice is returned.
 //
-// 	err := multierr.Append(r.Close(), w.Close())
-// 	errors := multierr.Errors(err)
+//	err := multierr.Append(r.Close(), w.Close())
+//	errors := multierr.Errors(err)
 //
 // If the error is not composed of other errors, the returned slice contains
 // just the error that was passed in.
 //
 // Callers of this function are free to modify the returned slice.
 func Errors(err error) []error {
-	if err == nil {
-		return nil
-	}
-
-	// Note that we're casting to multiError, not errorGroup. Our contract is
-	// that returned errors MAY implement errorGroup. Errors, however, only
-	// has special behavior for multierr-specific error objects.
-	//
-	// This behavior can be expanded in the future but I think it's prudent to
-	// start with as little as possible in terms of contract and possibility
-	// of misuse.
-	eg, ok := err.(*multiError)
-	if !ok {
-		return []error{err}
-	}
-
-	errors := eg.Errors()
-	result := make([]error, len(errors))
-	copy(result, errors)
-	return result
+	return extractErrors(err)
 }
 
 // multiError is an error that holds one or more errors.
@@ -174,8 +210,6 @@
 	errors     []error
 }
 
-var _ errorGroup = (*multiError)(nil)
-
 // Errors returns the list of underlying errors.
 //
 // This slice MUST NOT be modified.
@@ -201,6 +235,17 @@
 	return result
 }
 
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
+func Every(err error, target error) bool {
+	for _, e := range extractErrors(err) {
+		if !errors.Is(e, target) {
+			return false
+		}
+	}
+	return true
+}
+
 func (merr *multiError) Format(f fmt.State, c rune) {
 	if c == 'v' && f.Flag('+') {
 		merr.writeMultiline(f)
@@ -292,6 +337,14 @@
 
 // fromSlice converts the given list of errors into a single error.
 func fromSlice(errors []error) error {
+	// Don't pay to inspect small slices.
+	switch len(errors) {
+	case 0:
+		return nil
+	case 1:
+		return errors[0]
+	}
+
 	res := inspect(errors)
 	switch res.Count {
 	case 0:
@@ -301,8 +354,12 @@
 		return errors[res.FirstErrorIdx]
 	case len(errors):
 		if !res.ContainsMultiError {
-			// already flat
-			return &multiError{errors: errors}
+			// Error list is flat. Make a copy of it.
+			// Otherwise "errors" escapes to the heap
+			// unconditionally for all other cases.
+			// This lets us optimize for the "no errors" case.
+			out := append(([]error)(nil), errors...)
+			return &multiError{errors: out}
 		}
 	}
 
@@ -327,32 +384,32 @@
 // If zero arguments were passed or if all items are nil, a nil error is
 // returned.
 //
-// 	Combine(nil, nil)  // == nil
+//	Combine(nil, nil)  // == nil
 //
 // If only a single error was passed, it is returned as-is.
 //
-// 	Combine(err)  // == err
+//	Combine(err)  // == err
 //
 // Combine skips over nil arguments so this function may be used to combine
 // together errors from operations that fail independently of each other.
 //
-// 	multierr.Combine(
-// 		reader.Close(),
-// 		writer.Close(),
-// 		pipe.Close(),
-// 	)
+//	multierr.Combine(
+//		reader.Close(),
+//		writer.Close(),
+//		pipe.Close(),
+//	)
 //
 // If any of the passed errors is a multierr error, it will be flattened along
 // with the other errors.
 //
-// 	multierr.Combine(multierr.Combine(err1, err2), err3)
-// 	// is the same as
-// 	multierr.Combine(err1, err2, err3)
+//	multierr.Combine(multierr.Combine(err1, err2), err3)
+//	// is the same as
+//	multierr.Combine(err1, err2, err3)
 //
 // The returned error formats into a readable multi-line error message if
 // formatted with %+v.
 //
-// 	fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+//	fmt.Sprintf("%+v", multierr.Combine(err1, err2))
 func Combine(errors ...error) error {
 	return fromSlice(errors)
 }
@@ -362,16 +419,19 @@
 // This function is a specialization of Combine for the common case where
 // there are only two errors.
 //
-// 	err = multierr.Append(reader.Close(), writer.Close())
+//	err = multierr.Append(reader.Close(), writer.Close())
 //
 // The following pattern may also be used to record failure of deferred
 // operations without losing information about the original error.
 //
-// 	func doSomething(..) (err error) {
-// 		f := acquireResource()
-// 		defer func() {
-// 			err = multierr.Append(err, f.Close())
-// 		}()
+//	func doSomething(..) (err error) {
+//		f := acquireResource()
+//		defer func() {
+//			err = multierr.Append(err, f.Close())
+//		}()
+//		// ...
+//	}
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
 func Append(left error, right error) error {
 	switch {
 	case left == nil:
@@ -401,37 +461,37 @@
 // AppendInto appends an error into the destination of an error pointer and
 // returns whether the error being appended was non-nil.
 //
-// 	var err error
-// 	multierr.AppendInto(&err, r.Close())
-// 	multierr.AppendInto(&err, w.Close())
+//	var err error
+//	multierr.AppendInto(&err, r.Close())
+//	multierr.AppendInto(&err, w.Close())
 //
 // The above is equivalent to,
 //
-// 	err := multierr.Append(r.Close(), w.Close())
+//	err := multierr.Append(r.Close(), w.Close())
 //
 // As AppendInto reports whether the provided error was non-nil, it may be
 // used to build a multierr error in a loop more ergonomically. For example:
 //
-// 	var err error
-// 	for line := range lines {
-// 		var item Item
-// 		if multierr.AppendInto(&err, parse(line, &item)) {
-// 			continue
-// 		}
-// 		items = append(items, item)
-// 	}
+//	var err error
+//	for line := range lines {
+//		var item Item
+//		if multierr.AppendInto(&err, parse(line, &item)) {
+//			continue
+//		}
+//		items = append(items, item)
+//	}
 //
-// Compare this with a verison that relies solely on Append:
+// Compare this with a version that relies solely on Append:
 //
-// 	var err error
-// 	for line := range lines {
-// 		var item Item
-// 		if parseErr := parse(line, &item); parseErr != nil {
-// 			err = multierr.Append(err, parseErr)
-// 			continue
-// 		}
-// 		items = append(items, item)
-// 	}
+//	var err error
+//	for line := range lines {
+//		var item Item
+//		if parseErr := parse(line, &item); parseErr != nil {
+//			err = multierr.Append(err, parseErr)
+//			continue
+//		}
+//		items = append(items, item)
+//	}
 func AppendInto(into *error, err error) (errored bool) {
 	if into == nil {
 		// We panic if 'into' is nil. This is not documented above
@@ -447,3 +507,140 @@
 	*into = Append(*into, err)
 	return true
 }
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, [Close] and [Invoke].
+type Invoker interface {
+	Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+//	func processReader(r io.Reader) (err error) {
+//		scanner := bufio.NewScanner(r)
+//		defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//		for scanner.Scan() {
+//			// ...
+//		}
+//		// ...
+//	}
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+//	defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+//	func processFile(path string) (err error) {
+//		f, err := os.Open(path)
+//		if err != nil {
+//			return err
+//		}
+//		defer multierr.AppendInvoke(&err, multierr.Close(f))
+//		return processReader(f)
+//	}
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+//	defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+	return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+//	func doSomething(...) (err error) {
+//		// ...
+//		f, err := openFile(..)
+//		if err != nil {
+//			return err
+//		}
+//
+//		// multierr will call f.Close() when this function returns and
+//		// if the operation fails, it appends its error into the
+//		// returned error.
+//		defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+//		scanner := bufio.NewScanner(f)
+//		// Similarly, this schedules scanner.Err to be called and
+//		// inspected when the function returns, appending its error
+//		// into the returned error.
+//		defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+//		// ...
+//	}
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+//	err := // ...
+//	multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+//	// ...is roughly equivalent to...
+//
+//	err := // ...
+//	multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+//	// BAD: This is likely not what the caller intended. This will evaluate
+//	// foo() right away and append its result into the error when the
+//	// function returns.
+//	defer multierr.AppendInto(&err, foo())
+//
+//	// GOOD: This will defer invocation of foo until the function returns.
+//	defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+	AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+//	func doSomething(...) (err error) {
+//		w, err := startWorker(...)
+//		if err != nil {
+//			return err
+//		}
+//
+//		// multierr will call w.Stop() when this function returns and
+//		// if the operation fails, it appends its error into the
+//		// returned error.
+//		defer multierr.AppendFunc(&err, w.Stop)
+//	}
+func AppendFunc(into *error, fn func() error) {
+	AppendInvoke(into, Invoke(fn))
+}
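A short sketch tying the Invoker helpers above together in a hypothetical file-processing function (only `AppendInvoke`, `Close`, and `AppendFunc` from this file are used):

```go
package main

import (
	"bufio"
	"fmt"
	"os"

	"go.uber.org/multierr"
)

// processFile is a hypothetical example. The error MUST be a named return
// so that the deferred appends can modify it.
func processFile(path string) (err error) {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	// Append any f.Close() error into err when this function returns.
	defer multierr.AppendInvoke(&err, multierr.Close(f))

	scanner := bufio.NewScanner(f)
	// AppendFunc takes the method value directly; scanner.Err is called
	// and inspected only when the function returns.
	defer multierr.AppendFunc(&err, scanner.Err)

	for scanner.Scan() {
		// ... process scanner.Text() ...
	}
	return nil
}

func main() {
	if err := processFile("/no/such/file"); err != nil {
		fmt.Println("error:", err)
	}
}
```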
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 0000000..a173f9c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+	return merr.Errors()
+}
+
+type multipleErrors interface {
+	Unwrap() []error
+}
+
+func extractErrors(err error) []error {
+	if err == nil {
+		return nil
+	}
+
+	// Check if the given err is an unwrappable error that
+	// implements the multipleErrors interface.
+	eg, ok := err.(multipleErrors)
+	if !ok {
+		return []error{err}
+	}
+
+	return append(([]error)(nil), eg.Unwrap()...)
+}
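Because `multiError` now exposes `Unwrap() []error`, the standard library can traverse combined errors directly on Go 1.20+; a minimal sketch with a hypothetical sentinel:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

// errNotFound is a hypothetical sentinel error for illustration.
var errNotFound = errors.New("not found")

func main() {
	err := multierr.Combine(
		fmt.Errorf("lookup user: %w", errNotFound),
		errors.New("lookup group: permission denied"),
	)

	// On Go 1.20+, errors.Is walks the Unwrap() []error tree directly.
	fmt.Println(errors.Is(err, errNotFound)) // true

	// extractErrors backs multierr.Errors, so the flat list is available too.
	fmt.Println(len(multierr.Errors(err))) // 2
}
```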
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 0000000..93872a3
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+	for _, err := range merr.Errors() {
+		if errors.As(err, target) {
+			return true
+		}
+	}
+	return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+	for _, err := range merr.Errors() {
+		if errors.Is(err, target) {
+			return true
+		}
+	}
+	return false
+}
+
+func extractErrors(err error) []error {
+	if err == nil {
+		return nil
+	}
+
+	// Note that we're casting to multiError, not errorGroup. Our contract is
+	// that returned errors MAY implement errorGroup. Errors, however, only
+	// has special behavior for multierr-specific error objects.
+	//
+	// This behavior can be expanded in the future but I think it's prudent to
+	// start with as little as possible in terms of contract and possibility
+	// of misuse.
+	eg, ok := err.(*multiError)
+	if !ok {
+		return []error{err}
+	}
+
+	return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084e..0000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
-  version: ^1
-testImport:
-- package: github.com/stretchr/testify
-  subpackages:
-  - assert
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go
deleted file mode 100644
index 264b0ea..0000000
--- a/vendor/go.uber.org/multierr/go113.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build go1.13
-
-package multierr
-
-import "errors"
-
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
-	for _, err := range merr.Errors() {
-		if errors.As(err, target) {
-			return true
-		}
-	}
-	return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
-	for _, err := range merr.Errors() {
-		if errors.Is(err, target) {
-			return true
-		}
-	}
-	return false
-}
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 0000000..2346df1
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+  # Make output more digestible with quickfix in vim/emacs/etc.
+  sort-results: true
+  print-issued-lines: false
+
+linters:
+  # We'll track the golangci-lint default linters manually
+  # instead of letting them change without our control.
+  disable-all: true
+  enable:
+    # golangci-lint defaults:
+    - errcheck
+    - gosimple
+    - govet
+    - ineffassign
+    - staticcheck
+    - unused
+
+    # Our own extras:
+    - gofumpt
+    - nolintlint # lints nolint directives
+    - revive
+
+linters-settings:
+  govet:
+    # These govet checks are disabled by default, but they're useful.
+    enable:
+      - nilness
+      - reflectvaluecompare
+      - sortslice
+      - unusedwrite
+
+  errcheck:
+    exclude-functions:
+      # These methods cannot fail.
+      # They operate on an in-memory buffer.
+      - (*go.uber.org/zap/buffer.Buffer).Write
+      - (*go.uber.org/zap/buffer.Buffer).WriteByte
+      - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+      - (*go.uber.org/zap/zapio.Writer).Close
+      - (*go.uber.org/zap/zapio.Writer).Sync
+      - (*go.uber.org/zap/zapio.Writer).Write
+      # Write to zapio.Writer cannot fail,
+      # so io.WriteString on it cannot fail.
+      - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+      # Writing a plain string to a fmt.State cannot fail.
+      - io.WriteString(fmt.State)
+
+issues:
+  # Print all issues reported by all linters.
+  max-issues-per-linter: 0
+  max-same-issues: 0
+
+  # Don't ignore some of the issues that golangci-lint considers okay.
+  # This includes documenting all exported entities.
+  exclude-use-default: false
+
+  exclude-rules:
+    # Don't warn on unused parameters.
+    # Parameter names are useful; replacing them with '_' is undesirable.
+    - linters: [revive]
+      text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+    # staticcheck already has smarter checks for empty blocks.
+    # revive's empty-block linter has false positives.
+    # For example, as of writing this, the following is not allowed.
+    #   for foo() { }
+    - linters: [revive]
+      text: 'empty-block: this block is empty, you can remove it'
+
+    # Ignore logger.Sync() errcheck failures in example_test.go
+    # since those are intended to be uncomplicated examples.
+    - linters: [errcheck]
+      path: example_test.go
+      text: 'Error return value of `logger.Sync` is not checked'
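For context on the errcheck exclusions above, a minimal sketch of why those writes are safe to leave unchecked (assuming zap's `buffer` pool API; errcheck would normally flag the unchecked `WriteString`):

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	pool := buffer.NewPool()
	buf := pool.Get()
	defer buf.Free()

	// Both calls write to an in-memory byte slice and cannot fail, so
	// their error returns are safe to ignore, which is exactly what the
	// exclude-functions entries permit.
	buf.AppendString("hello ")
	buf.WriteString("world")
	fmt.Println(buf.String())
}
```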
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
index 3154a1e..4fea302 100644
--- a/vendor/go.uber.org/zap/.readme.tmpl
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -1,7 +1,15 @@
 # :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
 
+<div align="center">
+
 Blazing fast, structured, leveled logging in Go.
 
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
 ## Installation
 
 `go get -u go.uber.org/zap`
@@ -92,18 +100,18 @@
 
 <hr>
 
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
 
 <sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
 benchmarking against slightly older versions of other packages. Versions are
-pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
 
-[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
-[doc]: https://godoc.org/go.uber.org/zap
-[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/zap
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
 [cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
 [cov]: https://codecov.io/gh/uber-go/zap
 [benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
 
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index 6c32101..6d6cd5f 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,4 +1,176 @@
 # Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
+  context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+  `Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger` that reports the
+  current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors to zap.Error automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+  `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+  that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+  arrays of objects. With these two constructors, you don't need to implement
+  `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+  `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+  `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+  These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+  logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+  `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+  a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+*  [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+*  [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+   string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+  `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+  characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+  encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+  methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, and @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+  better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
 
 ## 1.18.1 (28 Jun 2021)
 
@@ -51,6 +223,16 @@
 
 Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
 
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
 ## 1.16.0 (1 Sep 2020)
 
 Bugfixes:
@@ -72,6 +254,17 @@
 
 Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
 
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
 ## 1.15.0 (23 Apr 2020)
 
 Bugfixes:
@@ -88,6 +281,11 @@
 
 Thanks to @danielbprice for their contributions to this release.
 
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
 ## 1.14.1 (14 Mar 2020)
 
 Bugfixes:
@@ -100,6 +298,10 @@
 
 Thanks to @YashishDua for their contributions to this release.
 
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
 ## 1.14.0 (20 Feb 2020)
 
 Enhancements:
@@ -110,6 +312,11 @@
 
 Thanks to @caibirdme for their contributions to this release.
 
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+
 ## 1.13.0 (13 Nov 2019)
 
 Enhancements:
@@ -118,11 +325,15 @@
 
 Thanks to @jbizzle for their contributions to this release.
 
+[#758]: https://github.com/uber-go/zap/pull/758
+
 ## 1.12.0 (29 Oct 2019)
 
 Enhancements:
 * [#751][]: Migrate to Go modules.
 
+[#751]: https://github.com/uber-go/zap/pull/751
+
 ## 1.11.0 (21 Oct 2019)
 
 Enhancements:
@@ -131,6 +342,9 @@
 
 Thanks to @juicemia, @uhthomas for their contributions to this release.
 
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+
 ## 1.10.0 (29 Apr 2019)
 
 Bugfixes:
@@ -148,13 +362,21 @@
 Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
 to this release.
 
-## v1.9.1 (06 Aug 2018)
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+
+## 1.9.1 (06 Aug 2018)
 
 Bugfixes:
 
 * [#614][]: MapObjectEncoder should not ignore empty slices.
 
-## v1.9.0 (19 Jul 2018)
+[#614]: https://github.com/uber-go/zap/pull/614
+
+## 1.9.0 (19 Jul 2018)
 
 Enhancements:
 * [#602][]: Reduce number of allocations when logging with reflection.
@@ -163,7 +385,11 @@
 Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
 @dimroc for their contributions to this release.
 
-## v1.8.0 (13 Apr 2018)
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+
+## 1.8.0 (13 Apr 2018)
 
 Enhancements:
 * [#508][]: Make log level configurable when redirecting the standard
@@ -176,19 +402,28 @@
 
 Thanks to @DiSiqueira and @djui for their contributions to this release.
 
-## v1.7.1 (25 Sep 2017)
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+
+## 1.7.1 (25 Sep 2017)
 
 Bugfixes:
 * [#504][]: Store strings when using AddByteString with the map encoder.
 
-## v1.7.0 (21 Sep 2017)
+[#504]: https://github.com/uber-go/zap/pull/504
+
+## 1.7.0 (21 Sep 2017)
 
 Enhancements:
 
 * [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
   to specify the level of the logged messages.
 
-## v1.6.0 (30 Aug 2017)
+[#487]: https://github.com/uber-go/zap/pull/487
+
+## 1.6.0 (30 Aug 2017)
 
 Enhancements:
 
@@ -196,7 +431,10 @@
 * [#490][]: Add a `ContextMap` method to observer logs for simpler
   field validation in tests.
 
-## v1.5.0 (22 Jul 2017)
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+
+## 1.5.0 (22 Jul 2017)
 
 Enhancements:
 
@@ -209,7 +447,12 @@
 
 Thanks to @richard-tunein and @pavius for their contributions to this release.
 
-## v1.4.1 (08 Jun 2017)
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+
+## 1.4.1 (08 Jun 2017)
 
 This release fixes two bugs.
 
@@ -218,7 +461,10 @@
 * [#435][]: Support a variety of case conventions when unmarshaling levels.
 * [#444][]: Fix a panic in the observer.
 
-## v1.4.0 (12 May 2017)
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+
+## 1.4.0 (12 May 2017)
 
 This release adds a few small features and is fully backward-compatible.
 
@@ -230,7 +476,11 @@
 * [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
   variety of operations a bit simpler.
 
-## v1.3.0 (25 Apr 2017)
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+
+## 1.3.0 (25 Apr 2017)
 
 This release adds an enhancement to zap's testing helpers as well as the
 ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -241,7 +491,10 @@
   particularly useful when testing the `SugaredLogger`.
 * [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
 
-## v1.2.0 (13 Apr 2017)
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+
+## 1.2.0 (13 Apr 2017)
 
 This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
 
@@ -250,7 +503,9 @@
 * [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
   `grpclog.Logger`.
 
-## v1.1.0 (31 Mar 2017)
+[#402]: https://github.com/uber-go/zap/pull/402
+
+## 1.1.0 (31 Mar 2017)
 
 This release fixes two bugs and adds some enhancements to zap's testing helpers.
 It is fully backward-compatible.
@@ -267,7 +522,11 @@
 
 Thanks to @moitias for contributing to this release.
 
-## v1.0.0 (14 Mar 2017)
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+
+## 1.0.0 (14 Mar 2017)
 
 This is zap's first stable release. All exported APIs are now final, and no
 further breaking changes will be made in the 1.x release series. Anyone using a
@@ -312,7 +571,21 @@
 Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
 contributions to this release.
 
-## v1.0.0-rc.3 (7 Mar 2017)
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+
+## 1.0.0-rc.3 (7 Mar 2017)
 
 This is the third release candidate for zap's stable release. There are no
 breaking changes.
@@ -333,7 +606,12 @@
 
 Thanks to @ansel1 and @suyash for their contributions to this release.
 
-## v1.0.0-rc.2 (21 Feb 2017)
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+
+## 1.0.0-rc.2 (21 Feb 2017)
 
 This is the second release candidate for zap's stable release. It includes two
 breaking changes.
@@ -370,7 +648,16 @@
 
 Thanks to @skipor and @chapsuk for their contributions to this release.
 
-## v1.0.0-rc.1 (14 Feb 2017)
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+
+## 1.0.0-rc.1 (14 Feb 2017)
 
 This is the first release candidate for zap's stable release. There are multiple
 breaking changes and improvements from the pre-release version. Most notably:
@@ -390,7 +677,7 @@
 * Sampling is more accurate, and doesn't depend on the standard library's shared
   timer heap.
 
-## v0.1.0-beta.1 (6 Feb 2017)
+## 0.1.0-beta.1 (6 Feb 2017)
 
 This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
 upgrade at their leisure. Since this is the first tagged release, there are no
@@ -398,95 +685,3 @@
 
 Early zap adopters should pin to the 0.1.x minor version until they're ready to
 upgrade to the upcoming stable release.
-
-[#316]: https://github.com/uber-go/zap/pull/316
-[#309]: https://github.com/uber-go/zap/pull/309
-[#317]: https://github.com/uber-go/zap/pull/317
-[#321]: https://github.com/uber-go/zap/pull/321
-[#325]: https://github.com/uber-go/zap/pull/325
-[#333]: https://github.com/uber-go/zap/pull/333
-[#326]: https://github.com/uber-go/zap/pull/326
-[#300]: https://github.com/uber-go/zap/pull/300
-[#339]: https://github.com/uber-go/zap/pull/339
-[#307]: https://github.com/uber-go/zap/pull/307
-[#353]: https://github.com/uber-go/zap/pull/353
-[#311]: https://github.com/uber-go/zap/pull/311
-[#366]: https://github.com/uber-go/zap/pull/366
-[#364]: https://github.com/uber-go/zap/pull/364
-[#371]: https://github.com/uber-go/zap/pull/371
-[#362]: https://github.com/uber-go/zap/pull/362
-[#369]: https://github.com/uber-go/zap/pull/369
-[#347]: https://github.com/uber-go/zap/pull/347
-[#373]: https://github.com/uber-go/zap/pull/373
-[#348]: https://github.com/uber-go/zap/pull/348
-[#327]: https://github.com/uber-go/zap/pull/327
-[#376]: https://github.com/uber-go/zap/pull/376
-[#346]: https://github.com/uber-go/zap/pull/346
-[#365]: https://github.com/uber-go/zap/pull/365
-[#372]: https://github.com/uber-go/zap/pull/372
-[#385]: https://github.com/uber-go/zap/pull/385
-[#396]: https://github.com/uber-go/zap/pull/396
-[#386]: https://github.com/uber-go/zap/pull/386
-[#402]: https://github.com/uber-go/zap/pull/402
-[#415]: https://github.com/uber-go/zap/pull/415
-[#416]: https://github.com/uber-go/zap/pull/416
-[#424]: https://github.com/uber-go/zap/pull/424
-[#425]: https://github.com/uber-go/zap/pull/425
-[#431]: https://github.com/uber-go/zap/pull/431
-[#435]: https://github.com/uber-go/zap/pull/435
-[#444]: https://github.com/uber-go/zap/pull/444
-[#477]: https://github.com/uber-go/zap/pull/477
-[#465]: https://github.com/uber-go/zap/pull/465
-[#460]: https://github.com/uber-go/zap/pull/460
-[#470]: https://github.com/uber-go/zap/pull/470
-[#487]: https://github.com/uber-go/zap/pull/487
-[#490]: https://github.com/uber-go/zap/pull/490
-[#491]: https://github.com/uber-go/zap/pull/491
-[#504]: https://github.com/uber-go/zap/pull/504
-[#508]: https://github.com/uber-go/zap/pull/508
-[#518]: https://github.com/uber-go/zap/pull/518
-[#577]: https://github.com/uber-go/zap/pull/577
-[#574]: https://github.com/uber-go/zap/pull/574
-[#602]: https://github.com/uber-go/zap/pull/602
-[#572]: https://github.com/uber-go/zap/pull/572
-[#606]: https://github.com/uber-go/zap/pull/606
-[#614]: https://github.com/uber-go/zap/pull/614
-[#657]: https://github.com/uber-go/zap/pull/657
-[#706]: https://github.com/uber-go/zap/pull/706
-[#610]: https://github.com/uber-go/zap/pull/610
-[#675]: https://github.com/uber-go/zap/pull/675
-[#704]: https://github.com/uber-go/zap/pull/704
-[#725]: https://github.com/uber-go/zap/pull/725
-[#736]: https://github.com/uber-go/zap/pull/736
-[#751]: https://github.com/uber-go/zap/pull/751
-[#758]: https://github.com/uber-go/zap/pull/758
-[#771]: https://github.com/uber-go/zap/pull/771
-[#773]: https://github.com/uber-go/zap/pull/773
-[#775]: https://github.com/uber-go/zap/pull/775
-[#786]: https://github.com/uber-go/zap/pull/786
-[#791]: https://github.com/uber-go/zap/pull/791
-[#795]: https://github.com/uber-go/zap/pull/795
-[#799]: https://github.com/uber-go/zap/pull/799
-[#804]: https://github.com/uber-go/zap/pull/804
-[#812]: https://github.com/uber-go/zap/pull/812
-[#806]: https://github.com/uber-go/zap/pull/806
-[#813]: https://github.com/uber-go/zap/pull/813
-[#629]: https://github.com/uber-go/zap/pull/629
-[#697]: https://github.com/uber-go/zap/pull/697
-[#828]: https://github.com/uber-go/zap/pull/828
-[#835]: https://github.com/uber-go/zap/pull/835
-[#843]: https://github.com/uber-go/zap/pull/843
-[#844]: https://github.com/uber-go/zap/pull/844
-[#852]: https://github.com/uber-go/zap/pull/852
-[#854]: https://github.com/uber-go/zap/pull/854
-[#861]: https://github.com/uber-go/zap/pull/861
-[#862]: https://github.com/uber-go/zap/pull/862
-[#865]: https://github.com/uber-go/zap/pull/865
-[#867]: https://github.com/uber-go/zap/pull/867
-[#881]: https://github.com/uber-go/zap/pull/881
-[#903]: https://github.com/uber-go/zap/pull/903
-[#912]: https://github.com/uber-go/zap/pull/912
-[#913]: https://github.com/uber-go/zap/pull/913
-[#928]: https://github.com/uber-go/zap/pull/928
-[#931]: https://github.com/uber-go/zap/pull/931
-[#936]: https://github.com/uber-go/zap/pull/936
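To make two of the changelog entries above concrete (`zap.Must` from 1.22.0 and `Logger.WithLazy` from 1.26.0), a hedged sketch; `expensiveLabel` is a hypothetical stand-in:

```go
package main

import "go.uber.org/zap"

// expensiveLabel stands in for a costly computation; hypothetical helper.
func expensiveLabel() string { return "label" }

func main() {
	// zap.Must panics if the logger cannot be built, replacing the
	// usual NewProduction error-handling boilerplate.
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	// WithLazy defers cloning the logger and encoding these fields
	// until the child logger is first used.
	lazy := logger.WithLazy(zap.String("label", expensiveLabel()))
	lazy.Info("request handled")
}
```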
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
index 5cd9656..ea02f3c 100644
--- a/vendor/go.uber.org/zap/CONTRIBUTING.md
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -16,7 +16,7 @@
 
 [Fork][fork], then clone the repository:
 
-```
+```bash
 mkdir -p $GOPATH/src/go.uber.org
 cd $GOPATH/src/go.uber.org
 git clone git@github.com:your_github_username/zap.git
@@ -27,21 +27,16 @@
 
 Make sure that the tests and the linters pass:
 
-```
+```bash
 make test
 make lint
 ```
 
-If you're not using the minor version of Go specified in the Makefile's
-`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
-fine, but it means that you'll only discover lint failures after you open your
-pull request.
-
 ## Making Changes
 
 Start by creating a new branch for your changes:
 
-```
+```bash
 cd $GOPATH/src/go.uber.org/zap
 git checkout master
 git fetch upstream
@@ -52,22 +47,22 @@
 Make your changes, then ensure that `make lint` and `make test` still pass. If
 you're satisfied with your changes, push them to your fork.
 
-```
+```bash
 git push origin cool_new_feature
 ```
 
 Then use the GitHub UI to open a pull request.
 
-At this point, you're waiting on us to review your changes. We *try* to respond
+At this point, you're waiting on us to review your changes. We _try_ to respond
 to issues and pull requests within a few business days, and we may suggest some
 improvements or alternatives. Once your changes are approved, one of the
 project maintainers will merge them.
 
 We're much more likely to approve your changes if you:
 
-* Add tests for new functionality.
-* Write a [good commit message][commit-message].
-* Maintain backward compatibility.
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
 
 [fork]: https://github.com/uber-go/zap/fork
 [open-issue]: https://github.com/uber-go/zap/issues/new
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE
similarity index 100%
rename from vendor/go.uber.org/zap/LICENSE.txt
rename to vendor/go.uber.org/zap/LICENSE
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
index 9b1bc3b..eb1cee5 100644
--- a/vendor/go.uber.org/zap/Makefile
+++ b/vendor/go.uber.org/zap/Makefile
@@ -1,50 +1,51 @@
-export GOBIN ?= $(shell pwd)/bin
+# Directory containing the Makefile.
+PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
 
-GOLINT = $(GOBIN)/golint
-STATICCHECK = $(GOBIN)/staticcheck
+export GOBIN ?= $(PROJECT_ROOT)/bin
+export PATH := $(GOBIN):$(PATH)
+
+GOVULNCHECK = $(GOBIN)/govulncheck
 BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
 
 # Directories containing independent Go modules.
-#
-# We track coverage only for the main module.
-MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
 
-# Many Go tools take file globs or directories as arguments instead of packages.
-GO_FILES := $(shell \
-	find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
-	-o -name '*.go' -print | cut -b3-)
+# Directories that we want to track coverage for.
+COVER_DIRS = . ./exp
 
 .PHONY: all
 all: lint test
 
 .PHONY: lint
-lint: $(GOLINT) $(STATICCHECK)
-	@rm -rf lint.log
-	@echo "Checking formatting..."
-	@gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
-	@echo "Checking vet..."
-	@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
-	@echo "Checking lint..."
-	@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
-	@echo "Checking staticcheck..."
-	@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
-	@echo "Checking for unresolved FIXMEs..."
-	@git grep -i fixme | grep -v -e Makefile | tee -a lint.log
-	@echo "Checking for license headers..."
-	@./checklicense.sh | tee -a lint.log
-	@[ ! -s lint.log ]
-	@echo "Checking 'go mod tidy'..."
-	@make tidy
-	@if ! git diff --quiet; then \
-		echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
-		git --no-pager diff; \
-	fi
+lint: golangci-lint tidy-lint license-lint
 
-$(GOLINT):
-	cd tools && go install golang.org/x/lint/golint
+.PHONY: golangci-lint
+golangci-lint:
+	@$(foreach mod,$(MODULE_DIRS), \
+		(cd $(mod) && \
+		echo "[lint] golangci-lint: $(mod)" && \
+		golangci-lint run --path-prefix $(mod)) &&) true
 
-$(STATICCHECK):
-	cd tools && go install honnef.co/go/tools/cmd/staticcheck
+.PHONY: tidy
+tidy:
+	@$(foreach dir,$(MODULE_DIRS), \
+		(cd $(dir) && go mod tidy) &&) true
+
+.PHONY: tidy-lint
+tidy-lint:
+	@$(foreach mod,$(MODULE_DIRS), \
+		(cd $(mod) && \
+		echo "[lint] tidy: $(mod)" && \
+		go mod tidy && \
+		git diff --exit-code -- go.mod go.sum) &&) true
+
+.PHONY: license-lint
+license-lint:
+	./checklicense.sh
+
+$(GOVULNCHECK):
+	cd tools && go install golang.org/x/vuln/cmd/govulncheck
 
 .PHONY: test
 test:
@@ -52,8 +53,10 @@
 
 .PHONY: cover
 cover:
-	go test -race -coverprofile=cover.out -coverpkg=./... ./...
-	go tool cover -html=cover.out -o cover.html
+	@$(foreach dir,$(COVER_DIRS), ( \
+		cd $(dir) && \
+		go test -race -coverprofile=cover.out -coverpkg=./... ./... \
+		&& go tool cover -html=cover.out -o cover.html) &&) true
 
 .PHONY: bench
 BENCH ?= .
@@ -68,6 +71,6 @@
 	rm -f README.md
 	cat .readme.tmpl | go run internal/readme/readme.go > README.md
 
-.PHONY: tidy
-tidy:
-	@$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
+.PHONY: vulncheck
+vulncheck: $(GOVULNCHECK)
+	$(GOVULNCHECK) ./...
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
index 1e64d6c..a17035c 100644
--- a/vendor/go.uber.org/zap/README.md
+++ b/vendor/go.uber.org/zap/README.md
@@ -1,7 +1,16 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# :zap: zap
+
+
+<div align="center">
 
 Blazing fast, structured, leveled logging in Go.
 
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
 ## Installation
 
 `go get -u go.uber.org/zap`
@@ -66,38 +75,44 @@
 
 | Package | Time | Time % to zap | Objects Allocated |
 | :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 862 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
-| zerolog | 4021 ns/op | +366% | 76 allocs/op
-| go-kit | 4542 ns/op | +427% | 105 allocs/op
-| apex/log | 26785 ns/op | +3007% | 115 allocs/op
-| logrus | 29501 ns/op | +3322% | 125 allocs/op
-| log15 | 29906 ns/op | +3369% | 122 allocs/op
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
 
 Log a message with a logger that already has 10 fields of context:
 
 | Package | Time | Time % to zap | Objects Allocated |
 | :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 126 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
-| zerolog | 88 ns/op | -30% | 0 allocs/op
-| go-kit | 5087 ns/op | +3937% | 103 allocs/op
-| log15 | 18548 ns/op | +14621% | 73 allocs/op
-| apex/log | 26012 ns/op | +20544% | 104 allocs/op
-| logrus | 27236 ns/op | +21516% | 113 allocs/op
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
 
 Log a static string, without any context or `printf`-style templating:
 
 | Package | Time | Time % to zap | Objects Allocated |
 | :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 118 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
-| zerolog | 93 ns/op | -21% | 0 allocs/op
-| go-kit | 280 ns/op | +137% | 11 allocs/op
-| standard library | 499 ns/op | +323% | 2 allocs/op
-| apex/log | 1990 ns/op | +1586% | 10 allocs/op
-| logrus | 3129 ns/op | +2552% | 24 allocs/op
-| log15 | 3887 ns/op | +3194% | 23 allocs/op
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
 
 ## Development Status: Stable
 
@@ -117,7 +132,7 @@
 
 <hr>
 
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
 
 <sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
 benchmarking against slightly older versions of other packages. Versions are
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
index 5be3704..abfccb5 100644
--- a/vendor/go.uber.org/zap/array.go
+++ b/vendor/go.uber.org/zap/array.go
@@ -21,6 +21,7 @@
 package zap
 
 import (
+	"fmt"
 	"time"
 
 	"go.uber.org/zap/zapcore"
@@ -94,11 +95,137 @@
 	return Array(key, int8s(nums))
 }
 
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+//	type Author struct{ ... }
+//	func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+//	var authors []Author = ...
+//	logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+//	type Request struct{ ... }
+//	func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+//	var requests []*Request = ...
+//	logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+//	var requests []Request = ...
+//	logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+	return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for _, o := range os {
+		if err := arr.AppendObject(o); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+	*T
+	zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+//	type Request struct{ ... }
+//	func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+//	var requests []Request = ...
+//	logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+//	var requests []*Request = ...
+//	logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+	return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for i := range os {
+		// It is necessary for us to explicitly reference the "P" type.
+		// We cannot simply pass "&os[i]" to AppendObject because its type
+		// is "*T", which the type system does not consider as
+		// implementing ObjectMarshaler.
+		// Only the type "P" satisfies ObjectMarshaler, which we have
+		// to convert "*T" to explicitly.
+		var p P = &os[i]
+		if err := arr.AppendObject(p); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // Strings constructs a field that carries a slice of strings.
 func Strings(key string, ss []string) Field {
 	return Array(key, stringArray(ss))
 }
 
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+//	type Request struct{ ... }
+//	func (r Request) String() string
+//
+//	var requests []Request = ...
+//	logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+	return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+	for _, o := range os {
+		arr.AppendString(o.String())
+	}
+	return nil
+}
+
 // Times constructs a field that carries a slice of time.Times.
 func Times(key string, ts []time.Time) Field {
 	return Array(key, times(ts))
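
Taken together, the new generic constructors make typed slices loggable without hand-rolled ArrayMarshaler wrappers. Below is a minimal sketch of how they might be used; the Author and Request types are hypothetical, mirroring the doc comments above.

```go
package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author marshals on the value receiver, so []Author works with zap.Objects.
type Author struct{ Name string }

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

// Request marshals on the pointer receiver, so []Request needs
// zap.ObjectValues ([]*Request would use zap.Objects instead).
type Request struct{ ID int }

func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddInt("id", r.ID)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("batch",
		zap.Objects("authors", []Author{{Name: "jane"}}),
		zap.ObjectValues("requests", []Request{{ID: 1}, {ID: 2}}),
		// time.Duration implements fmt.Stringer, so Stringers renders
		// each element via its String method.
		zap.Stringers("timeouts", []time.Duration{time.Second, 2 * time.Second}),
	)
}
```
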
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
index 9e929cd..0b8540c 100644
--- a/vendor/go.uber.org/zap/buffer/buffer.go
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -42,6 +42,11 @@
 	b.bs = append(b.bs, v)
 }
 
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+	b.bs = append(b.bs, v...)
+}
+
 // AppendString writes a string to the Buffer.
 func (b *Buffer) AppendString(s string) {
 	b.bs = append(b.bs, s...)
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
index 8fb3e20..8463233 100644
--- a/vendor/go.uber.org/zap/buffer/pool.go
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -20,25 +20,29 @@
 
 package buffer
 
-import "sync"
+import (
+	"go.uber.org/zap/internal/pool"
+)
 
 // A Pool is a type-safe wrapper around a sync.Pool.
 type Pool struct {
-	p *sync.Pool
+	p *pool.Pool[*Buffer]
 }
 
 // NewPool constructs a new Pool.
 func NewPool() Pool {
-	return Pool{p: &sync.Pool{
-		New: func() interface{} {
-			return &Buffer{bs: make([]byte, 0, _size)}
-		},
-	}}
+	return Pool{
+		p: pool.New(func() *Buffer {
+			return &Buffer{
+				bs: make([]byte, 0, _size),
+			}
+		}),
+	}
 }
 
 // Get retrieves a Buffer from the pool, creating one if necessary.
 func (p Pool) Get() *Buffer {
-	buf := p.p.Get().(*Buffer)
+	buf := p.p.Get()
 	buf.Reset()
 	buf.pool = p
 	return buf
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
index 55637fb..e76e4e6 100644
--- a/vendor/go.uber.org/zap/config.go
+++ b/vendor/go.uber.org/zap/config.go
@@ -21,7 +21,7 @@
 package zap
 
 import (
-	"fmt"
+	"errors"
 	"sort"
 	"time"
 
@@ -95,6 +95,32 @@
 
 // NewProductionEncoderConfig returns an opinionated EncoderConfig for
 // production environments.
+//
+// Messages encoded with this configuration will be JSON-formatted
+// and will have the following keys by default:
+//
+//   - "level": The logging level (e.g. "info", "error").
+//   - "ts": The current time in number of seconds since the Unix epoch.
+//   - "msg": The message passed to the log statement.
+//   - "caller": If available, a short path to the file and line number
+//     where the log statement was issued.
+//     The logger configuration determines whether this field is captured.
+//   - "stacktrace": If available, a stack trace from the line
+//     where the log statement was issued.
+//     The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+//   - Time is formatted as floating-point number of seconds since the Unix
+//     epoch.
+//   - Duration is formatted as floating-point number of seconds.
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+//	cfg := zap.NewProductionEncoderConfig()
+//	cfg.EncodeTime = zapcore.ISO8601TimeEncoder
 func NewProductionEncoderConfig() zapcore.EncoderConfig {
 	return zapcore.EncoderConfig{
 		TimeKey:        "ts",
@@ -112,11 +138,22 @@
 	}
 }
 
-// NewProductionConfig is a reasonable production logging configuration.
-// Logging is enabled at InfoLevel and above.
+// NewProductionConfig builds a reasonable default production logging
+// configuration.
+// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of ErrorLevel and above.
+// DPanicLevel logs will not panic, but will write a stacktrace.
 //
-// It uses a JSON encoder, writes to standard error, and enables sampling.
-// Stacktraces are automatically included on logs of ErrorLevel and above.
+// Sampling is enabled at 100:100 by default,
+// meaning that after the first 100 log entries
+// with the same level and message in the same second,
+// it will log every 100th entry
+// with the same level and message in the same second.
+// You may disable this behavior by setting Sampling to nil.
+//
+// See [NewProductionEncoderConfig] for information
+// on the default encoder configuration.
 func NewProductionConfig() Config {
 	return Config{
 		Level:       NewAtomicLevelAt(InfoLevel),
@@ -134,6 +171,32 @@
 
 // NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
 // development environments.
+//
+// Messages encoded with this configuration will use Zap's console encoder
+// intended to print human-readable output.
+// It will print log messages with the following information:
+//
+//   - The log level (e.g. "INFO", "ERROR").
+//   - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+//   - The message passed to the log statement.
+//   - If available, a short path to the file and line number
+//     where the log statement was issued.
+//     The logger configuration determines whether this field is captured.
+//   - If available, a stacktrace from the line
+//     where the log statement was issued.
+//     The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+//   - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+//   - Duration is formatted as a string (e.g. "1.234s").
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+//	cfg := zap.NewDevelopmentEncoderConfig()
+//	cfg.EncodeTime = zapcore.ISO8601TimeEncoder
 func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
 	return zapcore.EncoderConfig{
 		// Keys can be anything except the empty string.
@@ -152,12 +215,15 @@
 	}
 }
 
-// NewDevelopmentConfig is a reasonable development logging configuration.
-// Logging is enabled at DebugLevel and above.
+// NewDevelopmentConfig builds a reasonable default development logging
+// configuration.
+// Logging is enabled at DebugLevel and above, and uses a console encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of WarnLevel and above.
+// DPanicLevel logs will panic.
 //
-// It enables development mode (which makes DPanicLevel logs panic), uses a
-// console encoder, writes to standard error, and disables sampling.
-// Stacktraces are automatically included on logs of WarnLevel and above.
+// See [NewDevelopmentEncoderConfig] for information
+// on the default encoder configuration.
 func NewDevelopmentConfig() Config {
 	return Config{
 		Level:            NewAtomicLevelAt(DebugLevel),
@@ -182,7 +248,7 @@
 	}
 
 	if cfg.Level == (AtomicLevel{}) {
-		return nil, fmt.Errorf("missing Level")
+		return nil, errors.New("missing Level")
 	}
 
 	log := New(
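
The expanded doc comments spell out the production defaults: JSON to standard error, 100:100 sampling, stacktraces at ErrorLevel and above. A small sketch of overriding those defaults, assuming nothing beyond the Config fields documented above:

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()

	// Swap the "ts" field from epoch seconds to ISO8601, as the doc
	// comment suggests.
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder

	// Drop the 100:100 sampler so every entry is logged.
	cfg.Sampling = nil

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("sampling disabled, ISO8601 timestamps on")
}
```
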
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
index 8638dd1..3c50d7b 100644
--- a/vendor/go.uber.org/zap/doc.go
+++ b/vendor/go.uber.org/zap/doc.go
@@ -32,7 +32,7 @@
 // they need to count every allocation and when they'd prefer a more familiar,
 // loosely typed API.
 //
-// Choosing a Logger
+// # Choosing a Logger
 //
 // In contexts where performance is nice, but not critical, use the
 // SugaredLogger. It's 4-10x faster than other structured logging packages and
@@ -41,14 +41,15 @@
 // variadic number of key-value pairs. (For more advanced use cases, they also
 // accept strongly typed fields - see the SugaredLogger.With documentation for
 // details.)
-//  sugar := zap.NewExample().Sugar()
-//  defer sugar.Sync()
-//  sugar.Infow("failed to fetch URL",
-//    "url", "http://example.com",
-//    "attempt", 3,
-//    "backoff", time.Second,
-//  )
-//  sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+//	sugar := zap.NewExample().Sugar()
+//	defer sugar.Sync()
+//	sugar.Infow("failed to fetch URL",
+//	  "url", "http://example.com",
+//	  "attempt", 3,
+//	  "backoff", time.Second,
+//	)
+//	sugar.Infof("failed to fetch URL: %s", "http://example.com")
 //
 // By default, loggers are unbuffered. However, since zap's low-level APIs
 // allow buffering, calling Sync before letting your process exit is a good
@@ -57,32 +58,35 @@
 // In the rare contexts where every microsecond and every allocation matter,
 // use the Logger. It's even faster than the SugaredLogger and allocates far
 // less, but it only supports strongly-typed, structured logging.
-//  logger := zap.NewExample()
-//  defer logger.Sync()
-//  logger.Info("failed to fetch URL",
-//    zap.String("url", "http://example.com"),
-//    zap.Int("attempt", 3),
-//    zap.Duration("backoff", time.Second),
-//  )
+//
+//	logger := zap.NewExample()
+//	defer logger.Sync()
+//	logger.Info("failed to fetch URL",
+//	  zap.String("url", "http://example.com"),
+//	  zap.Int("attempt", 3),
+//	  zap.Duration("backoff", time.Second),
+//	)
 //
 // Choosing between the Logger and SugaredLogger doesn't need to be an
 // application-wide decision: converting between the two is simple and
 // inexpensive.
-//   logger := zap.NewExample()
-//   defer logger.Sync()
-//   sugar := logger.Sugar()
-//   plain := sugar.Desugar()
 //
-// Configuring Zap
+//	logger := zap.NewExample()
+//	defer logger.Sync()
+//	sugar := logger.Sugar()
+//	plain := sugar.Desugar()
+//
+// # Configuring Zap
 //
 // The simplest way to build a Logger is to use zap's opinionated presets:
 // NewExample, NewProduction, and NewDevelopment. These presets build a logger
 // with a single function call:
-//  logger, err := zap.NewProduction()
-//  if err != nil {
-//    log.Fatalf("can't initialize zap logger: %v", err)
-//  }
-//  defer logger.Sync()
+//
+//	logger, err := zap.NewProduction()
+//	if err != nil {
+//	  log.Fatalf("can't initialize zap logger: %v", err)
+//	}
+//	defer logger.Sync()
 //
 // Presets are fine for small projects, but larger projects and organizations
 // naturally require a bit more customization. For most users, zap's Config
@@ -94,7 +98,7 @@
 // go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
 // example for sample code.
 //
-// Extending Zap
+// # Extending Zap
 //
 // The zap package itself is a relatively thin wrapper around the interfaces
 // in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@@ -106,7 +110,7 @@
 // Similarly, package authors can use the high-performance Encoder and Core
 // implementations in the zapcore package to build their own loggers.
 //
-// Frequently Asked Questions
+// # Frequently Asked Questions
 //
 // An FAQ covering everything from installation errors to design decisions is
 // available at https://github.com/uber-go/zap/blob/master/FAQ.md.
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
index 08ed833..caa04ce 100644
--- a/vendor/go.uber.org/zap/encoder.go
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -63,7 +63,7 @@
 
 func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
 	if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
-		return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+		return nil, errors.New("missing EncodeTime in EncoderConfig")
 	}
 
 	_encoderMutex.RLock()
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
index 65982a5..45f7b83 100644
--- a/vendor/go.uber.org/zap/error.go
+++ b/vendor/go.uber.org/zap/error.go
@@ -21,14 +21,13 @@
 package zap
 
 import (
-	"sync"
-
+	"go.uber.org/zap/internal/pool"
 	"go.uber.org/zap/zapcore"
 )
 
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
 	return &errArrayElem{}
-}}
+})
 
 // Error is shorthand for the common idiom NamedError("error", err).
 func Error(err error) Field {
@@ -60,11 +59,14 @@
 		// potentially an "errorVerbose" attribute, we need to wrap it in a
 		// type that implements zapcore.ObjectMarshaler. To prevent this from
 		// allocating, pool the wrapper type.
-		elem := _errArrayElemPool.Get().(*errArrayElem)
+		elem := _errArrayElemPool.Get()
 		elem.error = errs[i]
-		arr.AppendObject(elem)
+		err := arr.AppendObject(elem)
 		elem.error = nil
 		_errArrayElemPool.Put(elem)
+		if err != nil {
+			return err
+		}
 	}
 	return nil
 }
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
index bbb745d..6743930 100644
--- a/vendor/go.uber.org/zap/field.go
+++ b/vendor/go.uber.org/zap/field.go
@@ -25,6 +25,7 @@
 	"math"
 	"time"
 
+	"go.uber.org/zap/internal/stacktrace"
 	"go.uber.org/zap/zapcore"
 )
 
@@ -374,7 +375,7 @@
 	// from expanding the zapcore.Field union struct to include a byte slice. Since
 	// taking a stacktrace is already so expensive (~10us), the extra allocation
 	// is okay.
-	return String(key, takeStacktrace(skip+1)) // skip StackSkip
+	return String(key, stacktrace.Take(skip+1)) // skip StackSkip
 }
 
 // Duration constructs a field with the given key and value. The encoder
@@ -410,6 +411,65 @@
 	}
 }
 
+// Dict constructs a field containing the provided key-value pairs.
+// It acts similar to [Object], but with the fields specified as arguments.
+func Dict(key string, val ...Field) Field {
+	return dictField(key, val)
+}
+
+// We need a function with the signature (string, T) for zap.Any.
+func dictField(key string, val []Field) Field {
+	return Object(key, dictObject(val))
+}
+
+type dictObject []Field
+
+func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+	for _, f := range d {
+		f.AddTo(enc)
+	}
+	return nil
+}
+
+// We discovered an issue where zap.Any can cause a performance degradation
+// when used in new goroutines.
+//
+// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
+// the switch statement) of stack space for zap.Any when it takes the form:
+//
+//	switch v := v.(type) {
+//	case string:
+//		return String(key, v)
+//	case int:
+//		return Int(key, v)
+//		// ...
+//	default:
+//		return Reflect(key, v)
+//	}
+//
+// To avoid this, we use the type switch to assign a value to a single local variable
+// and then call a function on it.
+// The local variable is just a function reference so it doesn't allocate
+// when converted to an interface{}.
+//
+// A fair bit of experimentation went into this.
+// See also:
+//
+// - https://github.com/uber-go/zap/pull/1301
+// - https://github.com/uber-go/zap/pull/1303
+// - https://github.com/uber-go/zap/pull/1304
+// - https://github.com/uber-go/zap/pull/1305
+// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for the upstream issue.
+type anyFieldC[T any] func(string, T) Field
+
+func (f anyFieldC[T]) Any(key string, val any) Field {
+	v, _ := val.(T)
+	// val is guaranteed to be a T, except when it's nil.
+	return f(key, v)
+}
+
 // Any takes a key and an arbitrary value and chooses the best way to represent
 // them as a field, falling back to a reflection-based approach only if
 // necessary.
@@ -418,132 +478,138 @@
 // them. To minimize surprises, []byte values are treated as binary blobs, byte
 // values are treated as uint8, and runes are always treated as integers.
 func Any(key string, value interface{}) Field {
-	switch val := value.(type) {
+	var c interface{ Any(string, any) Field }
+
+	switch value.(type) {
 	case zapcore.ObjectMarshaler:
-		return Object(key, val)
+		c = anyFieldC[zapcore.ObjectMarshaler](Object)
 	case zapcore.ArrayMarshaler:
-		return Array(key, val)
+		c = anyFieldC[zapcore.ArrayMarshaler](Array)
+	case []Field:
+		c = anyFieldC[[]Field](dictField)
 	case bool:
-		return Bool(key, val)
+		c = anyFieldC[bool](Bool)
 	case *bool:
-		return Boolp(key, val)
+		c = anyFieldC[*bool](Boolp)
 	case []bool:
-		return Bools(key, val)
+		c = anyFieldC[[]bool](Bools)
 	case complex128:
-		return Complex128(key, val)
+		c = anyFieldC[complex128](Complex128)
 	case *complex128:
-		return Complex128p(key, val)
+		c = anyFieldC[*complex128](Complex128p)
 	case []complex128:
-		return Complex128s(key, val)
+		c = anyFieldC[[]complex128](Complex128s)
 	case complex64:
-		return Complex64(key, val)
+		c = anyFieldC[complex64](Complex64)
 	case *complex64:
-		return Complex64p(key, val)
+		c = anyFieldC[*complex64](Complex64p)
 	case []complex64:
-		return Complex64s(key, val)
+		c = anyFieldC[[]complex64](Complex64s)
 	case float64:
-		return Float64(key, val)
+		c = anyFieldC[float64](Float64)
 	case *float64:
-		return Float64p(key, val)
+		c = anyFieldC[*float64](Float64p)
 	case []float64:
-		return Float64s(key, val)
+		c = anyFieldC[[]float64](Float64s)
 	case float32:
-		return Float32(key, val)
+		c = anyFieldC[float32](Float32)
 	case *float32:
-		return Float32p(key, val)
+		c = anyFieldC[*float32](Float32p)
 	case []float32:
-		return Float32s(key, val)
+		c = anyFieldC[[]float32](Float32s)
 	case int:
-		return Int(key, val)
+		c = anyFieldC[int](Int)
 	case *int:
-		return Intp(key, val)
+		c = anyFieldC[*int](Intp)
 	case []int:
-		return Ints(key, val)
+		c = anyFieldC[[]int](Ints)
 	case int64:
-		return Int64(key, val)
+		c = anyFieldC[int64](Int64)
 	case *int64:
-		return Int64p(key, val)
+		c = anyFieldC[*int64](Int64p)
 	case []int64:
-		return Int64s(key, val)
+		c = anyFieldC[[]int64](Int64s)
 	case int32:
-		return Int32(key, val)
+		c = anyFieldC[int32](Int32)
 	case *int32:
-		return Int32p(key, val)
+		c = anyFieldC[*int32](Int32p)
 	case []int32:
-		return Int32s(key, val)
+		c = anyFieldC[[]int32](Int32s)
 	case int16:
-		return Int16(key, val)
+		c = anyFieldC[int16](Int16)
 	case *int16:
-		return Int16p(key, val)
+		c = anyFieldC[*int16](Int16p)
 	case []int16:
-		return Int16s(key, val)
+		c = anyFieldC[[]int16](Int16s)
 	case int8:
-		return Int8(key, val)
+		c = anyFieldC[int8](Int8)
 	case *int8:
-		return Int8p(key, val)
+		c = anyFieldC[*int8](Int8p)
 	case []int8:
-		return Int8s(key, val)
+		c = anyFieldC[[]int8](Int8s)
 	case string:
-		return String(key, val)
+		c = anyFieldC[string](String)
 	case *string:
-		return Stringp(key, val)
+		c = anyFieldC[*string](Stringp)
 	case []string:
-		return Strings(key, val)
+		c = anyFieldC[[]string](Strings)
 	case uint:
-		return Uint(key, val)
+		c = anyFieldC[uint](Uint)
 	case *uint:
-		return Uintp(key, val)
+		c = anyFieldC[*uint](Uintp)
 	case []uint:
-		return Uints(key, val)
+		c = anyFieldC[[]uint](Uints)
 	case uint64:
-		return Uint64(key, val)
+		c = anyFieldC[uint64](Uint64)
 	case *uint64:
-		return Uint64p(key, val)
+		c = anyFieldC[*uint64](Uint64p)
 	case []uint64:
-		return Uint64s(key, val)
+		c = anyFieldC[[]uint64](Uint64s)
 	case uint32:
-		return Uint32(key, val)
+		c = anyFieldC[uint32](Uint32)
 	case *uint32:
-		return Uint32p(key, val)
+		c = anyFieldC[*uint32](Uint32p)
 	case []uint32:
-		return Uint32s(key, val)
+		c = anyFieldC[[]uint32](Uint32s)
 	case uint16:
-		return Uint16(key, val)
+		c = anyFieldC[uint16](Uint16)
 	case *uint16:
-		return Uint16p(key, val)
+		c = anyFieldC[*uint16](Uint16p)
 	case []uint16:
-		return Uint16s(key, val)
+		c = anyFieldC[[]uint16](Uint16s)
 	case uint8:
-		return Uint8(key, val)
+		c = anyFieldC[uint8](Uint8)
 	case *uint8:
-		return Uint8p(key, val)
+		c = anyFieldC[*uint8](Uint8p)
 	case []byte:
-		return Binary(key, val)
+		c = anyFieldC[[]byte](Binary)
 	case uintptr:
-		return Uintptr(key, val)
+		c = anyFieldC[uintptr](Uintptr)
 	case *uintptr:
-		return Uintptrp(key, val)
+		c = anyFieldC[*uintptr](Uintptrp)
 	case []uintptr:
-		return Uintptrs(key, val)
+		c = anyFieldC[[]uintptr](Uintptrs)
 	case time.Time:
-		return Time(key, val)
+		c = anyFieldC[time.Time](Time)
 	case *time.Time:
-		return Timep(key, val)
+		c = anyFieldC[*time.Time](Timep)
 	case []time.Time:
-		return Times(key, val)
+		c = anyFieldC[[]time.Time](Times)
 	case time.Duration:
-		return Duration(key, val)
+		c = anyFieldC[time.Duration](Duration)
 	case *time.Duration:
-		return Durationp(key, val)
+		c = anyFieldC[*time.Duration](Durationp)
 	case []time.Duration:
-		return Durations(key, val)
+		c = anyFieldC[[]time.Duration](Durations)
 	case error:
-		return NamedError(key, val)
+		c = anyFieldC[error](NamedError)
 	case []error:
-		return Errors(key, val)
+		c = anyFieldC[[]error](Errors)
 	case fmt.Stringer:
-		return Stringer(key, val)
+		c = anyFieldC[fmt.Stringer](Stringer)
 	default:
-		return Reflect(key, val)
+		c = anyFieldC[any](Reflect)
 	}
+
+	return c.Any(key, value)
 }
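
The rework changes how Any dispatches internally, but not what callers see: each arm still lands on the same strongly typed constructor, and only the default arm falls back to reflection. A usage sketch, including the new zap.Dict from this diff:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("mixed fields",
		zap.Any("attempt", 3),           // dispatches to Int
		zap.Any("backoff", time.Second), // dispatches to Duration
		zap.Any("urls", []string{"a"}),  // dispatches to Strings
		// Dict nests key-value pairs under one key, like Object but
		// with plain Fields as arguments.
		zap.Dict("request",
			zap.String("method", "GET"),
			zap.Int("status", 200),
		),
	)
}
```
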
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
index c1ac050..3cb46c9 100644
--- a/vendor/go.uber.org/zap/global.go
+++ b/vendor/go.uber.org/zap/global.go
@@ -31,6 +31,7 @@
 )
 
 const (
+	_stdLogDefaultDepth      = 1
 	_loggerWriterDepth       = 2
 	_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
 		"https://github.com/uber-go/zap/issues/new and reference this error: %v"
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go
deleted file mode 100644
index 6b5dbda..0000000
--- a/vendor/go.uber.org/zap/global_go112.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// See #682 for more information.
-// +build go1.12
-
-package zap
-
-const _stdLogDefaultDepth = 1
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go
deleted file mode 100644
index d3ab9af..0000000
--- a/vendor/go.uber.org/zap/global_prego112.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// See #682 for more information.
-// +build !go1.12
-
-package zap
-
-const _stdLogDefaultDepth = 2
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
index 1297c33..2be8f65 100644
--- a/vendor/go.uber.org/zap/http_handler.go
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -22,6 +22,7 @@
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -32,22 +33,23 @@
 // ServeHTTP is a simple JSON endpoint that can report on or change the current
 // logging level.
 //
-// GET
+// # GET
 //
 // The GET request returns a JSON description of the current logging level like:
-//   {"level":"info"}
 //
-// PUT
+//	{"level":"info"}
+//
+// # PUT
 //
 // The PUT request changes the logging level. It is perfectly safe to change the
 // logging level while a program is running. Two content types are supported:
 //
-//    Content-Type: application/x-www-form-urlencoded
+//	Content-Type: application/x-www-form-urlencoded
 //
 // With this content type, the level can be provided through the request body or
 // a query parameter. The log level is URL encoded like:
 //
-//    level=debug
+//	level=debug
 //
 // The request body takes precedence over the query parameter, if both are
 // specified.
@@ -55,19 +57,25 @@
 // This content type is the default for a curl PUT request. Following are two
 // example curl requests that both set the logging level to debug.
 //
-//    curl -X PUT localhost:8080/log/level?level=debug
-//    curl -X PUT localhost:8080/log/level -d level=debug
+//	curl -X PUT localhost:8080/log/level?level=debug
+//	curl -X PUT localhost:8080/log/level -d level=debug
 //
 // For any other content type, the payload is expected to be JSON encoded and
 // look like:
 //
-//   {"level":"info"}
+//	{"level":"info"}
 //
 // An example curl request could look like this:
 //
-//    curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
-//
+//	curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
 func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	if err := lvl.serveHTTP(w, r); err != nil {
+		w.WriteHeader(http.StatusInternalServerError)
+		fmt.Fprintf(w, "internal error: %v", err)
+	}
+}
+
+func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
 	type errorResponse struct {
 		Error string `json:"error"`
 	}
@@ -79,19 +87,20 @@
 
 	switch r.Method {
 	case http.MethodGet:
-		enc.Encode(payload{Level: lvl.Level()})
+		return enc.Encode(payload{Level: lvl.Level()})
+
 	case http.MethodPut:
 		requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
 		if err != nil {
 			w.WriteHeader(http.StatusBadRequest)
-			enc.Encode(errorResponse{Error: err.Error()})
-			return
+			return enc.Encode(errorResponse{Error: err.Error()})
 		}
 		lvl.SetLevel(requestedLvl)
-		enc.Encode(payload{Level: lvl.Level()})
+		return enc.Encode(payload{Level: lvl.Level()})
+
 	default:
 		w.WriteHeader(http.StatusMethodNotAllowed)
-		enc.Encode(errorResponse{
+		return enc.Encode(errorResponse{
 			Error: "Only GET and PUT are supported.",
 		})
 	}
@@ -108,7 +117,7 @@
 func decodePutURL(r *http.Request) (zapcore.Level, error) {
 	lvl := r.FormValue("level")
 	if lvl == "" {
-		return 0, fmt.Errorf("must specify logging level")
+		return 0, errors.New("must specify logging level")
 	}
 	var l zapcore.Level
 	if err := l.UnmarshalText([]byte(lvl)); err != nil {
@@ -125,8 +134,7 @@
 		return 0, fmt.Errorf("malformed request body: %v", err)
 	}
 	if pld.Level == nil {
-		return 0, fmt.Errorf("must specify logging level")
+		return 0, errors.New("must specify logging level")
 	}
 	return *pld.Level, nil
-
 }
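
Because ServeHTTP is defined on AtomicLevel, the level can be mounted as an http.Handler directly. A sketch of the wiring; the path and address are illustrative, not part of zap:

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
)

func main() {
	lvl := zap.NewAtomicLevel()

	// GET /log/level reports {"level":"info"}; PUT changes it at runtime.
	http.Handle("/log/level", lvl)

	// Address and path are illustrative only.
	_ = http.ListenAndServe("localhost:8080", nil)
}
```
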
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
index dfc5b05..f673f99 100644
--- a/vendor/go.uber.org/zap/internal/exit/exit.go
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -24,24 +24,25 @@
 
 import "os"
 
-var real = func() { os.Exit(1) }
+var _exit = os.Exit
 
-// Exit normally terminates the process by calling os.Exit(1). If the package
-// is stubbed, it instead records a call in the testing spy.
-func Exit() {
-	real()
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+	_exit(code)
 }
 
 // A StubbedExit is a testing fake for os.Exit.
 type StubbedExit struct {
 	Exited bool
-	prev   func()
+	Code   int
+	prev   func(code int)
 }
 
 // Stub substitutes a fake for the call to os.Exit.
 func Stub() *StubbedExit {
-	s := &StubbedExit{prev: real}
-	real = s.exit
+	s := &StubbedExit{prev: _exit}
+	_exit = s.exit
 	return s
 }
 
@@ -56,9 +57,10 @@
 
 // Unstub restores the previous exit function.
 func (se *StubbedExit) Unstub() {
-	real = se.prev
+	_exit = se.prev
 }
 
-func (se *StubbedExit) exit() {
+func (se *StubbedExit) exit(code int) {
 	se.Exited = true
+	se.Code = code
 }
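
The reworked stub records the exit code as well as the fact of exiting. Since the package is internal, the test below is only a sketch of how zap's own tests might exercise it; it is not importable from outside the module:

```go
package exit_test

import (
	"testing"

	"go.uber.org/zap/internal/exit"
)

func TestWithRecordsCode(t *testing.T) {
	stub := exit.Stub()
	defer stub.Unstub()

	exit.With(3) // would be os.Exit(3) without the stub

	if !stub.Exited || stub.Code != 3 {
		t.Fatalf("exited=%v code=%d, want exited=true code=3", stub.Exited, stub.Code)
	}
}
```
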
diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go
new file mode 100644
index 0000000..40bfed8
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package internal and its subpackages hold types and functionality
+// that are not part of Zap's public API.
+package internal
+
+import "go.uber.org/zap/zapcore"
+
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined for more convenient use in tests and in
+// non-zapcore packages.
+// It cannot be imported from zapcore because that would create a cyclic
+// dependency.
+type LeveledEnabler interface {
+	zapcore.LevelEnabler
+
+	Level() zapcore.Level
+}
diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go
new file mode 100644
index 0000000..60e9d2c
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/pool/pool.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package pool provides internal pool utilities.
+package pool
+
+import (
+	"sync"
+)
+
+// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
+// object pooling.
+//
+// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
+// not be detected, so all internal pool use must take care to only store
+// pointer types.
+type Pool[T any] struct {
+	pool sync.Pool
+}
+
+// New returns a new [Pool] for T, and will use fn to construct new Ts when
+// the pool is empty.
+func New[T any](fn func() T) *Pool[T] {
+	return &Pool[T]{
+		pool: sync.Pool{
+			New: func() any {
+				return fn()
+			},
+		},
+	}
+}
+
+// Get gets a T from the pool, or creates a new one if the pool is empty.
+func (p *Pool[T]) Get() T {
+	return p.pool.Get().(T)
+}
+
+// Put returns x into the pool.
+func (p *Pool[T]) Put(x T) {
+	p.pool.Put(x)
+}
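
buffer.Pool and _errArrayElemPool above are both rebuilt on this generic wrapper. A sketch of the pattern with a hypothetical *bytes.Buffer pool; note the package is internal, so only code inside zap itself can import it:

```go
package main

import (
	"bytes"
	"fmt"

	"go.uber.org/zap/internal/pool"
)

func main() {
	// Store pointer types only: as the doc comment warns, staticcheck's
	// SA6002 won't catch value types through the generic wrapper.
	p := pool.New(func() *bytes.Buffer {
		return bytes.NewBuffer(make([]byte, 0, 1024))
	})

	buf := p.Get() // already a *bytes.Buffer; no type assertion needed
	buf.WriteString("hello")
	fmt.Println(buf.String())

	buf.Reset()
	p.Put(buf)
}
```
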
diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
new file mode 100644
index 0000000..82af755
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
+
+import (
+	"runtime"
+
+	"go.uber.org/zap/buffer"
+	"go.uber.org/zap/internal/bufferpool"
+	"go.uber.org/zap/internal/pool"
+)
+
+var _stackPool = pool.New(func() *Stack {
+	return &Stack{
+		storage: make([]uintptr, 64),
+	}
+})
+
+// Stack is a captured stack trace.
+type Stack struct {
+	pcs    []uintptr // program counters; always a subslice of storage
+	frames *runtime.Frames
+
+	// The size of pcs varies depending on requirements:
+	// it will be one if only the first frame was requested,
+	// and otherwise it will reflect the depth of the call stack.
+	//
+	// storage decouples the slice we need (pcs) from the slice we pool.
+	// We will always allocate a reasonably large storage, but we'll use
+	// only as much of it as we need.
+	storage []uintptr
+}
+
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
+
+const (
+	// First captures only the first frame.
+	First Depth = iota
+
+	// Full captures the entire call stack, allocating more
+	// storage for it if needed.
+	Full
+)
+
+// Capture captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// Capture.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func Capture(skip int, depth Depth) *Stack {
+	stack := _stackPool.Get()
+
+	switch depth {
+	case First:
+		stack.pcs = stack.storage[:1]
+	case Full:
+		stack.pcs = stack.storage
+	}
+
+	// Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+	// itself. +2 to skip Capture and runtime.Callers.
+	numFrames := runtime.Callers(
+		skip+2,
+		stack.pcs,
+	)
+
+	// runtime.Callers truncates the recorded stacktrace if there is no
+	// room in the provided slice. For the full stack trace, keep expanding
+	// storage until there are fewer frames than there is room.
+	if depth == Full {
+		pcs := stack.pcs
+		for numFrames == len(pcs) {
+			pcs = make([]uintptr, len(pcs)*2)
+			numFrames = runtime.Callers(skip+2, pcs)
+		}
+
+		// Discard old storage instead of returning it to the pool.
+		// This will adjust the pool size over time if stack traces are
+		// consistently very deep.
+		stack.storage = pcs
+		stack.pcs = pcs[:numFrames]
+	} else {
+		stack.pcs = stack.pcs[:numFrames]
+	}
+
+	stack.frames = runtime.CallersFrames(stack.pcs)
+	return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *Stack) Free() {
+	st.frames = nil
+	st.pcs = nil
+	_stackPool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *Stack) Count() int {
+	return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
+	return st.frames.Next()
+}
+
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+	stack := Capture(skip+1, Full)
+	defer stack.Free()
+
+	buffer := bufferpool.Get()
+	defer buffer.Free()
+
+	stackfmt := NewFormatter(buffer)
+	stackfmt.FormatStack(stack)
+	return buffer.String()
+}
+
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
+	b        *buffer.Buffer
+	nonEmpty bool // whether we've written at least one frame already
+}
+
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+	return Formatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *Formatter) FormatStack(stack *Stack) {
+	// Note: On the last iteration, frames.Next() returns false, with a valid
+	// frame, but we ignore this frame. The last frame is a runtime frame which
+	// adds noise, since it's only either runtime.main or runtime.goexit.
+	for frame, more := stack.Next(); more; frame, more = stack.Next() {
+		sf.FormatFrame(frame)
+	}
+}
+
+// FormatFrame formats the given frame.
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
+	if sf.nonEmpty {
+		sf.b.AppendByte('\n')
+	}
+	sf.nonEmpty = true
+	sf.b.AppendString(frame.Function)
+	sf.b.AppendByte('\n')
+	sf.b.AppendByte('\t')
+	sf.b.AppendString(frame.File)
+	sf.b.AppendByte(':')
+	sf.b.AppendInt(int64(frame.Line))
+}
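
Capture with stacktrace.First records a single program counter, which is what logger.check below uses to fill in the caller cheaply. A sketch, again noting that the package is internal to zap:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/internal/stacktrace"
)

func caller() string {
	// First records one program counter instead of the whole stack.
	// skip=0 identifies the caller of Capture, i.e. this function.
	stack := stacktrace.Capture(0, stacktrace.First)
	defer stack.Free() // hand the pooled storage back

	frame, _ := stack.Next()
	return fmt.Sprintf("%s:%d", frame.File, frame.Line)
}

func main() {
	fmt.Println(caller())
}
```
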
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
index 3567a9a..155b208 100644
--- a/vendor/go.uber.org/zap/level.go
+++ b/vendor/go.uber.org/zap/level.go
@@ -21,7 +21,9 @@
 package zap
 
 import (
-	"go.uber.org/atomic"
+	"sync/atomic"
+
+	"go.uber.org/zap/internal"
 	"go.uber.org/zap/zapcore"
 )
 
@@ -70,12 +72,14 @@
 	l *atomic.Int32
 }
 
+var _ internal.LeveledEnabler = AtomicLevel{}
+
 // NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
 // enabled.
 func NewAtomicLevel() AtomicLevel {
-	return AtomicLevel{
-		l: atomic.NewInt32(int32(InfoLevel)),
-	}
+	lvl := AtomicLevel{l: new(atomic.Int32)}
+	lvl.l.Store(int32(InfoLevel))
+	return lvl
 }
 
 // NewAtomicLevelAt is a convenience function that creates an AtomicLevel
@@ -86,6 +90,23 @@
 	return a
 }
 
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+	a := NewAtomicLevel()
+	l, err := zapcore.ParseLevel(text)
+	if err != nil {
+		return a, err
+	}
+
+	a.SetLevel(l)
+	return a, nil
+}
+
 // Enabled implements the zapcore.LevelEnabler interface, which allows the
 // AtomicLevel to be used in place of traditional static levels.
 func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
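
ParseAtomicLevel is a convenience for text-driven configuration. A sketch wiring it into a Config; the LOG_LEVEL variable and "warn" fallback are illustrative:

```go
package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
)

func main() {
	// LOG_LEVEL is a hypothetical environment variable.
	text := os.Getenv("LOG_LEVEL")
	if text == "" {
		text = "warn"
	}

	lvl, err := zap.ParseAtomicLevel(text)
	if err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.Level = lvl

	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	fmt.Println("minimum level:", logger.Level())
}
```
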
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
index f116bd9..c4d3003 100644
--- a/vendor/go.uber.org/zap/logger.go
+++ b/vendor/go.uber.org/zap/logger.go
@@ -22,11 +22,12 @@
 
 import (
 	"fmt"
-	"io/ioutil"
+	"io"
 	"os"
-	"runtime"
 	"strings"
 
+	"go.uber.org/zap/internal/bufferpool"
+	"go.uber.org/zap/internal/stacktrace"
 	"go.uber.org/zap/zapcore"
 )
 
@@ -42,7 +43,8 @@
 
 	development bool
 	addCaller   bool
-	onFatal     zapcore.CheckWriteAction // default is WriteThenFatal
+	onPanic     zapcore.CheckWriteHook // default is WriteThenPanic
+	onFatal     zapcore.CheckWriteHook // default is WriteThenFatal
 
 	name        string
 	errorOutput zapcore.WriteSyncer
@@ -85,7 +87,7 @@
 func NewNop() *Logger {
 	return &Logger{
 		core:        zapcore.NewNopCore(),
-		errorOutput: zapcore.AddSync(ioutil.Discard),
+		errorOutput: zapcore.AddSync(io.Discard),
 		addStack:    zapcore.FatalLevel + 1,
 		clock:       zapcore.DefaultClock,
 	}
@@ -107,6 +109,19 @@
 	return NewDevelopmentConfig().Build(options...)
 }
 
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+//	var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+	if err != nil {
+		panic(err)
+	}
+
+	return logger
+}
+
 // NewExample builds a Logger that's designed for use in zap's testable
 // examples. It writes DebugLevel and above logs to standard out as JSON, but
 // omits the timestamp and calling function to keep example output
@@ -160,7 +175,8 @@
 }
 
 // With creates a child logger and adds structured context to it. Fields added
-// to the child don't affect the parent, and vice versa.
+// to the child don't affect the parent, and vice versa. Any fields that
+// require evaluation (such as Objects) are evaluated upon invocation of With.
 func (log *Logger) With(fields ...Field) *Logger {
 	if len(fields) == 0 {
 		return log
@@ -170,6 +186,35 @@
 	return l
 }
 
+// WithLazy creates a child logger and adds structured context to it lazily.
+//
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// WithLazy provides a worthwhile performance optimization for contextual loggers
+// when the likelihood of using the child logger is low,
+// such as error paths and rarely taken branches.
+//
+// Similar to [With], fields added to the child don't affect the parent, and vice versa.
+func (log *Logger) WithLazy(fields ...Field) *Logger {
+	if len(fields) == 0 {
+		return log
+	}
+	return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
+		return zapcore.NewLazyWith(core, fields)
+	}))
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+	return zapcore.LevelOf(log.core)
+}
+
 // Check returns a CheckedEntry if logging a message at the specified level
 // is enabled. It's a completely optional optimization; in high-performance
 // applications, Check can help avoid allocating a slice to hold fields.
@@ -177,6 +222,16 @@
 	return log.check(lvl, msg)
 }
 
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+// Any Fields that require evaluation (such as Objects) are evaluated upon
+// invocation of Log.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+	if ce := log.check(lvl, msg); ce != nil {
+		ce.Write(fields...)
+	}
+}
+
 // Debug logs a message at DebugLevel. The message includes any fields passed
 // at the log site, as well as any fields accumulated on the logger.
 func (log *Logger) Debug(msg string, fields ...Field) {
@@ -253,14 +308,22 @@
 	return log.core
 }
 
+// Name returns the Logger's underlying name,
+// or an empty string if the logger is unnamed.
+func (log *Logger) Name() string {
+	return log.name
+}
+
 func (log *Logger) clone() *Logger {
-	copy := *log
-	return &copy
+	clone := *log
+	return &clone
 }
 
 func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
-	// check must always be called directly by a method in the Logger interface
-	// (e.g., Check, Info, Fatal).
+	// Logger.check must always be called directly by a method in the
+	// Logger interface (e.g., Check, Info, Fatal).
+	// This skips Logger.check and the Info/Fatal/Check/etc. method that
+	// called it.
 	const callerSkipOffset = 2
 
 	// Check the level first to reduce the cost of disabled log calls.
@@ -283,18 +346,12 @@
 	// Set up any required terminal behavior.
 	switch ent.Level {
 	case zapcore.PanicLevel:
-		ce = ce.Should(ent, zapcore.WriteThenPanic)
+		ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
 	case zapcore.FatalLevel:
-		onFatal := log.onFatal
-		// Noop is the default value for CheckWriteAction, and it leads to
-		// continued execution after a Fatal which is unexpected.
-		if onFatal == zapcore.WriteThenNoop {
-			onFatal = zapcore.WriteThenFatal
-		}
-		ce = ce.Should(ent, onFatal)
+		ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
 	case zapcore.DPanicLevel:
 		if log.development {
-			ce = ce.Should(ent, zapcore.WriteThenPanic)
+			ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
 		}
 	}
 
@@ -307,42 +364,72 @@
 
 	// Thread the error output through to the CheckedEntry.
 	ce.ErrorOutput = log.errorOutput
-	if log.addCaller {
-		frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
-		if !defined {
-			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
-			log.errorOutput.Sync()
-		}
 
-		ce.Entry.Caller = zapcore.EntryCaller{
-			Defined:  defined,
+	addStack := log.addStack.Enabled(ce.Level)
+	if !log.addCaller && !addStack {
+		return ce
+	}
+
+	// Adding the caller or stack trace requires capturing the callers of
+	// this function. We'll share information between these two.
+	stackDepth := stacktrace.First
+	if addStack {
+		stackDepth = stacktrace.Full
+	}
+	stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
+	defer stack.Free()
+
+	if stack.Count() == 0 {
+		if log.addCaller {
+			fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+			_ = log.errorOutput.Sync()
+		}
+		return ce
+	}
+
+	frame, more := stack.Next()
+
+	if log.addCaller {
+		ce.Caller = zapcore.EntryCaller{
+			Defined:  frame.PC != 0,
 			PC:       frame.PC,
 			File:     frame.File,
 			Line:     frame.Line,
 			Function: frame.Function,
 		}
 	}
-	if log.addStack.Enabled(ce.Entry.Level) {
-		ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+
+	if addStack {
+		buffer := bufferpool.Get()
+		defer buffer.Free()
+
+		stackfmt := stacktrace.NewFormatter(buffer)
+
+		// We've already extracted the first frame, so format that
+		// separately and defer to stackfmt for the rest.
+		stackfmt.FormatFrame(frame)
+		if more {
+			stackfmt.FormatStack(stack)
+		}
+		ce.Stack = buffer.String()
 	}
 
 	return ce
 }
 
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
-	const skipOffset = 2 // skip getCallerFrame and Callers
-
-	pc := make([]uintptr, 1)
-	numFrames := runtime.Callers(skip+skipOffset, pc)
-	if numFrames < 1 {
-		return
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+	// A nil or WriteThenNoop hook will lead to continued execution after
+	// a Panic or Fatal log entry, which is unexpected. For example,
+	//
+	//   f, err := os.Open(..)
+	//   if err != nil {
+	//     log.Fatal("cannot open", zap.Error(err))
+	//   }
+	//   fmt.Println(f.Name())
+	//
+	// The f.Name() will panic if we continue execution after the log.Fatal.
+	if override == nil || override == zapcore.WriteThenNoop {
+		return defaultHook
 	}
-
-	frame, _ = runtime.CallersFrames(pc).Next()
-	return frame, frame.PC != 0
+	return override
 }
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
index e9e6616..43d357a 100644
--- a/vendor/go.uber.org/zap/options.go
+++ b/vendor/go.uber.org/zap/options.go
@@ -132,10 +132,44 @@
 	})
 }
 
-// OnFatal sets the action to take on fatal logs.
-func OnFatal(action zapcore.CheckWriteAction) Option {
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+//	zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
 	return optionFunc(func(log *Logger) {
-		log.onFatal = action
+		log.onPanic = hook
+	})
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+	return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+//	zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+	return optionFunc(func(log *Logger) {
+		log.onFatal = hook
 	})
 }
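One way to exercise that guidance in tests, sketched under the assumption of the observer test core that ships with zap (the test name and message are invented): WriteThenGoexit stops only the calling goroutine, so a Fatal path becomes observable without exiting the test binary.

	package app_test

	import (
		"testing"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
		"go.uber.org/zap/zaptest/observer"
	)

	func TestFatalDoesNotContinue(t *testing.T) {
		core, logs := observer.New(zapcore.InfoLevel)
		logger := zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))

		done := make(chan struct{})
		go func() {
			defer close(done)
			logger.Fatal("boom")
			t.Error("unreachable: Goexit should stop this goroutine")
		}()
		<-done

		if logs.Len() != 1 {
			t.Fatalf("expected 1 entry, got %d", logs.Len())
		}
	}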
 
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
index df46fa8..499772a 100644
--- a/vendor/go.uber.org/zap/sink.go
+++ b/vendor/go.uber.org/zap/sink.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@
 	"io"
 	"net/url"
 	"os"
+	"path/filepath"
 	"strings"
 	"sync"
 
@@ -34,23 +35,7 @@
 
 const schemeFile = "file"
 
-var (
-	_sinkMutex     sync.RWMutex
-	_sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
-)
-
-func init() {
-	resetSinkRegistry()
-}
-
-func resetSinkRegistry() {
-	_sinkMutex.Lock()
-	defer _sinkMutex.Unlock()
-
-	_sinkFactories = map[string]func(*url.URL) (Sink, error){
-		schemeFile: newFileSink,
-	}
-}
+var _sinkRegistry = newSinkRegistry()
 
 // Sink defines the interface to write to and close logger destinations.
 type Sink interface {
@@ -58,10 +43,6 @@
 	io.Closer
 }
 
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
 type errSinkNotFound struct {
 	scheme string
 }
@@ -70,16 +51,30 @@
 	return fmt.Sprintf("no sink found for scheme %q", e.scheme)
 }
 
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
-	_sinkMutex.Lock()
-	defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+	mu        sync.Mutex
+	factories map[string]func(*url.URL) (Sink, error)          // keyed by scheme
+	openFile  func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+	sr := &sinkRegistry{
+		factories: make(map[string]func(*url.URL) (Sink, error)),
+		openFile:  os.OpenFile,
+	}
+	// Infallible operation: the registry is empty, so we can't have a conflict.
+	_ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+	return sr
+}
+
+// RegisterSink registers the given factory for the specified scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	sr.mu.Lock()
+	defer sr.mu.Unlock()
 
 	if scheme == "" {
 		return errors.New("can't register a sink factory for empty string")
@@ -88,14 +83,22 @@
 	if err != nil {
 		return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
 	}
-	if _, ok := _sinkFactories[normalized]; ok {
+	if _, ok := sr.factories[normalized]; ok {
 		return fmt.Errorf("sink factory already registered for scheme %q", normalized)
 	}
-	_sinkFactories[normalized] = factory
+	sr.factories[normalized] = factory
 	return nil
 }
 
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+	// URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as the scheme
+	// is set to the drive and the path is unset unless `c:/log.txt` is used.
+	// To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+	// filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+	if filepath.IsAbs(rawURL) {
+		return sr.newFileSinkFromPath(rawURL)
+	}
+
 	u, err := url.Parse(rawURL)
 	if err != nil {
 		return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +107,27 @@
 		u.Scheme = schemeFile
 	}
 
-	_sinkMutex.RLock()
-	factory, ok := _sinkFactories[u.Scheme]
-	_sinkMutex.RUnlock()
+	sr.mu.Lock()
+	factory, ok := sr.factories[u.Scheme]
+	sr.mu.Unlock()
 	if !ok {
 		return nil, &errSinkNotFound{u.Scheme}
 	}
 	return factory(u)
 }
 
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+	return _sinkRegistry.RegisterSink(scheme, factory)
+}
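A minimal sketch of the registration flow, assuming a hypothetical "memory" scheme; only RegisterSink, Sink, and Config come from zap itself:

	package main

	import (
		"bytes"
		"fmt"
		"net/url"

		"go.uber.org/zap"
	)

	// memorySink is a hypothetical Sink that collects output in memory.
	type memorySink struct{ bytes.Buffer }

	func (*memorySink) Sync() error  { return nil }
	func (*memorySink) Close() error { return nil }

	var sink = &memorySink{}

	func main() {
		// Register once per scheme; registering "memory" twice returns an error.
		if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
			return sink, nil
		}); err != nil {
			panic(err)
		}

		cfg := zap.NewProductionConfig()
		cfg.OutputPaths = []string{"memory://"}
		logger, err := cfg.Build()
		if err != nil {
			panic(err)
		}
		logger.Info("captured in memory")
		_ = logger.Sync()
		fmt.Print(sink.String())
	}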
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
 	if u.User != nil {
 		return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
 	}
@@ -130,13 +144,18 @@
 	if hn := u.Hostname(); hn != "" && hn != "localhost" {
 		return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
 	}
-	switch u.Path {
+
+	return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+	switch path {
 	case "stdout":
 		return nopCloserSink{os.Stdout}, nil
 	case "stderr":
 		return nopCloserSink{os.Stderr}, nil
 	}
-	return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+	return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
 }
 
 func normalizeScheme(s string) (string, error) {
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
deleted file mode 100644
index 0cf8c1d..0000000
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
-	"runtime"
-	"sync"
-
-	"go.uber.org/zap/internal/bufferpool"
-)
-
-var (
-	_stacktracePool = sync.Pool{
-		New: func() interface{} {
-			return newProgramCounters(64)
-		},
-	}
-)
-
-func takeStacktrace(skip int) string {
-	buffer := bufferpool.Get()
-	defer buffer.Free()
-	programCounters := _stacktracePool.Get().(*programCounters)
-	defer _stacktracePool.Put(programCounters)
-
-	var numFrames int
-	for {
-		// Skip the call to runtime.Callers and takeStacktrace so that the
-		// program counters start at the caller of takeStacktrace.
-		numFrames = runtime.Callers(skip+2, programCounters.pcs)
-		if numFrames < len(programCounters.pcs) {
-			break
-		}
-		// Don't put the too-short counter slice back into the pool; this lets
-		// the pool adjust if we consistently take deep stacktraces.
-		programCounters = newProgramCounters(len(programCounters.pcs) * 2)
-	}
-
-	i := 0
-	frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
-
-	// Note: On the last iteration, frames.Next() returns false, with a valid
-// frame, but we ignore this frame. The last frame is a runtime frame which
-	// adds noise, since it's only either runtime.main or runtime.goexit.
-	for frame, more := frames.Next(); more; frame, more = frames.Next() {
-		if i != 0 {
-			buffer.AppendByte('\n')
-		}
-		i++
-		buffer.AppendString(frame.Function)
-		buffer.AppendByte('\n')
-		buffer.AppendByte('\t')
-		buffer.AppendString(frame.File)
-		buffer.AppendByte(':')
-		buffer.AppendInt(int64(frame.Line))
-	}
-
-	return buffer.String()
-}
-
-type programCounters struct {
-	pcs []uintptr
-}
-
-func newProgramCounters(size int) *programCounters {
-	return &programCounters{make([]uintptr, size)}
-}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
index 0b96519..8904cd0 100644
--- a/vendor/go.uber.org/zap/sugar.go
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -31,6 +31,7 @@
 const (
 	_oddNumberErrMsg    = "Ignored key without a value."
 	_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+	_multipleErrMsg     = "Multiple errors without a key."
 )
 
 // A SugaredLogger wraps the base Logger functionality in a slower, but less
@@ -38,10 +39,19 @@
 // method.
 //
 // Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
-// For each log level, it exposes three methods: one for loosely-typed
-// structured logging, one for println-style formatting, and one for
-// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
-// output with Infow ("info with" structured context), Info, or Infof.
+// For each log level, it exposes four methods:
+//
+//   - methods named after the log level for log.Print-style logging
+//   - methods ending in "w" for loosely-typed structured logging
+//   - methods ending in "f" for log.Printf-style logging
+//   - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+//	Info(...any)           Print-style logging
+//	Infow(...any)          Structured logging (read as "info with")
+//	Infof(string, ...any)  Printf-style logging
+//	Infoln(...any)         Println-style logging
 type SugaredLogger struct {
 	base *Logger
 }
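To make the four method families concrete, a short hedged sketch; the destination and messages are illustrative only:

	package main

	import "go.uber.org/zap"

	func main() {
		sugar := zap.NewExample().Sugar()
		defer sugar.Sync()

		sugar.Info("user logged in: ", "alice")        // Print-style
		sugar.Infof("user %s logged in", "alice")      // Printf-style
		sugar.Infoln("user", "alice", "logged in")     // Println-style, always spaced
		sugar.Infow("user logged in", "user", "alice") // structured ("info with")
	}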
@@ -61,27 +71,40 @@
 	return &SugaredLogger{base: s.base.Named(name)}
 }
 
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+	base := s.base.clone()
+	for _, opt := range opts {
+		opt.apply(base)
+	}
+	return &SugaredLogger{base: base}
+}
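A brief hedged example of the new SugaredLogger.WithOptions: a wrapper function usually wants one extra caller skip so the reported file:line is the wrapper's caller. The logf helper is an invented name:

	package main

	import "go.uber.org/zap"

	func main() {
		logger, _ := zap.NewDevelopment() // annotates entries with the caller
		sugar := logger.Sugar()

		// Skip one frame so zap reports the line invoking logf,
		// not the wrapper itself.
		wrapped := sugar.WithOptions(zap.AddCallerSkip(1))
		logf := func(format string, args ...interface{}) {
			wrapped.Infof(format, args...)
		}

		logf("hello %s", "world")
	}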
+
 // With adds a variadic number of fields to the logging context. It accepts a
 // mix of strongly-typed Field objects and loosely-typed key-value pairs. When
 // processing pairs, the first element of the pair is used as the field key
 // and the second as the field value.
 //
 // For example,
-//   sugaredLogger.With(
-//     "hello", "world",
-//     "failure", errors.New("oh no"),
-//     Stack(),
-//     "count", 42,
-//     "user", User{Name: "alice"},
-//  )
+//
+//	 sugaredLogger.With(
+//	   "hello", "world",
+//	   "failure", errors.New("oh no"),
+//	   Stack(),
+//	   "count", 42,
+//	   "user", User{Name: "alice"},
+//	)
+//
 // is the equivalent of
-//   unsugared.With(
-//     String("hello", "world"),
-//     String("failure", "oh no"),
-//     Stack(),
-//     Int("count", 42),
-//     Object("user", User{Name: "alice"}),
-//   )
+//
+//	unsugared.With(
+//	  String("hello", "world"),
+//	  String("failure", "oh no"),
+//	  Stack(),
+//	  Int("count", 42),
+//	  Object("user", User{Name: "alice"}),
+//	)
 //
 // Note that the keys in key-value pairs should be strings. In development,
 // passing a non-string key panics. In production, the logger is more
@@ -92,83 +115,138 @@
 	return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
 }
 
-// Debug uses fmt.Sprint to construct and log a message.
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+	return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
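A hedged sketch of those lazy semantics; the request type is invented, and the key point is that the field is encoded when the entry is written, not when WithLazy runs:

	package main

	import "go.uber.org/zap"

	// request is a hypothetical payload used only for illustration.
	type request struct {
		ID   string
		User string
	}

	func main() {
		sugar := zap.NewExample().Sugar()
		req := &request{ID: "r-1", User: "alice"}

		// The field is captured by reference; it is encoded only once a log
		// entry is written, reflecting req's state at that moment.
		log := sugar.WithLazy("req", req)

		req.User = "bob"
		log.Infow("handled") // encodes User as "bob", not "alice"
	}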
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+	return zapcore.LevelOf(s.base.core)
+}
+
+// Log logs the provided arguments at the provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+	s.log(lvl, "", args, nil)
+}
+
+// Debug logs the provided arguments at [DebugLevel].
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) Debug(args ...interface{}) {
 	s.log(DebugLevel, "", args, nil)
 }
 
-// Info uses fmt.Sprint to construct and log a message.
+// Info logs the provided arguments at [InfoLevel].
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) Info(args ...interface{}) {
 	s.log(InfoLevel, "", args, nil)
 }
 
-// Warn uses fmt.Sprint to construct and log a message.
+// Warn logs the provided arguments at [WarnLevel].
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) Warn(args ...interface{}) {
 	s.log(WarnLevel, "", args, nil)
 }
 
-// Error uses fmt.Sprint to construct and log a message.
+// Error logs the provided arguments at [ErrorLevel].
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) Error(args ...interface{}) {
 	s.log(ErrorLevel, "", args, nil)
 }
 
-// DPanic uses fmt.Sprint to construct and log a message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanic logs the provided arguments at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) DPanic(args ...interface{}) {
 	s.log(DPanicLevel, "", args, nil)
 }
 
-// Panic uses fmt.Sprint to construct and log a message, then panics.
+// Panic constructs a message with the provided arguments and panics.
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) Panic(args ...interface{}) {
 	s.log(PanicLevel, "", args, nil)
 }
 
-// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+// Fatal constructs a message with the provided arguments and calls os.Exit.
+// Spaces are added between arguments when neither is a string.
 func (s *SugaredLogger) Fatal(args ...interface{}) {
 	s.log(FatalLevel, "", args, nil)
 }
 
-// Debugf uses fmt.Sprintf to log a templated message.
+// Logf formats the message according to the format specifier
+// and logs it at the provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+	s.log(lvl, template, args, nil)
+}
+
+// Debugf formats the message according to the format specifier
+// and logs it at [DebugLevel].
 func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
 	s.log(DebugLevel, template, args, nil)
 }
 
-// Infof uses fmt.Sprintf to log a templated message.
+// Infof formats the message according to the format specifier
+// and logs it at [InfoLevel].
 func (s *SugaredLogger) Infof(template string, args ...interface{}) {
 	s.log(InfoLevel, template, args, nil)
 }
 
-// Warnf uses fmt.Sprintf to log a templated message.
+// Warnf formats the message according to the format specifier
+// and logs it at [WarnLevel].
 func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
 	s.log(WarnLevel, template, args, nil)
 }
 
-// Errorf uses fmt.Sprintf to log a templated message.
+// Errorf formats the message according to the format specifier
+// and logs it at [ErrorLevel].
 func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
 	s.log(ErrorLevel, template, args, nil)
 }
 
-// DPanicf uses fmt.Sprintf to log a templated message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanicf formats the message according to the format specifier
+// and logs it at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
 func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
 	s.log(DPanicLevel, template, args, nil)
 }
 
-// Panicf uses fmt.Sprintf to log a templated message, then panics.
+// Panicf formats the message according to the format specifier
+// and panics.
 func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
 	s.log(PanicLevel, template, args, nil)
 }
 
-// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+// Fatalf formats the message according to the format specifier
+// and calls os.Exit.
 func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
 	s.log(FatalLevel, template, args, nil)
 }
 
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+	s.log(lvl, msg, nil, keysAndValues)
+}
+
 // Debugw logs a message with some additional context. The variadic key-value
 // pairs are treated as they are in With.
 //
 // When debug-level logging is disabled, this is much faster than
-//  s.With(keysAndValues).Debug(msg)
+//
+//	s.With(keysAndValues).Debug(msg)
 func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
 	s.log(DebugLevel, msg, nil, keysAndValues)
 }
@@ -210,11 +288,61 @@
 	s.log(FatalLevel, msg, nil, keysAndValues)
 }
 
+// Logln logs a message at the provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+	s.logln(lvl, args, nil)
+}
+
+// Debugln logs a message at [DebugLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+	s.logln(DebugLevel, args, nil)
+}
+
+// Infoln logs a message at [InfoLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+	s.logln(InfoLevel, args, nil)
+}
+
+// Warnln logs a message at [WarnLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+	s.logln(WarnLevel, args, nil)
+}
+
+// Errorln logs a message at [ErrorLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+	s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln logs a message at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+	s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+	s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+	s.logln(FatalLevel, args, nil)
+}
+
 // Sync flushes any buffered log entries.
 func (s *SugaredLogger) Sync() error {
 	return s.base.Sync()
 }
 
+// log message with Sprint, Sprintf, or neither.
 func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
 	// If logging at this level is completely disabled, skip the overhead of
 	// string formatting.
@@ -228,6 +356,18 @@
 	}
 }
 
+// logln message with Sprintln.
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+	if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+		return
+	}
+
+	msg := getMessageln(fmtArgs)
+	if ce := s.base.Check(lvl, msg); ce != nil {
+		ce.Write(s.sweetenFields(context)...)
+	}
+}
+
 // getMessage format with Sprint, Sprintf, or neither.
 func getMessage(template string, fmtArgs []interface{}) string {
 	if len(fmtArgs) == 0 {
@@ -246,15 +386,24 @@
 	return fmt.Sprint(fmtArgs...)
 }
 
+// getMessageln format with Sprintln.
+func getMessageln(fmtArgs []interface{}) string {
+	msg := fmt.Sprintln(fmtArgs...)
+	return msg[:len(msg)-1]
+}
+
 func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
 	if len(args) == 0 {
 		return nil
 	}
 
-	// Allocate enough space for the worst case; if users pass only structured
-	// fields, we shouldn't penalize them with extra allocations.
-	fields := make([]Field, 0, len(args))
-	var invalid invalidPairs
+	var (
+		// Allocate enough space for the worst case; if users pass only structured
+		// fields, we shouldn't penalize them with extra allocations.
+		fields    = make([]Field, 0, len(args))
+		invalid   invalidPairs
+		seenError bool
+	)
 
 	for i := 0; i < len(args); {
 		// This is a strongly-typed field. Consume it and move on.
@@ -264,6 +413,18 @@
 			continue
 		}
 
+		// If it is an error, consume it and move on.
+		if err, ok := args[i].(error); ok {
+			if !seenError {
+				seenError = true
+				fields = append(fields, Error(err))
+			} else {
+				s.base.Error(_multipleErrMsg, Error(err))
+			}
+			i++
+			continue
+		}
+
 		// Make sure this element isn't a dangling key.
 		if i == len(args)-1 {
 			s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
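The error-consumption branch above means a bare error passed to a ...w method becomes a standard zap.Error field under the "error" key; a second bare error is instead reported via _multipleErrMsg. A minimal sketch (the failure itself is simulated):

	package main

	import (
		"errors"

		"go.uber.org/zap"
	)

	func main() {
		sugar := zap.NewExample().Sugar()

		err := errors.New("connection reset") // stand-in failure
		// The bare err is sweetened into zap.Error(err), so no explicit key
		// is needed; the remaining args stay ordinary key-value pairs.
		sugar.Errorw("query failed", err, "table", "users")
	}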
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
index 86a709a..06768c6 100644
--- a/vendor/go.uber.org/zap/writer.go
+++ b/vendor/go.uber.org/zap/writer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,6 @@
 import (
 	"fmt"
 	"io"
-	"io/ioutil"
 
 	"go.uber.org/zap/zapcore"
 
@@ -49,40 +48,40 @@
 // os.Stdout and os.Stderr. When specified without a scheme, relative file
 // paths also work.
 func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
-	writers, close, err := open(paths)
+	writers, closeAll, err := open(paths)
 	if err != nil {
 		return nil, nil, err
 	}
 
 	writer := CombineWriteSyncers(writers...)
-	return writer, close, nil
+	return writer, closeAll, nil
 }
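A hedged usage sketch for Open and its cleanup function; the file path is an assumption. Note that on any failure Open now returns (nil, nil, err) and has already closed whatever it managed to open:

	package main

	import (
		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		// "stderr" is handled specially; "/tmp/app.log" is opened
		// (and created if needed) in append mode.
		ws, closeAll, err := zap.Open("stderr", "/tmp/app.log")
		if err != nil {
			panic(err)
		}
		defer closeAll()

		enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
		core := zapcore.NewCore(enc, ws, zapcore.InfoLevel)
		zap.New(core).Info("started")
	}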
 
 func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
 	writers := make([]zapcore.WriteSyncer, 0, len(paths))
 	closers := make([]io.Closer, 0, len(paths))
-	close := func() {
+	closeAll := func() {
 		for _, c := range closers {
-			c.Close()
+			_ = c.Close()
 		}
 	}
 
 	var openErr error
 	for _, path := range paths {
-		sink, err := newSink(path)
+		sink, err := _sinkRegistry.newSink(path)
 		if err != nil {
-			openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+			openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
 			continue
 		}
 		writers = append(writers, sink)
 		closers = append(closers, sink)
 	}
 	if openErr != nil {
-		close()
-		return writers, nil, openErr
+		closeAll()
+		return nil, nil, openErr
 	}
 
-	return writers, close, nil
+	return writers, closeAll, nil
 }
 
 // CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
@@ -93,7 +92,7 @@
 // using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
 func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
 	if len(writers) == 0 {
-		return zapcore.AddSync(ioutil.Discard)
+		return zapcore.AddSync(io.Discard)
 	}
 	return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
 }
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
index 0c1436f..a40e93b 100644
--- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -43,6 +43,37 @@
 //
 // BufferedWriteSyncer is safe for concurrent use. You don't need to use
 // zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+//	 func main() {
+//	   ws := ... // your log destination
+//	   bws := &zapcore.BufferedWriteSyncer{WS: ws}
+//	   defer bws.Stop()
+//
+//	   // ...
+//	   core := zapcore.NewCore(enc, bws, lvl)
+//	   logger := zap.New(core)
+//
+//	   // ...
+//	}
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+//	ws := &BufferedWriteSyncer{
+//	  WS:            os.Stderr,
+//	  Size:          512 * 1024, // 512 kB
+//	  FlushInterval: time.Minute,
+//	}
+//	defer ws.Stop()
 type BufferedWriteSyncer struct {
 	// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
 	// writes.
@@ -71,10 +102,10 @@
 	// unexported fields for state
 	mu          sync.Mutex
 	initialized bool // whether initialize() has run
+	stopped     bool // whether Stop() has run
 	writer      *bufio.Writer
 	ticker      *time.Ticker
 	stop        chan struct{} // closed when flushLoop should stop
-	stopped     bool          // whether Stop() has run
 	done        chan struct{} // closed when flushLoop has stopped
 }
 
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
index d2ea95b..422fd82 100644
--- a/vendor/go.uber.org/zap/zapcore/clock.go
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -20,9 +20,7 @@
 
 package zapcore
 
-import (
-	"time"
-)
+import "time"
 
 // DefaultClock is the default clock used by Zap in operations that require
 // time. This clock uses the system clock for all operations.
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af4..cc2b4e0 100644
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -22,20 +22,20 @@
 
 import (
 	"fmt"
-	"sync"
 
 	"go.uber.org/zap/buffer"
 	"go.uber.org/zap/internal/bufferpool"
+	"go.uber.org/zap/internal/pool"
 )
 
-var _sliceEncoderPool = sync.Pool{
-	New: func() interface{} {
-		return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
-	},
-}
+var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
+	return &sliceArrayEncoder{
+		elems: make([]interface{}, 0, 2),
+	}
+})
 
 func getSliceEncoder() *sliceArrayEncoder {
-	return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+	return _sliceEncoderPool.Get()
 }
 
 func putSliceEncoder(e *sliceArrayEncoder) {
@@ -77,7 +77,7 @@
 	// If this ever becomes a performance bottleneck, we can implement
 	// ArrayEncoder for our plain-text format.
 	arr := getSliceEncoder()
-	if c.TimeKey != "" && c.EncodeTime != nil {
+	if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
 		c.EncodeTime(ent.Time, arr)
 	}
 	if c.LevelKey != "" && c.EncodeLevel != nil {
@@ -125,11 +125,7 @@
 		line.AppendString(ent.Stack)
 	}
 
-	if c.LineEnding != "" {
-		line.AppendString(c.LineEnding)
-	} else {
-		line.AppendString(DefaultLineEnding)
-	}
+	line.AppendString(c.LineEnding)
 	return line, nil
 }
 
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
index a1ef8b0..776e93f 100644
--- a/vendor/go.uber.org/zap/zapcore/core.go
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -69,6 +69,15 @@
 	out WriteSyncer
 }
 
+var (
+	_ Core           = (*ioCore)(nil)
+	_ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+	return LevelOf(c.LevelEnabler)
+}
+
 func (c *ioCore) With(fields []Field) Core {
 	clone := c.clone()
 	addFields(clone.enc, fields)
@@ -93,9 +102,9 @@
 		return err
 	}
 	if ent.Level > ErrorLevel {
-		// Since we may be crashing the program, sync the output. Ignore Sync
-		// errors, pending a clean solution to issue #370.
-		c.Sync()
+		// Since we may be crashing the program, sync the output.
+		// Ignore Sync errors, pending a clean solution to issue #370.
+		_ = c.Sync()
 	}
 	return nil
 }
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca1..0446254 100644
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@
 
 import (
 	"encoding/json"
+	"io"
 	"time"
 
 	"go.uber.org/zap/buffer"
@@ -36,6 +37,9 @@
 const OmitKey = ""
 
 // A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
 type LevelEncoder func(Level, PrimitiveArrayEncoder)
 
 // LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -89,6 +93,9 @@
 }
 
 // A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
 type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
 
 // EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -187,10 +194,13 @@
 
 // UnmarshalYAML unmarshals YAML to a TimeEncoder.
 // If value is an object with a "layout" field, it will be unmarshaled to a TimeEncoder with the given layout.
-//     timeEncoder:
-//       layout: 06/01/02 03:04pm
+//
+//	timeEncoder:
+//	  layout: 06/01/02 03:04pm
+//
 // If value is string, it uses UnmarshalText.
-//     timeEncoder: iso8601
+//
+//	timeEncoder: iso8601
 func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
 	var o struct {
 		Layout string `json:"layout" yaml:"layout"`
@@ -215,6 +225,9 @@
 }
 
 // A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
 type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
 
 // SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -258,6 +271,9 @@
 }
 
 // A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
 type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
 
 // FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -288,6 +304,9 @@
 
 // A NameEncoder serializes a period-separated logger name to a primitive
 // type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
 type NameEncoder func(string, PrimitiveArrayEncoder)
 
 // FullNameEncoder serializes the logger name as-is.
@@ -312,14 +331,15 @@
 type EncoderConfig struct {
 	// Set the keys used for each log entry. If any key is empty, that portion
 	// of the entry is omitted.
-	MessageKey    string `json:"messageKey" yaml:"messageKey"`
-	LevelKey      string `json:"levelKey" yaml:"levelKey"`
-	TimeKey       string `json:"timeKey" yaml:"timeKey"`
-	NameKey       string `json:"nameKey" yaml:"nameKey"`
-	CallerKey     string `json:"callerKey" yaml:"callerKey"`
-	FunctionKey   string `json:"functionKey" yaml:"functionKey"`
-	StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
-	LineEnding    string `json:"lineEnding" yaml:"lineEnding"`
+	MessageKey     string `json:"messageKey" yaml:"messageKey"`
+	LevelKey       string `json:"levelKey" yaml:"levelKey"`
+	TimeKey        string `json:"timeKey" yaml:"timeKey"`
+	NameKey        string `json:"nameKey" yaml:"nameKey"`
+	CallerKey      string `json:"callerKey" yaml:"callerKey"`
+	FunctionKey    string `json:"functionKey" yaml:"functionKey"`
+	StacktraceKey  string `json:"stacktraceKey" yaml:"stacktraceKey"`
+	SkipLineEnding bool   `json:"skipLineEnding" yaml:"skipLineEnding"`
+	LineEnding     string `json:"lineEnding" yaml:"lineEnding"`
 	// Configure the primitive representations of common complex types. For
 	// example, some users may want all time.Times serialized as floating-point
 	// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +350,9 @@
 	// Unlike the other primitive type encoders, EncodeName is optional. The
 	// zero value falls back to FullNameEncoder.
 	EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+	// Configure the encoder for interface{} type objects.
+	// If not provided, objects are encoded using a json.Encoder.
+	NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
 	// Configures the field separator used by the console encoder. Defaults
 	// to tab.
 	ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
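A minimal sketch of supplying NewReflectedEncoder; this merely restates the documented default (a json.Encoder with HTML escaping disabled), so treat it as a template for swapping in any encoder with the same Encode(interface{}) error shape:

	package main

	import (
		"encoding/json"
		"io"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	func main() {
		cfg := zap.NewProductionEncoderConfig()
		// *json.Encoder satisfies zapcore.ReflectedEncoder, so the default
		// can be reproduced (or replaced) with a few lines.
		cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
			enc := json.NewEncoder(w)
			enc.SetEscapeHTML(false)
			return enc
		}
		enc := zapcore.NewJSONEncoder(cfg)
		_ = enc // wire into a zapcore.Core as usual
	}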
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index 2d815fe..459a5d7 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -24,26 +24,23 @@
 	"fmt"
 	"runtime"
 	"strings"
-	"sync"
 	"time"
 
+	"go.uber.org/multierr"
 	"go.uber.org/zap/internal/bufferpool"
 	"go.uber.org/zap/internal/exit"
-
-	"go.uber.org/multierr"
+	"go.uber.org/zap/internal/pool"
 )
 
-var (
-	_cePool = sync.Pool{New: func() interface{} {
-		// Pre-allocate some space for cores.
-		return &CheckedEntry{
-			cores: make([]Core, 4),
-		}
-	}}
-)
+var _cePool = pool.New(func() *CheckedEntry {
+	// Pre-allocate some space for cores.
+	return &CheckedEntry{
+		cores: make([]Core, 4),
+	}
+})
 
 func getCheckedEntry() *CheckedEntry {
-	ce := _cePool.Get().(*CheckedEntry)
+	ce := _cePool.Get()
 	ce.reset()
 	return ce
 }
@@ -152,6 +149,27 @@
 	Stack      string
 }
 
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+//	if ce := logger.Check(...); ce != nil {
+//	  ce = ce.After(hook)
+//	  ce.Write(...)
+//	}
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+	// OnWrite is invoked with the CheckedEntry that was written and a list
+	// of fields added with that entry.
+	//
+	// The list of fields DOES NOT include fields that were already added
+	// to the logger with the With method.
+	OnWrite(*CheckedEntry, []Field)
+}
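A hedged sketch of implementing CheckWriteHook directly rather than through a CheckWriteAction; the hook type, its exit code, and the wiring are assumptions, and per the guidance in options.go a Fatal hook must stop control flow:

	package main

	import (
		"os"

		"go.uber.org/zap"
		"go.uber.org/zap/zapcore"
	)

	// flushThenExit is a hypothetical hook: sync the output, then exit.
	type flushThenExit struct{ ws zapcore.WriteSyncer }

	func (h flushThenExit) OnWrite(*zapcore.CheckedEntry, []zapcore.Field) {
		_ = h.ws.Sync()
		os.Exit(1) // must not return control after a Fatal entry
	}

	func main() {
		ws := zapcore.Lock(os.Stderr)
		enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
		core := zapcore.NewCore(enc, ws, zapcore.InfoLevel)
		logger := zap.New(core, zap.WithFatalHook(flushThenExit{ws: ws}))
		logger.Fatal("unrecoverable")
	}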
+
 // CheckWriteAction indicates what action to take after a log entry is
 // processed. Actions are ordered in increasing severity.
 type CheckWriteAction uint8
@@ -164,21 +182,36 @@
 	WriteThenGoexit
 	// WriteThenPanic causes a panic after Write.
 	WriteThenPanic
-	// WriteThenFatal causes a fatal os.Exit after Write.
+	// WriteThenFatal causes an os.Exit(1) after Write.
 	WriteThenFatal
 )
 
+// OnWrite implements the OnWrite method to keep CheckWriteAction compatible
+// with the new CheckWriteHook interface, which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+	switch a {
+	case WriteThenGoexit:
+		runtime.Goexit()
+	case WriteThenPanic:
+		panic(ce.Message)
+	case WriteThenFatal:
+		exit.With(1)
+	}
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
+
 // CheckedEntry is an Entry together with a collection of Cores that have
 // already agreed to log it.
 //
-// CheckedEntry references should be created by calling AddCore or Should on a
+// CheckedEntry references should be created by calling AddCore or After on a
 // nil *CheckedEntry. References are returned to a pool after Write, and MUST
 // NOT be retained after calling their Write method.
 type CheckedEntry struct {
 	Entry
 	ErrorOutput WriteSyncer
 	dirty       bool // best-effort detection of pool misuse
-	should      CheckWriteAction
+	after       CheckWriteHook
 	cores       []Core
 }
 
@@ -186,7 +219,7 @@
 	ce.Entry = Entry{}
 	ce.ErrorOutput = nil
 	ce.dirty = false
-	ce.should = WriteThenNoop
+	ce.after = nil
 	for i := range ce.cores {
 		// don't keep references to cores
 		ce.cores[i] = nil
@@ -209,7 +242,7 @@
 			// CheckedEntry is being used after it was returned to the pool,
 			// the message may be an amalgamation from multiple call sites.
 			fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
-			ce.ErrorOutput.Sync()
+			_ = ce.ErrorOutput.Sync() // ignore error
 		}
 		return
 	}
@@ -219,24 +252,16 @@
 	for i := range ce.cores {
 		err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
 	}
-	if ce.ErrorOutput != nil {
-		if err != nil {
-			fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
-			ce.ErrorOutput.Sync()
-		}
+	if err != nil && ce.ErrorOutput != nil {
+		fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+		_ = ce.ErrorOutput.Sync() // ignore error
 	}
 
-	should, msg := ce.should, ce.Message
+	hook := ce.after
+	if hook != nil {
+		hook.OnWrite(ce, fields)
+	}
 	putCheckedEntry(ce)
-
-	switch should {
-	case WriteThenPanic:
-		panic(msg)
-	case WriteThenFatal:
-		exit.Exit()
-	case WriteThenGoexit:
-		runtime.Goexit()
-	}
 }
 
 // AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@@ -254,11 +279,20 @@
 // Should sets this CheckedEntry's CheckWriteAction, which controls whether a
 // Core will panic or fatal after writing this log entry. Like AddCore, it's
 // safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
 func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+	return ce.After(ent, should)
+}
+
+// After sets this CheckedEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
 	if ce == nil {
 		ce = getCheckedEntry()
 		ce.Entry = ent
 	}
-	ce.should = should
+	ce.after = hook
 	return ce
 }
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
index f2a07d7..c40df13 100644
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -23,7 +23,8 @@
 import (
 	"fmt"
 	"reflect"
-	"sync"
+
+	"go.uber.org/zap/internal/pool"
 )
 
 // Encodes the given error into fields of an object. A field with the given
@@ -36,13 +37,13 @@
 // causer (from github.com/pkg/errors), a ${key}Causes field is added with an
 // array of objects containing the errors this error was comprised of.
 //
-//  {
-//    "error": err.Error(),
-//    "errorVerbose": fmt.Sprintf("%+v", err),
-//    "errorCauses": [
-//      ...
-//    ],
-//  }
+//	{
+//	  "error": err.Error(),
+//	  "errorVerbose": fmt.Sprintf("%+v", err),
+//	  "errorCauses": [
+//	    ...
+//	  ],
+//	}
 func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
 	// Try to capture panics (from nil references or otherwise) when calling
 	// the Error() method
@@ -83,7 +84,7 @@
 	Errors() []error
 }
 
-// Note that errArry and errArrayElem are very similar to the version
+// Note that errArray and errArrayElem are very similar to the version
 // implemented in the top-level error.go file. We can't re-use this because
 // that would require exporting errArray as part of the zapcore API.
 
@@ -97,15 +98,18 @@
 		}
 
 		el := newErrArrayElem(errs[i])
-		arr.AppendObject(el)
+		err := arr.AppendObject(el)
 		el.Free()
+		if err != nil {
+			return err
+		}
 	}
 	return nil
 }
 
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
 	return &errArrayElem{}
-}}
+})
 
 // Encodes any error into a {"error": ...} re-using the same errors logic.
 //
@@ -113,7 +117,7 @@
 type errArrayElem struct{ err error }
 
 func newErrArrayElem(err error) *errArrayElem {
-	e := _errArrayElemPool.Get().(*errArrayElem)
+	e := _errArrayElemPool.Get()
 	e.err = err
 	return e
 }
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
index 95bdb0a..308c978 100644
--- a/vendor/go.uber.org/zap/zapcore/field.go
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -47,7 +47,7 @@
 	ByteStringType
 	// Complex128Type indicates that the field carries a complex128.
 	Complex128Type
-	// Complex64Type indicates that the field carries a complex128.
+	// Complex64Type indicates that the field carries a complex64.
 	Complex64Type
 	// DurationType indicates that the field carries a time.Duration.
 	DurationType
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
index 5db4afb..198def9 100644
--- a/vendor/go.uber.org/zap/zapcore/hook.go
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -27,6 +27,11 @@
 	funcs []func(Entry) error
 }
 
+var (
+	_ Core           = (*hooked)(nil)
+	_ leveledEnabler = (*hooked)(nil)
+)
+
 // RegisterHooks wraps a Core and runs a collection of user-defined callback
 // hooks each time a message is logged. Execution of the callbacks is blocking.
 //
@@ -40,6 +45,10 @@
 	}
 }
 
+func (h *hooked) Level() Level {
+	return LevelOf(h.Core)
+}
+
 func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
 	// Let the wrapped Core decide whether to log this message or not. This
 	// also gives the downstream a chance to register itself directly with the
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
index 5a17492..7a11237 100644
--- a/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -27,6 +27,11 @@
 	level LevelEnabler
 }
 
+var (
+	_ Core           = (*levelFilterCore)(nil)
+	_ leveledEnabler = (*levelFilterCore)(nil)
+)
+
 // NewIncreaseLevelCore creates a core that can be used to increase the level of
 // an existing Core. It cannot be used to decrease the logging level, as it acts
 // as a filter before calling the underlying core. If level decreases the log level,
@@ -45,6 +50,10 @@
 	return c.level.Enabled(lvl)
 }
 
+func (c *levelFilterCore) Level() Level {
+	return LevelOf(c.level)
+}
+
 func (c *levelFilterCore) With(fields []Field) Core {
 	return &levelFilterCore{c.core.With(fields), c.level}
 }
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d91..9685169 100644
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,26 +22,21 @@
 
 import (
 	"encoding/base64"
-	"encoding/json"
 	"math"
-	"sync"
 	"time"
 	"unicode/utf8"
 
 	"go.uber.org/zap/buffer"
 	"go.uber.org/zap/internal/bufferpool"
+	"go.uber.org/zap/internal/pool"
 )
 
 // For JSON-escaping; see jsonEncoder.safeAddString below.
 const _hex = "0123456789abcdef"
 
-var _jsonPool = sync.Pool{New: func() interface{} {
+var _jsonPool = pool.New(func() *jsonEncoder {
 	return &jsonEncoder{}
-}}
-
-func getJSONEncoder() *jsonEncoder {
-	return _jsonPool.Get().(*jsonEncoder)
-}
+})
 
 func putJSONEncoder(enc *jsonEncoder) {
 	if enc.reflectBuf != nil {
@@ -64,7 +59,7 @@
 
 	// for encoding generic values by reflection
 	reflectBuf *buffer.Buffer
-	reflectEnc *json.Encoder
+	reflectEnc ReflectedEncoder
 }
 
 // NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -72,7 +67,9 @@
 //
 // Note that the encoder doesn't deduplicate keys, so it's possible to produce
 // a message like
-//   {"foo":"bar","foo":"baz"}
+//
+//	{"foo":"bar","foo":"baz"}
+//
 // This is permitted by the JSON specification, but not encouraged. Many
 // libraries will ignore duplicate key-value pairs (typically keeping the last
 // pair) when unmarshaling, but users should attempt to avoid adding duplicate
@@ -82,6 +79,17 @@
 }
 
 func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+	if cfg.SkipLineEnding {
+		cfg.LineEnding = ""
+	} else if cfg.LineEnding == "" {
+		cfg.LineEnding = DefaultLineEnding
+	}
+
+	// If no EncoderConfig.NewReflectedEncoder is provided by the user, then use the default.
+	if cfg.NewReflectedEncoder == nil {
+		cfg.NewReflectedEncoder = defaultReflectedEncoder
+	}
+
 	return &jsonEncoder{
 		EncoderConfig: &cfg,
 		buf:           bufferpool.Get(),
@@ -118,6 +126,11 @@
 	enc.AppendComplex128(val)
 }
 
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+	enc.addKey(key)
+	enc.AppendComplex64(val)
+}
+
 func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
 	enc.addKey(key)
 	enc.AppendDuration(val)
@@ -128,6 +141,11 @@
 	enc.AppendFloat64(val)
 }
 
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+	enc.addKey(key)
+	enc.AppendFloat32(val)
+}
+
 func (enc *jsonEncoder) AddInt64(key string, val int64) {
 	enc.addKey(key)
 	enc.AppendInt64(val)
@@ -136,10 +154,7 @@
 func (enc *jsonEncoder) resetReflectBuf() {
 	if enc.reflectBuf == nil {
 		enc.reflectBuf = bufferpool.Get()
-		enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
-		// For consistency with our custom JSON encoder.
-		enc.reflectEnc.SetEscapeHTML(false)
+		enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
 	} else {
 		enc.reflectBuf.Reset()
 	}
@@ -201,10 +216,16 @@
 }
 
 func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+	// Close ONLY new openNamespaces that are created during
+	// AppendObject().
+	old := enc.openNamespaces
+	enc.openNamespaces = 0
 	enc.addElementSeparator()
 	enc.buf.AppendByte('{')
 	err := obj.MarshalLogObject(enc)
 	enc.buf.AppendByte('}')
+	enc.closeOpenNamespaces()
+	enc.openNamespaces = old
 	return err
 }
 
@@ -220,16 +241,23 @@
 	enc.buf.AppendByte('"')
 }
 
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
 	enc.addElementSeparator()
 	// Cast to a platform-independent, fixed-size type.
 	r, i := float64(real(val)), float64(imag(val))
 	enc.buf.AppendByte('"')
 	// Because we're always in a quoted string, we can use strconv without
 	// special-casing NaN and +/-Inf.
-	enc.buf.AppendFloat(r, 64)
-	enc.buf.AppendByte('+')
-	enc.buf.AppendFloat(i, 64)
+	enc.buf.AppendFloat(r, precision)
+	// If the imaginary part is less than 0, a minus (-) sign is added by default
+	// by AppendFloat.
+	if i >= 0 {
+		enc.buf.AppendByte('+')
+	}
+	enc.buf.AppendFloat(i, precision)
 	enc.buf.AppendByte('i')
 	enc.buf.AppendByte('"')
 }
@@ -292,29 +320,28 @@
 	enc.buf.AppendUint(val)
 }
 
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32)     { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int)             { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32)         { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16)         { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8)           { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint)           { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32)       { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16)       { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8)         { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr)     { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64)        { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64)            { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32)            { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int)                    { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32)                { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16)                { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8)                  { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint)                  { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32)              { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16)              { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8)                { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr)            { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int)         { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32)     { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16)     { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8)       { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint)       { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32)   { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16)   { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8)     { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64)    { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128)  { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64)        { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32)        { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int)                { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32)            { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16)            { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8)              { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint)              { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32)          { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16)          { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8)            { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr)        { enc.AppendUint64(uint64(v)) }
 
 func (enc *jsonEncoder) Clone() Encoder {
 	clone := enc.clone()
@@ -323,7 +350,7 @@
 }
 
 func (enc *jsonEncoder) clone() *jsonEncoder {
-	clone := getJSONEncoder()
+	clone := _jsonPool.Get()
 	clone.EncoderConfig = enc.EncoderConfig
 	clone.spaced = enc.spaced
 	clone.openNamespaces = enc.openNamespaces
@@ -335,7 +362,7 @@
 	final := enc.clone()
 	final.buf.AppendByte('{')
 
-	if final.LevelKey != "" {
+	if final.LevelKey != "" && final.EncodeLevel != nil {
 		final.addKey(final.LevelKey)
 		cur := final.buf.Len()
 		final.EncodeLevel(ent.Level, final)
@@ -345,7 +372,7 @@
 			final.AppendString(ent.Level.String())
 		}
 	}
-	if final.TimeKey != "" {
+	if final.TimeKey != "" && !ent.Time.IsZero() {
 		final.AddTime(final.TimeKey, ent.Time)
 	}
 	if ent.LoggerName != "" && final.NameKey != "" {
@@ -396,11 +423,7 @@
 		final.AddString(final.StacktraceKey, ent.Stack)
 	}
 	final.buf.AppendByte('}')
-	if final.LineEnding != "" {
-		final.buf.AppendString(final.LineEnding)
-	} else {
-		final.buf.AppendString(DefaultLineEnding)
-	}
+	final.buf.AppendString(final.LineEnding)
 
 	ret := final.buf
 	putJSONEncoder(final)
@@ -415,6 +438,7 @@
 	for i := 0; i < enc.openNamespaces; i++ {
 		enc.buf.AppendByte('}')
 	}
+	enc.openNamespaces = 0
 }
 
 func (enc *jsonEncoder) addKey(key string) {
@@ -462,73 +486,98 @@
 // Unlike the standard library's encoder, it doesn't attempt to protect the
 // user from browser vulnerabilities or JSONP-related problems.
 func (enc *jsonEncoder) safeAddString(s string) {
-	for i := 0; i < len(s); {
-		if enc.tryAddRuneSelf(s[i]) {
-			i++
-			continue
-		}
-		r, size := utf8.DecodeRuneInString(s[i:])
-		if enc.tryAddRuneError(r, size) {
-			i++
-			continue
-		}
-		enc.buf.AppendString(s[i : i+size])
-		i += size
-	}
+	safeAppendStringLike(
+		(*buffer.Buffer).AppendString,
+		utf8.DecodeRuneInString,
+		enc.buf,
+		s,
+	)
 }
 
 // safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
 func (enc *jsonEncoder) safeAddByteString(s []byte) {
+	safeAppendStringLike(
+		(*buffer.Buffer).AppendBytes,
+		utf8.DecodeRune,
+		enc.buf,
+		s,
+	)
+}
+
+// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
+// It appends a string or byte slice to the buffer, escaping all special characters.
+func safeAppendStringLike[S []byte | string](
+	// appendTo appends this string-like object to the buffer.
+	appendTo func(*buffer.Buffer, S),
+	// decodeRune decodes the next rune from the string-like object
+	// and returns its value and width in bytes.
+	decodeRune func(S) (rune, int),
+	buf *buffer.Buffer,
+	s S,
+) {
+	// The encoding logic below works by skipping over characters
+	// that can be safely copied as-is,
+	// until a character is found that needs special handling.
+	// At that point, we copy everything we've seen so far,
+	// and then handle that special character.
+	//
+	// last is the index of the last byte that was copied to the buffer.
+	last := 0
 	for i := 0; i < len(s); {
-		if enc.tryAddRuneSelf(s[i]) {
-			i++
-			continue
-		}
-		r, size := utf8.DecodeRune(s[i:])
-		if enc.tryAddRuneError(r, size) {
-			i++
-			continue
-		}
-		enc.buf.Write(s[i : i+size])
-		i += size
-	}
-}
+		if s[i] >= utf8.RuneSelf {
+			// Character >= RuneSelf may be part of a multi-byte rune.
+			// They need to be decoded before we can decide how to handle them.
+			r, size := decodeRune(s[i:])
+			if r != utf8.RuneError || size != 1 {
+				// No special handling required.
+				// Skip over this rune and continue.
+				i += size
+				continue
+			}
 
-// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
-	if b >= utf8.RuneSelf {
-		return false
-	}
-	if 0x20 <= b && b != '\\' && b != '"' {
-		enc.buf.AppendByte(b)
-		return true
-	}
-	switch b {
-	case '\\', '"':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte(b)
-	case '\n':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte('n')
-	case '\r':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte('r')
-	case '\t':
-		enc.buf.AppendByte('\\')
-		enc.buf.AppendByte('t')
-	default:
-		// Encode bytes < 0x20, except for the escape sequences above.
-		enc.buf.AppendString(`\u00`)
-		enc.buf.AppendByte(_hex[b>>4])
-		enc.buf.AppendByte(_hex[b&0xF])
-	}
-	return true
-}
+			// Invalid UTF-8 sequence.
+			// Replace it with the Unicode replacement character.
+			appendTo(buf, s[last:i])
+			buf.AppendString(`\ufffd`)
 
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
-	if r == utf8.RuneError && size == 1 {
-		enc.buf.AppendString(`\ufffd`)
-		return true
+			i++
+			last = i
+		} else {
+			// Character < RuneSelf is a single-byte UTF-8 rune.
+			if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
+				// No escaping necessary.
+				// Skip over this character and continue.
+				i++
+				continue
+			}
+
+			// This character needs to be escaped.
+			appendTo(buf, s[last:i])
+			switch s[i] {
+			case '\\', '"':
+				buf.AppendByte('\\')
+				buf.AppendByte(s[i])
+			case '\n':
+				buf.AppendByte('\\')
+				buf.AppendByte('n')
+			case '\r':
+				buf.AppendByte('\\')
+				buf.AppendByte('r')
+			case '\t':
+				buf.AppendByte('\\')
+				buf.AppendByte('t')
+			default:
+				// Encode bytes < 0x20, except for the escape sequences above.
+				buf.AppendString(`\u00`)
+				buf.AppendByte(_hex[s[i]>>4])
+				buf.AppendByte(_hex[s[i]&0xF])
+			}
+
+			i++
+			last = i
+		}
 	}
-	return false
+
+	// Append whatever remains after the last special character.
+	appendTo(buf, s[last:])
 }
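+
+// Escaping sketch (illustrative only, not part of the upstream change;
+// enc is assumed to be a *jsonEncoder with an initialized buffer):
+//
+//	enc.safeAddString("say \"hi\"\n")        // buffer gains: say \"hi\"\n
+//	enc.safeAddString(string([]byte{0xff}))  // invalid UTF-8 -> \ufffd
+//	enc.safeAddString("\x01")                // control byte  -> \u0001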
diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go
new file mode 100644
index 0000000..05288d6
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+	Core
+	sync.Once
+	fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+	return &lazyWithCore{
+		Core:   core,
+		fields: fields,
+	}
+}
+
+func (d *lazyWithCore) initOnce() {
+	d.Once.Do(func() {
+		d.Core = d.Core.With(d.fields)
+	})
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+	d.initOnce()
+	return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+	d.initOnce()
+	return d.Core.Check(e, ce)
+}
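+
+// Usage sketch (illustrative, not part of the upstream file; base and
+// fields are assumed to be a pre-built Core and []Field respectively):
+//
+//	lazy := zapcore.NewLazyWith(base, fields)
+//	// fields are encoded only once With or Check is first called on lazy.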
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f..e01a241 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,8 +53,62 @@
 
 	_minLevel = DebugLevel
 	_maxLevel = FatalLevel
+
+	// InvalidLevel is an invalid value for Level.
+	//
+	// Core implementations may panic if they see messages of this level.
+	InvalidLevel = _maxLevel + 1
 )
 
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+	var level Level
+	err := level.UnmarshalText([]byte(text))
+	return level, err
+}
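+
+// Usage sketch (illustrative only):
+//
+//	lvl, err := zapcore.ParseLevel("warn") // lvl == WarnLevel, err == nil
+//	_, err = zapcore.ParseLevel("nope")    // unknown text yields an error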
+
+type leveledEnabler interface {
+	LevelEnabler
+
+	Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+//	func (c *core) Level() Level {
+//		return c.currentLevel
+//	}
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+//	func (c *coreWrapper) Level() Level {
+//		return zapcore.LevelOf(c.wrappedCore)
+//	}
+func LevelOf(enab LevelEnabler) Level {
+	if lvler, ok := enab.(leveledEnabler); ok {
+		return lvler.Level()
+	}
+
+	for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+		if enab.Enabled(lvl) {
+			return lvl
+		}
+	}
+
+	return InvalidLevel
+}
+
 // String returns a lower-case ASCII representation of the log level.
 func (l Level) String() string {
 	switch l {
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 0000000..8746360
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+	// Encode encodes and writes to the underlying data stream.
+	Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+	enc := json.NewEncoder(w)
+	// For consistency with our custom JSON encoder.
+	enc.SetEscapeHTML(false)
+	return enc
+}
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index 25f10ca..b7c093a 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -21,9 +21,8 @@
 package zapcore
 
 import (
+	"sync/atomic"
 	"time"
-
-	"go.uber.org/atomic"
 )
 
 const (
@@ -66,16 +65,16 @@
 	tn := t.UnixNano()
 	resetAfter := c.resetAt.Load()
 	if resetAfter > tn {
-		return c.counter.Inc()
+		return c.counter.Add(1)
 	}
 
 	c.counter.Store(1)
 
 	newResetAfter := tn + tick.Nanoseconds()
-	if !c.resetAt.CAS(resetAfter, newResetAfter) {
+	if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
 		// We raced with another goroutine trying to reset, and it also reset
 		// the counter to 1, so we need to reincrement the counter.
-		return c.counter.Inc()
+		return c.counter.Add(1)
 	}
 
 	return 1
@@ -113,12 +112,12 @@
 // This hook may be used to get visibility into the performance of the sampler.
 // For example, use it to track metrics of dropped versus sampled logs.
 //
-//  var dropped atomic.Int64
-//  zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
-//    if dec&zapcore.LogDropped > 0 {
-//      dropped.Inc()
-//    }
-//  })
+//	var dropped atomic.Int64
+//	zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+//	  if dec&zapcore.LogDropped > 0 {
+//	    dropped.Inc()
+//	  }
+//	})
 func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
 	return optionFunc(func(s *sampler) {
 		s.hook = hook
@@ -133,10 +132,21 @@
 // each tick. If more Entries with the same level and message are seen during
 // the same interval, every Mth message is logged and the rest are dropped.
 //
+// For example,
+//
+//	core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
 // Sampler can be configured to report sampling decisions with the SamplerHook
 // option.
 //
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
 // absolute precision; under load, each tick may be slightly over- or
 // under-sampled.
 func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -164,6 +174,11 @@
 	hook              func(Entry, SamplingDecision)
 }
 
+var (
+	_ Core           = (*sampler)(nil)
+	_ leveledEnabler = (*sampler)(nil)
+)
+
 // NewSampler creates a Core that samples incoming entries, which
 // caps the CPU and I/O load of logging while attempting to preserve a
 // representative subset of your logs.
@@ -181,6 +196,10 @@
 	return NewSamplerWithOptions(core, tick, first, thereafter)
 }
 
+func (s *sampler) Level() Level {
+	return LevelOf(s.Core)
+}
+
 func (s *sampler) With(fields []Field) Core {
 	return &sampler{
 		Core:       s.Core.With(fields),
@@ -197,12 +216,14 @@
 		return ce
 	}
 
-	counter := s.counts.get(ent.Level, ent.Message)
-	n := counter.IncCheckReset(ent.Time, s.tick)
-	if n > s.first && (n-s.first)%s.thereafter != 0 {
-		s.hook(ent, LogDropped)
-		return ce
+	if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+		counter := s.counts.get(ent.Level, ent.Message)
+		n := counter.IncCheckReset(ent.Time, s.tick)
+		if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+			s.hook(ent, LogDropped)
+			return ce
+		}
+		s.hook(ent, LogSampled)
 	}
-	s.hook(ent, LogSampled)
 	return s.Core.Check(ent, ce)
 }
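+
+// Sketch (illustrative; core is an assumed zapcore.Core) of the
+// thereafter == 0 behavior handled above:
+//
+//	sampled := zapcore.NewSamplerWithOptions(core, time.Second, 10, 0)
+//	// Entries 11+ with the same (level, message) in one second are all
+//	// dropped rather than letting every Mth entry through.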
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
index 07a32ee..9bb32f0 100644
--- a/vendor/go.uber.org/zap/zapcore/tee.go
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
 //
 // Permission is hereby granted, free of charge, to any person obtaining a copy
 // of this software and associated documentation files (the "Software"), to deal
@@ -24,6 +24,11 @@
 
 type multiCore []Core
 
+var (
+	_ leveledEnabler = multiCore(nil)
+	_ Core           = multiCore(nil)
+)
+
 // NewTee creates a Core that duplicates log entries into two or more
 // underlying Cores.
 //
@@ -48,6 +53,16 @@
 	return clone
 }
 
+func (mc multiCore) Level() Level {
+	minLvl := _maxLevel // mc is never empty
+	for i := range mc {
+		if lvl := LevelOf(mc[i]); lvl < minLvl {
+			minLvl = lvl
+		}
+	}
+	return minLvl
+}
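+
+// Sketch (illustrative; debugCore and errorCore are assumed Cores enabled
+// at those levels):
+//
+//	tee := zapcore.NewTee(debugCore, errorCore)
+//	_ = zapcore.LevelOf(tee) // DebugLevel: the most verbose wrapped level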
+
 func (mc multiCore) Enabled(lvl Level) bool {
 	for i := range mc {
 		if mc[i].Enabled(lvl) {
diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
new file mode 100644
index 0000000..682de25
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapgrpc provides a logger that is compatible with grpclog.
+package zapgrpc // import "go.uber.org/zap/zapgrpc"
+
+import (
+	"fmt"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86
+const (
+	grpcLvlInfo int = iota
+	grpcLvlWarn
+	grpcLvlError
+	grpcLvlFatal
+)
+
+// _grpcToZapLevel maps gRPC log levels to zap log levels.
+// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
+var _grpcToZapLevel = map[int]zapcore.Level{
+	grpcLvlInfo:  zapcore.InfoLevel,
+	grpcLvlWarn:  zapcore.WarnLevel,
+	grpcLvlError: zapcore.ErrorLevel,
+	grpcLvlFatal: zapcore.FatalLevel,
+}
+
+// An Option overrides a Logger's default configuration.
+type Option interface {
+	apply(*Logger)
+}
+
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+	f(log)
+}
+
+// WithDebug configures a Logger to print at zap's DebugLevel instead of
+// InfoLevel.
+// It affects only the Print, Printf, and Println methods, which are used solely by the gRPC v1 grpclog.Logger API.
+//
+// Deprecated: use grpclog.SetLoggerV2() for v2 API.
+func WithDebug() Option {
+	return optionFunc(func(logger *Logger) {
+		logger.print = &printer{
+			enab:   logger.levelEnabler,
+			level:  zapcore.DebugLevel,
+			print:  logger.delegate.Debug,
+			printf: logger.delegate.Debugf,
+		}
+	})
+}
+
+// withWarn redirects the fatal level to the warn level, which makes testing
+// easier. This is intentionally unexported.
+func withWarn() Option {
+	return optionFunc(func(logger *Logger) {
+		logger.fatal = &printer{
+			enab:   logger.levelEnabler,
+			level:  zapcore.WarnLevel,
+			print:  logger.delegate.Warn,
+			printf: logger.delegate.Warnf,
+		}
+	})
+}
+
+// NewLogger returns a new Logger.
+func NewLogger(l *zap.Logger, options ...Option) *Logger {
+	logger := &Logger{
+		delegate:     l.Sugar(),
+		levelEnabler: l.Core(),
+	}
+	logger.print = &printer{
+		enab:   logger.levelEnabler,
+		level:  zapcore.InfoLevel,
+		print:  logger.delegate.Info,
+		printf: logger.delegate.Infof,
+	}
+	logger.fatal = &printer{
+		enab:   logger.levelEnabler,
+		level:  zapcore.FatalLevel,
+		print:  logger.delegate.Fatal,
+		printf: logger.delegate.Fatalf,
+	}
+	for _, option := range options {
+		option.apply(logger)
+	}
+	return logger
+}
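+
+// Usage sketch (illustrative; zl is an assumed *zap.Logger and grpclog is
+// google.golang.org/grpc/grpclog):
+//
+//	grpclog.SetLoggerV2(zapgrpc.NewLogger(zl))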
+
+// printer implements Print, Printf, and Println operations for a Zap level.
+//
+// We use it to customize Debug vs Info, and Warn vs Fatal for Print and Fatal
+// respectively.
+type printer struct {
+	enab   zapcore.LevelEnabler
+	level  zapcore.Level
+	print  func(...interface{})
+	printf func(string, ...interface{})
+}
+
+func (v *printer) Print(args ...interface{}) {
+	v.print(args...)
+}
+
+func (v *printer) Printf(format string, args ...interface{}) {
+	v.printf(format, args...)
+}
+
+func (v *printer) Println(args ...interface{}) {
+	if v.enab.Enabled(v.level) {
+		v.print(sprintln(args))
+	}
+}
+
+// Logger adapts zap's Logger to be compatible with grpclog.LoggerV2 and the deprecated grpclog.Logger.
+type Logger struct {
+	delegate     *zap.SugaredLogger
+	levelEnabler zapcore.LevelEnabler
+	print        *printer
+	fatal        *printer
+	// printToDebug bool
+	// fatalToWarn  bool
+}
+
+// Print implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Print(args ...interface{}) {
+	l.print.Print(args...)
+}
+
+// Printf implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Infof].
+func (l *Logger) Printf(format string, args ...interface{}) {
+	l.print.Printf(format, args...)
+}
+
+// Println implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Println(args ...interface{}) {
+	l.print.Println(args...)
+}
+
+// Info implements grpclog.LoggerV2.
+func (l *Logger) Info(args ...interface{}) {
+	l.delegate.Info(args...)
+}
+
+// Infoln implements grpclog.LoggerV2.
+func (l *Logger) Infoln(args ...interface{}) {
+	if l.levelEnabler.Enabled(zapcore.InfoLevel) {
+		l.delegate.Info(sprintln(args))
+	}
+}
+
+// Infof implements grpclog.LoggerV2.
+func (l *Logger) Infof(format string, args ...interface{}) {
+	l.delegate.Infof(format, args...)
+}
+
+// Warning implements grpclog.LoggerV2.
+func (l *Logger) Warning(args ...interface{}) {
+	l.delegate.Warn(args...)
+}
+
+// Warningln implements grpclog.LoggerV2.
+func (l *Logger) Warningln(args ...interface{}) {
+	if l.levelEnabler.Enabled(zapcore.WarnLevel) {
+		l.delegate.Warn(sprintln(args))
+	}
+}
+
+// Warningf implements grpclog.LoggerV2.
+func (l *Logger) Warningf(format string, args ...interface{}) {
+	l.delegate.Warnf(format, args...)
+}
+
+// Error implements grpclog.LoggerV2.
+func (l *Logger) Error(args ...interface{}) {
+	l.delegate.Error(args...)
+}
+
+// Errorln implements grpclog.LoggerV2.
+func (l *Logger) Errorln(args ...interface{}) {
+	if l.levelEnabler.Enabled(zapcore.ErrorLevel) {
+		l.delegate.Error(sprintln(args))
+	}
+}
+
+// Errorf implements grpclog.LoggerV2.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+	l.delegate.Errorf(format, args...)
+}
+
+// Fatal implements grpclog.LoggerV2.
+func (l *Logger) Fatal(args ...interface{}) {
+	l.fatal.Print(args...)
+}
+
+// Fatalln implements grpclog.LoggerV2.
+func (l *Logger) Fatalln(args ...interface{}) {
+	l.fatal.Println(args...)
+}
+
+// Fatalf implements grpclog.LoggerV2.
+func (l *Logger) Fatalf(format string, args ...interface{}) {
+	l.fatal.Printf(format, args...)
+}
+
+// V implements grpclog.LoggerV2.
+func (l *Logger) V(level int) bool {
+	return l.levelEnabler.Enabled(_grpcToZapLevel[level])
+}
+
+func sprintln(args []interface{}) string {
+	s := fmt.Sprintln(args...)
+	// Drop the newline character added by Sprintln.
+	return s[:len(s)-1]
+}
diff --git a/vendor/go.yaml.in/yaml/v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
new file mode 100644
index 0000000..7348c50
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+    - "1.4.x"
+    - "1.5.x"
+    - "1.6.x"
+    - "1.7.x"
+    - "1.8.x"
+    - "1.9.x"
+    - "1.10.x"
+    - "1.11.x"
+    - "1.12.x"
+    - "1.13.x"
+    - "1.14.x"
+    - "tip"
+
+go_import_path: gopkg.in/yaml.v2
diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
new file mode 100644
index 0000000..8dada3e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+    apic.go
+    emitterc.go
+    parserc.go
+    readerc.go
+    scannerc.go
+    writerc.go
+    yamlh.go
+    yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/go.yaml.in/yaml/v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
new file mode 100644
index 0000000..c9388da
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *go.yaml.in/yaml/v2*.
+
+To install it, run:
+
+    go get go.yaml.in/yaml/v2
+
+API documentation
+-----------------
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+        "fmt"
+        "log"
+
+        "go.yaml.in/yaml/v2"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+        A string
+        B struct {
+                RenamedC int   `yaml:"c"`
+                D        []int `yaml:",flow"`
+        }
+}
+
+func main() {
+        t := T{}
+
+        err := yaml.Unmarshal([]byte(data), &t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t:\n%v\n\n", t)
+
+        d, err := yaml.Marshal(&t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+        m := make(map[interface{}]interface{})
+
+        err = yaml.Unmarshal([]byte(data), &m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m:\n%v\n\n", m)
+
+        d, err = yaml.Marshal(&m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+  c: 2
+  d:
+  - 3
+  - 4
+```
+
diff --git a/vendor/go.yaml.in/yaml/v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
new file mode 100644
index 0000000..acf7140
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/apic.go
@@ -0,0 +1,744 @@
+package yaml
+
+import (
+	"io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue at the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		return
+	}
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
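+
+// Internal flow sketch (illustrative; this mirrors how the package's
+// decoder is assumed to wire a parser to an in-memory document):
+//
+//	var parser yaml_parser_t
+//	yaml_parser_initialize(&parser)
+//	yaml_parser_set_input_string(&parser, []byte("a: 1"))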
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_reader_read_handler
+	parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
+
+var disableLineWrapping = false
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+	if disableLineWrapping {
+		emitter.best_width = -1
+	}
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_writer.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_writer_write_handler
+	emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+	event *yaml_event_t,
+	version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t,
+	implicit bool,
+) {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/vendor/go.yaml.in/yaml/v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
new file mode 100644
index 0000000..129bc2a
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/decode.go
@@ -0,0 +1,815 @@
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+const (
+	documentNode = 1 << iota
+	mappingNode
+	sequenceNode
+	scalarNode
+	aliasNode
+)
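+
+// These kinds are distinct bit flags (documentNode = 1, mappingNode = 2,
+// sequenceNode = 4, scalarNode = 8, aliasNode = 16), though in practice
+// each node carries exactly one of them.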
+
+type node struct {
+	kind         int
+	line, column int
+	tag          string
+	// For an alias node, alias holds the resolved alias.
+	alias    *node
+	value    string
+	implicit bool
+	children []*node
+	anchors  map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't advance the line counter before returning the error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + p.event.typ.String())
+	}
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.expect(yaml_DOCUMENT_START_EVENT)
+	n.children = append(n.children, p.parse())
+	p.expect(yaml_DOCUMENT_END_EVENT)
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	n.alias = p.doc.anchors[n.value]
+	if n.alias == nil {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	p.expect(yaml_ALIAS_EVENT)
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SCALAR_EVENT)
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_SEQUENCE_START_EVENT)
+	for p.peek() != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.expect(yaml_SEQUENCE_END_EVENT)
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.expect(yaml_MAPPING_START_EVENT)
+	for p.peek() != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.expect(yaml_MAPPING_END_EVENT)
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *node
+	aliases map[*node]bool
+	mapType reflect.Type
+	terrors []string
+	strict  bool
+
+	decodeCount int
+	aliasCount  int
+	aliasDepth  int
+}
+
+var (
+	mapItemType    = reflect.TypeOf(MapItem{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = defaultMapType.Elem()
+	timeType       = reflect.TypeOf(time.Time{})
+	ptrTimeType    = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder(strict bool) *decoder {
+	d := &decoder{mapType: defaultMapType, strict: strict}
+	d.aliases = make(map[*node]bool)
+	return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+	if n.tag != "" {
+		tag = n.tag
+	}
+	value := n.value
+	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML and, if so, whether
+// the value unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
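+//
+// For example, decoding into a **string allocates the outer and the inner
+// pointer on successive passes of the loop below before the final string
+// value is set.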
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
+
+const (
+	// 400,000 decode operations is ~500kb of dense object declarations, or
+	// ~5kb of dense object declarations with 10000% alias expansion
+	alias_ratio_range_low = 400000
+
+	// 4,000,000 decode operations is ~5MB of dense object declarations, or
+	// ~4.5MB of dense object declarations with 10% alias expansion
+	alias_ratio_range_high = 4000000
+
+	// alias_ratio_range is the range over which we scale allowed alias ratios
+	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+	switch {
+	case decodeCount <= alias_ratio_range_low:
+		// allow 99% to come from alias expansion for small-to-medium documents
+		return 0.99
+	case decodeCount >= alias_ratio_range_high:
+		// allow 10% to come from alias expansion for very large documents
+		return 0.10
+	default:
+		// scale smoothly from 99% down to 10% over the range.
+		// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+		// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+		return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+	}
+}
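+
+// Illustrative arithmetic: halfway through the range, at decodeCount =
+// 2,200,000, the allowed ratio is 0.99 - 0.89*0.5 = 0.545, i.e. just over
+// half of all decode operations may still come from alias expansion.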
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+	d.decodeCount++
+	if d.aliasDepth > 0 {
+		d.aliasCount++
+	}
+	if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+		failf("document contains excessive aliasing")
+	}
+	switch n.kind {
+	case documentNode:
+		return d.document(n, out)
+	case aliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.kind {
+	case scalarNode:
+		good = d.scalar(n, out)
+	case mappingNode:
+		good = d.mapping(n, out)
+	case sequenceNode:
+		good = d.sequence(n, out)
+	default:
+		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+	}
+	return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+	if len(n.children) == 1 {
+		d.doc = n
+		d.unmarshal(n.children[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+	if d.aliases[n] {
+		// TODO this could actually be allowed in some circumstances.
+		failf("anchor '%s' value contains itself", n.value)
+	}
+	d.aliases[n] = true
+	d.aliasDepth++
+	good = d.unmarshal(n.alias, out)
+	d.aliasDepth--
+	delete(d.aliases, n)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) bool {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+		// We've resolved to exactly the type we want, so use that.
+		out.Set(resolvedv)
+		return true
+	}
+	// Perhaps we can use the value as a TextUnmarshaler to
+	// set its value.
+	if out.CanAddr() {
+		u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+		if ok {
+			var text []byte
+			if tag == yaml_BINARY_TAG {
+				text = []byte(resolved.(string))
+			} else {
+				// We let any value be unmarshaled into TextUnmarshaler.
+				// That might be more lax than we'd like, but the
+				// TextUnmarshaler itself should bowl out any dubious values.
+				text = []byte(n.value)
+			}
+			err := u.UnmarshalText(text)
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			return true
+		}
+		if resolved != nil {
+			out.SetString(n.value)
+			return true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else if tag == yaml_TIMESTAMP_TAG {
+			// It looks like a timestamp but for backward compatibility
+			// reasons we set it as a string, so that code that unmarshals
+			// timestamp-like values into interface{} will continue to
+			// see a string and not a time.Time.
+			// TODO(v3) Drop this.
+			out.Set(reflect.ValueOf(n.value))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			return true
+		}
+	}
+	d.terror(n, tag, out)
+	return false
+}
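+
+// As an example of the conversions above, the scalar "123" resolves to an
+// int and fits any integer field wide enough to hold it, while a string
+// such as "2h30m" is only parsed into an integer field when that field's
+// type is exactly time.Duration.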
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, yaml_SEQ_TAG, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.children[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	if out.Kind() != reflect.Array {
+		out.Set(out.Slice(0, j))
+	}
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Slice:
+		return d.mappingSlice(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		if d.mapType.Kind() == reflect.Map {
+			iface := out
+			out = reflect.MakeMap(d.mapType)
+			iface.Set(out)
+		} else {
+			slicev := reflect.New(d.mapType).Elem()
+			if !d.mappingSlice(n, slicev) {
+				return false
+			}
+			out.Set(slicev)
+			return true
+		}
+	default:
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	mapType := d.mapType
+	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+		d.mapType = outt
+	}
+
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+	}
+	l := len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.children[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.children[i+1], e) {
+				d.setMapIndex(n.children[i+1], out, k, e)
+			}
+		}
+	}
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
+	if d.strict && out.MapIndex(k) != zeroValue {
+		d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
+		return
+	}
+	out.SetMapIndex(k, v)
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+	outt := out.Type()
+	if outt.Elem() != mapItemType {
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+
+	mapType := d.mapType
+	d.mapType = outt
+
+	var slice []MapItem
+	var l = len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		item := MapItem{}
+		k := reflect.ValueOf(&item.Key).Elem()
+		if d.unmarshal(n.children[i], k) {
+			v := reflect.ValueOf(&item.Value).Elem()
+			if d.unmarshal(n.children[i+1], v) {
+				slice = append(slice, item)
+			}
+		}
+	}
+	out.Set(reflect.ValueOf(slice))
+	d.mapType = mapType
+	return true
+}
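+
+// mappingSlice is what preserves key order: when the target element type is
+// MapItem (i.e. a yaml.MapSlice), pairs are appended in document order
+// instead of being stored in an unordered Go map.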
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+	name := settableValueOf("")
+	l := len(n.children)
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	var doneFields []bool
+	if d.strict {
+		doneFields = make([]bool, len(sinfo.FieldsList))
+	}
+	for i := 0; i < l; i += 2 {
+		ni := n.children[i]
+		if isMerge(ni) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			if d.strict {
+				if doneFields[info.Id] {
+					d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
+					continue
+				}
+				doneFields[info.Id] = true
+			}
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = out.FieldByIndex(info.Inline)
+			}
+			d.unmarshal(n.children[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.children[i+1], value)
+			d.setMapIndex(n.children[i+1], inlineMap, name, value)
+		} else if d.strict {
+			d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
+		}
+	}
+	return true
+}
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+	switch n.kind {
+	case mappingNode:
+		d.unmarshal(n, out)
+	case aliasNode:
+		if n.alias != nil && n.alias.kind != mappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case sequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.children) - 1; i >= 0; i-- {
+			ni := n.children[i]
+			if ni.kind == aliasNode {
+				if ni.alias != nil && ni.alias.kind != mappingNode {
+					failWantMap()
+				}
+			} else if ni.kind != mappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
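+
+// For instance, given `base: &b {x: 1}` and a mapping containing
+// `<<: [*b, {x: 2}]`, the sequence is walked right to left so {x: 2} is
+// unmarshalled first and *b last; the earlier entry therefore wins and x
+// ends up as 1, matching the YAML merge-key specification.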
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
diff --git a/vendor/go.yaml.in/yaml/v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
new file mode 100644
index 0000000..a1c2cc5
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
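+
+// The buffer_pos+5 margin used here and in the helpers below appears sized
+// to guarantee room for the largest single write (a 4-byte UTF-8 sequence,
+// or a 2-byte CRLF break) between flush checks.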
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+	return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
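+
+// Concretely, after a MAPPING-START the emitter buffers up to three more
+// events (enough to see the first key and its value) before emitting, so
+// later checks such as yaml_emitter_check_empty_mapping and
+// yaml_emitter_check_simple_key can look ahead at them.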
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+	return true
+}
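+
+// With the default best_indent of 2, a top-level block collection starts at
+// indent 0 and each nested block level adds 2, while a flow collection
+// opened at the top level starts directly at indent 2.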
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	default:
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
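+
+// A key whose serialized form would exceed 128 bytes is thus rejected as a
+// simple key and emitted in the explicit `? key` / `: value` form instead.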
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
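+
+// The style choice degrades stepwise: a plain style that is not allowed in
+// the current context falls back to single-quoted, and single-quoted falls
+// back to double-quoted, so e.g. a value with non-printable characters is
+// always emitted double-quoted whatever style was requested.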
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
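+
+// For example, a %TAG directive with handle "!e!" and a non-empty prefix is
+// accepted, while handles such as "e!" or "!e" are rejected for not being
+// '!'-delimited on both sides.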
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace = false
+		previous_space         = false
+		previous_break         = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why the 'z' variant? The index can't be at the end of the string here, as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
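+
+// A rough sketch of how these flags play out (illustrative, not exhaustive):
+//
+//	" x"    leading space  -> both plain styles disallowed; quoting required
+//	"a\nb"  line break     -> both plain styles disallowed; literal/folded fine
+//	"a, b"  flow indicator -> plain disallowed in flow context only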
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	emitter.indention = true
+	return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
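+
+// For instance (illustrative example): a tag suffix containing the UTF-8
+// octets 0xC3 0xA9 ('é') is written as %C3%A9, since those octets fall
+// outside the URI character set handled above and are percent-encoded with
+// uppercase hex digits.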
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	emitter.whitespace = false
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
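+
+// For instance (illustrative): a value containing a tab and the byte 0x01
+// is emitted as "a\tb\x01"; the tab has the short escape \t, while 0x01,
+// which has no short form, falls back to the two-digit hex escape \x01.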
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
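+
+// Chomping in brief (a sketch of the YAML semantics, not upstream docs):
+//
+//	"a"      -> hint '-' (strip): no trailing line break to keep
+//	"a\n"    -> no hint (clip):   exactly one trailing break is the default
+//	"a\n\n"  -> hint '+' (keep):  extra trailing breaks must be preserved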
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
new file mode 100644
index 0000000..0ee738e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/encode.go
@@ -0,0 +1,390 @@
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// jsonNumber is the interface of the encoding/json.Number datatype.
+// Repeating the interface here avoids a dependency on encoding/json, and also
+// supports other libraries like jsoniter, which use a similar datatype with
+// the same interface. Detecting this interface is useful when dealing with
+// structures containing json.Number, which is a string under the hood. The
+// encoder should prefer the use of Int64(), Float64() and String(), in that
+// order, when encoding this type.
+type jsonNumber interface {
+	Float64() (float64, error)
+	Int64() (int64, error)
+	String() string
+}
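+
+// A rough sketch of the resulting behaviour (an assumption drawn from the
+// dispatch in marshal below): json.Number("3") encodes as the integer 3,
+// json.Number("3.5") as the float 3.5, and json.Number("1e999"), for which
+// both Int64 and Float64 fail, falls back to its string form.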
+
+type encoder struct {
+	emitter yaml_emitter_t
+	event   yaml_event_t
+	out     []byte
+	flow    bool
+	// doneInit holds whether the initial stream_start_event has been
+	// emitted.
+	doneInit bool
+}
+
+func newEncoder() *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+	e := &encoder{}
+	yaml_emitter_initialize(&e.emitter)
+	yaml_emitter_set_output_writer(&e.emitter, w)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	return e
+}
+
+func (e *encoder) init() {
+	if e.doneInit {
+		return
+	}
+	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+	e.emit()
+	e.doneInit = true
+}
+
+func (e *encoder) finish() {
+	e.emitter.open_ended = false
+	yaml_stream_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+	e.init()
+	yaml_document_start_event_initialize(&e.event, nil, nil, true)
+	e.emit()
+	e.marshal(tag, in)
+	yaml_document_end_event_initialize(&e.event, true)
+	e.emit()
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	switch m := iface.(type) {
+	case jsonNumber:
+		integer, err := m.Int64()
+		if err == nil {
+			// In this case the json.Number is a valid int64
+			in = reflect.ValueOf(integer)
+			break
+		}
+		float, err := m.Float64()
+		if err == nil {
+			// In this case the json.Number is a valid float64
+			in = reflect.ValueOf(float)
+			break
+		}
+		// fallback case - no number could be obtained
+		in = reflect.ValueOf(m.String())
+	case time.Time, *time.Time:
+		// Although time.Time implements TextMarshaler,
+		// we don't want to treat it as a string for YAML
+		// purposes because YAML has special support for
+		// timestamps.
+	case Marshaler:
+		v, err := m.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		in = reflect.ValueOf(v)
+	case encoding.TextMarshaler:
+		text, err := m.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	case nil:
+		e.nilv()
+		return
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		e.marshal(tag, in.Elem())
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		if in.Type() == ptrTimeType {
+			e.timev(tag, in.Elem())
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Struct:
+		if in.Type() == timeType {
+			e.timev(tag, in)
+		} else {
+			e.structv(tag, in)
+		}
+	case reflect.Slice, reflect.Array:
+		if in.Type().Elem() == mapItemType {
+			e.itemsv(tag, in)
+		} else {
+			e.slicev(tag, in)
+		}
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if in.Type() == durationType {
+			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+		} else {
+			e.intv(tag, in)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+		for _, item := range slice {
+			e.marshal("", reflect.ValueOf(item.Key))
+			e.marshal("", reflect.ValueOf(item.Value))
+		}
+	})
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = in.FieldByIndex(info.Inline)
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+	e.emit()
+	f()
+	yaml_mapping_end_event_initialize(&e.event)
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values should be marshalled
+// quoted for the time being for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
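+
+// For example (illustrative): the scalar 1:20 means 80 (1*60 + 20) to a
+// YAML 1.1 parser, so the Go string "1:20" matches here and is emitted
+// quoted rather than plain.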
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	canUsePlain := true
+	switch {
+	case !utf8.ValidString(s):
+		if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		}
+		if tag != "" {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+		// It can't be encoded directly as YAML so use a binary tag
+		// and encode it as base64.
+		tag = yaml_BINARY_TAG
+		s = encodeBase64(s)
+	case tag == "":
+		// Check to see if it would resolve to a specific
+		// tag when encoded unquoted. If it doesn't,
+		// there's no need to quote it.
+		rtag, _ := resolve("", s)
+		canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
+	}
+	// Note: it's possible for user code to emit invalid YAML
+	// if it explicitly specifies a tag and a string containing
+	// text that's incompatible with that tag.
+	switch {
+	case strings.Contains(s, "\n"):
+		style = yaml_LITERAL_SCALAR_STYLE
+	case canUsePlain:
+		style = yaml_PLAIN_SCALAR_STYLE
+	default:
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
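+
+// Style selection in short (a sketch, following the logic above): a string
+// like "plain text" stays plain, "true" is double-quoted so it round-trips
+// as a string rather than a bool, and "line1\nline2" uses the literal style.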
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+	t := in.Interface().(time.Time)
+	s := t.Format(time.RFC3339Nano)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// Issue #352: When formatting, use the precision of the underlying value
+	precision := 64
+	if in.Kind() == reflect.Float32 {
+		precision = 32
+	}
+
+	s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}
diff --git a/vendor/go.yaml.in/yaml/v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
new file mode 100644
index 0000000..81d05df
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/parserc.go
@@ -0,0 +1,1095 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
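+// As a concrete illustration (not part of the grammar above), the document
+//
+//	a: [b, c]
+//
+// parses into the event sequence STREAM-START, DOCUMENT-START,
+// MAPPING-START, SCALAR("a"), SEQUENCE-START, SCALAR("b"), SCALAR("c"),
+// SEQUENCE-END, MAPPING-END, DOCUMENT-END, STREAM-END.
+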
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		return &parser.tokens[parser.tokens_head]
+	}
+	return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
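+
+// By way of example (illustrative): a stream that simply starts with "a: 1"
+// takes the implicit branch above, a stream starting with "%YAML 1.1" or
+// "---" takes the explicit branch, and an empty stream yields STREAM-END.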
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
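+	// Resolve the tag against the known %TAG directives. For instance
+	// (illustrative): the handle/suffix pair ("!e!", "foo") combined with
+	// a directive "%TAG !e! tag:example.com,2000:" yields the full tag
+	// "tag:example.com,2000:foo"; an undefined handle is an error below.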
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
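+
+// An indentless sequence (illustrative example) is a block sequence whose
+// entries sit at the same indentation as their mapping key:
+//
+//	key:
+//	- a
+//	- b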
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
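+
+// Together, the three mapping states above parse single-pair mappings inside
+// flow sequences (illustrative example): [a: b, c: d] is a sequence of two
+// one-entry mappings, not a single mapping with two keys.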
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
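+
+// Illustrative note (a sketch, not upstream documentation): for a flow
+// mapping such as
+//
+//      { a: 1, b }
+//
+// the key/value states above emit SCALAR("a") and SCALAR("1") for the first
+// entry, then SCALAR("b") followed by an empty scalar for the valueless
+// entry, via the empty=true path and yaml_parser_process_empty_scalar.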
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
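+
+// For reference (an illustrative sketch, not upstream documentation): given
+// the document prologue
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2000:app/
+//	---
+//
+// this function yields version_directive = &{major: 1, minor: 1} and
+// tag_directives = [{handle: "!e!", prefix: "tag:example.com,2000:app/"}],
+// and also appends the two default_tag_directives ("!" and "!!") to
+// parser.tag_directives via yaml_parser_append_tag_directive.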
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
new file mode 100644
index 0000000..7c1f5fa
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/readerc.go
@@ -0,0 +1,412 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
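+
+// Illustrative examples of the detection above (a sketch, not upstream
+// documentation):
+//
+//	raw input bytes                    detected encoding       bytes skipped
+//	{0xEF, 0xBB, 0xBF, 0x61, ...}   -> yaml_UTF8_ENCODING      3 (UTF-8 BOM)
+//	{0xFF, 0xFE, 0x61, 0x00, ...}   -> yaml_UTF16LE_ENCODING   2
+//	{0xFE, 0xFF, 0x00, 0x61, ...}   -> yaml_UTF16BE_ENCODING   2
+//	{'p', 'l', 'a', 'i', 'n'}       -> yaml_UTF8_ENCODING      0 (default)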
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// [Go] This function was changed to guarantee the requested length size at EOF.
+	// The fact we need to do this is pretty awful, but the description above implies
+	// that this is the case, and there are tests that rely on this behavior.
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		// [Go] ACTUALLY! Read the documentation of this function above.
+		// This is just broken. To return true, we need to have the
+		// given length in the buffer. Not doing that means every single
+		// check that calls this function to make sure the buffer has a
+		// given length ends up panicking (in Go) or accessing invalid
+		// memory (in C).
+		//return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length would end up panicking (in Go) or accessing
+	// invalid memory (in C). This happens here due to the EOF above
+	// breaking early, so pad the buffer with NULs up to the needed length.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
new file mode 100644
index 0000000..4120e0c
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or there's an explicit
+			// !!timestamp tag.
+			if tag == "" || tag == yaml_TIMESTAMP_TAG {
+				t, ok := parseTimestamp(in)
+				if ok {
+					return yaml_TIMESTAMP_TAG, t
+				}
+			}
+
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			if yamlStyleFloat.MatchString(plain) {
+				floatv, err := strconv.ParseFloat(plain, 64)
+				if err == nil {
+					return yaml_FLOAT_TAG, floatv
+				}
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+			}
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	return yaml_STR_TAG, in
+}
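+
+// A minimal sketch of how the resolution above behaves when seen through
+// this package's Unmarshal (illustrative only; values follow the tables and
+// branches in resolve):
+//
+//	var v interface{}
+//	yaml.Unmarshal([]byte("yes"), &v)        // v == true      (resolveMap, !!bool)
+//	yaml.Unmarshal([]byte("0x1F"), &v)       // v == 31        (ParseInt base 0, !!int)
+//	yaml.Unmarshal([]byte("6.8523e+5"), &v)  // v == 685230.0  (yamlStyleFloat, !!float)
+//	yaml.Unmarshal([]byte("'yes'"), &v)      // v == "yes"     (quoted, so !!str)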
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
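+
+// For instance (an illustrative note): encodeBase64(strings.Repeat("x", 100))
+// produces EncodedLen(100) == 136 base64 characters, split into a 70-character
+// line and a 66-character line, each followed by '\n', since 136 exceeds the
+// 70-column lineLen.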
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+	"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+	"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+	"2006-1-2 15:4:5.999999999",       // space separated with no time zone
+	"2006-1-2",                        // date only
+	// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+	// from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and returns the timestamp
+// and whether parsing succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+	// TODO write code to check all the formats supported by
+	// http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+	// Quick check: all date formats start with YYYY-.
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
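+
+// Illustrative calls (a sketch; results follow allowedTimestampFormats above):
+//
+//	parseTimestamp("2001-12-15T02:59:43.1Z")  // ok: RFC3339Nano style
+//	parseTimestamp("2001-12-14 21:59:43.10")  // ok: space separated, no zone
+//	parseTimestamp("2002-12-14")              // ok: date only
+//	parseTimestamp("14-12-2001")              // not ok: must start with YYYY-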
diff --git a/vendor/go.yaml.in/yaml/v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
new file mode 100644
index 0000000..0b9bb60
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward.  These are "block collection start" and
+// "simple keys".  Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly.  FLOW-ENTRY represents the ',' indicator.  Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python).  However, YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
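+
+// For example (an illustrative note): reading "\r\n" consumes two bytes but
+// appends a single '\n'; the NEL sequence 0xC2 0x85 likewise collapses to
+// '\n'; the LS/PS sequences 0xE2 0x80 0xA8/0xA9 are copied through verbatim.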
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		if parser.tokens_head != len(parser.tokens) {
+			// If queue is non-empty, check if any potential simple key may
+			// occupy the head position.
+			head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+			if !ok {
+				break
+			} else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+				return false
+			} else if !valid {
+				break
+			}
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank character except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we haven't determined the token type by now, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+	if !simple_key.possible {
+		return false, true
+	}
+
+	// The 1.2 specification says:
+	//
+	//     "If the ? indicator is omitted, parsing needs to see past the
+	//     implicit key to recognize it as such. To limit the amount of
+	//     lookahead required, the “:” indicator must appear at most 1024
+	//     Unicode characters beyond the start of the key. In addition, the key
+	//     is restricted to a single line."
+	//
+	if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+		// Check if the potential simple key to be removed is required.
+		if simple_key.required {
+			return false, yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", simple_key.mark,
+				"could not find expected ':'")
+		}
+		simple_key.possible = false
+		return false, true
+	}
+	return true, true
+}
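+
+// For example (illustrative): a would-be implicit key whose ':' appears more
+// than 1024 characters past the key start, such as
+//
+//	aaaa...(over 1024 chars)...aaaa: value
+//
+// is rejected here with "could not find expected ':'" when the key is
+// required, and silently demoted from a potential simple key otherwise.
+// The same happens when the ':' lands on a later line.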
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// If the current position may start a simple key, save it.
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level.  In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column.  For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
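+	// simple_keys_by_tok maps token numbers to positions in simple_keys so
+	// that stale keys can be dropped without scanning the whole stack.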
+	parser.simple_keys_by_tok = make(map[int]int)
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
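+	// The indicator is three characters long: '---' or '...'.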
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily a simple one).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
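+	// yaml_simple_key_is_valid returns (valid, ok): ok is false on a scanner
+	// error, and valid reports whether the saved key may still serve as a key.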
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
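+// max_number_length limits the number of digits in a version number part.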
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	// Check that the length of the anchor is greater than 0 and that it is
+	// followed by a whitespace character or one of the indicators:
+	//
+	//      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+// Scan a TAG token.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be part of the URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	var s []byte
+	hasTag := len(head) > 0
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		hasTag = true
+	}
+
+	if !hasTag {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
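+	// w == 1024 is a sentinel meaning the UTF-8 width is not yet known; it is
+	// replaced by the real sequence width once the leading octet is decoded.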
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
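+	// chomping is -1 for '-' (strip the final line break), +1 for '+' (keep
+	// all trailing line breaks), and 0 by default (clip to a single break).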
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
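+					// Encode the code point as UTF-8 by hand (one to four bytes).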
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
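+	// In the block context, a plain scalar must be indented further than the
+	// enclosing node, hence parser.indent + 1.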
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for tab characters that abuse indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
new file mode 100644
index 0000000..4c45e66
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/sorter.go
@@ -0,0 +1,113 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
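+// keyList implements sort.Interface over reflect.Values, allowing map keys
+// to be sorted before they are emitted.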
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
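+
+// Less orders numeric (and bool) keys by value and falls back to a natural
+// string order that compares embedded digit runs by numeric value.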
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		if ar[i] == '0' || br[i] == '0' {
+			for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+				if ar[j] != '0' {
+					an = 1
+					bn = 1
+					break
+				}
+			}
+		}
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is a number or bool.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess reports whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/vendor/go.yaml.in/yaml/v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
new file mode 100644
index 0000000..a2dde60
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/writerc.go
@@ -0,0 +1,26 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	return true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
new file mode 100644
index 0000000..5248e12
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/yaml/go-yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshaling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshaled if they are exported (have an
+// upper case first letter), and are unmarshaled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshaling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+	return unmarshal(in, out, true)
+}
+
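+// For illustration (not upstream code), the difference shows up with an
+// unknown key:
+//
+//     type T struct{ A int }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)       // succeeds; "b" is ignored
+//     yaml.UnmarshalStrict([]byte("a: 1\nb: 2"), &t) // fails: no field for "b"
+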
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+	strict bool
+	parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{
+		parser: newParserFromReader(r),
+	}
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+	dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+	d := newDecoder(dec.strict)
+	defer handleErr(&err)
+	node := dec.parser.parse()
+	if node == nil {
+		return io.EOF
+	}
+	out := reflect.ValueOf(v)
+	if out.Kind() == reflect.Ptr && !out.IsNil() {
+		out = out.Elem()
+	}
+	d.unmarshal(node, out)
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
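+// A sketch of draining a multi-document stream (r is an assumed io.Reader;
+// not part of the upstream file):
+//
+//     dec := yaml.NewDecoder(r)
+//     for {
+//         var doc map[string]interface{}
+//         if err := dec.Decode(&doc); err != nil {
+//             if err == io.EOF {
+//                 break // no more documents
+//             }
+//             return err
+//         }
+//         // ... use doc ...
+//     }
+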
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+	defer handleErr(&err)
+	d := newDecoder(strict)
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Zero valued structs will be omitted if all their public
+//                  fields are zero, unless they implement an IsZero
+//                  method (see the IsZeroer interface type), in which
+//                  case the field will be excluded if IsZero returns true.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
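+// An illustrative sketch of the flow and inline flags (the Point and T
+// types are assumptions, not upstream code):
+//
+//     type Point struct{ X, Y int }
+//     type T struct {
+//         P     Point             `yaml:"p,flow"`
+//         Extra map[string]string `yaml:",inline"`
+//     }
+//
+// Marshaling &T{P: Point{1, 2}, Extra: map[string]string{"k": "v"}}
+// yields output along the lines of "p: {x: 1, y: 2}\nk: v\n".
+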
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
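+// A sketch of typical Encoder usage (bytes.Buffer is an assumed sink;
+// error handling elided for brevity):
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"b": 2})
+//     enc.Close()
+//     // buf now holds roughly "a: 1\n---\nb: 2\n"
+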
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
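+// For illustration (not upstream code), callers can detect a partial
+// decode with a type assertion:
+//
+//     var t struct{ A int }
+//     err := yaml.Unmarshal([]byte("a: not-a-number"), &t)
+//     if te, ok := err.(*yaml.TypeError); ok {
+//         // t keeps any values that did decode; te.Errors lists the rest
+//     }
+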
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					finfo.Id = len(fieldsList)
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		info.Id = len(fieldsList)
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{
+		FieldsMap:  fieldsMap,
+		FieldsList: fieldsList,
+		InlineMap:  inlineMap,
+	}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+	IsZero() bool
+}
+
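+// An illustrative sketch (the Window and Config types are assumptions,
+// not upstream code):
+//
+//     type Window struct{ Min, Max int }
+//
+//     func (w Window) IsZero() bool { return w.Min == 0 && w.Max == 0 }
+//
+//     type Config struct {
+//         W Window `yaml:"w,omitempty"` // omitted whenever W.IsZero() is true
+//     }
+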
+func isZero(v reflect.Value) bool {
+	kind := v.Kind()
+	if z, ok := v.Interface().(IsZeroer); ok {
+		if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+			return true
+		}
+		return z.IsZero()
+	}
+	switch kind {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+	disableLineWrapping = true
+}
diff --git a/vendor/go.yaml.in/yaml/v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
new file mode 100644
index 0000000..f6a9c8e
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yamlh.go
@@ -0,0 +1,739 @@
+package yaml
+
+import (
+	"fmt"
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+var eventStrings = []string{
+	yaml_NO_EVENT:             "none",
+	yaml_STREAM_START_EVENT:   "stream start",
+	yaml_STREAM_END_EVENT:     "stream end",
+	yaml_DOCUMENT_START_EVENT: "document start",
+	yaml_DOCUMENT_END_EVENT:   "document end",
+	yaml_ALIAS_EVENT:          "alias",
+	yaml_SCALAR_EVENT:         "scalar",
+	yaml_SEQUENCE_START_EVENT: "sequence start",
+	yaml_SEQUENCE_END_EVENT:   "sequence end",
+	yaml_MAPPING_START_EVENT:  "mapping start",
+	yaml_MAPPING_END_EVENT:    "mapping end",
+}
+
+func (e yaml_event_type_t) String() string {
+	if e < 0 || int(e) >= len(eventStrings) {
+		return fmt.Sprintf("unknown event %d", e)
+	}
+	return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for YAML_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write at most len(buffer) bytes to buffer and
+// return the number of bytes written.
+//
+// On success, the handler should return a nil error. At the end of the
+// input it should return io.EOF, and on failure any other non-nil error.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte offset, value, and position at which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // possible simple_key indexes indexed by token_number
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write the whole buffer to
+// the output.
+//
+// On success, the handler should return a nil error; on failure it should
+// return a non-nil error describing the problem.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal.  Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/go.yaml.in/yaml/v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
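+// Illustrative values (not part of the upstream file): is_printable is
+// true for ASCII "a" and for the two-byte sequence 0xC2 0xA0 (NO-BREAK
+// SPACE), but false for 0xC2 0x85 (NEL, a line break) and for the UTF-8
+// byte order mark 0xEF 0xBB 0xBF.
+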
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (        // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+
+}
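+
+// Illustrative values (not part of the upstream file): width(0x41) == 1
+// (ASCII), width(0xC3) == 2 (lead byte of 'é'), width(0xE2) == 3 (lead
+// byte of '€'), width(0xF0) == 4, and width(0x80) == 0, since a UTF-8
+// continuation byte never starts a sequence.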
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index cf66309..d3cb951 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -3,29 +3,31 @@
 // license that can be found in the LICENSE file.
 
 // Package context defines the Context type, which carries deadlines,
-// cancelation signals, and other request-scoped values across API boundaries
+// cancellation signals, and other request-scoped values across API boundaries
 // and between processes.
 // As of Go 1.7 this package is available in the standard library under the
-// name context.  https://golang.org/pkg/context.
+// name [context].
 //
-// Incoming requests to a server should create a Context, and outgoing calls to
-// servers should accept a Context. The chain of function calls between must
-// propagate the Context, optionally replacing it with a modified copy created
-// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+// Incoming requests to a server should create a [Context], and outgoing
+// calls to servers should accept a Context. The chain of function
+// calls between them must propagate the Context, optionally replacing
+// it with a derived Context created using [WithCancel], [WithDeadline],
+// [WithTimeout], or [WithValue].
 //
 // Programs that use Contexts should follow these rules to keep interfaces
 // consistent across packages and enable static analysis tools to check context
 // propagation:
 //
 // Do not store Contexts inside a struct type; instead, pass a Context
-// explicitly to each function that needs it. The Context should be the first
+// explicitly to each function that needs it. This is discussed further in
+// https://go.dev/blog/context-and-structs. The Context should be the first
 // parameter, typically named ctx:
 //
 //	func DoSomething(ctx context.Context, arg Arg) error {
 //		// ... use ctx ...
 //	}
 //
-// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
 // if you are unsure about which Context to use.
 //
 // Use context Values only for request-scoped data that transits processes and
@@ -34,23 +36,116 @@
 // The same Context may be passed to functions running in different goroutines;
 // Contexts are safe for simultaneous use by multiple goroutines.
 //
-// See http://blog.golang.org/context for example code for a server that uses
+// See https://go.dev/blog/context for example code for a server that uses
 // Contexts.
-package context // import "golang.org/x/net/context"
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+	"time"
+)
+
+// A Context carries a deadline, a cancellation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+//
+//go:fix inline
+type Context = context.Context
+
+// Canceled is the error returned by [Context.Err] when the context is canceled
+// for some reason other than its deadline passing.
+//
+//go:fix inline
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
+// due to its deadline passing.
+//
+//go:fix inline
+var DeadlineExceeded = context.DeadlineExceeded
 
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline. It is typically used by the main function,
 // initialization, and tests, and as the top-level Context for incoming
 // requests.
-func Background() Context {
-	return background
-}
+//
+//go:fix inline
+func Background() Context { return context.Background() }
 
 // TODO returns a non-nil, empty Context. Code should use context.TODO when
 // it's unclear which Context to use or it is not yet available (because the
 // surrounding function has not yet been extended to accept a Context
-// parameter).  TODO is recognized by static analysis tools that determine
-// whether Contexts are propagated correctly in a program.
-func TODO() Context {
-	return todo
+// parameter).
+//
+//go:fix inline
+func TODO() Context { return context.TODO() }
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// A CancelFunc may be called by multiple goroutines simultaneously.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc = context.CancelFunc
+
+// WithCancel returns a derived context that points to the parent context
+// but has a new Done channel. The returned context's Done channel is closed
+// when the returned cancel function is called or when the parent context's
+// Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	return context.WithCancel(parent)
+}
+
+// WithDeadline returns a derived context that points to the parent context
+// but has the deadline adjusted to be no later than d. If the parent's
+// deadline is already earlier than d, WithDeadline(parent, d) is semantically
+// equivalent to parent. The returned [Context.Done] channel is closed when
+// the deadline expires, when the returned cancel function is called,
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
+func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
+	return context.WithDeadline(parent, d)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this [Context] complete:
+//
+//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+//		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+//		return slowOperation(ctx)
+//	}
+//
+//go:fix inline
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return context.WithTimeout(parent, timeout)
+}
+
+// WithValue returns a derived context that points to the parent Context.
+// In the derived context, the value associated with key is val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The provided key must be comparable and should not be of type
+// string or any other built-in type to avoid collisions between
+// packages using context. Users of WithValue should define their own
+// types for keys. To avoid allocating when assigning to an
+// interface{}, context keys often have concrete type
+// struct{}. Alternatively, exported context key variables' static
+// type should be a pointer or interface.
+//
+//go:fix inline
+func WithValue(parent Context, key, val interface{}) Context {
+	return context.WithValue(parent, key, val)
 }
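+
+// As an illustrative sketch (ctxKey is an assumption, not part of this
+// package), callers avoid key collisions with an unexported key type:
+//
+//	type ctxKey struct{}
+//
+//	ctx := context.WithValue(context.Background(), ctxKey{}, "v")
+//	v, ok := ctx.Value(ctxKey{}).(string)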
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
deleted file mode 100644
index 0c1b867..0000000
--- a/vendor/golang.org/x/net/context/go17.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.7
-
-package context
-
-import (
-	"context" // standard library's context, as of Go 1.7
-	"time"
-)
-
-var (
-	todo       = context.TODO()
-	background = context.Background()
-)
-
-// Canceled is the error returned by Context.Err when the context is canceled.
-var Canceled = context.Canceled
-
-// DeadlineExceeded is the error returned by Context.Err when the context's
-// deadline passes.
-var DeadlineExceeded = context.DeadlineExceeded
-
-// WithCancel returns a copy of parent with a new Done channel. The returned
-// context's Done channel is closed when the returned cancel function is called
-// or when the parent context's Done channel is closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
-	ctx, f := context.WithCancel(parent)
-	return ctx, f
-}
-
-// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
-// context's Done channel is closed when the deadline expires, when the returned
-// cancel function is called, or when the parent context's Done channel is
-// closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
-	ctx, f := context.WithDeadline(parent, deadline)
-	return ctx, f
-}
-
-// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete:
-//
-//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-//		defer cancel()  // releases resources if slowOperation completes before timeout elapses
-//		return slowOperation(ctx)
-//	}
-func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
-	return WithDeadline(parent, time.Now().Add(timeout))
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-func WithValue(parent Context, key interface{}, val interface{}) Context {
-	return context.WithValue(parent, key, val)
-}
diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go
deleted file mode 100644
index e31e35a..0000000
--- a/vendor/golang.org/x/net/context/go19.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.9
-
-package context
-
-import "context" // standard library's context, as of Go 1.7
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context = context.Context
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc = context.CancelFunc
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
deleted file mode 100644
index 065ff3d..0000000
--- a/vendor/golang.org/x/net/context/pre_go17.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.7
-
-package context
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-)
-
-// An emptyCtx is never canceled, has no values, and has no deadline. It is not
-// struct{}, since vars of this type must have distinct addresses.
-type emptyCtx int
-
-func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
-	return
-}
-
-func (*emptyCtx) Done() <-chan struct{} {
-	return nil
-}
-
-func (*emptyCtx) Err() error {
-	return nil
-}
-
-func (*emptyCtx) Value(key interface{}) interface{} {
-	return nil
-}
-
-func (e *emptyCtx) String() string {
-	switch e {
-	case background:
-		return "context.Background"
-	case todo:
-		return "context.TODO"
-	}
-	return "unknown empty Context"
-}
-
-var (
-	background = new(emptyCtx)
-	todo       = new(emptyCtx)
-)
-
-// Canceled is the error returned by Context.Err when the context is canceled.
-var Canceled = errors.New("context canceled")
-
-// DeadlineExceeded is the error returned by Context.Err when the context's
-// deadline passes.
-var DeadlineExceeded = errors.New("context deadline exceeded")
-
-// WithCancel returns a copy of parent with a new Done channel. The returned
-// context's Done channel is closed when the returned cancel function is called
-// or when the parent context's Done channel is closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
-	c := newCancelCtx(parent)
-	propagateCancel(parent, c)
-	return c, func() { c.cancel(true, Canceled) }
-}
-
-// newCancelCtx returns an initialized cancelCtx.
-func newCancelCtx(parent Context) *cancelCtx {
-	return &cancelCtx{
-		Context: parent,
-		done:    make(chan struct{}),
-	}
-}
-
-// propagateCancel arranges for child to be canceled when parent is.
-func propagateCancel(parent Context, child canceler) {
-	if parent.Done() == nil {
-		return // parent is never canceled
-	}
-	if p, ok := parentCancelCtx(parent); ok {
-		p.mu.Lock()
-		if p.err != nil {
-			// parent has already been canceled
-			child.cancel(false, p.err)
-		} else {
-			if p.children == nil {
-				p.children = make(map[canceler]bool)
-			}
-			p.children[child] = true
-		}
-		p.mu.Unlock()
-	} else {
-		go func() {
-			select {
-			case <-parent.Done():
-				child.cancel(false, parent.Err())
-			case <-child.Done():
-			}
-		}()
-	}
-}
-
-// parentCancelCtx follows a chain of parent references until it finds a
-// *cancelCtx. This function understands how each of the concrete types in this
-// package represents its parent.
-func parentCancelCtx(parent Context) (*cancelCtx, bool) {
-	for {
-		switch c := parent.(type) {
-		case *cancelCtx:
-			return c, true
-		case *timerCtx:
-			return c.cancelCtx, true
-		case *valueCtx:
-			parent = c.Context
-		default:
-			return nil, false
-		}
-	}
-}
-
-// removeChild removes a context from its parent.
-func removeChild(parent Context, child canceler) {
-	p, ok := parentCancelCtx(parent)
-	if !ok {
-		return
-	}
-	p.mu.Lock()
-	if p.children != nil {
-		delete(p.children, child)
-	}
-	p.mu.Unlock()
-}
-
-// A canceler is a context type that can be canceled directly. The
-// implementations are *cancelCtx and *timerCtx.
-type canceler interface {
-	cancel(removeFromParent bool, err error)
-	Done() <-chan struct{}
-}
-
-// A cancelCtx can be canceled. When canceled, it also cancels any children
-// that implement canceler.
-type cancelCtx struct {
-	Context
-
-	done chan struct{} // closed by the first cancel call.
-
-	mu       sync.Mutex
-	children map[canceler]bool // set to nil by the first cancel call
-	err      error             // set to non-nil by the first cancel call
-}
-
-func (c *cancelCtx) Done() <-chan struct{} {
-	return c.done
-}
-
-func (c *cancelCtx) Err() error {
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	return c.err
-}
-
-func (c *cancelCtx) String() string {
-	return fmt.Sprintf("%v.WithCancel", c.Context)
-}
-
-// cancel closes c.done, cancels each of c's children, and, if
-// removeFromParent is true, removes c from its parent's children.
-func (c *cancelCtx) cancel(removeFromParent bool, err error) {
-	if err == nil {
-		panic("context: internal error: missing cancel error")
-	}
-	c.mu.Lock()
-	if c.err != nil {
-		c.mu.Unlock()
-		return // already canceled
-	}
-	c.err = err
-	close(c.done)
-	for child := range c.children {
-		// NOTE: acquiring the child's lock while holding parent's lock.
-		child.cancel(false, err)
-	}
-	c.children = nil
-	c.mu.Unlock()
-
-	if removeFromParent {
-		removeChild(c.Context, c)
-	}
-}
-
-// WithDeadline returns a copy of the parent context with the deadline adjusted
-// to be no later than d. If the parent's deadline is already earlier than d,
-// WithDeadline(parent, d) is semantically equivalent to parent. The returned
-// context's Done channel is closed when the deadline expires, when the returned
-// cancel function is called, or when the parent context's Done channel is
-// closed, whichever happens first.
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete.
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
-	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
-		// The current deadline is already sooner than the new one.
-		return WithCancel(parent)
-	}
-	c := &timerCtx{
-		cancelCtx: newCancelCtx(parent),
-		deadline:  deadline,
-	}
-	propagateCancel(parent, c)
-	d := deadline.Sub(time.Now())
-	if d <= 0 {
-		c.cancel(true, DeadlineExceeded) // deadline has already passed
-		return c, func() { c.cancel(true, Canceled) }
-	}
-	c.mu.Lock()
-	defer c.mu.Unlock()
-	if c.err == nil {
-		c.timer = time.AfterFunc(d, func() {
-			c.cancel(true, DeadlineExceeded)
-		})
-	}
-	return c, func() { c.cancel(true, Canceled) }
-}
-
-// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
-// implement Done and Err. It implements cancel by stopping its timer then
-// delegating to cancelCtx.cancel.
-type timerCtx struct {
-	*cancelCtx
-	timer *time.Timer // Under cancelCtx.mu.
-
-	deadline time.Time
-}
-
-func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
-	return c.deadline, true
-}
-
-func (c *timerCtx) String() string {
-	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
-}
-
-func (c *timerCtx) cancel(removeFromParent bool, err error) {
-	c.cancelCtx.cancel(false, err)
-	if removeFromParent {
-		// Remove this timerCtx from its parent cancelCtx's children.
-		removeChild(c.cancelCtx.Context, c)
-	}
-	c.mu.Lock()
-	if c.timer != nil {
-		c.timer.Stop()
-		c.timer = nil
-	}
-	c.mu.Unlock()
-}
-
-// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
-//
-// Canceling this context releases resources associated with it, so code should
-// call cancel as soon as the operations running in this Context complete:
-//
-//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
-//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
-//		defer cancel()  // releases resources if slowOperation completes before timeout elapses
-//		return slowOperation(ctx)
-//	}
-func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
-	return WithDeadline(parent, time.Now().Add(timeout))
-}
-
-// WithValue returns a copy of parent in which the value associated with key is
-// val.
-//
-// Use context Values only for request-scoped data that transits processes and
-// APIs, not for passing optional parameters to functions.
-func WithValue(parent Context, key interface{}, val interface{}) Context {
-	return &valueCtx{parent, key, val}
-}
-
-// A valueCtx carries a key-value pair. It implements Value for that key and
-// delegates all other calls to the embedded Context.
-type valueCtx struct {
-	Context
-	key, val interface{}
-}
-
-func (c *valueCtx) String() string {
-	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
-}
-
-func (c *valueCtx) Value(key interface{}) interface{} {
-	if c.key == key {
-		return c.val
-	}
-	return c.Context.Value(key)
-}
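
The deleted propagateCancel/parentCancelCtx machinery implements the same parent-to-child cancelation contract the standard library provides. A small demonstration of that contract, standard library only:

	package main

	import (
		"context"
		"fmt"
	)

	func main() {
		parent, cancelParent := context.WithCancel(context.Background())
		child, cancelChild := context.WithCancel(parent)
		defer cancelChild()

		cancelParent()           // closes parent.Done(), which propagates to the child
		<-child.Done()           // returns promptly: the child was canceled via its parent
		fmt.Println(child.Err()) // context.Canceled
	}
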
diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go
deleted file mode 100644
index ec5a638..0000000
--- a/vendor/golang.org/x/net/context/pre_go19.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.9
-
-package context
-
-import "time"
-
-// A Context carries a deadline, a cancelation signal, and other values across
-// API boundaries.
-//
-// Context's methods may be called by multiple goroutines simultaneously.
-type Context interface {
-	// Deadline returns the time when work done on behalf of this context
-	// should be canceled. Deadline returns ok==false when no deadline is
-	// set. Successive calls to Deadline return the same results.
-	Deadline() (deadline time.Time, ok bool)
-
-	// Done returns a channel that's closed when work done on behalf of this
-	// context should be canceled. Done may return nil if this context can
-	// never be canceled. Successive calls to Done return the same value.
-	//
-	// WithCancel arranges for Done to be closed when cancel is called;
-	// WithDeadline arranges for Done to be closed when the deadline
-	// expires; WithTimeout arranges for Done to be closed when the timeout
-	// elapses.
-	//
-	// Done is provided for use in select statements:
-	//
-	//  // Stream generates values with DoSomething and sends them to out
-	//  // until DoSomething returns an error or ctx.Done is closed.
-	//  func Stream(ctx context.Context, out chan<- Value) error {
-	//  	for {
-	//  		v, err := DoSomething(ctx)
-	//  		if err != nil {
-	//  			return err
-	//  		}
-	//  		select {
-	//  		case <-ctx.Done():
-	//  			return ctx.Err()
-	//  		case out <- v:
-	//  		}
-	//  	}
-	//  }
-	//
-	// See http://blog.golang.org/pipelines for more examples of how to use
-	// a Done channel for cancelation.
-	Done() <-chan struct{}
-
-	// Err returns a non-nil error value after Done is closed. Err returns
-	// Canceled if the context was canceled or DeadlineExceeded if the
-	// context's deadline passed. No other values for Err are defined.
-	// After Done is closed, successive calls to Err return the same value.
-	Err() error
-
-	// Value returns the value associated with this context for key, or nil
-	// if no value is associated with key. Successive calls to Value with
-	// the same key returns the same result.
-	//
-	// Use context values only for request-scoped data that transits
-	// processes and API boundaries, not for passing optional parameters to
-	// functions.
-	//
-	// A key identifies a specific value in a Context. Functions that wish
-	// to store values in Context typically allocate a key in a global
-	// variable then use that key as the argument to context.WithValue and
-	// Context.Value. A key can be any type that supports equality;
-	// packages should define keys as an unexported type to avoid
-	// collisions.
-	//
-	// Packages that define a Context key should provide type-safe accessors
-	// for the values stores using that key:
-	//
-	// 	// Package user defines a User type that's stored in Contexts.
-	// 	package user
-	//
-	// 	import "golang.org/x/net/context"
-	//
-	// 	// User is the type of value stored in the Contexts.
-	// 	type User struct {...}
-	//
-	// 	// key is an unexported type for keys defined in this package.
-	// 	// This prevents collisions with keys defined in other packages.
-	// 	type key int
-	//
-	// 	// userKey is the key for user.User values in Contexts. It is
-	// 	// unexported; clients use user.NewContext and user.FromContext
-	// 	// instead of using this key directly.
-	// 	var userKey key = 0
-	//
-	// 	// NewContext returns a new Context that carries value u.
-	// 	func NewContext(ctx context.Context, u *User) context.Context {
-	// 		return context.WithValue(ctx, userKey, u)
-	// 	}
-	//
-	// 	// FromContext returns the User value stored in ctx, if any.
-	// 	func FromContext(ctx context.Context) (*User, bool) {
-	// 		u, ok := ctx.Value(userKey).(*User)
-	// 		return u, ok
-	// 	}
-	Value(key interface{}) interface{}
-}
-
-// A CancelFunc tells an operation to abandon its work.
-// A CancelFunc does not wait for the work to stop.
-// After the first call, subsequent calls to a CancelFunc do nothing.
-type CancelFunc func()
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index de58dfb..8a7a89d 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@
 //   - If the resulting value is zero or out of range, use a default.
 type http2Config struct {
 	MaxConcurrentStreams         uint32
+	StrictMaxConcurrentRequests  bool
 	MaxDecoderHeaderTableSize    uint32
 	MaxEncoderHeaderTableSize    uint32
 	MaxReadFrameSize             uint32
@@ -55,21 +56,22 @@
 		PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
 		CountError:                   h2.CountError,
 	}
-	fillNetHTTPServerConfig(&conf, h1)
+	fillNetHTTPConfig(&conf, h1.HTTP2)
 	setConfigDefaults(&conf, true)
 	return conf
 }
 
-// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
 // (the net/http Transport).
 func configFromTransport(h2 *Transport) http2Config {
 	conf := http2Config{
-		MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
-		MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
-		MaxReadFrameSize:          h2.MaxReadFrameSize,
-		SendPingTimeout:           h2.ReadIdleTimeout,
-		PingTimeout:               h2.PingTimeout,
-		WriteByteTimeout:          h2.WriteByteTimeout,
+		StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+		MaxEncoderHeaderTableSize:   h2.MaxEncoderHeaderTableSize,
+		MaxDecoderHeaderTableSize:   h2.MaxDecoderHeaderTableSize,
+		MaxReadFrameSize:            h2.MaxReadFrameSize,
+		SendPingTimeout:             h2.ReadIdleTimeout,
+		PingTimeout:                 h2.PingTimeout,
+		WriteByteTimeout:            h2.WriteByteTimeout,
 	}
 
 	// Unlike most config fields, where out-of-range values revert to the default,
@@ -81,7 +83,7 @@
 	}
 
 	if h2.t1 != nil {
-		fillNetHTTPTransportConfig(&conf, h2.t1)
+		fillNetHTTPConfig(&conf, h2.t1.HTTP2)
 	}
 	setConfigDefaults(&conf, false)
 	return conf
@@ -120,3 +122,45 @@
 	const typicalHeaders = 10   // conservative
 	return n + typicalHeaders*perFieldOverhead
 }
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+	if h2 == nil {
+		return
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if http2ConfigStrictMaxConcurrentRequests(h2) {
+		conf.StrictMaxConcurrentRequests = true
+	}
+	if h2.MaxEncoderHeaderTableSize != 0 {
+		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+	}
+	if h2.MaxDecoderHeaderTableSize != 0 {
+		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+	}
+	if h2.MaxReadFrameSize != 0 {
+		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+	}
+	if h2.MaxReceiveBufferPerConnection != 0 {
+		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+	}
+	if h2.MaxReceiveBufferPerStream != 0 {
+		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+	}
+	if h2.SendPingTimeout != 0 {
+		conf.SendPingTimeout = h2.SendPingTimeout
+	}
+	if h2.PingTimeout != 0 {
+		conf.PingTimeout = h2.PingTimeout
+	}
+	if h2.WriteByteTimeout != 0 {
+		conf.WriteByteTimeout = h2.WriteByteTimeout
+	}
+	if h2.PermitProhibitedCipherSuites {
+		conf.PermitProhibitedCipherSuites = true
+	}
+	if h2.CountError != nil {
+		conf.CountError = h2.CountError
+	}
+}
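
fillNetHTTPConfig copies only non-zero fields, so an unset net/http knob falls through to the defaults chosen by setConfigDefaults. A minimal usage sketch, assuming Go 1.24+ where net/http gained the HTTP2Config plumbing this function consumes (cert/key paths are placeholders):

	package main

	import (
		"log"
		"net/http"
		"time"
	)

	func main() {
		srv := &http.Server{
			Addr: ":8443",
			HTTP2: &http.HTTP2Config{
				MaxConcurrentStreams: 250,              // overrides the default
				PingTimeout:          15 * time.Second, // zero fields keep defaults
			},
		}
		log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
	}
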
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index e378412..0000000
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
-	fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
-	fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
-	if h2 == nil {
-		return
-	}
-	if h2.MaxConcurrentStreams != 0 {
-		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
-	}
-	if h2.MaxEncoderHeaderTableSize != 0 {
-		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
-	}
-	if h2.MaxDecoderHeaderTableSize != 0 {
-		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
-	}
-	if h2.MaxConcurrentStreams != 0 {
-		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
-	}
-	if h2.MaxReadFrameSize != 0 {
-		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
-	}
-	if h2.MaxReceiveBufferPerConnection != 0 {
-		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
-	}
-	if h2.MaxReceiveBufferPerStream != 0 {
-		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
-	}
-	if h2.SendPingTimeout != 0 {
-		conf.SendPingTimeout = h2.SendPingTimeout
-	}
-	if h2.PingTimeout != 0 {
-		conf.PingTimeout = h2.PingTimeout
-	}
-	if h2.WriteByteTimeout != 0 {
-		conf.WriteByteTimeout = h2.WriteByteTimeout
-	}
-	if h2.PermitProhibitedCipherSuites {
-		conf.PermitProhibitedCipherSuites = true
-	}
-	if h2.CountError != nil {
-		conf.CountError = h2.CountError
-	}
-}
diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 0000000..b4373fe
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+	"net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+	return false
+}
diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 0000000..6b071c1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+	"net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+	return h2.StrictMaxConcurrentRequests
+}
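
The config_go125.go/config_go126.go pair is the usual build-tag shim for a standard-library field that only exists from a given release: the go1.26 file reads HTTP2Config.StrictMaxConcurrentRequests, the !go1.26 file reports the zero value. The general shape of the pattern, with hypothetical names:

	// file: feature_go126.go
	//go:build go1.26

	package mypkg

	import "net/http"

	func strictRequests(h2 *http.HTTP2Config) bool {
		return h2.StrictMaxConcurrentRequests // field exists from Go 1.26
	}

	// file: feature_pre_go126.go
	//go:build !go1.26

	package mypkg

	import "net/http"

	func strictRequests(h2 *http.HTTP2Config) bool {
		return false // field absent before Go 1.26; report its zero value
	}
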
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c..0000000
--- a/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 81faec7..93bcaab 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -39,7 +39,7 @@
 	FrameContinuation FrameType = 0x9
 )
 
-var frameName = map[FrameType]string{
+var frameNames = [...]string{
 	FrameData:         "DATA",
 	FrameHeaders:      "HEADERS",
 	FramePriority:     "PRIORITY",
@@ -53,10 +53,10 @@
 }
 
 func (t FrameType) String() string {
-	if s, ok := frameName[t]; ok {
-		return s
+	if int(t) < len(frameNames) {
+		return frameNames[t]
 	}
-	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+	return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t)
 }
 
 // Flags is a bitmask of HTTP/2 flags.
@@ -124,7 +124,7 @@
 // might be 0).
 type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
 
-var frameParsers = map[FrameType]frameParser{
+var frameParsers = [...]frameParser{
 	FrameData:         parseDataFrame,
 	FrameHeaders:      parseHeadersFrame,
 	FramePriority:     parsePriorityFrame,
@@ -138,8 +138,8 @@
 }
 
 func typeFrameParser(t FrameType) frameParser {
-	if f := frameParsers[t]; f != nil {
-		return f
+	if int(t) < len(frameParsers) {
+		return frameParsers[t]
 	}
 	return parseUnknownFrame
 }
@@ -225,6 +225,11 @@
 	},
 }
 
+func invalidHTTP1LookingFrameHeader() FrameHeader {
+	fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
+	return fh
+}
+
 // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
 // Most users should use Framer.ReadFrame instead.
 func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
@@ -342,7 +347,7 @@
 func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
 	// Write the FrameHeader.
 	f.wbuf = append(f.wbuf[:0],
-		0, // 3 bytes of length, filled in in endWrite
+		0, // 3 bytes of length, filled in by endWrite
 		0,
 		0,
 		byte(ftype),
@@ -503,10 +508,16 @@
 		return nil, err
 	}
 	if fh.Length > fr.maxReadSize {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+		}
 		return nil, ErrFrameTooLarge
 	}
 	payload := fr.getReadBuf(fh.Length)
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
+		if fh == invalidHTTP1LookingFrameHeader() {
+			return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+		}
 		return nil, err
 	}
 	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
@@ -1141,6 +1152,15 @@
 	PriorityParam
 }
 
+var defaultRFC9218Priority = PriorityParam{
+	incremental: 0,
+	urgency:     3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and the
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
 // PriorityParam are the stream prioritization parameters.
 type PriorityParam struct {
 	// StreamDep is a 31-bit stream identifier for the
@@ -1156,6 +1176,20 @@
 	// the spec, "Add one to the value to obtain a weight between
 	// 1 and 256."
 	Weight uint8
+
+	// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+	// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+	// priority. The default is 3."
+	urgency uint8
+
+	// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+	// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+	// incrementally, i.e., provide some meaningful output as chunks of the
+	// response arrive."
+	//
+	// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+	// avoid unnecessary type conversions and because either type takes 1 byte.
+	incremental uint8
 }
 
 func (p PriorityParam) IsZero() bool {
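
The invalidHTTP1LookingFrameHeader comparison works because an HTTP/2 frame header is exactly 9 bytes (24-bit length, type, flags, 31-bit stream ID) and the first 9 bytes of any HTTP/1.1 response are the ASCII "HTTP/1.1 ". A worked decoding of those bytes, standard library only:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	func main() {
		hdr := []byte("HTTP/1.1 ") // 9 bytes, the HTTP/2 frame header length
		length := uint32(hdr[0])<<16 | uint32(hdr[1])<<8 | uint32(hdr[2])
		ftype := hdr[3]
		flags := hdr[4]
		streamID := binary.BigEndian.Uint32(hdr[5:9]) &^ (1 << 31)
		fmt.Printf("length=%d type=0x%02x flags=0x%02x stream=%d\n",
			length, ftype, flags, streamID)
		// length=4740180 type=0x50 flags=0x2f stream=825110816
		// 4740180 bytes dwarfs the default 16384-byte frame size limit, so
		// ReadFrame hits ErrFrameTooLarge; matching the header against this
		// fixed value turns that into a "peer spoke HTTP/1.1" diagnostic.
	}
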
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f..9921ca0 100644
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@
 	"runtime"
 	"strconv"
 	"sync"
+	"sync/atomic"
 )
 
 var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
 
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running after
+// it returns. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
 type goroutineLock uint64
 
 func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return 0
 	}
 	return goroutineLock(curGoroutineID())
 }
 
 func (g goroutineLock) check() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@
 }
 
 func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() == uint64(g) {
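
The new atomic.Bool gate exists so tests can flip debugging off without a data race on the plain DebugGoroutines global. The gate pattern in isolation (a sketch with stand-in names, not the package's actual test helper):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var debugEnv = true // stands in for the DEBUG_HTTP2_GOROUTINES check
	var disabled atomic.Bool

	func checksEnabled() bool { return debugEnv && !disabled.Load() }

	func main() {
		fmt.Println(checksEnabled()) // true
		disabled.Store(true)         // atomic: safe with goroutines still running
		fmt.Println(checksEnabled()) // false
		disabled.Store(false)        // restore for the next test
	}
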
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
deleted file mode 100644
index 149b3dd..0000000
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
-	"net/http"
-	"sync"
-)
-
-var (
-	commonBuildOnce   sync.Once
-	commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
-	commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
-)
-
-func buildCommonHeaderMapsOnce() {
-	commonBuildOnce.Do(buildCommonHeaderMaps)
-}
-
-func buildCommonHeaderMaps() {
-	common := []string{
-		"accept",
-		"accept-charset",
-		"accept-encoding",
-		"accept-language",
-		"accept-ranges",
-		"age",
-		"access-control-allow-credentials",
-		"access-control-allow-headers",
-		"access-control-allow-methods",
-		"access-control-allow-origin",
-		"access-control-expose-headers",
-		"access-control-max-age",
-		"access-control-request-headers",
-		"access-control-request-method",
-		"allow",
-		"authorization",
-		"cache-control",
-		"content-disposition",
-		"content-encoding",
-		"content-language",
-		"content-length",
-		"content-location",
-		"content-range",
-		"content-type",
-		"cookie",
-		"date",
-		"etag",
-		"expect",
-		"expires",
-		"from",
-		"host",
-		"if-match",
-		"if-modified-since",
-		"if-none-match",
-		"if-unmodified-since",
-		"last-modified",
-		"link",
-		"location",
-		"max-forwards",
-		"origin",
-		"proxy-authenticate",
-		"proxy-authorization",
-		"range",
-		"referer",
-		"refresh",
-		"retry-after",
-		"server",
-		"set-cookie",
-		"strict-transport-security",
-		"trailer",
-		"transfer-encoding",
-		"user-agent",
-		"vary",
-		"via",
-		"www-authenticate",
-		"x-forwarded-for",
-		"x-forwarded-proto",
-	}
-	commonLowerHeader = make(map[string]string, len(common))
-	commonCanonHeader = make(map[string]string, len(common))
-	for _, v := range common {
-		chk := http.CanonicalHeaderKey(v)
-		commonLowerHeader[chk] = v
-		commonCanonHeader[v] = chk
-	}
-}
-
-func lowerHeader(v string) (lower string, ascii bool) {
-	buildCommonHeaderMapsOnce()
-	if s, ok := commonLowerHeader[v]; ok {
-		return s, true
-	}
-	return asciiToLower(v)
-}
-
-func canonicalHeader(v string) string {
-	buildCommonHeaderMapsOnce()
-	if s, ok := commonCanonHeader[v]; ok {
-		return s
-	}
-	return http.CanonicalHeaderKey(v)
-}
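
The cached common-header table now lives in golang.org/x/net/internal/httpcommon (see the httpcommon.CachedCanonicalHeader call in the server.go hunk below). The mapping it caches is plain MIME canonicalization, for example:

	package main

	import (
		"fmt"
		"net/http"
	)

	func main() {
		fmt.Println(http.CanonicalHeaderKey("content-type"))     // Content-Type
		fmt.Println(http.CanonicalHeaderKey("etag"))             // Etag
		fmt.Println(http.CanonicalHeaderKey("WWW-AUTHENTICATE")) // Www-Authenticate
	}
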
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index c7601c9..105fe12 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -11,13 +11,10 @@
 // requires Go 1.6 or later)
 //
 // See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
 package http2 // import "golang.org/x/net/http2"
 
 import (
 	"bufio"
-	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
@@ -34,11 +31,18 @@
 )
 
 var (
-	VerboseLogs                    bool
-	logFrameWrites                 bool
-	logFrameReads                  bool
-	inTests                        bool
-	disableExtendedConnectProtocol bool
+	VerboseLogs    bool
+	logFrameWrites bool
+	logFrameReads  bool
+
+	// Enabling extended CONNECT by default causes browsers to attempt to use
+	// WebSockets-over-HTTP/2. This results in problems when the server's websocket
+	// package doesn't support extended CONNECT.
+	//
+	// Disable extended CONNECT by default for now.
+	//
+	// Issue #71128.
+	disableExtendedConnectProtocol = true
 )
 
 func init() {
@@ -51,8 +55,8 @@
 		logFrameWrites = true
 		logFrameReads = true
 	}
-	if strings.Contains(e, "http2xconnect=0") {
-		disableExtendedConnectProtocol = true
+	if strings.Contains(e, "http2xconnect=1") {
+		disableExtendedConnectProtocol = false
 	}
 }
 
@@ -249,15 +253,13 @@
 // idle memory usage with many connections.
 type bufferedWriter struct {
 	_           incomparable
-	group       synctestGroupInterface // immutable
-	conn        net.Conn               // immutable
-	bw          *bufio.Writer          // non-nil when data is buffered
-	byteTimeout time.Duration          // immutable, WriteByteTimeout
+	conn        net.Conn      // immutable
+	bw          *bufio.Writer // non-nil when data is buffered
+	byteTimeout time.Duration // immutable, WriteByteTimeout
 }
 
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
 	return &bufferedWriter{
-		group:       group,
 		conn:        conn,
 		byteTimeout: timeout,
 	}
@@ -308,24 +310,18 @@
 type bufferedWriterTimeoutWriter bufferedWriter
 
 func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
-	return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+	return writeWithByteTimeout(w.conn, w.byteTimeout, p)
 }
 
 // writeWithByteTimeout writes to conn.
 // If more than timeout passes without any bytes being written to the connection,
 // the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
 	if timeout <= 0 {
 		return conn.Write(p)
 	}
 	for {
-		var now time.Time
-		if group == nil {
-			now = time.Now()
-		} else {
-			now = group.Now()
-		}
-		conn.SetWriteDeadline(now.Add(timeout))
+		conn.SetWriteDeadline(time.Now().Add(timeout))
 		nn, err := conn.Write(p[n:])
 		n += nn
 		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -407,35 +403,7 @@
 	s.v = save
 }
 
-// validPseudoPath reports whether v is a valid :path pseudo-header
-// value. It must be either:
-//
-//   - a non-empty string starting with '/'
-//   - the string '*', for OPTIONS requests.
-//
-// For now this is only used a quick check for deciding when to clean
-// up Opaque URLs before sending requests from the Transport.
-// See golang.org/issue/16847
-//
-// We used to enforce that the path also didn't start with "//", but
-// Google's GFE accepts such paths and Chrome sends them, so ignore
-// that part of the spec. See golang.org/issue/19103.
-func validPseudoPath(v string) bool {
-	return (len(v) > 0 && v[0] == '/') || v == "*"
-}
-
 // incomparable is a zero-width, non-comparable type. Adding it to a struct
 // makes that struct also non-comparable, and generally doesn't add
 // any size (as long as it's first).
 type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
-	Join()
-	Now() time.Time
-	NewTimer(d time.Duration) timer
-	AfterFunc(d time.Duration, f func()) timer
-	ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
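
writeWithByteTimeout extends the write deadline only while each Write call makes progress, so a wedged peer fails after one quiet timeout rather than one timeout per buffer. A test-style sketch of that contract (in-package, since the helper is unexported; net.Pipe has supported deadlines since Go 1.10):

	package http2

	import (
		"bytes"
		"io"
		"net"
		"testing"
		"time"
	)

	func TestWriteWithByteTimeoutSketch(t *testing.T) {
		c1, c2 := net.Pipe()
		defer c1.Close()
		defer c2.Close()
		go io.Copy(io.Discard, c2) // live reader: every Write makes progress

		p := bytes.Repeat([]byte("x"), 1<<20)
		n, err := writeWithByteTimeout(c1, 50*time.Millisecond, p)
		if err != nil || n != len(p) {
			t.Fatalf("n=%d err=%v", n, err)
		}
		// With the reader stalled past 50ms, a Write makes no progress before
		// its deadline and the loop returns os.ErrDeadlineExceeded instead.
	}
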
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b55547a..bdc5520 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -50,6 +50,7 @@
 
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/internal/httpcommon"
 )
 
 const (
@@ -175,44 +176,15 @@
 	// so that we don't embed a Mutex in this struct, which will make the
 	// struct non-copyable, which might break some callers.
 	state *serverInternalState
-
-	// Synchronization group used for testing.
-	// Outside of tests, this is nil.
-	group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
-	if s.group != nil {
-		s.group.Join()
-	}
-}
-
-func (s *Server) now() time.Time {
-	if s.group != nil {
-		return s.group.Now()
-	}
-	return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
-	if s.group != nil {
-		return s.group.NewTimer(d)
-	}
-	return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
-	if s.group != nil {
-		return s.group.AfterFunc(d, f)
-	}
-	return timeTimer{time.AfterFunc(d, f)}
 }
 
 type serverInternalState struct {
 	mu          sync.Mutex
 	activeConns map[*serverConn]struct{}
+
+	// Pool of error channels. This is per-Server rather than global
+	// because channels can't be reused across synctest bubbles.
+	errChanPool sync.Pool
 }
 
 func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -244,6 +216,27 @@
 	s.mu.Unlock()
 }
 
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+	New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+	if s == nil {
+		return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+	}
+	return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+	if s == nil {
+		errChanPool.Put(ch) // Server used without calling ConfigureServer
+		return
+	}
+	s.errChanPool.Put(ch)
+}
+
 // ConfigureServer adds HTTP/2 support to a net/http Server.
 //
 // The configuration conf may be nil.
@@ -256,7 +249,10 @@
 	if conf == nil {
 		conf = new(Server)
 	}
-	conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+	conf.state = &serverInternalState{
+		activeConns: make(map[*serverConn]struct{}),
+		errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+	}
 	if h1, h2 := s, conf; h2.IdleTimeout == 0 {
 		if h1.IdleTimeout != 0 {
 			h2.IdleTimeout = h1.IdleTimeout
@@ -422,6 +418,9 @@
 //
 // The opts parameter is optional. If nil, default values are used.
 func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+	if opts == nil {
+		opts = &ServeConnOpts{}
+	}
 	s.serveConn(c, opts, nil)
 }
 
@@ -437,7 +436,7 @@
 		conn:                        c,
 		baseCtx:                     baseCtx,
 		remoteAddrStr:               c.RemoteAddr().String(),
-		bw:                          newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+		bw:                          newBufferedWriter(c, conf.WriteByteTimeout),
 		handler:                     opts.handler(),
 		streams:                     make(map[uint32]*stream),
 		readFrameCh:                 make(chan readFrameResult),
@@ -637,11 +636,11 @@
 	pingSent                    bool
 	sentPingData                [8]byte
 	goAwayCode                  ErrCode
-	shutdownTimer               timer // nil until used
-	idleTimer                   timer // nil if unused
+	shutdownTimer               *time.Timer // nil until used
+	idleTimer                   *time.Timer // nil if unused
 	readIdleTimeout             time.Duration
 	pingTimeout                 time.Duration
-	readIdleTimer               timer // nil if unused
+	readIdleTimer               *time.Timer // nil if unused
 
 	// Owned by the writeFrameAsync goroutine:
 	headerWriteBuf bytes.Buffer
@@ -686,12 +685,12 @@
 	flow             outflow // limits writing from Handler to client
 	inflow           inflow  // what the client is allowed to POST/etc to us
 	state            streamState
-	resetQueued      bool  // RST_STREAM queued for write; set by sc.resetStream
-	gotTrailerHeader bool  // HEADER frame for trailers was seen
-	wroteHeaders     bool  // whether we wrote headers (not status 100)
-	readDeadline     timer // nil if unused
-	writeDeadline    timer // nil if unused
-	closeErr         error // set before cw is closed
+	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
+	gotTrailerHeader bool        // HEADER frame for trailers was seen
+	wroteHeaders     bool        // whether we wrote headers (not status 100)
+	readDeadline     *time.Timer // nil if unused
+	writeDeadline    *time.Timer // nil if unused
+	closeErr         error       // set before cw is closed
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
@@ -812,8 +811,7 @@
 
 func (sc *serverConn) canonicalHeader(v string) string {
 	sc.serveG.check()
-	buildCommonHeaderMapsOnce()
-	cv, ok := commonCanonHeader[v]
+	cv, ok := httpcommon.CachedCanonicalHeader(v)
 	if ok {
 		return cv
 	}
@@ -848,7 +846,6 @@
 // consumer is done with the frame.
 // It's run on its own goroutine.
 func (sc *serverConn) readFrames() {
-	sc.srv.markNewGoroutine()
 	gate := make(chan struct{})
 	gateDone := func() { gate <- struct{}{} }
 	for {
@@ -881,7 +878,6 @@
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
 func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
-	sc.srv.markNewGoroutine()
 	var err error
 	if wd == nil {
 		err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@
 	sc.setConnState(http.StateIdle)
 
 	if sc.srv.IdleTimeout > 0 {
-		sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
 		defer sc.idleTimer.Stop()
 	}
 
 	if conf.SendPingTimeout > 0 {
 		sc.readIdleTimeout = conf.SendPingTimeout
-		sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+		sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
 		defer sc.readIdleTimer.Stop()
 	}
 
 	go sc.readFrames() // closed by defer sc.conn.Close above
 
-	settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
 	defer settingsTimer.Stop()
 
-	lastFrameTime := sc.srv.now()
+	lastFrameTime := time.Now()
 	loopNum := 0
 	for {
 		loopNum++
@@ -994,7 +990,7 @@
 		case res := <-sc.wroteFrameCh:
 			sc.wroteFrame(res)
 		case res := <-sc.readFrameCh:
-			lastFrameTime = sc.srv.now()
+			lastFrameTime = time.Now()
 			// Process any written frames before reading new frames from the client since a
 			// written frame could have triggered a new stream to be started.
 			if sc.writingFrameAsync {
@@ -1068,13 +1064,16 @@
 
 func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
 	if sc.pingSent {
-		sc.vlogf("timeout waiting for PING response")
+		sc.logf("timeout waiting for PING response")
+		if f := sc.countErrorFunc; f != nil {
+			f("conn_close_lost_ping")
+		}
 		sc.conn.Close()
 		return
 	}
 
 	pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
-	now := sc.srv.now()
+	now := time.Now()
 	if pingAt.After(now) {
 		// We received frames since arming the ping timer.
 		// Reset it for the next possible timeout.
@@ -1138,10 +1137,10 @@
 			errc <- nil
 		}
 	}()
-	timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
 	defer timer.Stop()
 	select {
-	case <-timer.C():
+	case <-timer.C:
 		return errPrefaceTimeout
 	case err := <-errc:
 		if err == nil {
@@ -1153,10 +1152,6 @@
 	}
 }
 
-var errChanPool = sync.Pool{
-	New: func() interface{} { return make(chan error, 1) },
-}
-
 var writeDataPool = sync.Pool{
 	New: func() interface{} { return new(writeData) },
 }
@@ -1164,7 +1159,7 @@
 // writeDataFromHandler writes DATA response frames from a handler on
 // the given stream.
 func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
-	ch := errChanPool.Get().(chan error)
+	ch := sc.srv.state.getErrChan()
 	writeArg := writeDataPool.Get().(*writeData)
 	*writeArg = writeData{stream.id, data, endStream}
 	err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1196,7 +1191,7 @@
 			return errStreamClosed
 		}
 	}
-	errChanPool.Put(ch)
+	sc.srv.state.putErrChan(ch)
 	if frameWriteDone {
 		writeDataPool.Put(writeArg)
 	}
@@ -1510,7 +1505,7 @@
 
 func (sc *serverConn) shutDownIn(d time.Duration) {
 	sc.serveG.check()
-	sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
 }
 
 func (sc *serverConn) resetStream(se StreamError) {
@@ -2115,7 +2110,7 @@
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout > 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
 	return sc.scheduleHandler(id, rw, req, handler)
@@ -2213,7 +2208,7 @@
 	st.flow.add(sc.initialStreamSendWindowSize)
 	st.inflow.init(sc.initialStreamRecvWindowSize)
 	if sc.hs.WriteTimeout > 0 {
-		st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
 
 	sc.streams[id] = st
@@ -2233,25 +2228,25 @@
 func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
 	sc.serveG.check()
 
-	rp := requestParam{
-		method:    f.PseudoValue("method"),
-		scheme:    f.PseudoValue("scheme"),
-		authority: f.PseudoValue("authority"),
-		path:      f.PseudoValue("path"),
-		protocol:  f.PseudoValue("protocol"),
+	rp := httpcommon.ServerRequestParam{
+		Method:    f.PseudoValue("method"),
+		Scheme:    f.PseudoValue("scheme"),
+		Authority: f.PseudoValue("authority"),
+		Path:      f.PseudoValue("path"),
+		Protocol:  f.PseudoValue("protocol"),
 	}
 
 	// extended connect is disabled, so we should not see :protocol
-	if disableExtendedConnectProtocol && rp.protocol != "" {
+	if disableExtendedConnectProtocol && rp.Protocol != "" {
 		return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
-	isConnect := rp.method == "CONNECT"
+	isConnect := rp.Method == "CONNECT"
 	if isConnect {
-		if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
+		if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
 			return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
 		}
-	} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+	} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
 		// See 8.1.2.6 Malformed Requests and Responses:
 		//
 		// Malformed requests or responses that are detected
@@ -2265,15 +2260,16 @@
 		return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
 	}
 
-	rp.header = make(http.Header)
+	header := make(http.Header)
+	rp.Header = header
 	for _, hf := range f.RegularFields() {
-		rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+		header.Add(sc.canonicalHeader(hf.Name), hf.Value)
 	}
-	if rp.authority == "" {
-		rp.authority = rp.header.Get("Host")
+	if rp.Authority == "" {
+		rp.Authority = header.Get("Host")
 	}
-	if rp.protocol != "" {
-		rp.header.Set(":protocol", rp.protocol)
+	if rp.Protocol != "" {
+		header.Set(":protocol", rp.Protocol)
 	}
 
 	rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
@@ -2282,7 +2278,7 @@
 	}
 	bodyOpen := !f.StreamEnded()
 	if bodyOpen {
-		if vv, ok := rp.header["Content-Length"]; ok {
+		if vv, ok := rp.Header["Content-Length"]; ok {
 			if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
 				req.ContentLength = int64(cl)
 			} else {
@@ -2298,84 +2294,38 @@
 	return rw, req, nil
 }
 
-type requestParam struct {
-	method                  string
-	scheme, authority, path string
-	protocol                string
-	header                  http.Header
-}
-
-func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
 	sc.serveG.check()
 
 	var tlsState *tls.ConnectionState // nil if not scheme https
-	if rp.scheme == "https" {
+	if rp.Scheme == "https" {
 		tlsState = sc.tlsState
 	}
 
-	needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
-	if needsContinue {
-		rp.header.Del("Expect")
-	}
-	// Merge Cookie headers into one "; "-delimited value.
-	if cookies := rp.header["Cookie"]; len(cookies) > 1 {
-		rp.header.Set("Cookie", strings.Join(cookies, "; "))
-	}
-
-	// Setup Trailers
-	var trailer http.Header
-	for _, v := range rp.header["Trailer"] {
-		for _, key := range strings.Split(v, ",") {
-			key = http.CanonicalHeaderKey(textproto.TrimString(key))
-			switch key {
-			case "Transfer-Encoding", "Trailer", "Content-Length":
-				// Bogus. (copy of http1 rules)
-				// Ignore.
-			default:
-				if trailer == nil {
-					trailer = make(http.Header)
-				}
-				trailer[key] = nil
-			}
-		}
-	}
-	delete(rp.header, "Trailer")
-
-	var url_ *url.URL
-	var requestURI string
-	if rp.method == "CONNECT" && rp.protocol == "" {
-		url_ = &url.URL{Host: rp.authority}
-		requestURI = rp.authority // mimic HTTP/1 server behavior
-	} else {
-		var err error
-		url_, err = url.ParseRequestURI(rp.path)
-		if err != nil {
-			return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
-		}
-		requestURI = rp.path
+	res := httpcommon.NewServerRequest(rp)
+	if res.InvalidReason != "" {
+		return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
 	}
 
 	body := &requestBody{
 		conn:          sc,
 		stream:        st,
-		needsContinue: needsContinue,
+		needsContinue: res.NeedsContinue,
 	}
-	req := &http.Request{
-		Method:     rp.method,
-		URL:        url_,
+	req := (&http.Request{
+		Method:     rp.Method,
+		URL:        res.URL,
 		RemoteAddr: sc.remoteAddrStr,
-		Header:     rp.header,
-		RequestURI: requestURI,
+		Header:     rp.Header,
+		RequestURI: res.RequestURI,
 		Proto:      "HTTP/2.0",
 		ProtoMajor: 2,
 		ProtoMinor: 0,
 		TLS:        tlsState,
-		Host:       rp.authority,
+		Host:       rp.Authority,
 		Body:       body,
-		Trailer:    trailer,
-	}
-	req = req.WithContext(st.ctx)
-
+		Trailer:    res.Trailer,
+	}).WithContext(st.ctx)
 	rw := sc.newResponseWriter(st, req)
 	return rw, req, nil
 }
@@ -2447,7 +2397,6 @@
 
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
-	sc.srv.markNewGoroutine()
 	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
@@ -2496,7 +2445,7 @@
 		// waiting for this frame to be written, so an http.Flush mid-handler
 		// writes out the correct value of keys, before a handler later potentially
 		// mutates it.
-		errc = errChanPool.Get().(chan error)
+		errc = sc.srv.state.getErrChan()
 	}
 	if err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:  headerData,
@@ -2508,7 +2457,7 @@
 	if errc != nil {
 		select {
 		case err := <-errc:
-			errChanPool.Put(errc)
+			sc.srv.state.putErrChan(errc)
 			return err
 		case <-sc.doneServing:
 			return errClientDisconnected
@@ -2615,7 +2564,7 @@
 	if err == io.EOF {
 		b.sawEOF = true
 	}
-	if b.conn == nil && inTests {
+	if b.conn == nil {
 		return
 	}
 	b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2744,7 +2693,7 @@
 		var date string
 		if _, ok := rws.snapHeader["Date"]; !ok {
 			// TODO(bradfitz): be faster here, like net/http? measure.
-			date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+			date = time.Now().UTC().Format(http.TimeFormat)
 		}
 
 		for _, v := range rws.snapHeader["Trailer"] {
@@ -2866,7 +2815,7 @@
 
 func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
 	st := w.rws.stream
-	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
 		// If we're setting a deadline in the past, reset the stream immediately
 		// so writes after SetWriteDeadline returns will fail.
 		st.onReadTimeout()
@@ -2882,9 +2831,9 @@
 		if deadline.IsZero() {
 			st.readDeadline = nil
 		} else if st.readDeadline == nil {
-			st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
 		} else {
-			st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+			st.readDeadline.Reset(deadline.Sub(time.Now()))
 		}
 	})
 	return nil
@@ -2892,7 +2841,7 @@
 
 func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
 	st := w.rws.stream
-	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
 		// If we're setting a deadline in the past, reset the stream immediately
 		// so writes after SetWriteDeadline returns will fail.
 		st.onWriteTimeout()
@@ -2908,9 +2857,9 @@
 		if deadline.IsZero() {
 			st.writeDeadline = nil
 		} else if st.writeDeadline == nil {
-			st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
 		} else {
-			st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+			st.writeDeadline.Reset(deadline.Sub(time.Now()))
 		}
 	})
 	return nil
@@ -3189,7 +3138,7 @@
 		method: opts.Method,
 		url:    u,
 		header: cloneHeader(opts.Header),
-		done:   errChanPool.Get().(chan error),
+		done:   sc.srv.state.getErrChan(),
 	}
 
 	select {
@@ -3206,7 +3155,7 @@
 	case <-st.cw:
 		return errStreamClosed
 	case err := <-msg.done:
-		errChanPool.Put(msg.done)
+		sc.srv.state.putErrChan(msg.done)
 		return err
 	}
 }
@@ -3270,12 +3219,12 @@
 		// we start in "half closed (remote)" for simplicity.
 		// See further comments at the definition of stateHalfClosedRemote.
 		promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
-		rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
-			method:    msg.method,
-			scheme:    msg.url.Scheme,
-			authority: msg.url.Host,
-			path:      msg.url.RequestURI(),
-			header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+		rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
+			Method:    msg.method,
+			Scheme:    msg.url.Scheme,
+			Authority: msg.url.Host,
+			Path:      msg.url.RequestURI(),
+			Header:    cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
 		})
 		if err != nil {
 			// Should not happen, since we've already validated msg.url.
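
The error-channel pool becomes per-Server because, as the comment above notes, a channel created inside one synctest bubble cannot be reused from another; scoping the pool to the Server keeps recycled channels inside a single bubble. The recycling pattern on its own (a sketch with hypothetical names):

	package main

	import (
		"errors"
		"fmt"
		"sync"
	)

	var errChans = sync.Pool{New: func() any { return make(chan error, 1) }}

	func main() {
		ch := errChans.Get().(chan error)
		go func() { ch <- errors.New("frame written") }() // exactly one buffered send
		fmt.Println(<-ch)                                 // drain before recycling
		errChans.Put(ch)                                  // safe: channel is empty again
	}
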
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b..0000000
--- a/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
-	C() <-chan time.Time
-	Reset(d time.Duration) bool
-	Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
-	*time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
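
With the synthetic-timer indirection gone, call sites use time.Timer and time.AfterFunc directly; compare the tm.C() method calls rewritten to the plain tm.C channel field in the server.go hunks above. The direct form, for reference:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		done := make(chan struct{})
		tm := time.NewTimer(10 * time.Millisecond)
		defer tm.Stop()
		select {
		case <-tm.C: // a real channel field again, not the old C() method
			fmt.Println("timed out")
		case <-done:
		}
	}
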
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 090d0e1..be759b6 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@
 	"net/http"
 	"net/http/httptrace"
 	"net/textproto"
-	"sort"
 	"strconv"
 	"strings"
 	"sync"
@@ -35,6 +34,7 @@
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
 	"golang.org/x/net/idna"
+	"golang.org/x/net/internal/httpcommon"
 )
 
 const (
@@ -193,50 +193,6 @@
 
 type transportTestHooks struct {
 	newclientconn func(*ClientConn)
-	group         synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
-	if t != nil && t.transportTestHooks != nil {
-		t.transportTestHooks.group.Join()
-	}
-}
-
-func (t *Transport) now() time.Time {
-	if t != nil && t.transportTestHooks != nil {
-		return t.transportTestHooks.group.Now()
-	}
-	return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
-	if t != nil && t.transportTestHooks != nil {
-		return t.now().Sub(when)
-	}
-	return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.NewTimer(d)
-	}
-	return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.AfterFunc(d, f)
-	}
-	return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
-	}
-	return context.WithTimeout(ctx, d)
 }
 
 func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +322,7 @@
 	readerErr  error         // set before readerDone is closed
 
 	idleTimeout time.Duration // or 0 for never
-	idleTimer   timer
+	idleTimer   *time.Timer
 
 	mu               sync.Mutex // guards following
 	cond             *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -375,6 +331,7 @@
 	doNotReuse       bool       // whether conn is marked to not be reused for any future requests
 	closing          bool
 	closed           bool
+	closedOnIdle     bool                     // true if conn was closed for idleness
 	seenSettings     bool                     // true if we've seen a settings frame, false otherwise
 	seenSettingsChan chan struct{}            // closed when seenSettings is true or frame reading fails
 	wantSettingsAck  bool                     // we sent a SETTINGS frame and haven't heard back
@@ -398,6 +355,7 @@
 	readIdleTimeout             time.Duration
 	pingTimeout                 time.Duration
 	extendedConnectAllowed      bool
+	strictMaxConcurrentStreams  bool
 
 	// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
 	// gRPC strictly limits the number of PING frames that it will receive.
@@ -533,14 +491,12 @@
 	cs.reqBodyClosed = make(chan struct{})
 	reqBodyClosed := cs.reqBodyClosed
 	go func() {
-		cs.cc.t.markNewGoroutine()
 		cs.reqBody.Close()
 		close(reqBodyClosed)
 	}()
 }
 
 type stickyErrWriter struct {
-	group   synctestGroupInterface
 	conn    net.Conn
 	timeout time.Duration
 	err     *error
@@ -550,7 +506,7 @@
 	if *sew.err != nil {
 		return 0, *sew.err
 	}
-	n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+	n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
 	*sew.err = err
 	return n, err
 }
@@ -649,9 +605,9 @@
 				backoff := float64(uint(1) << (uint(retry) - 1))
 				backoff += backoff * (0.1 * mathrand.Float64())
 				d := time.Second * time.Duration(backoff)
-				tm := t.newTimer(d)
+				tm := time.NewTimer(d)
 				select {
-				case <-tm.C():
+				case <-tm.C:
 					t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
 					continue
 				case <-req.Context().Done():
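
For reference, the backoff computed in this hunk doubles per retry: retry=1 waits 1s, retry=2 2s, retry=3 4s, and so on, each nominally inflated by up to 10% jitter. Note that time.Duration(backoff) truncates the float toward zero, so sub-second jitter is mostly lost at small retry counts; a worked check:

	package main

	import (
		"fmt"
		mathrand "math/rand"
		"time"
	)

	func main() {
		for retry := uint(1); retry <= 4; retry++ {
			backoff := float64(uint(1) << (retry - 1))
			backoff += backoff * (0.1 * mathrand.Float64())
			fmt.Println(time.Second * time.Duration(backoff)) // 1s 2s 4s 8s (jitter truncated)
		}
	}
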
@@ -698,6 +654,7 @@
 	errClientConnUnusable       = errors.New("http2: client conn not usable")
 	errClientConnNotEstablished = errors.New("http2: client conn could not be established")
 	errClientConnGotGoAway      = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+	errClientConnForceClosed    = errors.New("http2: client connection force closed via ClientConn.Close")
 )
 
 // shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -828,7 +785,8 @@
 		initialWindowSize:           65535,    // spec default
 		initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
 		maxConcurrentStreams:        initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
-		peerMaxHeaderListSize:       0xffffffffffffffff,          // "infinite", per spec. Use 2^64-1 instead.
+		strictMaxConcurrentStreams:  conf.StrictMaxConcurrentRequests,
+		peerMaxHeaderListSize:       0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
 		streams:                     make(map[uint32]*clientStream),
 		singleUse:                   singleUse,
 		seenSettingsChan:            make(chan struct{}),
@@ -837,14 +795,11 @@
 		pingTimeout:                 conf.PingTimeout,
 		pings:                       make(map[[8]byte]chan struct{}),
 		reqHeaderMu:                 make(chan struct{}, 1),
-		lastActive:                  t.now(),
+		lastActive:                  time.Now(),
 	}
-	var group synctestGroupInterface
 	if t.transportTestHooks != nil {
-		t.markNewGoroutine()
 		t.transportTestHooks.newclientconn(cc)
 		c = cc.tconn
-		group = t.group
 	}
 	if VerboseLogs {
 		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -856,7 +811,6 @@
 	// TODO: adjust this writer size to account for frame size +
 	// MTU + crypto/tls record padding.
 	cc.bw = bufio.NewWriter(stickyErrWriter{
-		group:   group,
 		conn:    c,
 		timeout: conf.WriteByteTimeout,
 		err:     &cc.werr,
@@ -905,7 +859,7 @@
 	// Start the idle timer after the connection is fully initialized.
 	if d := t.idleConnTimeout(); d != 0 {
 		cc.idleTimeout = d
-		cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
 	}
 
 	go cc.readLoop()
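
The idle timer armed above is now a plain time.AfterFunc, with the synthetic test timers gone; as the forgetStreamID hunk further down shows, it is Reset whenever the stream count drops back to zero, so the connection is reaped only after a full idle period with no streams. A small sketch of that arm-and-reset pattern (timings are arbitrary):

package main

import (
	"fmt"
	"time"
)

func main() {
	idleTimeout := 50 * time.Millisecond
	done := make(chan struct{})
	idleTimer := time.AfterFunc(idleTimeout, func() {
		fmt.Println("idle timeout: closing conn")
		close(done)
	})
	defer idleTimer.Stop()

	// Simulate activity: each completed request pushes the deadline out,
	// the way forgetStreamID resets cc.idleTimer when streams reach zero.
	for i := 0; i < 3; i++ {
		time.Sleep(30 * time.Millisecond) // activity before the deadline
		idleTimer.Reset(idleTimeout)
	}
	<-done // no further activity; the timer fires ~50ms after the last Reset
}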
@@ -916,7 +870,7 @@
 	pingTimeout := cc.pingTimeout
 	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
 	// trigger the healthCheck again if there is no frame received.
-	ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
 	defer cancel()
 	cc.vlogf("http2: Transport sending health check")
 	err := cc.Ping(ctx)
@@ -1066,7 +1020,7 @@
 		return
 	}
 	var maxConcurrentOkay bool
-	if cc.t.StrictMaxConcurrentStreams {
+	if cc.strictMaxConcurrentStreams {
 		// We'll tell the caller we can take a new request to
 		// prevent the caller from dialing a new TCP
 		// connection, but then we'll block later before
@@ -1089,10 +1043,12 @@
 
 	// If this connection has never been used for a request and is closed,
 	// then let it take a request (which will fail).
+	// If the conn was closed for idleness, we're racing the idle timer;
+	// don't try to use the conn. (Issue #70515.)
 	//
 	// This avoids a situation where an error early in a connection's lifetime
 	// goes unreported.
-	if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
+	if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
 		st.canTakeNewRequest = true
 	}
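
The new closedOnIdle flag records why a never-used connection was closed: an early connection error should still be surfaced to one caller, but a connection the idle timer closed should simply be skipped rather than raced. A minimal sketch of that distinction, with hypothetical types:

package main

import (
	"fmt"
	"sync"
	"time"
)

type conn struct {
	mu           sync.Mutex
	closed       bool
	closedOnIdle bool
}

func (c *conn) onIdleTimeout() {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.closed = true
	c.closedOnIdle = true // closed for idleness, not because it broke
}

func (c *conn) canTakeNewRequest() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	// A closed-but-never-used conn may take one request so its error gets
	// reported, unless it was closed by the idle timer we are racing.
	return !c.closed || !c.closedOnIdle
}

func main() {
	c := &conn{}
	t := time.AfterFunc(10*time.Millisecond, c.onIdleTimeout)
	defer t.Stop()
	time.Sleep(20 * time.Millisecond)
	fmt.Println(c.canTakeNewRequest()) // false: don't race the idle timer
}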
 
@@ -1117,7 +1073,7 @@
 	// times are compared based on their wall time. We don't want
 	// to reuse a connection that's been sitting idle during
 	// VM/laptop suspend if monotonic time was also frozen.
-	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
 }
 
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1155,6 +1111,7 @@
 		return
 	}
 	cc.closed = true
+	cc.closedOnIdle = true
 	nextID := cc.nextStreamID
 	// TODO: do clients send GOAWAY too? maybe? Just Close:
 	cc.mu.Unlock()
@@ -1182,7 +1139,6 @@
 	done := make(chan struct{})
 	cancelled := false // guarded by cc.mu
 	go func() {
-		cc.t.markNewGoroutine()
 		cc.mu.Lock()
 		defer cc.mu.Unlock()
 		for {
@@ -1253,8 +1209,7 @@
 //
 // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
 func (cc *ClientConn) Close() error {
-	err := errors.New("http2: client connection force closed via ClientConn.Close")
-	cc.closeForError(err)
+	cc.closeForError(errClientConnForceClosed)
 	return nil
 }
 
@@ -1271,23 +1226,6 @@
 // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
 var errRequestCanceled = errors.New("net/http: request canceled")
 
-func commaSeparatedTrailers(req *http.Request) (string, error) {
-	keys := make([]string, 0, len(req.Trailer))
-	for k := range req.Trailer {
-		k = canonicalHeader(k)
-		switch k {
-		case "Transfer-Encoding", "Trailer", "Content-Length":
-			return "", fmt.Errorf("invalid Trailer key %q", k)
-		}
-		keys = append(keys, k)
-	}
-	if len(keys) > 0 {
-		sort.Strings(keys)
-		return strings.Join(keys, ","), nil
-	}
-	return "", nil
-}
-
 func (cc *ClientConn) responseHeaderTimeout() time.Duration {
 	if cc.t.t1 != nil {
 		return cc.t.t1.ResponseHeaderTimeout
@@ -1299,22 +1237,6 @@
 	return 0
 }
 
-// checkConnHeaders checks whether req has any invalid connection-level headers.
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
-	if v := req.Header.Get("Upgrade"); v != "" {
-		return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
-	}
-	if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
-		return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
-	}
-	if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
-		return fmt.Errorf("http2: invalid Connection request header: %q", vv)
-	}
-	return nil
-}
-
 // actualContentLength returns a sanitized version of
 // req.ContentLength, where 0 actually means zero (not unknown) and -1
 // means unknown.
@@ -1360,25 +1282,7 @@
 		donec:                make(chan struct{}),
 	}
 
-	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
-	if !cc.t.disableCompression() &&
-		req.Header.Get("Accept-Encoding") == "" &&
-		req.Header.Get("Range") == "" &&
-		!cs.isHead {
-		// Request gzip only, not deflate. Deflate is ambiguous and
-		// not as universally supported anyway.
-		// See: https://zlib.net/zlib_faq.html#faq39
-		//
-		// Note that we don't request this for HEAD requests,
-		// due to a bug in nginx:
-		//   http://trac.nginx.org/nginx/ticket/358
-		//   https://golang.org/issue/5522
-		//
-		// We don't request gzip if the request is for a range, since
-		// auto-decoding a portion of a gzipped document will just fail
-		// anyway. See https://golang.org/issue/8923
-		cs.requestedGzip = true
-	}
+	cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())
 
 	go cs.doRequest(req, streamf)
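
The inlined gzip heuristics are replaced by httpcommon.IsRequestGzip. Going by the comments in the removed block, the decision is roughly the sketch below (no caller-set Accept-Encoding or Range header, and not a HEAD request, which nginx mishandles when gzipped); this is a local approximation, not the internal package's exact logic:

package main

import (
	"fmt"
	"net/http"
)

// wantsGzip approximates the transparent-gzip decision described above.
func wantsGzip(method string, h http.Header, disableCompression bool) bool {
	return !disableCompression &&
		h.Get("Accept-Encoding") == "" && // caller handles encoding itself
		h.Get("Range") == "" && // partial gzip bodies can't be decoded
		method != "HEAD" // nginx bug workaround
}

func main() {
	h := http.Header{}
	fmt.Println(wantsGzip("GET", h, false))  // true
	fmt.Println(wantsGzip("HEAD", h, false)) // false
}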
 
@@ -1474,7 +1378,6 @@
 //
 // It sends the request and performs post-request cleanup (closing Request.Body, etc.).
 func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
-	cs.cc.t.markNewGoroutine()
 	err := cs.writeRequest(req, streamf)
 	cs.cleanupWriteRequest(err)
 }
@@ -1492,10 +1395,6 @@
 	cc := cs.cc
 	ctx := cs.ctx
 
-	if err := checkConnHeaders(req); err != nil {
-		return err
-	}
-
 	// wait for setting frames to be received, a server can change this value later,
 	// but we just wait for the first settings frame
 	var isExtendedConnect bool
@@ -1609,9 +1508,9 @@
 	var respHeaderTimer <-chan time.Time
 	var respHeaderRecv chan struct{}
 	if d := cc.responseHeaderTimeout(); d != 0 {
-		timer := cc.t.newTimer(d)
+		timer := time.NewTimer(d)
 		defer timer.Stop()
-		respHeaderTimer = timer.C()
+		respHeaderTimer = timer.C
 		respHeaderRecv = cs.respHeaderRecv
 	}
 	// Wait until the peer half-closes its end of the stream,
@@ -1659,26 +1558,39 @@
 	// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
 	// sent by writeRequestBody below, along with any Trailers,
 	// again in form HEADERS{1}, CONTINUATION{0,})
-	trailers, err := commaSeparatedTrailers(req)
+	cc.hbuf.Reset()
+	res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
+		cc.writeHeader(name, value)
+	})
 	if err != nil {
-		return err
+		return fmt.Errorf("http2: %w", err)
 	}
-	hasTrailers := trailers != ""
-	contentLen := actualContentLength(req)
-	hasBody := contentLen != 0
-	hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
-	if err != nil {
-		return err
-	}
+	hdrs := cc.hbuf.Bytes()
 
 	// Write the request.
-	endStream := !hasBody && !hasTrailers
+	endStream := !res.HasBody && !res.HasTrailers
 	cs.sentHeaders = true
 	err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
 	traceWroteHeaders(cs.trace)
 	return err
 }
 
+func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
+	return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
+		Request: httpcommon.Request{
+			Header:              req.Header,
+			Trailer:             req.Trailer,
+			URL:                 req.URL,
+			Host:                req.Host,
+			Method:              req.Method,
+			ActualContentLength: actualContentLength(req),
+		},
+		AddGzipHeader:         addGzipHeader,
+		PeerMaxHeaderListSize: peerMaxHeaderListSize,
+		DefaultUserAgent:      defaultUserAgent,
+	}, headerf)
+}
+
 // cleanupWriteRequest performs post-request tasks.
 //
 // If err (the result of writeRequest) is non-nil and the stream is not closed,
@@ -1791,7 +1703,7 @@
 			// Return a fatal error which aborts the retry loop.
 			return errClientConnNotEstablished
 		}
-		cc.lastActive = cc.t.now()
+		cc.lastActive = time.Now()
 		if cc.closed || !cc.canTakeNewRequestLocked() {
 			return errClientConnUnusable
 		}
@@ -2066,218 +1978,6 @@
 	}
 }
 
-func validateHeaders(hdrs http.Header) string {
-	for k, vv := range hdrs {
-		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
-			return fmt.Sprintf("name %q", k)
-		}
-		for _, v := range vv {
-			if !httpguts.ValidHeaderFieldValue(v) {
-				// Don't include the value in the error,
-				// because it may be sensitive.
-				return fmt.Sprintf("value for header %q", k)
-			}
-		}
-	}
-	return ""
-}
-
-var errNilRequestURL = errors.New("http2: Request.URI is nil")
-
-func isNormalConnect(req *http.Request) bool {
-	return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
-}
-
-// requires cc.wmu be held.
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
-	cc.hbuf.Reset()
-	if req.URL == nil {
-		return nil, errNilRequestURL
-	}
-
-	host := req.Host
-	if host == "" {
-		host = req.URL.Host
-	}
-	host, err := httpguts.PunycodeHostPort(host)
-	if err != nil {
-		return nil, err
-	}
-	if !httpguts.ValidHostHeader(host) {
-		return nil, errors.New("http2: invalid Host header")
-	}
-
-	var path string
-	if !isNormalConnect(req) {
-		path = req.URL.RequestURI()
-		if !validPseudoPath(path) {
-			orig := path
-			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
-			if !validPseudoPath(path) {
-				if req.URL.Opaque != "" {
-					return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
-				} else {
-					return nil, fmt.Errorf("invalid request :path %q", orig)
-				}
-			}
-		}
-	}
-
-	// Check for any invalid headers+trailers and return an error before we
-	// potentially pollute our hpack state. (We want to be able to
-	// continue to reuse the hpack encoder for future requests)
-	if err := validateHeaders(req.Header); err != "" {
-		return nil, fmt.Errorf("invalid HTTP header %s", err)
-	}
-	if err := validateHeaders(req.Trailer); err != "" {
-		return nil, fmt.Errorf("invalid HTTP trailer %s", err)
-	}
-
-	enumerateHeaders := func(f func(name, value string)) {
-		// 8.1.2.3 Request Pseudo-Header Fields
-		// The :path pseudo-header field includes the path and query parts of the
-		// target URI (the path-absolute production and optionally a '?' character
-		// followed by the query production, see Sections 3.3 and 3.4 of
-		// [RFC3986]).
-		f(":authority", host)
-		m := req.Method
-		if m == "" {
-			m = http.MethodGet
-		}
-		f(":method", m)
-		if !isNormalConnect(req) {
-			f(":path", path)
-			f(":scheme", req.URL.Scheme)
-		}
-		if trailers != "" {
-			f("trailer", trailers)
-		}
-
-		var didUA bool
-		for k, vv := range req.Header {
-			if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
-				// Host is :authority, already sent.
-				// Content-Length is automatic, set below.
-				continue
-			} else if asciiEqualFold(k, "connection") ||
-				asciiEqualFold(k, "proxy-connection") ||
-				asciiEqualFold(k, "transfer-encoding") ||
-				asciiEqualFold(k, "upgrade") ||
-				asciiEqualFold(k, "keep-alive") {
-				// Per 8.1.2.2 Connection-Specific Header
-				// Fields, don't send connection-specific
-				// fields. We have already checked if any
-				// are error-worthy so just ignore the rest.
-				continue
-			} else if asciiEqualFold(k, "user-agent") {
-				// Match Go's http1 behavior: at most one
-				// User-Agent. If set to nil or empty string,
-				// then omit it. Otherwise if not mentioned,
-				// include the default (below).
-				didUA = true
-				if len(vv) < 1 {
-					continue
-				}
-				vv = vv[:1]
-				if vv[0] == "" {
-					continue
-				}
-			} else if asciiEqualFold(k, "cookie") {
-				// Per 8.1.2.5 To allow for better compression efficiency, the
-				// Cookie header field MAY be split into separate header fields,
-				// each with one or more cookie-pairs.
-				for _, v := range vv {
-					for {
-						p := strings.IndexByte(v, ';')
-						if p < 0 {
-							break
-						}
-						f("cookie", v[:p])
-						p++
-						// strip space after semicolon if any.
-						for p+1 <= len(v) && v[p] == ' ' {
-							p++
-						}
-						v = v[p:]
-					}
-					if len(v) > 0 {
-						f("cookie", v)
-					}
-				}
-				continue
-			}
-
-			for _, v := range vv {
-				f(k, v)
-			}
-		}
-		if shouldSendReqContentLength(req.Method, contentLength) {
-			f("content-length", strconv.FormatInt(contentLength, 10))
-		}
-		if addGzipHeader {
-			f("accept-encoding", "gzip")
-		}
-		if !didUA {
-			f("user-agent", defaultUserAgent)
-		}
-	}
-
-	// Do a first pass over the headers counting bytes to ensure
-	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
-	// separate pass before encoding the headers to prevent
-	// modifying the hpack state.
-	hlSize := uint64(0)
-	enumerateHeaders(func(name, value string) {
-		hf := hpack.HeaderField{Name: name, Value: value}
-		hlSize += uint64(hf.Size())
-	})
-
-	if hlSize > cc.peerMaxHeaderListSize {
-		return nil, errRequestHeaderListSize
-	}
-
-	trace := httptrace.ContextClientTrace(req.Context())
-	traceHeaders := traceHasWroteHeaderField(trace)
-
-	// Header list size is ok. Write the headers.
-	enumerateHeaders(func(name, value string) {
-		name, ascii := lowerHeader(name)
-		if !ascii {
-			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
-			// field names have to be ASCII characters (just as in HTTP/1.x).
-			return
-		}
-		cc.writeHeader(name, value)
-		if traceHeaders {
-			traceWroteHeaderField(trace, name, value)
-		}
-	})
-
-	return cc.hbuf.Bytes(), nil
-}
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func shouldSendReqContentLength(method string, contentLength int64) bool {
-	if contentLength > 0 {
-		return true
-	}
-	if contentLength < 0 {
-		return false
-	}
-	// For zero bodies, whether we send a content-length depends on the method.
-	// It also kinda doesn't matter for http2 either way, with END_STREAM.
-	switch method {
-	case "POST", "PUT", "PATCH":
-		return true
-	default:
-		return false
-	}
-}
-
 // requires cc.wmu be held.
 func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
 	cc.hbuf.Reset()
@@ -2294,7 +1994,7 @@
 	}
 
 	for k, vv := range trailer {
-		lowKey, ascii := lowerHeader(k)
+		lowKey, ascii := httpcommon.LowerHeader(k)
 		if !ascii {
 			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
 			// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2342,10 +2042,10 @@
 	if len(cc.streams) != slen-1 {
 		panic("forgetting unknown stream id")
 	}
-	cc.lastActive = cc.t.now()
+	cc.lastActive = time.Now()
 	if len(cc.streams) == 0 && cc.idleTimer != nil {
 		cc.idleTimer.Reset(cc.idleTimeout)
-		cc.lastIdle = cc.t.now()
+		cc.lastIdle = time.Now()
 	}
 	// Wake up writeRequestBody via clientStream.awaitFlowControl and
 	// wake up RoundTrip if there is a pending request.
@@ -2371,7 +2071,6 @@
 
 // readLoop runs in its own goroutine and reads and dispatches frames.
 func (cc *ClientConn) readLoop() {
-	cc.t.markNewGoroutine()
 	rl := &clientConnReadLoop{cc: cc}
 	defer rl.cleanup()
 	cc.readerErr = rl.run()
@@ -2434,10 +2133,13 @@
 	// This avoids a situation where new connections are constantly created,
 	// added to the pool, fail, and are removed from the pool, without any error
 	// being surfaced to the user.
-	const unusedWaitTime = 5 * time.Second
-	idleTime := cc.t.now().Sub(cc.lastActive)
-	if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
-		cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+	unusedWaitTime := 5 * time.Second
+	if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
+		unusedWaitTime = cc.idleTimeout
+	}
+	idleTime := time.Since(cc.lastActive)
+	if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
+		cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
 			cc.t.connPool().MarkDead(cc)
 		})
 	} else {
@@ -2457,6 +2159,13 @@
 	}
 	cc.cond.Broadcast()
 	cc.mu.Unlock()
+
+	if !cc.seenSettings {
+		// If we have a pending request that wants extended CONNECT,
+		// let it continue and fail with the connection error.
+		cc.extendedConnectAllowed = true
+		close(cc.seenSettingsChan)
+	}
 }
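
The cleanup addition above unblocks any request still parked on seenSettingsChan when the connection dies before the server's first SETTINGS frame arrives, letting it fail with the connection error instead of hanging. A sketch of that close-to-release pattern, with hypothetical types:

package main

import (
	"errors"
	"fmt"
	"sync"
)

type conn struct {
	mu               sync.Mutex
	seenSettings     bool
	seenSettingsChan chan struct{}
	err              error
}

// cleanup records the connection error and closes the channel exactly once,
// waking every goroutine blocked in waitForSettings.
func (c *conn) cleanup(err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.err = err
	if !c.seenSettings {
		close(c.seenSettingsChan)
	}
}

func (c *conn) waitForSettings() error {
	<-c.seenSettingsChan
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.seenSettings {
		return c.err // conn died before the first SETTINGS frame
	}
	return nil
}

func main() {
	c := &conn{seenSettingsChan: make(chan struct{})}
	go c.cleanup(errors.New("connection reset"))
	fmt.Println(c.waitForSettings())
}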
 
 // countReadFrameError calls Transport.CountError with a string
@@ -2490,9 +2199,9 @@
 	cc := rl.cc
 	gotSettings := false
 	readIdleTimeout := cc.readIdleTimeout
-	var t timer
+	var t *time.Timer
 	if readIdleTimeout != 0 {
-		t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
 	}
 	for {
 		f, err := cc.fr.ReadFrame()
@@ -2549,9 +2258,6 @@
 			if VerboseLogs {
 				cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
 			}
-			if !cc.seenSettings {
-				close(cc.seenSettingsChan)
-			}
 			return err
 		}
 	}
@@ -2646,7 +2352,7 @@
 		Status:     status + " " + http.StatusText(statusCode),
 	}
 	for _, hf := range regularFields {
-		key := canonicalHeader(hf.Name)
+		key := httpcommon.CanonicalHeader(hf.Name)
 		if key == "Trailer" {
 			t := res.Trailer
 			if t == nil {
@@ -2654,7 +2360,7 @@
 				res.Trailer = t
 			}
 			foreachHeaderElement(hf.Value, func(v string) {
-				t[canonicalHeader(v)] = nil
+				t[httpcommon.CanonicalHeader(v)] = nil
 			})
 		} else {
 			vv := header[key]
@@ -2778,7 +2484,7 @@
 
 	trailer := make(http.Header)
 	for _, hf := range f.RegularFields() {
-		key := canonicalHeader(hf.Name)
+		key := httpcommon.CanonicalHeader(hf.Name)
 		trailer[key] = append(trailer[key], hf.Value)
 	}
 	cs.trailer = trailer
@@ -3241,7 +2947,6 @@
 	var pingError error
 	errc := make(chan struct{})
 	go func() {
-		cc.t.markNewGoroutine()
 		cc.wmu.Lock()
 		defer cc.wmu.Unlock()
 		if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3324,7 +3029,7 @@
 
 var (
 	errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
-	errRequestHeaderListSize  = errors.New("http2: request header list larger than peer's advertised limit")
+	errRequestHeaderListSize  = httpcommon.ErrRequestHeaderListSize
 )
 
 func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -3471,7 +3176,7 @@
 	cc.mu.Lock()
 	ci.WasIdle = len(cc.streams) == 0 && reused
 	if ci.WasIdle && !cc.lastActive.IsZero() {
-		ci.IdleTime = cc.t.timeSince(cc.lastActive)
+		ci.IdleTime = time.Since(cc.lastActive)
 	}
 	cc.mu.Unlock()
 
@@ -3508,16 +3213,6 @@
 	}
 }
 
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
-	return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
-	if trace != nil && trace.WroteHeaderField != nil {
-		trace.WroteHeaderField(k, []string{v})
-	}
-}
-
 func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
 	if trace != nil {
 		return trace.Got1xxResponse
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 6ff6bee..fdb35b9 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -13,6 +13,7 @@
 
 	"golang.org/x/net/http/httpguts"
 	"golang.org/x/net/http2/hpack"
+	"golang.org/x/net/internal/httpcommon"
 )
 
 // writeFramer is implemented by any type that is used to write frames.
@@ -351,7 +352,7 @@
 	}
 	for _, k := range keys {
 		vv := h[k]
-		k, ascii := lowerHeader(k)
+		k, ascii := httpcommon.LowerHeader(k)
 		if !ascii {
 			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
 			// field names have to be ASCII characters (just as in HTTP/1.x).
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index cc893ad..4d3890f 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@
 	// PusherID is zero if the stream was initiated by the client. Otherwise,
 	// PusherID names the stream that pushed the newly opened stream.
 	PusherID uint32
+	// priority is used to set the priority of the newly opened stream.
+	priority PriorityParam
 }
 
 // FrameWriteRequest is a request to write a frame.
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
deleted file mode 100644
index f678333..0000000
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
-	"fmt"
-	"math"
-	"sort"
-)
-
-// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
-
-// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
-type PriorityWriteSchedulerConfig struct {
-	// MaxClosedNodesInTree controls the maximum number of closed streams to
-	// retain in the priority tree. Setting this to zero saves a small amount
-	// of memory at the cost of performance.
-	//
-	// See RFC 7540, Section 5.3.4:
-	//   "It is possible for a stream to become closed while prioritization
-	//   information ... is in transit. ... This potentially creates suboptimal
-	//   prioritization, since the stream could be given a priority that is
-	//   different from what is intended. To avoid these problems, an endpoint
-	//   SHOULD retain stream prioritization state for a period after streams
-	//   become closed. The longer state is retained, the lower the chance that
-	//   streams are assigned incorrect or default priority values."
-	MaxClosedNodesInTree int
-
-	// MaxIdleNodesInTree controls the maximum number of idle streams to
-	// retain in the priority tree. Setting this to zero saves a small amount
-	// of memory at the cost of performance.
-	//
-	// See RFC 7540, Section 5.3.4:
-	//   Similarly, streams that are in the "idle" state can be assigned
-	//   priority or become a parent of other streams. This allows for the
-	//   creation of a grouping node in the dependency tree, which enables
-	//   more flexible expressions of priority. Idle streams begin with a
-	//   default priority (Section 5.3.5).
-	MaxIdleNodesInTree int
-
-	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
-	// data is delivered in priority order. This works around a race where
-	// stream B depends on stream A and both streams are about to call Write
-	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
-	// write as much data from B as possible, but this is suboptimal because A
-	// is a higher-priority stream. With throttling enabled, we write a small
-	// amount of data from B to minimize the amount of bandwidth that B can
-	// steal from A.
-	ThrottleOutOfOrderWrites bool
-}
-
-// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
-// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
-// If cfg is nil, default options are used.
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
-	if cfg == nil {
-		// For justification of these defaults, see:
-		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
-		cfg = &PriorityWriteSchedulerConfig{
-			MaxClosedNodesInTree:     10,
-			MaxIdleNodesInTree:       10,
-			ThrottleOutOfOrderWrites: false,
-		}
-	}
-
-	ws := &priorityWriteScheduler{
-		nodes:                make(map[uint32]*priorityNode),
-		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
-		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
-		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
-	}
-	ws.nodes[0] = &ws.root
-	if cfg.ThrottleOutOfOrderWrites {
-		ws.writeThrottleLimit = 1024
-	} else {
-		ws.writeThrottleLimit = math.MaxInt32
-	}
-	return ws
-}
-
-type priorityNodeState int
-
-const (
-	priorityNodeOpen priorityNodeState = iota
-	priorityNodeClosed
-	priorityNodeIdle
-)
-
-// priorityNode is a node in an HTTP/2 priority tree.
-// Each node is associated with a single stream ID.
-// See RFC 7540, Section 5.3.
-type priorityNode struct {
-	q            writeQueue        // queue of pending frames to write
-	id           uint32            // id of the stream, or 0 for the root of the tree
-	weight       uint8             // the actual weight is weight+1, so the value is in [1,256]
-	state        priorityNodeState // open | closed | idle
-	bytes        int64             // number of bytes written by this node, or 0 if closed
-	subtreeBytes int64             // sum(node.bytes) of all nodes in this subtree
-
-	// These links form the priority tree.
-	parent     *priorityNode
-	kids       *priorityNode // start of the kids list
-	prev, next *priorityNode // doubly-linked list of siblings
-}
-
-func (n *priorityNode) setParent(parent *priorityNode) {
-	if n == parent {
-		panic("setParent to self")
-	}
-	if n.parent == parent {
-		return
-	}
-	// Unlink from current parent.
-	if parent := n.parent; parent != nil {
-		if n.prev == nil {
-			parent.kids = n.next
-		} else {
-			n.prev.next = n.next
-		}
-		if n.next != nil {
-			n.next.prev = n.prev
-		}
-	}
-	// Link to new parent.
-	// If parent=nil, remove n from the tree.
-	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
-	n.parent = parent
-	if parent == nil {
-		n.next = nil
-		n.prev = nil
-	} else {
-		n.next = parent.kids
-		n.prev = nil
-		if n.next != nil {
-			n.next.prev = n
-		}
-		parent.kids = n
-	}
-}
-
-func (n *priorityNode) addBytes(b int64) {
-	n.bytes += b
-	for ; n != nil; n = n.parent {
-		n.subtreeBytes += b
-	}
-}
-
-// walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this function returns true and the
-// walk halts. tmp is used as scratch space for sorting.
-//
-// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
-// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
-	if !n.q.empty() && f(n, openParent) {
-		return true
-	}
-	if n.kids == nil {
-		return false
-	}
-
-	// Don't consider the root "open" when updating openParent since
-	// we can't send data frames on the root stream (only control frames).
-	if n.id != 0 {
-		openParent = openParent || (n.state == priorityNodeOpen)
-	}
-
-	// Common case: only one kid or all kids have the same weight.
-	// Some clients don't use weights; other clients (like web browsers)
-	// use mostly-linear priority trees.
-	w := n.kids.weight
-	needSort := false
-	for k := n.kids.next; k != nil; k = k.next {
-		if k.weight != w {
-			needSort = true
-			break
-		}
-	}
-	if !needSort {
-		for k := n.kids; k != nil; k = k.next {
-			if k.walkReadyInOrder(openParent, tmp, f) {
-				return true
-			}
-		}
-		return false
-	}
-
-	// Uncommon case: sort the child nodes. We remove the kids from the parent,
-	// then re-insert after sorting so we can reuse tmp for future sort calls.
-	*tmp = (*tmp)[:0]
-	for n.kids != nil {
-		*tmp = append(*tmp, n.kids)
-		n.kids.setParent(nil)
-	}
-	sort.Sort(sortPriorityNodeSiblings(*tmp))
-	for i := len(*tmp) - 1; i >= 0; i-- {
-		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
-	}
-	for k := n.kids; k != nil; k = k.next {
-		if k.walkReadyInOrder(openParent, tmp, f) {
-			return true
-		}
-	}
-	return false
-}
-
-type sortPriorityNodeSiblings []*priorityNode
-
-func (z sortPriorityNodeSiblings) Len() int      { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
-	// Prefer the subtree that has sent fewer bytes relative to its weight.
-	// See sections 5.3.2 and 5.3.4.
-	wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
-	wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
-	if bi == 0 && bk == 0 {
-		return wi >= wk
-	}
-	if bk == 0 {
-		return false
-	}
-	return bi/bk <= wi/wk
-}
-
-type priorityWriteScheduler struct {
-	// root is the root of the priority tree, where root.id = 0.
-	// The root queues control frames that are not associated with any stream.
-	root priorityNode
-
-	// nodes maps stream ids to priority tree nodes.
-	nodes map[uint32]*priorityNode
-
-	// maxID is the maximum stream id in nodes.
-	maxID uint32
-
-	// lists of nodes that have been closed or are idle, but are kept in
-	// the tree for improved prioritization. When the lengths exceed either
-	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
-	closedNodes, idleNodes []*priorityNode
-
-	// From the config.
-	maxClosedNodesInTree int
-	maxIdleNodesInTree   int
-	writeThrottleLimit   int32
-	enableWriteThrottle  bool
-
-	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
-	tmp []*priorityNode
-
-	// pool of empty queues for reuse.
-	queuePool writeQueuePool
-}
-
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
-	// The stream may be currently idle but cannot be opened or closed.
-	if curr := ws.nodes[streamID]; curr != nil {
-		if curr.state != priorityNodeIdle {
-			panic(fmt.Sprintf("stream %d already opened", streamID))
-		}
-		curr.state = priorityNodeOpen
-		return
-	}
-
-	// RFC 7540, Section 5.3.5:
-	//  "All streams are initially assigned a non-exclusive dependency on stream 0x0.
-	//  Pushed streams initially depend on their associated stream. In both cases,
-	//  streams are assigned a default weight of 16."
-	parent := ws.nodes[options.PusherID]
-	if parent == nil {
-		parent = &ws.root
-	}
-	n := &priorityNode{
-		q:      *ws.queuePool.get(),
-		id:     streamID,
-		weight: priorityDefaultWeight,
-		state:  priorityNodeOpen,
-	}
-	n.setParent(parent)
-	ws.nodes[streamID] = n
-	if streamID > ws.maxID {
-		ws.maxID = streamID
-	}
-}
-
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
-	if streamID == 0 {
-		panic("violation of WriteScheduler interface: cannot close stream 0")
-	}
-	if ws.nodes[streamID] == nil {
-		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
-	}
-	if ws.nodes[streamID].state != priorityNodeOpen {
-		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
-	}
-
-	n := ws.nodes[streamID]
-	n.state = priorityNodeClosed
-	n.addBytes(-n.bytes)
-
-	q := n.q
-	ws.queuePool.put(&q)
-	n.q.s = nil
-	if ws.maxClosedNodesInTree > 0 {
-		ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
-	} else {
-		ws.removeNode(n)
-	}
-}
-
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
-	if streamID == 0 {
-		panic("adjustPriority on root")
-	}
-
-	// If streamID does not exist, there are two cases:
-	// - A closed stream that has been removed (this will have ID <= maxID)
-	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
-	n := ws.nodes[streamID]
-	if n == nil {
-		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
-			return
-		}
-		ws.maxID = streamID
-		n = &priorityNode{
-			q:      *ws.queuePool.get(),
-			id:     streamID,
-			weight: priorityDefaultWeight,
-			state:  priorityNodeIdle,
-		}
-		n.setParent(&ws.root)
-		ws.nodes[streamID] = n
-		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
-	}
-
-	// Section 5.3.1: A dependency on a stream that is not currently in the tree
-	// results in that stream being given a default priority (Section 5.3.5).
-	parent := ws.nodes[priority.StreamDep]
-	if parent == nil {
-		n.setParent(&ws.root)
-		n.weight = priorityDefaultWeight
-		return
-	}
-
-	// Ignore if the client tries to make a node its own parent.
-	if n == parent {
-		return
-	}
-
-	// Section 5.3.3:
-	//   "If a stream is made dependent on one of its own dependencies, the
-	//   formerly dependent stream is first moved to be dependent on the
-	//   reprioritized stream's previous parent. The moved dependency retains
-	//   its weight."
-	//
-	// That is: if parent depends on n, move parent to depend on n.parent.
-	for x := parent.parent; x != nil; x = x.parent {
-		if x == n {
-			parent.setParent(n.parent)
-			break
-		}
-	}
-
-	// Section 5.3.3: The exclusive flag causes the stream to become the sole
-	// dependency of its parent stream, causing other dependencies to become
-	// dependent on the exclusive stream.
-	if priority.Exclusive {
-		k := parent.kids
-		for k != nil {
-			next := k.next
-			if k != n {
-				k.setParent(n)
-			}
-			k = next
-		}
-	}
-
-	n.setParent(parent)
-	n.weight = priority.Weight
-}
-
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
-	var n *priorityNode
-	if wr.isControl() {
-		n = &ws.root
-	} else {
-		id := wr.StreamID()
-		n = ws.nodes[id]
-		if n == nil {
-			// id is an idle or closed stream. wr should not be a HEADERS or
-			// DATA frame. In other case, we push wr onto the root, rather
-			// than creating a new priorityNode.
-			if wr.DataSize() > 0 {
-				panic("add DATA on non-open stream")
-			}
-			n = &ws.root
-		}
-	}
-	n.q.push(wr)
-}
-
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
-	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
-		limit := int32(math.MaxInt32)
-		if openParent {
-			limit = ws.writeThrottleLimit
-		}
-		wr, ok = n.q.consume(limit)
-		if !ok {
-			return false
-		}
-		n.addBytes(int64(wr.DataSize()))
-		// If B depends on A and B continuously has data available but A
-		// does not, gradually increase the throttling limit to allow B to
-		// steal more and more bandwidth from A.
-		if openParent {
-			ws.writeThrottleLimit += 1024
-			if ws.writeThrottleLimit < 0 {
-				ws.writeThrottleLimit = math.MaxInt32
-			}
-		} else if ws.enableWriteThrottle {
-			ws.writeThrottleLimit = 1024
-		}
-		return true
-	})
-	return wr, ok
-}
-
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
-	if maxSize == 0 {
-		return
-	}
-	if len(*list) == maxSize {
-		// Remove the oldest node, then shift left.
-		ws.removeNode((*list)[0])
-		x := (*list)[1:]
-		copy(*list, x)
-		*list = (*list)[:len(x)]
-	}
-	*list = append(*list, n)
-}
-
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
-	for n.kids != nil {
-		n.kids.setParent(n.parent)
-	}
-	n.setParent(nil)
-	delete(ws.nodes, n.id)
-}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
new file mode 100644
index 0000000..6d24d6a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -0,0 +1,451 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+	"sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+	// MaxClosedNodesInTree controls the maximum number of closed streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	//   "It is possible for a stream to become closed while prioritization
+	//   information ... is in transit. ... This potentially creates suboptimal
+	//   prioritization, since the stream could be given a priority that is
+	//   different from what is intended. To avoid these problems, an endpoint
+	//   SHOULD retain stream prioritization state for a period after streams
+	//   become closed. The longer state is retained, the lower the chance that
+	//   streams are assigned incorrect or default priority values."
+	MaxClosedNodesInTree int
+
+	// MaxIdleNodesInTree controls the maximum number of idle streams to
+	// retain in the priority tree. Setting this to zero saves a small amount
+	// of memory at the cost of performance.
+	//
+	// See RFC 7540, Section 5.3.4:
+	//   Similarly, streams that are in the "idle" state can be assigned
+	//   priority or become a parent of other streams. This allows for the
+	//   creation of a grouping node in the dependency tree, which enables
+	//   more flexible expressions of priority. Idle streams begin with a
+	//   default priority (Section 5.3.5).
+	MaxIdleNodesInTree int
+
+	// ThrottleOutOfOrderWrites enables write throttling to help ensure that
+	// data is delivered in priority order. This works around a race where
+	// stream B depends on stream A and both streams are about to call Write
+	// to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+	// write as much data from B as possible, but this is suboptimal because A
+	// is a higher-priority stream. With throttling enabled, we write a small
+	// amount of data from B to minimize the amount of bandwidth that B can
+	// steal from A.
+	ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+	if cfg == nil {
+		// For justification of these defaults, see:
+		// https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+		cfg = &PriorityWriteSchedulerConfig{
+			MaxClosedNodesInTree:     10,
+			MaxIdleNodesInTree:       10,
+			ThrottleOutOfOrderWrites: false,
+		}
+	}
+
+	ws := &priorityWriteSchedulerRFC7540{
+		nodes:                make(map[uint32]*priorityNodeRFC7540),
+		maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+		maxIdleNodesInTree:   cfg.MaxIdleNodesInTree,
+		enableWriteThrottle:  cfg.ThrottleOutOfOrderWrites,
+	}
+	ws.nodes[0] = &ws.root
+	if cfg.ThrottleOutOfOrderWrites {
+		ws.writeThrottleLimit = 1024
+	} else {
+		ws.writeThrottleLimit = math.MaxInt32
+	}
+	return ws
+}
+
+type priorityNodeStateRFC7540 int
+
+const (
+	priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+	priorityNodeClosedRFC7540
+	priorityNodeIdleRFC7540
+)
+
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNodeRFC7540 struct {
+	q            writeQueue               // queue of pending frames to write
+	id           uint32                   // id of the stream, or 0 for the root of the tree
+	weight       uint8                    // the actual weight is weight+1, so the value is in [1,256]
+	state        priorityNodeStateRFC7540 // open | closed | idle
+	bytes        int64                    // number of bytes written by this node, or 0 if closed
+	subtreeBytes int64                    // sum(node.bytes) of all nodes in this subtree
+
+	// These links form the priority tree.
+	parent     *priorityNodeRFC7540
+	kids       *priorityNodeRFC7540 // start of the kids list
+	prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
+}
+
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
+	if n == parent {
+		panic("setParent to self")
+	}
+	if n.parent == parent {
+		return
+	}
+	// Unlink from current parent.
+	if parent := n.parent; parent != nil {
+		if n.prev == nil {
+			parent.kids = n.next
+		} else {
+			n.prev.next = n.next
+		}
+		if n.next != nil {
+			n.next.prev = n.prev
+		}
+	}
+	// Link to new parent.
+	// If parent=nil, remove n from the tree.
+	// Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+	n.parent = parent
+	if parent == nil {
+		n.next = nil
+		n.prev = nil
+	} else {
+		n.next = parent.kids
+		n.prev = nil
+		if n.next != nil {
+			n.next.prev = n
+		}
+		parent.kids = n
+	}
+}
+
+func (n *priorityNodeRFC7540) addBytes(b int64) {
+	n.bytes += b
+	for ; n != nil; n = n.parent {
+		n.subtreeBytes += b
+	}
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
+	if !n.q.empty() && f(n, openParent) {
+		return true
+	}
+	if n.kids == nil {
+		return false
+	}
+
+	// Don't consider the root "open" when updating openParent since
+	// we can't send data frames on the root stream (only control frames).
+	if n.id != 0 {
+		openParent = openParent || (n.state == priorityNodeOpenRFC7540)
+	}
+
+	// Common case: only one kid or all kids have the same weight.
+	// Some clients don't use weights; other clients (like web browsers)
+	// use mostly-linear priority trees.
+	w := n.kids.weight
+	needSort := false
+	for k := n.kids.next; k != nil; k = k.next {
+		if k.weight != w {
+			needSort = true
+			break
+		}
+	}
+	if !needSort {
+		for k := n.kids; k != nil; k = k.next {
+			if k.walkReadyInOrder(openParent, tmp, f) {
+				return true
+			}
+		}
+		return false
+	}
+
+	// Uncommon case: sort the child nodes. We remove the kids from the parent,
+	// then re-insert after sorting so we can reuse tmp for future sort calls.
+	*tmp = (*tmp)[:0]
+	for n.kids != nil {
+		*tmp = append(*tmp, n.kids)
+		n.kids.setParent(nil)
+	}
+	sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
+	for i := len(*tmp) - 1; i >= 0; i-- {
+		(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+	}
+	for k := n.kids; k != nil; k = k.next {
+		if k.walkReadyInOrder(openParent, tmp, f) {
+			return true
+		}
+	}
+	return false
+}
+
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
+
+func (z sortPriorityNodeSiblingsRFC7540) Len() int      { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
+	// Prefer the subtree that has sent fewer bytes relative to its weight.
+	// See sections 5.3.2 and 5.3.4.
+	wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
+	wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
+	if bi == 0 && bk == 0 {
+		return wi >= wk
+	}
+	if bk == 0 {
+		return false
+	}
+	return bi/bk <= wi/wk
+}
+
+type priorityWriteSchedulerRFC7540 struct {
+	// root is the root of the priority tree, where root.id = 0.
+	// The root queues control frames that are not associated with any stream.
+	root priorityNodeRFC7540
+
+	// nodes maps stream ids to priority tree nodes.
+	nodes map[uint32]*priorityNodeRFC7540
+
+	// maxID is the maximum stream id in nodes.
+	maxID uint32
+
+	// lists of nodes that have been closed or are idle, but are kept in
+	// the tree for improved prioritization. When the lengths exceed either
+	// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+	closedNodes, idleNodes []*priorityNodeRFC7540
+
+	// From the config.
+	maxClosedNodesInTree int
+	maxIdleNodesInTree   int
+	writeThrottleLimit   int32
+	enableWriteThrottle  bool
+
+	// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+	tmp []*priorityNodeRFC7540
+
+	// pool of empty queues for reuse.
+	queuePool writeQueuePool
+}
+
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
+	// The stream may be currently idle but cannot be opened or closed.
+	if curr := ws.nodes[streamID]; curr != nil {
+		if curr.state != priorityNodeIdleRFC7540 {
+			panic(fmt.Sprintf("stream %d already opened", streamID))
+		}
+		curr.state = priorityNodeOpenRFC7540
+		return
+	}
+
+	// RFC 7540, Section 5.3.5:
+	//  "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+	//  Pushed streams initially depend on their associated stream. In both cases,
+	//  streams are assigned a default weight of 16."
+	parent := ws.nodes[options.PusherID]
+	if parent == nil {
+		parent = &ws.root
+	}
+	n := &priorityNodeRFC7540{
+		q:      *ws.queuePool.get(),
+		id:     streamID,
+		weight: priorityDefaultWeightRFC7540,
+		state:  priorityNodeOpenRFC7540,
+	}
+	n.setParent(parent)
+	ws.nodes[streamID] = n
+	if streamID > ws.maxID {
+		ws.maxID = streamID
+	}
+}
+
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
+	if streamID == 0 {
+		panic("violation of WriteScheduler interface: cannot close stream 0")
+	}
+	if ws.nodes[streamID] == nil {
+		panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+	}
+	if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
+		panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+	}
+
+	n := ws.nodes[streamID]
+	n.state = priorityNodeClosedRFC7540
+	n.addBytes(-n.bytes)
+
+	q := n.q
+	ws.queuePool.put(&q)
+	n.q.s = nil
+	if ws.maxClosedNodesInTree > 0 {
+		ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+	} else {
+		ws.removeNode(n)
+	}
+}
+
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
+	if streamID == 0 {
+		panic("adjustPriority on root")
+	}
+
+	// If streamID does not exist, there are two cases:
+	// - A closed stream that has been removed (this will have ID <= maxID)
+	// - An idle stream that is being used for "grouping" (this will have ID > maxID)
+	n := ws.nodes[streamID]
+	if n == nil {
+		if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+			return
+		}
+		ws.maxID = streamID
+		n = &priorityNodeRFC7540{
+			q:      *ws.queuePool.get(),
+			id:     streamID,
+			weight: priorityDefaultWeightRFC7540,
+			state:  priorityNodeIdleRFC7540,
+		}
+		n.setParent(&ws.root)
+		ws.nodes[streamID] = n
+		ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+	}
+
+	// Section 5.3.1: A dependency on a stream that is not currently in the tree
+	// results in that stream being given a default priority (Section 5.3.5).
+	parent := ws.nodes[priority.StreamDep]
+	if parent == nil {
+		n.setParent(&ws.root)
+		n.weight = priorityDefaultWeightRFC7540
+		return
+	}
+
+	// Ignore if the client tries to make a node its own parent.
+	if n == parent {
+		return
+	}
+
+	// Section 5.3.3:
+	//   "If a stream is made dependent on one of its own dependencies, the
+	//   formerly dependent stream is first moved to be dependent on the
+	//   reprioritized stream's previous parent. The moved dependency retains
+	//   its weight."
+	//
+	// That is: if parent depends on n, move parent to depend on n.parent.
+	for x := parent.parent; x != nil; x = x.parent {
+		if x == n {
+			parent.setParent(n.parent)
+			break
+		}
+	}
+
+	// Section 5.3.3: The exclusive flag causes the stream to become the sole
+	// dependency of its parent stream, causing other dependencies to become
+	// dependent on the exclusive stream.
+	if priority.Exclusive {
+		k := parent.kids
+		for k != nil {
+			next := k.next
+			if k != n {
+				k.setParent(n)
+			}
+			k = next
+		}
+	}
+
+	n.setParent(parent)
+	n.weight = priority.Weight
+}
+
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+	var n *priorityNodeRFC7540
+	if wr.isControl() {
+		n = &ws.root
+	} else {
+		id := wr.StreamID()
+		n = ws.nodes[id]
+		if n == nil {
+			// id is an idle or closed stream. wr should not be a HEADERS or
+			// DATA frame. In either case, we push wr onto the root rather
+			// than creating a new priorityNode.
+			if wr.DataSize() > 0 {
+				panic("add DATA on non-open stream")
+			}
+			n = &ws.root
+		}
+	}
+	n.q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+	ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
+		limit := int32(math.MaxInt32)
+		if openParent {
+			limit = ws.writeThrottleLimit
+		}
+		wr, ok = n.q.consume(limit)
+		if !ok {
+			return false
+		}
+		n.addBytes(int64(wr.DataSize()))
+		// If B depends on A and B continuously has data available but A
+		// does not, gradually increase the throttling limit to allow B to
+		// steal more and more bandwidth from A.
+		if openParent {
+			ws.writeThrottleLimit += 1024
+			if ws.writeThrottleLimit < 0 {
+				ws.writeThrottleLimit = math.MaxInt32
+			}
+		} else if ws.enableWriteThrottle {
+			ws.writeThrottleLimit = 1024
+		}
+		return true
+	})
+	return wr, ok
+}
+
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
+	if maxSize == 0 {
+		return
+	}
+	if len(*list) == maxSize {
+		// Remove the oldest node, then shift left.
+		ws.removeNode((*list)[0])
+		x := (*list)[1:]
+		copy(*list, x)
+		*list = (*list)[:len(x)]
+	}
+	*list = append(*list, n)
+}
+
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
+	for n.kids != nil {
+		n.kids.setParent(n.parent)
+	}
+	n.setParent(nil)
+	delete(ws.nodes, n.id)
+}
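
The Less method above shares bandwidth between siblings roughly in proportion to weight+1: a child is preferred while its bytes-sent-to-weight ratio is lowest, and a child that has sent nothing always sorts first. A small worked example (hypothetical stream names, standalone comparator copied from the logic above):

package main

import (
	"fmt"
	"sort"
)

type node struct {
	name         string
	weight       uint8 // actual weight is weight+1, per RFC 7540
	subtreeBytes int64
}

// less mirrors sortPriorityNodeSiblingsRFC7540.Less above.
func less(a, b node) bool {
	wi, bi := float64(a.weight)+1, float64(a.subtreeBytes)
	wk, bk := float64(b.weight)+1, float64(b.subtreeBytes)
	if bi == 0 && bk == 0 {
		return wi >= wk // nothing sent yet: higher weight first
	}
	if bk == 0 {
		return false
	}
	return bi/bk <= wi/wk // fewest bytes per unit of weight first
}

func main() {
	kids := []node{
		{"a", 255, 4096}, // weight 256, sent 4 KiB
		{"b", 15, 4096},  // weight 16, sent 4 KiB
		{"c", 15, 0},     // weight 16, sent nothing yet
	}
	sort.Slice(kids, func(i, k int) bool { return less(kids[i], kids[k]) })
	fmt.Println(kids[0].name, kids[1].name, kids[2].name) // c a b
}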
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
new file mode 100644
index 0000000..9b5b880
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+	"fmt"
+	"math"
+)
+
+type streamMetadata struct {
+	location *writeQueue
+	priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+	// control contains control frames (SETTINGS, PING, etc.).
+	control writeQueue
+
+	// heads holds the head of one circular list of streams per
+	// (urgency, incremental) pair, as defined in
+	// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+	// The first index is the urgency u=0 through u=7; the second is the
+	// incremental flag: 0 for i=false, 1 for i=true.
+	heads [8][2]*writeQueue
+
+	// streams contains a mapping between each stream ID and their metadata, so
+	// we can quickly locate them when needing to, for example, adjust their
+	// priority.
+	streams map[uint32]streamMetadata
+
+	// queuePool are empty queues for reuse.
+	queuePool writeQueuePool
+
+	// prioritizeIncremental is used to determine whether we should prioritize
+	// incremental streams or not, when urgency is the same in a given Pop()
+	// call.
+	prioritizeIncremental bool
+}
+
+func newPriorityWriteSchedulerRFC9128() WriteScheduler {
+	ws := &priorityWriteSchedulerRFC9218{
+		streams: make(map[uint32]streamMetadata),
+	}
+	return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+	if ws.streams[streamID].location != nil {
+		panic(fmt.Errorf("stream %d already opened", streamID))
+	}
+	q := ws.queuePool.get()
+	ws.streams[streamID] = streamMetadata{
+		location: q,
+		priority: opt.priority,
+	}
+
+	u, i := opt.priority.urgency, opt.priority.incremental
+	if ws.heads[u][i] == nil {
+		ws.heads[u][i] = q
+		q.next = q
+		q.prev = q
+	} else {
+		// Queues are stored in a ring.
+		// Insert the new stream before ws.heads[u][i], putting it at the end of the list.
+		q.prev = ws.heads[u][i].prev
+		q.next = ws.heads[u][i]
+		q.prev.next = q
+		q.next.prev = q
+	}
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+	metadata := ws.streams[streamID]
+	q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+	if q == nil {
+		return
+	}
+	if q.next == q {
+		// This was the only open stream.
+		ws.heads[u][i] = nil
+	} else {
+		q.prev.next = q.next
+		q.next.prev = q.prev
+		if ws.heads[u][i] == q {
+			ws.heads[u][i] = q.next
+		}
+	}
+	delete(ws.streams, streamID)
+	ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+	metadata := ws.streams[streamID]
+	q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+	if q == nil {
+		return
+	}
+
+	// Remove stream from current location.
+	if q.next == q {
+		// This was the only open stream.
+		ws.heads[u][i] = nil
+	} else {
+		q.prev.next = q.next
+		q.next.prev = q.prev
+		if ws.heads[u][i] == q {
+			ws.heads[u][i] = q.next
+		}
+	}
+
+	// Insert stream to the new queue.
+	u, i = priority.urgency, priority.incremental
+	if ws.heads[u][i] == nil {
+		ws.heads[u][i] = q
+		q.next = q
+		q.prev = q
+	} else {
+		// Queues are stored in a ring.
+		// Insert the new stream before ws.heads[u][i], putting it at the end of the list.
+		q.prev = ws.heads[u][i].prev
+		q.next = ws.heads[u][i]
+		q.prev.next = q
+		q.next.prev = q
+	}
+
+	// Update the metadata.
+	ws.streams[streamID] = streamMetadata{
+		location: q,
+		priority: priority,
+	}
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+	if wr.isControl() {
+		ws.control.push(wr)
+		return
+	}
+	q := ws.streams[wr.StreamID()].location
+	if q == nil {
+		// This is a closed stream.
+		// wr should not be a HEADERS or DATA frame.
+		// We push the request onto the control queue.
+		if wr.DataSize() > 0 {
+			panic("add DATA on non-open stream")
+		}
+		ws.control.push(wr)
+		return
+	}
+	q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+	// Control and RST_STREAM frames first.
+	if !ws.control.empty() {
+		return ws.control.shift(), true
+	}
+
+	// Alternate between preferring incremental and non-incremental streams
+	// on successive Pop() calls: if we popped a non-incremental request at a
+	// given urgency this time, pop an incremental one next time, and vice
+	// versa. When both kinds are ready at the same urgency, this gives 50%
+	// of our bandwidth to the incremental streams in aggregate and 50% to
+	// the first non-incremental one (since non-incremental streams do not
+	// use round-robin writes).
+	ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+	// Always prioritize lowest u (i.e. highest urgency level).
+	for u := range ws.heads {
+		for i := range ws.heads[u] {
+			// When we want to prioritize incremental, we visit the i=1
+			// (incremental) queue before the i=0 (non-incremental) queue
+			// for the same urgency u.
+			if ws.prioritizeIncremental {
+				i = (i + 1) % 2
+			}
+			q := ws.heads[u][i]
+			if q == nil {
+				continue
+			}
+			for {
+				if wr, ok := q.consume(math.MaxInt32); ok {
+					if i == 1 {
+						// For incremental streams, we update head to q.next so
+						// we can round-robin between multiple streams that can
+						// immediately benefit from partial writes.
+						ws.heads[u][i] = q.next
+					} else {
+						// For non-incremental streams, we try to finish one to
+						// completion rather than doing round-robin. However,
+						// we update head here so that if q.consume() is !ok
+						// (e.g. the stream has no more frames to consume),
+						// head is updated to the next q that has frames to
+						// consume on future iterations. This way, we do not
+						// prioritize writing to an unavailable stream on
+						// subsequent Pop() calls, preventing head-of-line
+						// blocking.
+						ws.heads[u][i] = q
+					}
+					return wr, true
+				}
+				q = q.next
+				if q == ws.heads[u][i] {
+					break
+				}
+			}
+		}
+	}
+	return FrameWriteRequest{}, false
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe863..737cff9 100644
--- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@
 }
 
 // newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
 // like SETTINGS and PING over DATA frames.
 // When there are no control frames to send, it performs a round-robin
 // selection from the ready streams.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
new file mode 100644
index 0000000..ed14da5
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import "strings"
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions that safely wrap Unicode-aware functions, which
+// would otherwise be unsafe and could introduce vulnerabilities if used
+// improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
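+// Unlike strings.EqualFold, it never folds non-ASCII characters such as the
+// Kelvin sign.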
+func asciiEqualFold(s, t string) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i := 0; i < len(s); i++ {
+		if lower(s[i]) != lower(t[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+	if 'A' <= b && b <= 'Z' {
+		return b + ('a' - 'A')
+	}
+	return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func isASCIIPrint(s string) bool {
+	for i := 0; i < len(s); i++ {
+		if s[i] < ' ' || s[i] > '~' {
+			return false
+		}
+	}
+	return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func asciiToLower(s string) (lower string, ok bool) {
+	if !isASCIIPrint(s) {
+		return "", false
+	}
+	return strings.ToLower(s), true
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
new file mode 100644
index 0000000..92483d8
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
@@ -0,0 +1,115 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+	"net/textproto"
+	"sync"
+)
+
+var (
+	commonBuildOnce   sync.Once
+	commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case
+	commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case
+)
+
+func buildCommonHeaderMapsOnce() {
+	commonBuildOnce.Do(buildCommonHeaderMaps)
+}
+
+func buildCommonHeaderMaps() {
+	common := []string{
+		"accept",
+		"accept-charset",
+		"accept-encoding",
+		"accept-language",
+		"accept-ranges",
+		"age",
+		"access-control-allow-credentials",
+		"access-control-allow-headers",
+		"access-control-allow-methods",
+		"access-control-allow-origin",
+		"access-control-expose-headers",
+		"access-control-max-age",
+		"access-control-request-headers",
+		"access-control-request-method",
+		"allow",
+		"authorization",
+		"cache-control",
+		"content-disposition",
+		"content-encoding",
+		"content-language",
+		"content-length",
+		"content-location",
+		"content-range",
+		"content-type",
+		"cookie",
+		"date",
+		"etag",
+		"expect",
+		"expires",
+		"from",
+		"host",
+		"if-match",
+		"if-modified-since",
+		"if-none-match",
+		"if-unmodified-since",
+		"last-modified",
+		"link",
+		"location",
+		"max-forwards",
+		"origin",
+		"proxy-authenticate",
+		"proxy-authorization",
+		"range",
+		"referer",
+		"refresh",
+		"retry-after",
+		"server",
+		"set-cookie",
+		"strict-transport-security",
+		"trailer",
+		"transfer-encoding",
+		"user-agent",
+		"vary",
+		"via",
+		"www-authenticate",
+		"x-forwarded-for",
+		"x-forwarded-proto",
+	}
+	commonLowerHeader = make(map[string]string, len(common))
+	commonCanonHeader = make(map[string]string, len(common))
+	for _, v := range common {
+		chk := textproto.CanonicalMIMEHeaderKey(v)
+		commonLowerHeader[chk] = v
+		commonCanonHeader[v] = chk
+	}
+}
+
+// LowerHeader returns the lowercase form of a header name,
+// used on the wire for HTTP/2 and HTTP/3 requests.
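+// e.g. LowerHeader("Content-Type") returns ("content-type", true) via the
+// precomputed table.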
+func LowerHeader(v string) (lower string, ascii bool) {
+	buildCommonHeaderMapsOnce()
+	if s, ok := commonLowerHeader[v]; ok {
+		return s, true
+	}
+	return asciiToLower(v)
+}
+
+// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
+func CanonicalHeader(v string) string {
+	buildCommonHeaderMapsOnce()
+	if s, ok := commonCanonHeader[v]; ok {
+		return s
+	}
+	return textproto.CanonicalMIMEHeaderKey(v)
+}
+
+// CachedCanonicalHeader returns the canonical form of a well-known header name.
+func CachedCanonicalHeader(v string) (string, bool) {
+	buildCommonHeaderMapsOnce()
+	s, ok := commonCanonHeader[v]
+	return s, ok
+}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
new file mode 100644
index 0000000..1e10f89
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -0,0 +1,467 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http/httptrace"
+	"net/textproto"
+	"net/url"
+	"sort"
+	"strconv"
+	"strings"
+
+	"golang.org/x/net/http/httpguts"
+	"golang.org/x/net/http2/hpack"
+)
+
+var (
+	ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
+)
+
+// Request is a subset of http.Request.
+// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
+// without creating a dependency cycle.
+type Request struct {
+	URL                 *url.URL
+	Method              string
+	Host                string
+	Header              map[string][]string
+	Trailer             map[string][]string
+	ActualContentLength int64 // 0 means 0, -1 means unknown
+}
+
+// EncodeHeadersParam contains the parameters to EncodeHeaders.
+type EncodeHeadersParam struct {
+	Request Request
+
+	// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
+	// added to the request.
+	AddGzipHeader bool
+
+	// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
+	PeerMaxHeaderListSize uint64
+
+	// DefaultUserAgent is the User-Agent header to send when the request
+	// neither contains a User-Agent nor disables it.
+	DefaultUserAgent string
+}
+
+// EncodeHeadersResult is the result of EncodeHeaders.
+type EncodeHeadersResult struct {
+	HasBody     bool
+	HasTrailers bool
+}
+
+// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
+// It validates a request and calls headerf with each pseudo-header and header
+// for the request.
+// The headerf function is called with the validated, canonicalized header name.
+func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+	req := param.Request
+
+	// Check for invalid connection-level headers.
+	if err := checkConnHeaders(req.Header); err != nil {
+		return res, err
+	}
+
+	if req.URL == nil {
+		return res, errors.New("Request.URL is nil")
+	}
+
+	host := req.Host
+	if host == "" {
+		host = req.URL.Host
+	}
+	host, err := httpguts.PunycodeHostPort(host)
+	if err != nil {
+		return res, err
+	}
+	if !httpguts.ValidHostHeader(host) {
+		return res, errors.New("invalid Host header")
+	}
+
+	// isNormalConnect is true if this is a non-extended CONNECT request.
+	isNormalConnect := false
+	var protocol string
+	if vv := req.Header[":protocol"]; len(vv) > 0 {
+		protocol = vv[0]
+	}
+	if req.Method == "CONNECT" && protocol == "" {
+		isNormalConnect = true
+	} else if protocol != "" && req.Method != "CONNECT" {
+		return res, errors.New("invalid :protocol header in non-CONNECT request")
+	}
+
+	// Validate the path, except for non-extended CONNECT requests which have no path.
+	var path string
+	if !isNormalConnect {
+		path = req.URL.RequestURI()
+		if !validPseudoPath(path) {
+			orig := path
+			path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+			if !validPseudoPath(path) {
+				if req.URL.Opaque != "" {
+					return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+				} else {
+					return res, fmt.Errorf("invalid request :path %q", orig)
+				}
+			}
+		}
+	}
+
+	// Check for any invalid headers or trailers and return an error before we
+	// potentially pollute our hpack state. (We want to be able to keep reusing
+	// the hpack encoder for future requests.)
+	if err := validateHeaders(req.Header); err != "" {
+		return res, fmt.Errorf("invalid HTTP header %s", err)
+	}
+	if err := validateHeaders(req.Trailer); err != "" {
+		return res, fmt.Errorf("invalid HTTP trailer %s", err)
+	}
+
+	trailers, err := commaSeparatedTrailers(req.Trailer)
+	if err != nil {
+		return res, err
+	}
+
+	enumerateHeaders := func(f func(name, value string)) {
+		// 8.1.2.3 Request Pseudo-Header Fields
+		// The :path pseudo-header field includes the path and query parts of the
+		// target URI (the path-absolute production and optionally a '?' character
+		// followed by the query production, see Sections 3.3 and 3.4 of
+		// [RFC3986]).
+		f(":authority", host)
+		m := req.Method
+		if m == "" {
+			m = "GET"
+		}
+		f(":method", m)
+		if !isNormalConnect {
+			f(":path", path)
+			f(":scheme", req.URL.Scheme)
+		}
+		if protocol != "" {
+			f(":protocol", protocol)
+		}
+		if trailers != "" {
+			f("trailer", trailers)
+		}
+
+		var didUA bool
+		for k, vv := range req.Header {
+			if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
+				// Host is :authority, already sent.
+				// Content-Length is automatic, set below.
+				continue
+			} else if asciiEqualFold(k, "connection") ||
+				asciiEqualFold(k, "proxy-connection") ||
+				asciiEqualFold(k, "transfer-encoding") ||
+				asciiEqualFold(k, "upgrade") ||
+				asciiEqualFold(k, "keep-alive") {
+				// Per 8.1.2.2 Connection-Specific Header
+				// Fields, don't send connection-specific
+				// fields. We have already checked if any
+				// are error-worthy so just ignore the rest.
+				continue
+			} else if asciiEqualFold(k, "user-agent") {
+				// Match Go's http1 behavior: at most one
+				// User-Agent. If set to nil or empty string,
+				// then omit it. Otherwise if not mentioned,
+				// include the default (below).
+				didUA = true
+				if len(vv) < 1 {
+					continue
+				}
+				vv = vv[:1]
+				if vv[0] == "" {
+					continue
+				}
+			} else if asciiEqualFold(k, "cookie") {
+				// Per 8.1.2.5 To allow for better compression efficiency, the
+				// Cookie header field MAY be split into separate header fields,
+				// each with one or more cookie-pairs.
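+				// e.g. "Cookie: a=1; b=2" is emitted as two "cookie" fields,
+				// "a=1" and "b=2".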
+				for _, v := range vv {
+					for {
+						p := strings.IndexByte(v, ';')
+						if p < 0 {
+							break
+						}
+						f("cookie", v[:p])
+						p++
+						// strip space after semicolon if any.
+						for p+1 <= len(v) && v[p] == ' ' {
+							p++
+						}
+						v = v[p:]
+					}
+					if len(v) > 0 {
+						f("cookie", v)
+					}
+				}
+				continue
+			} else if k == ":protocol" {
+				// :protocol pseudo-header was already sent above.
+				continue
+			}
+
+			for _, v := range vv {
+				f(k, v)
+			}
+		}
+		if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
+			f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
+		}
+		if param.AddGzipHeader {
+			f("accept-encoding", "gzip")
+		}
+		if !didUA {
+			f("user-agent", param.DefaultUserAgent)
+		}
+	}
+
+	// Do a first pass over the headers counting bytes to ensure
+	// we don't exceed cc.peerMaxHeaderListSize. This is done as a
+	// separate pass before encoding the headers to prevent
+	// modifying the hpack state.
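+	// (Each field costs len(name)+len(value)+32 octets, per RFC 7541 Section 4.1.)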
+	if param.PeerMaxHeaderListSize > 0 {
+		hlSize := uint64(0)
+		enumerateHeaders(func(name, value string) {
+			hf := hpack.HeaderField{Name: name, Value: value}
+			hlSize += uint64(hf.Size())
+		})
+
+		if hlSize > param.PeerMaxHeaderListSize {
+			return res, ErrRequestHeaderListSize
+		}
+	}
+
+	trace := httptrace.ContextClientTrace(ctx)
+
+	// Header list size is ok. Write the headers.
+	enumerateHeaders(func(name, value string) {
+		name, ascii := LowerHeader(name)
+		if !ascii {
+			// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+			// field names have to be ASCII characters (just as in HTTP/1.x).
+			return
+		}
+
+		headerf(name, value)
+
+		if trace != nil && trace.WroteHeaderField != nil {
+			trace.WroteHeaderField(name, []string{value})
+		}
+	})
+
+	res.HasBody = req.ActualContentLength != 0
+	res.HasTrailers = trailers != ""
+	return res, nil
+}
+
+// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
+// for a request.
+func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
+	// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+	if !disableCompression &&
+		len(header["Accept-Encoding"]) == 0 &&
+		len(header["Range"]) == 0 &&
+		method != "HEAD" {
+		// Request gzip only, not deflate. Deflate is ambiguous and
+		// not as universally supported anyway.
+		// See: https://zlib.net/zlib_faq.html#faq39
+		//
+		// Note that we don't request this for HEAD requests,
+		// due to a bug in nginx:
+		//   http://trac.nginx.org/nginx/ticket/358
+		//   https://golang.org/issue/5522
+		//
+		// We don't request gzip if the request is for a range, since
+		// auto-decoding a portion of a gzipped document will just fail
+		// anyway. See https://golang.org/issue/8923
+		return true
+	}
+	return false
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+//
+// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
+// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
+//
+// Certain headers are special-cased as okay but not transmitted later.
+// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
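+// Likewise, a lone "Connection: close" or "Connection: keep-alive" is
+// accepted here but dropped when encoding.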
+func checkConnHeaders(h map[string][]string) error {
+	if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("invalid Upgrade request header: %q", vv)
+	}
+	if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+		return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
+	}
+	if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+		return fmt.Errorf("invalid Connection request header: %q", vv)
+	}
+	return nil
+}
+
+func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
+	keys := make([]string, 0, len(trailer))
+	for k := range trailer {
+		k = CanonicalHeader(k)
+		switch k {
+		case "Transfer-Encoding", "Trailer", "Content-Length":
+			return "", fmt.Errorf("invalid Trailer key %q", k)
+		}
+		keys = append(keys, k)
+	}
+	if len(keys) > 0 {
+		sort.Strings(keys)
+		return strings.Join(keys, ","), nil
+	}
+	return "", nil
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//   - a non-empty string starting with '/'
+//   - the string '*', for OPTIONS requests.
+//
+// For now this is only used as a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+	return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+func validateHeaders(hdrs map[string][]string) string {
+	for k, vv := range hdrs {
+		if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
+			return fmt.Sprintf("name %q", k)
+		}
+		for _, v := range vv {
+			if !httpguts.ValidHeaderFieldValue(v) {
+				// Don't include the value in the error,
+				// because it may be sensitive.
+				return fmt.Sprintf("value for header %q", k)
+			}
+		}
+	}
+	return ""
+}
+
+// shouldSendReqContentLength reports whether we should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+	if contentLength > 0 {
+		return true
+	}
+	if contentLength < 0 {
+		return false
+	}
+	// For zero bodies, whether we send a content-length depends on the method.
+	// It also kinda doesn't matter for http2 either way, with END_STREAM.
+	switch method {
+	case "POST", "PUT", "PATCH":
+		return true
+	default:
+		return false
+	}
+}
+
+// ServerRequestParam contains the parameters to NewServerRequest.
+type ServerRequestParam struct {
+	Method                  string
+	Scheme, Authority, Path string
+	Protocol                string
+	Header                  map[string][]string
+}
+
+// ServerRequestResult is the result of NewServerRequest.
+type ServerRequestResult struct {
+	// Various http.Request fields.
+	URL        *url.URL
+	RequestURI string
+	Trailer    map[string][]string
+
+	NeedsContinue bool // client provided an "Expect: 100-continue" header
+
+	// If the request should be rejected, this is a short string suitable for passing
+	// to the http2 package's CountError function.
+	// It might be a bit odd to return errors this way rather than returning an error,
+	// but this ensures we don't forget to include a CountError reason.
+	InvalidReason string
+}
+
+func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
+	needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
+	if needsContinue {
+		delete(rp.Header, "Expect")
+	}
+	// Merge Cookie headers into one "; "-delimited value.
+	if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
+		rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
+	}
+
+	// Set up trailers.
+	var trailer map[string][]string
+	for _, v := range rp.Header["Trailer"] {
+		for _, key := range strings.Split(v, ",") {
+			key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
+			switch key {
+			case "Transfer-Encoding", "Trailer", "Content-Length":
+				// Bogus. (copy of http1 rules)
+				// Ignore.
+			default:
+				if trailer == nil {
+					trailer = make(map[string][]string)
+				}
+				trailer[key] = nil
+			}
+		}
+	}
+	delete(rp.Header, "Trailer")
+
+	// "':authority' MUST NOT include the deprecated userinfo subcomponent
+	// for "http" or "https" schemed URIs."
+	// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
+	if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
+		return ServerRequestResult{
+			InvalidReason: "userinfo_in_authority",
+		}
+	}
+
+	var url_ *url.URL
+	var requestURI string
+	if rp.Method == "CONNECT" && rp.Protocol == "" {
+		url_ = &url.URL{Host: rp.Authority}
+		requestURI = rp.Authority // mimic HTTP/1 server behavior
+	} else {
+		var err error
+		url_, err = url.ParseRequestURI(rp.Path)
+		if err != nil {
+			return ServerRequestResult{
+				InvalidReason: "bad_path",
+			}
+		}
+		requestURI = rp.Path
+	}
+
+	return ServerRequestResult{
+		URL:           url_,
+		NeedsContinue: needsContinue,
+		RequestURI:    requestURI,
+		Trailer:       trailer,
+	}
+}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
index 84fcc32..8eedb84 100644
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -297,7 +297,7 @@
 		b = append(b, up.Username...)
 		b = append(b, byte(len(up.Password)))
 		b = append(b, up.Password...)
-		// TODO(mikio): handle IO deadlines and cancelation if
+		// TODO(mikio): handle IO deadlines and cancellation if
 		// necessary
 		if _, err := rw.Write(b); err != nil {
 			return err
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
index d7d4b8b..32bdf43 100644
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -7,6 +7,7 @@
 import (
 	"context"
 	"net"
+	"net/netip"
 	"strings"
 )
 
@@ -57,7 +58,8 @@
 }
 
 func (p *PerHost) dialerForRequest(host string) Dialer {
-	if ip := net.ParseIP(host); ip != nil {
+	if nip, err := netip.ParseAddr(host); err == nil {
+		ip := net.IP(nip.AsSlice())
 		for _, net := range p.bypassNetworks {
 			if net.Contains(ip) {
 				return p.bypass
@@ -108,8 +110,8 @@
 			}
 			continue
 		}
-		if ip := net.ParseIP(host); ip != nil {
-			p.AddIP(ip)
+		if nip, err := netip.ParseAddr(host); err == nil {
+			p.AddIP(net.IP(nip.AsSlice()))
 			continue
 		}
 		if strings.HasPrefix(host, "*.") {
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
index c646a69..3aaffdd 100644
--- a/vendor/golang.org/x/net/trace/events.go
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -508,7 +508,7 @@
 	<tr class="first">
 		<td class="when">{{$el.When}}</td>
 		<td class="elapsed">{{$el.ElapsedTime}}</td>
-		<td>{{$el.Title}}
+		<td>{{$el.Title}}</td>
 	</tr>
 	{{if $.Expanded}}
 	<tr>
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index 6e5c81a..3ea4703 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -38,8 +38,15 @@
 
 // Zero clears the set s, so that it contains no CPUs.
 func (s *CPUSet) Zero() {
+	clear(s[:])
+}
+
+// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+// will silently ignore any invalid CPU bits in [CPUSet], so this is an
+// efficient way of resetting the CPU affinity of a process.
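+//
+// e.g. calling s.Fill() followed by SchedSetaffinity(0, &s) allows the
+// calling thread to run on any CPU the kernel permits.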
+func (s *CPUSet) Fill() {
 	for i := range s {
-		s[i] = 0
+		s[i] = ^cpuMask(0)
 	}
 }
 
diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go
new file mode 100644
index 0000000..37a8252
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
+// The returned slice is always a fresh copy, owned by the caller.
+// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
+// which happens in some locked-down environments and build modes.
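+// Each returned pair holds an ELF auxiliary vector tag and its value, e.g.
+// AT_PAGESZ and the system page size.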
+func Auxv() ([][2]uintptr, error) {
+	vec := runtime_getAuxv()
+	vecLen := len(vec)
+
+	if vecLen == 0 {
+		return nil, syscall.ENOENT
+	}
+
+	if vecLen%2 != 0 {
+		return nil, syscall.EINVAL
+	}
+
+	result := make([]uintptr, vecLen)
+	copy(result, vec)
+	return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
+}
diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
new file mode 100644
index 0000000..1200487
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import "syscall"
+
+func Auxv() ([][2]uintptr, error) {
+	return nil, syscall.ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
index 9e83d18..62ed126 100644
--- a/vendor/golang.org/x/sys/unix/fdset.go
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@
 
 // Zero clears the set fds.
 func (fds *FdSet) Zero() {
-	for i := range fds.Bits {
-		fds.Bits[i] = 0
-	}
+	clear(fds.Bits[:])
 }
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 848840a..309f5a2 100644
--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@
 // clear zeroes the ifreq's union field to prevent trailing garbage data from
 // being sent to the kernel if an ifreq is reused.
 func (ifr *Ifreq) clear() {
-	for i := range ifr.raw.Ifru {
-		ifr.raw.Ifru[i] = 0
-	}
+	clear(ifr.raw.Ifru[:])
 }
 
 // TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index e6f31d3..d0ed611 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@
 if [[ "$GOOS" = "linux" ]]; then
 	# Use the Docker-based build system
 	# Files generated through docker (use $cmd so you can Ctl-C the build or run)
+	set -e
 	$cmd docker build --tag generate:$GOOS $GOOS
 	$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
 	exit
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6ab02b6..d1c8b26 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -349,6 +349,9 @@
 #define _HIDIOCGRAWPHYS		HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
 #define _HIDIOCGRAWUNIQ		HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
 
+// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
+#define ETHTOOL_FAMILY_NAME	ETHTOOL_GENL_NAME
+#define ETHTOOL_FAMILY_VERSION	ETHTOOL_GENL_VERSION
 '
 
 includes_NetBSD='
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 099867d..7838ca5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,6 +602,95 @@
 	return
 }
 
+const minIovec = 8
+
+func Readv(fd int, iovs [][]byte) (n int, err error) {
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
+	n, err = readv(fd, iovecs)
+	readvRacedetect(iovecs, n, err)
+	return n, err
+}
+
+func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
+	n, err = preadv(fd, iovecs, offset)
+	readvRacedetect(iovecs, n, err)
+	return n, err
+}
+
+func Writev(fd int, iovs [][]byte) (n int, err error) {
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	n, err = writev(fd, iovecs)
+	writevRacedetect(iovecs, n)
+	return n, err
+}
+
+func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
+	iovecs := make([]Iovec, 0, minIovec)
+	iovecs = appendBytes(iovecs, iovs)
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	n, err = pwritev(fd, iovecs, offset)
+	writevRacedetect(iovecs, n)
+	return n, err
+}
+
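+// appendBytes converts each byte slice in bs to an Iovec and appends it to
+// vecs. Empty slices get a base pointer to _zero so that the kernel always
+// sees a valid, non-nil address.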
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+	for _, b := range bs {
+		var v Iovec
+		v.SetLen(len(b))
+		if len(b) > 0 {
+			v.Base = &b[0]
+		} else {
+			v.Base = (*byte)(unsafe.Pointer(&_zero))
+		}
+		vecs = append(vecs, v)
+	}
+	return vecs
+}
+
+func writevRacedetect(iovecs []Iovec, n int) {
+	if !raceenabled {
+		return
+	}
+	for i := 0; n > 0 && i < len(iovecs); i++ {
+		m := int(iovecs[i].Len)
+		if m > n {
+			m = n
+		}
+		n -= m
+		if m > 0 {
+			raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
+		}
+	}
+}
+
+func readvRacedetect(iovecs []Iovec, n int, err error) {
+	if !raceenabled {
+		return
+	}
+	for i := 0; n > 0 && i < len(iovecs); i++ {
+		m := int(iovecs[i].Len)
+		if m > n {
+			m = n
+		}
+		n -= m
+		if m > 0 {
+			raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
+		}
+	}
+	if err == nil {
+		raceAcquire(unsafe.Pointer(&ioSync))
+	}
+}
+
 //sys	connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
 //sys	sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
 
@@ -705,3 +794,7 @@
 //sys	write(fd int, p []byte) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
 //sys	munmap(addr uintptr, length uintptr) (err error)
+//sys	readv(fd int, iovecs []Iovec) (n int, err error)
+//sys	preadv(fd int, iovecs []Iovec, offset int64) (n int, err error)
+//sys	writev(fd int, iovecs []Iovec) (n int, err error)
+//sys	pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 230a945..9439af9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,6 +13,7 @@
 
 import (
 	"encoding/binary"
+	"slices"
 	"strconv"
 	"syscall"
 	"time"
@@ -417,7 +418,7 @@
 		return nil, 0, EINVAL
 	}
 	sa.raw.Family = AF_UNIX
-	for i := 0; i < n; i++ {
+	for i := range n {
 		sa.raw.Path[i] = int8(name[i])
 	}
 	// length is family (uint16), name, NUL.
@@ -507,7 +508,7 @@
 	psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
 	psm[0] = byte(sa.PSM)
 	psm[1] = byte(sa.PSM >> 8)
-	for i := 0; i < len(sa.Addr); i++ {
+	for i := range len(sa.Addr) {
 		sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
 	}
 	cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
@@ -589,11 +590,11 @@
 	sa.raw.Family = AF_CAN
 	sa.raw.Ifindex = int32(sa.Ifindex)
 	rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		sa.raw.Addr[i] = rx[i]
 	}
 	tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		sa.raw.Addr[i+4] = tx[i]
 	}
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
@@ -618,11 +619,11 @@
 	sa.raw.Family = AF_CAN
 	sa.raw.Ifindex = int32(sa.Ifindex)
 	n := (*[8]byte)(unsafe.Pointer(&sa.Name))
-	for i := 0; i < 8; i++ {
+	for i := range 8 {
 		sa.raw.Addr[i] = n[i]
 	}
 	p := (*[4]byte)(unsafe.Pointer(&sa.PGN))
-	for i := 0; i < 4; i++ {
+	for i := range 4 {
 		sa.raw.Addr[i+8] = p[i]
 	}
 	sa.raw.Addr[12] = sa.Addr
@@ -800,9 +801,7 @@
 	// one. The kernel expects SID to be in network byte order.
 	binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
 	copy(sa.raw[8:14], sa.Remote)
-	for i := 14; i < 14+IFNAMSIZ; i++ {
-		sa.raw[i] = 0
-	}
+	clear(sa.raw[14 : 14+IFNAMSIZ])
 	copy(sa.raw[14:], sa.Dev)
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
 }
@@ -911,7 +910,7 @@
 	// These are EBCDIC encoded by the kernel, but we still need to pad them
 	// with blanks. Initializing with blanks allows the caller to feed in either
 	// a padded or an unpadded string.
-	for i := 0; i < 8; i++ {
+	for i := range 8 {
 		sa.raw.Nodeid[i] = ' '
 		sa.raw.User_id[i] = ' '
 		sa.raw.Name[i] = ' '
@@ -1148,7 +1147,7 @@
 		var user [8]byte
 		var name [8]byte
 
-		for i := 0; i < 8; i++ {
+		for i := range 8 {
 			user[i] = byte(pp.User_id[i])
 			name[i] = byte(pp.Name[i])
 		}
@@ -1173,11 +1172,11 @@
 				Ifindex: int(pp.Ifindex),
 			}
 			name := (*[8]byte)(unsafe.Pointer(&sa.Name))
-			for i := 0; i < 8; i++ {
+			for i := range 8 {
 				name[i] = pp.Addr[i]
 			}
 			pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN))
-			for i := 0; i < 4; i++ {
+			for i := range 4 {
 				pgn[i] = pp.Addr[i+8]
 			}
 			addr := (*[1]byte)(unsafe.Pointer(&sa.Addr))
@@ -1188,11 +1187,11 @@
 				Ifindex: int(pp.Ifindex),
 			}
 			rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
-			for i := 0; i < 4; i++ {
+			for i := range 4 {
 				rx[i] = pp.Addr[i]
 			}
 			tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
-			for i := 0; i < 4; i++ {
+			for i := range 4 {
 				tx[i] = pp.Addr[i+4]
 			}
 			return sa, nil
@@ -2216,10 +2215,7 @@
 		return
 	}
 	for i := 0; n > 0 && i < len(iovecs); i++ {
-		m := int(iovecs[i].Len)
-		if m > n {
-			m = n
-		}
+		m := min(int(iovecs[i].Len), n)
 		n -= m
 		if m > 0 {
 			raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2270,10 +2266,7 @@
 		return
 	}
 	for i := 0; n > 0 && i < len(iovecs); i++ {
-		m := int(iovecs[i].Len)
-		if m > n {
-			m = n
-		}
+		m := min(int(iovecs[i].Len), n)
 		n -= m
 		if m > 0 {
 			raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2320,12 +2313,7 @@
 		return false
 	}
 
-	for _, g := range groups {
-		if g == gid {
-			return true
-		}
-	}
-	return false
+	return slices.Contains(groups, gid)
 }
 
 func isCapDacOverrideSet() bool {
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 8816209..34a4676 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@
 	return Statvfs1(path, buf, ST_WAIT)
 }
 
+func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
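+	// getvfsstat(2) fills buf with one Statvfs_t per mounted file system and
+	// returns the number of entries written; with an empty buf it returns the
+	// total number of mounted file systems.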
+	var (
+		_p0     unsafe.Pointer
+		bufsize uintptr
+	)
+	if len(buf) > 0 {
+		_p0 = unsafe.Pointer(&buf[0])
+		bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+	}
+	r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+	n = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 /*
  * Exposed directly
  */
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 21974af..18a3d9b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -629,7 +629,7 @@
 //sys	Kill(pid int, signum syscall.Signal) (err error)
 //sys	Lchown(path string, uid int, gid int) (err error)
 //sys	Link(path string, link string) (err error)
-//sys	Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten
+//sys	Listen(s int, backlog int) (err error) = libsocket.__xnet_listen
 //sys	Lstat(path string, stat *Stat_t) (err error)
 //sys	Madvise(b []byte, advice int) (err error)
 //sys	Mkdir(path string, mode uint32) (err error)
@@ -1102,3 +1102,90 @@
 func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
 	return ioctlPtrRet(fd, req, unsafe.Pointer(s))
 }
+
+// Ucred Helpers
+// See ucred(3c) and getpeerucred(3c)
+
+//sys	getpeerucred(fd uintptr, ucred *uintptr) (err error)
+//sys	ucredFree(ucred uintptr) = ucred_free
+//sys	ucredGet(pid int) (ucred uintptr, err error) = ucred_get
+//sys	ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
+//sys	ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
+//sys	ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
+//sys	ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
+//sys	ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
+//sys	ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
+//sys	ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
+
+// Ucred is an opaque struct that holds user credentials.
+type Ucred struct {
+	ucred uintptr
+}
+
+// We need to ensure that ucredFree is called on the underlying ucred
+// when the Ucred is garbage collected.
+func ucredFinalizer(u *Ucred) {
+	ucredFree(u.ucred)
+}
+
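+// GetPeerUcred returns the credentials of the peer endpoint of the
+// connection on the socket fd, as reported by getpeerucred(3C).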
+func GetPeerUcred(fd uintptr) (*Ucred, error) {
+	var ucred uintptr
+	err := getpeerucred(fd, &ucred)
+	if err != nil {
+		return nil, err
+	}
+	result := &Ucred{
+		ucred: ucred,
+	}
+	// set the finalizer on the result so that the ucred will be freed
+	runtime.SetFinalizer(result, ucredFinalizer)
+	return result, nil
+}
+
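+// UcredGet returns the credentials of the process with the given pid, as
+// reported by ucred_get(3C).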
+func UcredGet(pid int) (*Ucred, error) {
+	ucred, err := ucredGet(pid)
+	if err != nil {
+		return nil, err
+	}
+	result := &Ucred{
+		ucred: ucred,
+	}
+	// set the finalizer on the result so that the ucred will be freed
+	runtime.SetFinalizer(result, ucredFinalizer)
+	return result, nil
+}
+
+func (u *Ucred) Geteuid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGeteuid(u.ucred)
+}
+
+func (u *Ucred) Getruid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetruid(u.ucred)
+}
+
+func (u *Ucred) Getsuid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetsuid(u.ucred)
+}
+
+func (u *Ucred) Getegid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetegid(u.ucred)
+}
+
+func (u *Ucred) Getrgid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetrgid(u.ucred)
+}
+
+func (u *Ucred) Getsgid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetsgid(u.ucred)
+}
+
+func (u *Ucred) Getpid() int {
+	defer runtime.KeepAlive(u)
+	return ucredGetpid(u.ucred)
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 6ebc48b..b6db27d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -319,6 +319,7 @@
 	AUDIT_INTEGRITY_POLICY_RULE                 = 0x70f
 	AUDIT_INTEGRITY_RULE                        = 0x70d
 	AUDIT_INTEGRITY_STATUS                      = 0x70a
+	AUDIT_INTEGRITY_USERSPACE                   = 0x710
 	AUDIT_IPC                                   = 0x517
 	AUDIT_IPC_SET_PERM                          = 0x51f
 	AUDIT_IPE_ACCESS                            = 0x58c
@@ -327,6 +328,8 @@
 	AUDIT_KERNEL                                = 0x7d0
 	AUDIT_KERNEL_OTHER                          = 0x524
 	AUDIT_KERN_MODULE                           = 0x532
+	AUDIT_LANDLOCK_ACCESS                       = 0x58f
+	AUDIT_LANDLOCK_DOMAIN                       = 0x590
 	AUDIT_LAST_FEATURE                          = 0x1
 	AUDIT_LAST_KERN_ANOM_MSG                    = 0x707
 	AUDIT_LAST_USER_MSG                         = 0x4af
@@ -491,6 +494,7 @@
 	BPF_F_BEFORE                                = 0x8
 	BPF_F_ID                                    = 0x20
 	BPF_F_NETFILTER_IP_DEFRAG                   = 0x1
+	BPF_F_PREORDER                              = 0x40
 	BPF_F_QUERY_EFFECTIVE                       = 0x1
 	BPF_F_REDIRECT_FLAGS                        = 0x19
 	BPF_F_REPLACE                               = 0x4
@@ -527,6 +531,7 @@
 	BPF_LDX                                     = 0x1
 	BPF_LEN                                     = 0x80
 	BPF_LL_OFF                                  = -0x200000
+	BPF_LOAD_ACQ                                = 0x100
 	BPF_LSH                                     = 0x60
 	BPF_MAJOR_VERSION                           = 0x1
 	BPF_MAXINSNS                                = 0x1000
@@ -554,6 +559,7 @@
 	BPF_RET                                     = 0x6
 	BPF_RSH                                     = 0x70
 	BPF_ST                                      = 0x2
+	BPF_STORE_REL                               = 0x110
 	BPF_STX                                     = 0x3
 	BPF_SUB                                     = 0x10
 	BPF_TAG_SIZE                                = 0x8
@@ -843,9 +849,9 @@
 	DM_UUID_FLAG                                = 0x4000
 	DM_UUID_LEN                                 = 0x81
 	DM_VERSION                                  = 0xc138fd00
-	DM_VERSION_EXTRA                            = "-ioctl (2023-03-01)"
+	DM_VERSION_EXTRA                            = "-ioctl (2025-04-28)"
 	DM_VERSION_MAJOR                            = 0x4
-	DM_VERSION_MINOR                            = 0x30
+	DM_VERSION_MINOR                            = 0x32
 	DM_VERSION_PATCHLEVEL                       = 0x0
 	DT_BLK                                      = 0x6
 	DT_CHR                                      = 0x2
@@ -936,11 +942,10 @@
 	EPOLL_CTL_MOD                               = 0x3
 	EPOLL_IOC_TYPE                              = 0x8a
 	EROFS_SUPER_MAGIC_V1                        = 0xe0f5e1e2
-	ESP_V4_FLOW                                 = 0xa
-	ESP_V6_FLOW                                 = 0xc
-	ETHER_FLOW                                  = 0x12
 	ETHTOOL_BUSINFO_LEN                         = 0x20
 	ETHTOOL_EROMVERS_LEN                        = 0x20
+	ETHTOOL_FAMILY_NAME                         = "ethtool"
+	ETHTOOL_FAMILY_VERSION                      = 0x1
 	ETHTOOL_FEC_AUTO                            = 0x2
 	ETHTOOL_FEC_BASER                           = 0x10
 	ETHTOOL_FEC_LLRS                            = 0x20
@@ -1203,13 +1208,18 @@
 	FAN_DENY                                    = 0x2
 	FAN_ENABLE_AUDIT                            = 0x40
 	FAN_EPIDFD                                  = -0x2
+	FAN_ERRNO_BITS                              = 0x8
+	FAN_ERRNO_MASK                              = 0xff
+	FAN_ERRNO_SHIFT                             = 0x18
 	FAN_EVENT_INFO_TYPE_DFID                    = 0x3
 	FAN_EVENT_INFO_TYPE_DFID_NAME               = 0x2
 	FAN_EVENT_INFO_TYPE_ERROR                   = 0x5
 	FAN_EVENT_INFO_TYPE_FID                     = 0x1
+	FAN_EVENT_INFO_TYPE_MNT                     = 0x7
 	FAN_EVENT_INFO_TYPE_NEW_DFID_NAME           = 0xc
 	FAN_EVENT_INFO_TYPE_OLD_DFID_NAME           = 0xa
 	FAN_EVENT_INFO_TYPE_PIDFD                   = 0x4
+	FAN_EVENT_INFO_TYPE_RANGE                   = 0x6
 	FAN_EVENT_METADATA_LEN                      = 0x18
 	FAN_EVENT_ON_CHILD                          = 0x8000000
 	FAN_FS_ERROR                                = 0x8000
@@ -1224,9 +1234,12 @@
 	FAN_MARK_IGNORED_SURV_MODIFY                = 0x40
 	FAN_MARK_IGNORE_SURV                        = 0x440
 	FAN_MARK_INODE                              = 0x0
+	FAN_MARK_MNTNS                              = 0x110
 	FAN_MARK_MOUNT                              = 0x10
 	FAN_MARK_ONLYDIR                            = 0x8
 	FAN_MARK_REMOVE                             = 0x2
+	FAN_MNT_ATTACH                              = 0x1000000
+	FAN_MNT_DETACH                              = 0x2000000
 	FAN_MODIFY                                  = 0x2
 	FAN_MOVE                                    = 0xc0
 	FAN_MOVED_FROM                              = 0x40
@@ -1240,12 +1253,15 @@
 	FAN_OPEN_EXEC                               = 0x1000
 	FAN_OPEN_EXEC_PERM                          = 0x40000
 	FAN_OPEN_PERM                               = 0x10000
+	FAN_PRE_ACCESS                              = 0x100000
 	FAN_Q_OVERFLOW                              = 0x4000
 	FAN_RENAME                                  = 0x10000000
 	FAN_REPORT_DFID_NAME                        = 0xc00
 	FAN_REPORT_DFID_NAME_TARGET                 = 0x1e00
 	FAN_REPORT_DIR_FID                          = 0x400
+	FAN_REPORT_FD_ERROR                         = 0x2000
 	FAN_REPORT_FID                              = 0x200
+	FAN_REPORT_MNT                              = 0x4000
 	FAN_REPORT_NAME                             = 0x800
 	FAN_REPORT_PIDFD                            = 0x80
 	FAN_REPORT_TARGET_FID                       = 0x1000
@@ -1265,6 +1281,7 @@
 	FIB_RULE_PERMANENT                          = 0x1
 	FIB_RULE_UNRESOLVED                         = 0x4
 	FIDEDUPERANGE                               = 0xc0189436
+	FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED             = 0x1
 	FSCRYPT_KEY_DESCRIPTOR_SIZE                 = 0x8
 	FSCRYPT_KEY_DESC_PREFIX                     = "fscrypt:"
 	FSCRYPT_KEY_DESC_PREFIX_SIZE                = 0x8
@@ -1330,8 +1347,10 @@
 	FUSE_SUPER_MAGIC                            = 0x65735546
 	FUTEXFS_SUPER_MAGIC                         = 0xbad1dea
 	F_ADD_SEALS                                 = 0x409
+	F_CREATED_QUERY                             = 0x404
 	F_DUPFD                                     = 0x0
 	F_DUPFD_CLOEXEC                             = 0x406
+	F_DUPFD_QUERY                               = 0x403
 	F_EXLCK                                     = 0x4
 	F_GETFD                                     = 0x1
 	F_GETFL                                     = 0x3
@@ -1551,6 +1570,7 @@
 	IPPROTO_ROUTING                             = 0x2b
 	IPPROTO_RSVP                                = 0x2e
 	IPPROTO_SCTP                                = 0x84
+	IPPROTO_SMC                                 = 0x100
 	IPPROTO_TCP                                 = 0x6
 	IPPROTO_TP                                  = 0x1d
 	IPPROTO_UDP                                 = 0x11
@@ -1570,7 +1590,6 @@
 	IPV6_DONTFRAG                               = 0x3e
 	IPV6_DROP_MEMBERSHIP                        = 0x15
 	IPV6_DSTOPTS                                = 0x3b
-	IPV6_FLOW                                   = 0x11
 	IPV6_FREEBIND                               = 0x4e
 	IPV6_HDRINCL                                = 0x24
 	IPV6_HOPLIMIT                               = 0x34
@@ -1621,8 +1640,9 @@
 	IPV6_TRANSPARENT                            = 0x4b
 	IPV6_UNICAST_HOPS                           = 0x10
 	IPV6_UNICAST_IF                             = 0x4c
-	IPV6_USER_FLOW                              = 0xe
 	IPV6_V6ONLY                                 = 0x1a
+	IPV6_VERSION                                = 0x60
+	IPV6_VERSION_MASK                           = 0xf0
 	IPV6_XFRM_POLICY                            = 0x23
 	IP_ADD_MEMBERSHIP                           = 0x23
 	IP_ADD_SOURCE_MEMBERSHIP                    = 0x27
@@ -1681,7 +1701,6 @@
 	IP_TTL                                      = 0x2
 	IP_UNBLOCK_SOURCE                           = 0x25
 	IP_UNICAST_IF                               = 0x32
-	IP_USER_FLOW                                = 0xd
 	IP_XFRM_POLICY                              = 0x11
 	ISOFS_SUPER_MAGIC                           = 0x9660
 	ISTRIP                                      = 0x20
@@ -1803,7 +1822,11 @@
 	LANDLOCK_ACCESS_FS_WRITE_FILE               = 0x2
 	LANDLOCK_ACCESS_NET_BIND_TCP                = 0x1
 	LANDLOCK_ACCESS_NET_CONNECT_TCP             = 0x2
+	LANDLOCK_CREATE_RULESET_ERRATA              = 0x2
 	LANDLOCK_CREATE_RULESET_VERSION             = 0x1
+	LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON      = 0x2
+	LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF    = 0x1
+	LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF   = 0x4
 	LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET         = 0x1
 	LANDLOCK_SCOPE_SIGNAL                       = 0x2
 	LINUX_REBOOT_CMD_CAD_OFF                    = 0x0
@@ -1867,6 +1890,7 @@
 	MADV_UNMERGEABLE                            = 0xd
 	MADV_WILLNEED                               = 0x3
 	MADV_WIPEONFORK                             = 0x12
+	MAP_DROPPABLE                               = 0x8
 	MAP_FILE                                    = 0x0
 	MAP_FIXED                                   = 0x10
 	MAP_FIXED_NOREPLACE                         = 0x100000
@@ -1967,6 +1991,7 @@
 	MSG_PEEK                                    = 0x2
 	MSG_PROXY                                   = 0x10
 	MSG_RST                                     = 0x1000
+	MSG_SOCK_DEVMEM                             = 0x2000000
 	MSG_SYN                                     = 0x400
 	MSG_TRUNC                                   = 0x20
 	MSG_TRYHARD                                 = 0x4
@@ -2083,6 +2108,7 @@
 	NFC_ATR_REQ_MAXSIZE                         = 0x40
 	NFC_ATR_RES_GB_MAXSIZE                      = 0x2f
 	NFC_ATR_RES_MAXSIZE                         = 0x40
+	NFC_ATS_MAXSIZE                             = 0x14
 	NFC_COMM_ACTIVE                             = 0x0
 	NFC_COMM_PASSIVE                            = 0x1
 	NFC_DEVICE_NAME_MAXSIZE                     = 0x8
@@ -2163,6 +2189,7 @@
 	NFNL_SUBSYS_QUEUE                           = 0x3
 	NFNL_SUBSYS_ULOG                            = 0x4
 	NFS_SUPER_MAGIC                             = 0x6969
+	NFT_BITWISE_BOOL                            = 0x0
 	NFT_CHAIN_FLAGS                             = 0x7
 	NFT_CHAIN_MAXNAMELEN                        = 0x100
 	NFT_CT_MAX                                  = 0x17
@@ -2475,6 +2502,10 @@
 	PR_FP_EXC_UND                               = 0x40000
 	PR_FP_MODE_FR                               = 0x1
 	PR_FP_MODE_FRE                              = 0x2
+	PR_FUTEX_HASH                               = 0x4e
+	PR_FUTEX_HASH_GET_IMMUTABLE                 = 0x3
+	PR_FUTEX_HASH_GET_SLOTS                     = 0x2
+	PR_FUTEX_HASH_SET_SLOTS                     = 0x1
 	PR_GET_AUXV                                 = 0x41555856
 	PR_GET_CHILD_SUBREAPER                      = 0x25
 	PR_GET_DUMPABLE                             = 0x3
@@ -2491,6 +2522,7 @@
 	PR_GET_PDEATHSIG                            = 0x2
 	PR_GET_SECCOMP                              = 0x15
 	PR_GET_SECUREBITS                           = 0x1b
+	PR_GET_SHADOW_STACK_STATUS                  = 0x4a
 	PR_GET_SPECULATION_CTRL                     = 0x34
 	PR_GET_TAGGED_ADDR_CTRL                     = 0x38
 	PR_GET_THP_DISABLE                          = 0x2a
@@ -2499,6 +2531,7 @@
 	PR_GET_TIMING                               = 0xd
 	PR_GET_TSC                                  = 0x19
 	PR_GET_UNALIGN                              = 0x5
+	PR_LOCK_SHADOW_STACK_STATUS                 = 0x4c
 	PR_MCE_KILL                                 = 0x21
 	PR_MCE_KILL_CLEAR                           = 0x0
 	PR_MCE_KILL_DEFAULT                         = 0x2
@@ -2525,6 +2558,8 @@
 	PR_PAC_GET_ENABLED_KEYS                     = 0x3d
 	PR_PAC_RESET_KEYS                           = 0x36
 	PR_PAC_SET_ENABLED_KEYS                     = 0x3c
+	PR_PMLEN_MASK                               = 0x7f000000
+	PR_PMLEN_SHIFT                              = 0x18
 	PR_PPC_DEXCR_CTRL_CLEAR                     = 0x4
 	PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC              = 0x10
 	PR_PPC_DEXCR_CTRL_EDITABLE                  = 0x1
@@ -2592,6 +2627,7 @@
 	PR_SET_PTRACER                              = 0x59616d61
 	PR_SET_SECCOMP                              = 0x16
 	PR_SET_SECUREBITS                           = 0x1c
+	PR_SET_SHADOW_STACK_STATUS                  = 0x4b
 	PR_SET_SPECULATION_CTRL                     = 0x35
 	PR_SET_SYSCALL_USER_DISPATCH                = 0x3b
 	PR_SET_TAGGED_ADDR_CTRL                     = 0x37
@@ -2602,6 +2638,9 @@
 	PR_SET_UNALIGN                              = 0x6
 	PR_SET_VMA                                  = 0x53564d41
 	PR_SET_VMA_ANON_NAME                        = 0x0
+	PR_SHADOW_STACK_ENABLE                      = 0x1
+	PR_SHADOW_STACK_PUSH                        = 0x4
+	PR_SHADOW_STACK_WRITE                       = 0x2
 	PR_SME_GET_VL                               = 0x40
 	PR_SME_SET_VL                               = 0x3f
 	PR_SME_SET_VL_ONEXEC                        = 0x40000
@@ -2626,6 +2665,10 @@
 	PR_TAGGED_ADDR_ENABLE                       = 0x1
 	PR_TASK_PERF_EVENTS_DISABLE                 = 0x1f
 	PR_TASK_PERF_EVENTS_ENABLE                  = 0x20
+	PR_TIMER_CREATE_RESTORE_IDS                 = 0x4d
+	PR_TIMER_CREATE_RESTORE_IDS_GET             = 0x2
+	PR_TIMER_CREATE_RESTORE_IDS_OFF             = 0x0
+	PR_TIMER_CREATE_RESTORE_IDS_ON              = 0x1
 	PR_TIMING_STATISTICAL                       = 0x0
 	PR_TIMING_TIMESTAMP                         = 0x1
 	PR_TSC_ENABLE                               = 0x1
@@ -2706,6 +2749,7 @@
 	PTRACE_SETREGSET                            = 0x4205
 	PTRACE_SETSIGINFO                           = 0x4203
 	PTRACE_SETSIGMASK                           = 0x420b
+	PTRACE_SET_SYSCALL_INFO                     = 0x4212
 	PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG     = 0x4210
 	PTRACE_SINGLESTEP                           = 0x9
 	PTRACE_SYSCALL                              = 0x18
@@ -2769,7 +2813,7 @@
 	RTAX_UNSPEC                                 = 0x0
 	RTAX_WINDOW                                 = 0x3
 	RTA_ALIGNTO                                 = 0x4
-	RTA_MAX                                     = 0x1e
+	RTA_MAX                                     = 0x1f
 	RTCF_DIRECTSRC                              = 0x4000000
 	RTCF_DOREDIRECT                             = 0x1000000
 	RTCF_LOG                                    = 0x2000000
@@ -2846,10 +2890,12 @@
 	RTM_DELACTION                               = 0x31
 	RTM_DELADDR                                 = 0x15
 	RTM_DELADDRLABEL                            = 0x49
+	RTM_DELANYCAST                              = 0x3d
 	RTM_DELCHAIN                                = 0x65
 	RTM_DELLINK                                 = 0x11
 	RTM_DELLINKPROP                             = 0x6d
 	RTM_DELMDB                                  = 0x55
+	RTM_DELMULTICAST                            = 0x39
 	RTM_DELNEIGH                                = 0x1d
 	RTM_DELNETCONF                              = 0x51
 	RTM_DELNEXTHOP                              = 0x69
@@ -2899,11 +2945,13 @@
 	RTM_NEWACTION                               = 0x30
 	RTM_NEWADDR                                 = 0x14
 	RTM_NEWADDRLABEL                            = 0x48
+	RTM_NEWANYCAST                              = 0x3c
 	RTM_NEWCACHEREPORT                          = 0x60
 	RTM_NEWCHAIN                                = 0x64
 	RTM_NEWLINK                                 = 0x10
 	RTM_NEWLINKPROP                             = 0x6c
 	RTM_NEWMDB                                  = 0x54
+	RTM_NEWMULTICAST                            = 0x38
 	RTM_NEWNDUSEROPT                            = 0x44
 	RTM_NEWNEIGH                                = 0x1c
 	RTM_NEWNEIGHTBL                             = 0x40
@@ -2911,7 +2959,6 @@
 	RTM_NEWNEXTHOP                              = 0x68
 	RTM_NEWNEXTHOPBUCKET                        = 0x74
 	RTM_NEWNSID                                 = 0x58
-	RTM_NEWNVLAN                                = 0x70
 	RTM_NEWPREFIX                               = 0x34
 	RTM_NEWQDISC                                = 0x24
 	RTM_NEWROUTE                                = 0x18
@@ -2920,6 +2967,7 @@
 	RTM_NEWTCLASS                               = 0x28
 	RTM_NEWTFILTER                              = 0x2c
 	RTM_NEWTUNNEL                               = 0x78
+	RTM_NEWVLAN                                 = 0x70
 	RTM_NR_FAMILIES                             = 0x1b
 	RTM_NR_MSGTYPES                             = 0x6c
 	RTM_SETDCB                                  = 0x4f
@@ -2952,6 +3000,7 @@
 	RTPROT_NTK                                  = 0xf
 	RTPROT_OPENR                                = 0x63
 	RTPROT_OSPF                                 = 0xbc
+	RTPROT_OVN                                  = 0x54
 	RTPROT_RA                                   = 0x9
 	RTPROT_REDIRECT                             = 0x1
 	RTPROT_RIP                                  = 0xbd
@@ -2969,11 +3018,12 @@
 	RUSAGE_THREAD                               = 0x1
 	RWF_APPEND                                  = 0x10
 	RWF_ATOMIC                                  = 0x40
+	RWF_DONTCACHE                               = 0x80
 	RWF_DSYNC                                   = 0x2
 	RWF_HIPRI                                   = 0x1
 	RWF_NOAPPEND                                = 0x20
 	RWF_NOWAIT                                  = 0x8
-	RWF_SUPPORTED                               = 0x7f
+	RWF_SUPPORTED                               = 0xff
 	RWF_SYNC                                    = 0x4
 	RWF_WRITE_LIFE_NOT_SET                      = 0x0
 	SCHED_BATCH                                 = 0x3
@@ -3253,6 +3303,7 @@
 	STATX_BTIME                                 = 0x800
 	STATX_CTIME                                 = 0x80
 	STATX_DIOALIGN                              = 0x2000
+	STATX_DIO_READ_ALIGN                        = 0x20000
 	STATX_GID                                   = 0x10
 	STATX_INO                                   = 0x100
 	STATX_MNT_ID                                = 0x1000
@@ -3304,7 +3355,7 @@
 	TASKSTATS_GENL_NAME                         = "TASKSTATS"
 	TASKSTATS_GENL_VERSION                      = 0x1
 	TASKSTATS_TYPE_MAX                          = 0x6
-	TASKSTATS_VERSION                           = 0xe
+	TASKSTATS_VERSION                           = 0x10
 	TCIFLUSH                                    = 0x0
 	TCIOFF                                      = 0x2
 	TCIOFLUSH                                   = 0x2
@@ -3374,8 +3425,6 @@
 	TCP_TX_DELAY                                = 0x25
 	TCP_ULP                                     = 0x1f
 	TCP_USER_TIMEOUT                            = 0x12
-	TCP_V4_FLOW                                 = 0x1
-	TCP_V6_FLOW                                 = 0x5
 	TCP_WINDOW_CLAMP                            = 0xa
 	TCP_ZEROCOPY_RECEIVE                        = 0x23
 	TFD_TIMER_ABSTIME                           = 0x1
@@ -3485,6 +3534,7 @@
 	TP_STATUS_WRONG_FORMAT                      = 0x4
 	TRACEFS_MAGIC                               = 0x74726163
 	TS_COMM_LEN                                 = 0x20
+	UBI_IOCECNFO                                = 0xc01c6f06
 	UDF_SUPER_MAGIC                             = 0x15013346
 	UDP_CORK                                    = 0x1
 	UDP_ENCAP                                   = 0x64
@@ -3497,8 +3547,6 @@
 	UDP_NO_CHECK6_RX                            = 0x66
 	UDP_NO_CHECK6_TX                            = 0x65
 	UDP_SEGMENT                                 = 0x67
-	UDP_V4_FLOW                                 = 0x2
-	UDP_V6_FLOW                                 = 0x6
 	UMOUNT_NOFOLLOW                             = 0x8
 	USBDEVICE_SUPER_MAGIC                       = 0x9fa2
 	UTIME_NOW                                   = 0x3fffffff
@@ -3541,7 +3589,7 @@
 	WDIOS_TEMPPANIC                             = 0x4
 	WDIOS_UNKNOWN                               = -0x1
 	WEXITED                                     = 0x4
-	WGALLOWEDIP_A_MAX                           = 0x3
+	WGALLOWEDIP_A_MAX                           = 0x4
 	WGDEVICE_A_MAX                              = 0x8
 	WGPEER_A_MAX                                = 0xa
 	WG_CMD_MAX                                  = 0x1
@@ -3655,6 +3703,7 @@
 	XDP_SHARED_UMEM                             = 0x1
 	XDP_STATISTICS                              = 0x7
 	XDP_TXMD_FLAGS_CHECKSUM                     = 0x2
+	XDP_TXMD_FLAGS_LAUNCH_TIME                  = 0x4
 	XDP_TXMD_FLAGS_TIMESTAMP                    = 0x1
 	XDP_TX_METADATA                             = 0x2
 	XDP_TX_RING                                 = 0x3
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index c0d45e3..1c37f9f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -116,6 +117,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -304,6 +307,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -357,6 +361,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -369,6 +374,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c731d24..6f54d34 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -116,6 +117,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -305,6 +308,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -358,6 +362,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -370,6 +375,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 680018a..783ec5c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -310,6 +313,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -363,6 +367,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -375,6 +380,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index a63909f..ca83d3b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -109,6 +110,7 @@
 	F_SETOWN                         = 0x8
 	F_UNLCK                          = 0x2
 	F_WRLCK                          = 0x1
+	GCS_MAGIC                        = 0x47435300
 	HIDIOCGRAWINFO                   = 0x80084803
 	HIDIOCGRDESC                     = 0x90044802
 	HIDIOCGRDESCSIZE                 = 0x80044801
@@ -119,6 +121,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -302,6 +306,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -355,6 +360,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -367,6 +373,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9b0a257..607e611 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -116,6 +117,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -297,6 +300,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -350,6 +354,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -362,6 +367,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 958e6e0..b9cb5bd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +306,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -356,6 +360,7 @@
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
@@ -368,6 +373,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x1004
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x1006
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 50c7f25..65b078a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +306,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -356,6 +360,7 @@
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
@@ -368,6 +373,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x1004
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x1006
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ced21d6..5298a30 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +306,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -356,6 +360,7 @@
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
@@ -368,6 +373,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x1004
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x1006
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 226c044..7bc557c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x80
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -303,6 +306,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -356,6 +360,7 @@
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x11
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x12
@@ -368,6 +373,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x1004
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x1006
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 3122737..152399b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x300
 	CSIZE                            = 0x300
 	CSTOPB                           = 0x400
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x40
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x80
 	IUCLC                            = 0x1000
 	IXOFF                            = 0x400
@@ -358,6 +361,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -411,6 +415,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x14
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x15
@@ -423,6 +428,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x10
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x12
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index eb5d346..1a1ce24 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x300
 	CSIZE                            = 0x300
 	CSTOPB                           = 0x400
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x40
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x80
 	IUCLC                            = 0x1000
 	IXOFF                            = 0x400
@@ -362,6 +365,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -415,6 +419,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x14
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x15
@@ -427,6 +432,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x10
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x12
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e921ebc..4231a1f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x300
 	CSIZE                            = 0x300
 	CSTOPB                           = 0x400
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x40
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x80
 	IUCLC                            = 0x1000
 	IXOFF                            = 0x400
@@ -362,6 +365,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -415,6 +419,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x14
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x15
@@ -427,6 +432,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x10
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x12
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 38ba81c..21c0e95 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xffffff0f
+	IPV6_FLOWLABEL_MASK              = 0xffff0f00
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -294,6 +297,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -347,6 +351,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -359,6 +364,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 71f0400..f00d1cd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -68,6 +68,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0xfd12
 	ECCGETLAYOUT                     = 0x81484d11
 	ECCGETSTATS                      = 0x80104d12
 	ECHOCTL                          = 0x200
@@ -115,6 +116,8 @@
 	IN_CLOEXEC                       = 0x80000
 	IN_NONBLOCK                      = 0x800
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x7b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -366,6 +369,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x36
 	SCM_TIMESTAMPING_PKTINFO         = 0x3a
 	SCM_TIMESTAMPNS                  = 0x23
+	SCM_TS_OPT_ID                    = 0x51
 	SCM_TXTIME                       = 0x3d
 	SCM_WIFI_STATUS                  = 0x29
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x40182103
@@ -419,6 +423,7 @@
 	SO_OOBINLINE                     = 0xa
 	SO_PASSCRED                      = 0x10
 	SO_PASSPIDFD                     = 0x4c
+	SO_PASSRIGHTS                    = 0x53
 	SO_PASSSEC                       = 0x22
 	SO_PEEK_OFF                      = 0x2a
 	SO_PEERCRED                      = 0x11
@@ -431,6 +436,7 @@
 	SO_RCVBUFFORCE                   = 0x21
 	SO_RCVLOWAT                      = 0x12
 	SO_RCVMARK                       = 0x4b
+	SO_RCVPRIORITY                   = 0x52
 	SO_RCVTIMEO                      = 0x14
 	SO_RCVTIMEO_NEW                  = 0x42
 	SO_RCVTIMEO_OLD                  = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index c44a313..bc8d539 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -71,6 +71,7 @@
 	CS8                              = 0x30
 	CSIZE                            = 0x30
 	CSTOPB                           = 0x40
+	DM_MPATH_PROBE_PATHS             = 0x2000fd12
 	ECCGETLAYOUT                     = 0x41484d11
 	ECCGETSTATS                      = 0x40104d12
 	ECHOCTL                          = 0x200
@@ -119,6 +120,8 @@
 	IN_CLOEXEC                       = 0x400000
 	IN_NONBLOCK                      = 0x4000
 	IOCTL_VM_SOCKETS_GET_LOCAL_CID   = 0x200007b9
+	IPV6_FLOWINFO_MASK               = 0xfffffff
+	IPV6_FLOWLABEL_MASK              = 0xfffff
 	ISIG                             = 0x1
 	IUCLC                            = 0x200
 	IXOFF                            = 0x1000
@@ -357,6 +360,7 @@
 	SCM_TIMESTAMPING_OPT_STATS       = 0x38
 	SCM_TIMESTAMPING_PKTINFO         = 0x3c
 	SCM_TIMESTAMPNS                  = 0x21
+	SCM_TS_OPT_ID                    = 0x5a
 	SCM_TXTIME                       = 0x3f
 	SCM_WIFI_STATUS                  = 0x25
 	SECCOMP_IOCTL_NOTIF_ADDFD        = 0x80182103
@@ -458,6 +462,7 @@
 	SO_OOBINLINE                     = 0x100
 	SO_PASSCRED                      = 0x2
 	SO_PASSPIDFD                     = 0x55
+	SO_PASSRIGHTS                    = 0x5c
 	SO_PASSSEC                       = 0x1f
 	SO_PEEK_OFF                      = 0x26
 	SO_PEERCRED                      = 0x40
@@ -470,6 +475,7 @@
 	SO_RCVBUFFORCE                   = 0x100b
 	SO_RCVLOWAT                      = 0x800
 	SO_RCVMARK                       = 0x54
+	SO_RCVPRIORITY                   = 0x5b
 	SO_RCVTIMEO                      = 0x2000
 	SO_RCVTIMEO_NEW                  = 0x44
 	SO_RCVTIMEO_OLD                  = 0x2000
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 24b346e..813c05b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -2512,6 +2512,90 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index ebd2131..fda3285 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -738,6 +738,26 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_readv(SB)
+GLOBL	·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA	·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_preadv(SB)
+GLOBL	·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA	·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_writev(SB)
+GLOBL	·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA	·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pwritev(SB)
+GLOBL	·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
 TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat64(SB)
 GLOBL	·libc_fstat64_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 824b9c2..e6f58f3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -2512,6 +2512,90 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+	var _p0 unsafe.Pointer
+	if len(iovecs) > 0 {
+		_p0 = unsafe.Pointer(&iovecs[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Fstat(fd int, stat *Stat_t) (err error) {
 	_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4f178a2..7f8998b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -738,6 +738,26 @@
 GLOBL	·libc_munmap_trampoline_addr(SB), RODATA, $8
 DATA	·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
 
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_readv(SB)
+GLOBL	·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA	·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_preadv(SB)
+GLOBL	·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA	·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_writev(SB)
+GLOBL	·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA	·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+	JMP	libc_pwritev(SB)
+GLOBL	·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA	·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
 TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
 	JMP	libc_fstat(SB)
 GLOBL	·libc_fstat_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 829b87f..b4609c2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -72,7 +72,7 @@
 //go:cgo_import_dynamic libc_kill kill "libc.so"
 //go:cgo_import_dynamic libc_lchown lchown "libc.so"
 //go:cgo_import_dynamic libc_link link "libc.so"
-//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so"
+//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so"
 //go:cgo_import_dynamic libc_lstat lstat "libc.so"
 //go:cgo_import_dynamic libc_madvise madvise "libc.so"
 //go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
@@ -141,6 +141,16 @@
 //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
 //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
 //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
+//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
+//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
+//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
 //go:cgo_import_dynamic libc_port_create port_create "libc.so"
 //go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
 //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@@ -211,7 +221,7 @@
 //go:linkname procKill libc_kill
 //go:linkname procLchown libc_lchown
 //go:linkname procLink libc_link
-//go:linkname proc__xnet_llisten libc___xnet_llisten
+//go:linkname proc__xnet_listen libc___xnet_listen
 //go:linkname procLstat libc_lstat
 //go:linkname procMadvise libc_madvise
 //go:linkname procMkdir libc_mkdir
@@ -280,6 +290,16 @@
 //go:linkname procgetpeername libc_getpeername
 //go:linkname procsetsockopt libc_setsockopt
 //go:linkname procrecvfrom libc_recvfrom
+//go:linkname procgetpeerucred libc_getpeerucred
+//go:linkname procucred_get libc_ucred_get
+//go:linkname procucred_geteuid libc_ucred_geteuid
+//go:linkname procucred_getegid libc_ucred_getegid
+//go:linkname procucred_getruid libc_ucred_getruid
+//go:linkname procucred_getrgid libc_ucred_getrgid
+//go:linkname procucred_getsuid libc_ucred_getsuid
+//go:linkname procucred_getsgid libc_ucred_getsgid
+//go:linkname procucred_getpid libc_ucred_getpid
+//go:linkname procucred_free libc_ucred_free
 //go:linkname procport_create libc_port_create
 //go:linkname procport_associate libc_port_associate
 //go:linkname procport_dissociate libc_port_dissociate
@@ -351,7 +371,7 @@
 	procKill,
 	procLchown,
 	procLink,
-	proc__xnet_llisten,
+	proc__xnet_listen,
 	procLstat,
 	procMadvise,
 	procMkdir,
@@ -420,6 +440,16 @@
 	procgetpeername,
 	procsetsockopt,
 	procrecvfrom,
+	procgetpeerucred,
+	procucred_get,
+	procucred_geteuid,
+	procucred_getegid,
+	procucred_getruid,
+	procucred_getrgid,
+	procucred_getsuid,
+	procucred_getsgid,
+	procucred_getpid,
+	procucred_free,
 	procport_create,
 	procport_associate,
 	procport_dissociate,
@@ -1148,7 +1178,7 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Listen(s int, backlog int) (err error) {
-	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
 	}
@@ -2029,6 +2059,90 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGet(pid int) (ucred uintptr, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+	ucred = uintptr(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGeteuid(ucred uintptr) (uid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetegid(ucred uintptr) (gid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetruid(ucred uintptr) (uid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetrgid(ucred uintptr) (gid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsuid(ucred uintptr) (uid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	uid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsgid(ucred uintptr) (gid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	gid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetpid(ucred uintptr) (pid int) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	pid = int(r0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredFree(ucred uintptr) {
+	sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func port_create() (n int, err error) {
 	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
 	n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 524b082..aca56ee 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -458,4 +458,9 @@
 	SYS_LSM_SET_SELF_ATTR            = 460
 	SYS_LSM_LIST_MODULES             = 461
 	SYS_MSEAL                        = 462
+	SYS_SETXATTRAT                   = 463
+	SYS_GETXATTRAT                   = 464
+	SYS_LISTXATTRAT                  = 465
+	SYS_REMOVEXATTRAT                = 466
+	SYS_OPEN_TREE_ATTR               = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f485dbf..2ea1ef5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -381,4 +381,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 70b35bf..d22c8af 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -422,4 +422,9 @@
 	SYS_LSM_SET_SELF_ATTR            = 460
 	SYS_LSM_LIST_MODULES             = 461
 	SYS_MSEAL                        = 462
+	SYS_SETXATTRAT                   = 463
+	SYS_GETXATTRAT                   = 464
+	SYS_LISTXATTRAT                  = 465
+	SYS_REMOVEXATTRAT                = 466
+	SYS_OPEN_TREE_ATTR               = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 1893e2f..5ee264a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -325,4 +325,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 16a4017..f9f03eb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -321,4 +321,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 7e567f1..87c2118 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -442,4 +442,9 @@
 	SYS_LSM_SET_SELF_ATTR            = 4460
 	SYS_LSM_LIST_MODULES             = 4461
 	SYS_MSEAL                        = 4462
+	SYS_SETXATTRAT                   = 4463
+	SYS_GETXATTRAT                   = 4464
+	SYS_LISTXATTRAT                  = 4465
+	SYS_REMOVEXATTRAT                = 4466
+	SYS_OPEN_TREE_ATTR               = 4467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 38ae55e..391ad10 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -372,4 +372,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 5460
 	SYS_LSM_LIST_MODULES        = 5461
 	SYS_MSEAL                   = 5462
+	SYS_SETXATTRAT              = 5463
+	SYS_GETXATTRAT              = 5464
+	SYS_LISTXATTRAT             = 5465
+	SYS_REMOVEXATTRAT           = 5466
+	SYS_OPEN_TREE_ATTR          = 5467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 55e92e6..5656157 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -372,4 +372,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 5460
 	SYS_LSM_LIST_MODULES        = 5461
 	SYS_MSEAL                   = 5462
+	SYS_SETXATTRAT              = 5463
+	SYS_GETXATTRAT              = 5464
+	SYS_LISTXATTRAT             = 5465
+	SYS_REMOVEXATTRAT           = 5466
+	SYS_OPEN_TREE_ATTR          = 5467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 60658d6..0482b52 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -442,4 +442,9 @@
 	SYS_LSM_SET_SELF_ATTR            = 4460
 	SYS_LSM_LIST_MODULES             = 4461
 	SYS_MSEAL                        = 4462
+	SYS_SETXATTRAT                   = 4463
+	SYS_GETXATTRAT                   = 4464
+	SYS_LISTXATTRAT                  = 4465
+	SYS_REMOVEXATTRAT                = 4466
+	SYS_OPEN_TREE_ATTR               = 4467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index e203e8a..71806f0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -449,4 +449,9 @@
 	SYS_LSM_SET_SELF_ATTR            = 460
 	SYS_LSM_LIST_MODULES             = 461
 	SYS_MSEAL                        = 462
+	SYS_SETXATTRAT                   = 463
+	SYS_GETXATTRAT                   = 464
+	SYS_LISTXATTRAT                  = 465
+	SYS_REMOVEXATTRAT                = 466
+	SYS_OPEN_TREE_ATTR               = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5944b97..e35a710 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -421,4 +421,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index c66d416..2aea476 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -421,4 +421,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a5459e7..6c9bb4e 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -326,4 +326,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 01d8682..680bc99 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -387,4 +387,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 7b703e7..620f271 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -400,4 +400,9 @@
 	SYS_LSM_SET_SELF_ATTR       = 460
 	SYS_LSM_LIST_MODULES        = 461
 	SYS_MSEAL                   = 462
+	SYS_SETXATTRAT              = 463
+	SYS_GETXATTRAT              = 464
+	SYS_LISTXATTRAT             = 465
+	SYS_REMOVEXATTRAT           = 466
+	SYS_OPEN_TREE_ATTR          = 467
 )
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 5537148..944e75a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -114,8 +114,10 @@
 	Atomic_write_unit_min     uint32
 	Atomic_write_unit_max     uint32
 	Atomic_write_segments_max uint32
+	Dio_read_offset_align     uint32
+	Atomic_write_unit_max_opt uint32
 	_                         [1]uint32
-	_                         [9]uint64
+	_                         [8]uint64
 }
 
 type Fsid struct {
@@ -199,7 +201,8 @@
 	Key_spec FscryptKeySpecifier
 	Raw_size uint32
 	Key_id   uint32
-	_        [8]uint32
+	Flags    uint32
+	_        [7]uint32
 }
 
 type FscryptRemoveKeyArg struct {
@@ -629,6 +632,8 @@
 	IFA_FLAGS          = 0x8
 	IFA_RT_PRIORITY    = 0x9
 	IFA_TARGET_NETNSID = 0xa
+	IFAL_LABEL         = 0x2
+	IFAL_ADDRESS       = 0x1
 	RT_SCOPE_UNIVERSE  = 0x0
 	RT_SCOPE_SITE      = 0xc8
 	RT_SCOPE_LINK      = 0xfd
@@ -686,6 +691,7 @@
 	SizeofRtAttr       = 0x4
 	SizeofIfInfomsg    = 0x10
 	SizeofIfAddrmsg    = 0x8
+	SizeofIfAddrlblmsg = 0xc
 	SizeofIfaCacheinfo = 0x10
 	SizeofRtMsg        = 0xc
 	SizeofRtNexthop    = 0x8
@@ -737,6 +743,15 @@
 	Index     uint32
 }
 
+type IfAddrlblmsg struct {
+	Family    uint8
+	_         uint8
+	Prefixlen uint8
+	Flags     uint8
+	Index     uint32
+	Seq       uint32
+}
+
 type IfaCacheinfo struct {
 	Prefered uint32
 	Valid    uint32
@@ -2226,8 +2241,11 @@
 	NFT_PAYLOAD_LL_HEADER             = 0x0
 	NFT_PAYLOAD_NETWORK_HEADER        = 0x1
 	NFT_PAYLOAD_TRANSPORT_HEADER      = 0x2
+	NFT_PAYLOAD_INNER_HEADER          = 0x3
+	NFT_PAYLOAD_TUN_HEADER            = 0x4
 	NFT_PAYLOAD_CSUM_NONE             = 0x0
 	NFT_PAYLOAD_CSUM_INET             = 0x1
+	NFT_PAYLOAD_CSUM_SCTP             = 0x2
 	NFT_PAYLOAD_L4CSUM_PSEUDOHDR      = 0x1
 	NFTA_PAYLOAD_UNSPEC               = 0x0
 	NFTA_PAYLOAD_DREG                 = 0x1
@@ -2314,6 +2332,11 @@
 	NFT_CT_AVGPKT                     = 0x10
 	NFT_CT_ZONE                       = 0x11
 	NFT_CT_EVENTMASK                  = 0x12
+	NFT_CT_SRC_IP                     = 0x13
+	NFT_CT_DST_IP                     = 0x14
+	NFT_CT_SRC_IP6                    = 0x15
+	NFT_CT_DST_IP6                    = 0x16
+	NFT_CT_ID                         = 0x17
 	NFTA_CT_UNSPEC                    = 0x0
 	NFTA_CT_DREG                      = 0x1
 	NFTA_CT_KEY                       = 0x2
@@ -2594,8 +2617,8 @@
 	SOF_TIMESTAMPING_BIND_PHC     = 0x8000
 	SOF_TIMESTAMPING_OPT_ID_TCP   = 0x10000
 
-	SOF_TIMESTAMPING_LAST = 0x20000
-	SOF_TIMESTAMPING_MASK = 0x3ffff
+	SOF_TIMESTAMPING_LAST = 0x40000
+	SOF_TIMESTAMPING_MASK = 0x7ffff
 
 	SCM_TSTAMP_SND   = 0x0
 	SCM_TSTAMP_SCHED = 0x1
@@ -3041,6 +3064,23 @@
 )
 
 const (
+	TCA_UNSPEC            = 0x0
+	TCA_KIND              = 0x1
+	TCA_OPTIONS           = 0x2
+	TCA_STATS             = 0x3
+	TCA_XSTATS            = 0x4
+	TCA_RATE              = 0x5
+	TCA_FCNT              = 0x6
+	TCA_STATS2            = 0x7
+	TCA_STAB              = 0x8
+	TCA_PAD               = 0x9
+	TCA_DUMP_INVISIBLE    = 0xa
+	TCA_CHAIN             = 0xb
+	TCA_HW_OFFLOAD        = 0xc
+	TCA_INGRESS_BLOCK     = 0xd
+	TCA_EGRESS_BLOCK      = 0xe
+	TCA_DUMP_FLAGS        = 0xf
+	TCA_EXT_WARN_MSG      = 0x10
 	RTNLGRP_NONE          = 0x0
 	RTNLGRP_LINK          = 0x1
 	RTNLGRP_NOTIFY        = 0x2
@@ -3075,6 +3115,18 @@
 	RTNLGRP_IPV6_MROUTE_R = 0x1f
 	RTNLGRP_NEXTHOP       = 0x20
 	RTNLGRP_BRVLAN        = 0x21
+	RTNLGRP_MCTP_IFADDR   = 0x22
+	RTNLGRP_TUNNEL        = 0x23
+	RTNLGRP_STATS         = 0x24
+	RTNLGRP_IPV4_MCADDR   = 0x25
+	RTNLGRP_IPV6_MCADDR   = 0x26
+	RTNLGRP_IPV6_ACADDR   = 0x27
+	TCA_ROOT_UNSPEC       = 0x0
+	TCA_ROOT_TAB          = 0x1
+	TCA_ROOT_FLAGS        = 0x2
+	TCA_ROOT_COUNT        = 0x3
+	TCA_ROOT_TIME_DELTA   = 0x4
+	TCA_ROOT_EXT_WARN_MSG = 0x5
 )
 
 type CapUserHeader struct {
@@ -3802,7 +3854,16 @@
 	ETHTOOL_MSG_PSE_GET                       = 0x24
 	ETHTOOL_MSG_PSE_SET                       = 0x25
 	ETHTOOL_MSG_RSS_GET                       = 0x26
-	ETHTOOL_MSG_USER_MAX                      = 0x2d
+	ETHTOOL_MSG_PLCA_GET_CFG                  = 0x27
+	ETHTOOL_MSG_PLCA_SET_CFG                  = 0x28
+	ETHTOOL_MSG_PLCA_GET_STATUS               = 0x29
+	ETHTOOL_MSG_MM_GET                        = 0x2a
+	ETHTOOL_MSG_MM_SET                        = 0x2b
+	ETHTOOL_MSG_MODULE_FW_FLASH_ACT           = 0x2c
+	ETHTOOL_MSG_PHY_GET                       = 0x2d
+	ETHTOOL_MSG_TSCONFIG_GET                  = 0x2e
+	ETHTOOL_MSG_TSCONFIG_SET                  = 0x2f
+	ETHTOOL_MSG_USER_MAX                      = 0x2f
 	ETHTOOL_MSG_KERNEL_NONE                   = 0x0
 	ETHTOOL_MSG_STRSET_GET_REPLY              = 0x1
 	ETHTOOL_MSG_LINKINFO_GET_REPLY            = 0x2
@@ -3842,7 +3903,17 @@
 	ETHTOOL_MSG_MODULE_NTF                    = 0x24
 	ETHTOOL_MSG_PSE_GET_REPLY                 = 0x25
 	ETHTOOL_MSG_RSS_GET_REPLY                 = 0x26
-	ETHTOOL_MSG_KERNEL_MAX                    = 0x2e
+	ETHTOOL_MSG_PLCA_GET_CFG_REPLY            = 0x27
+	ETHTOOL_MSG_PLCA_GET_STATUS_REPLY         = 0x28
+	ETHTOOL_MSG_PLCA_NTF                      = 0x29
+	ETHTOOL_MSG_MM_GET_REPLY                  = 0x2a
+	ETHTOOL_MSG_MM_NTF                        = 0x2b
+	ETHTOOL_MSG_MODULE_FW_FLASH_NTF           = 0x2c
+	ETHTOOL_MSG_PHY_GET_REPLY                 = 0x2d
+	ETHTOOL_MSG_PHY_NTF                       = 0x2e
+	ETHTOOL_MSG_TSCONFIG_GET_REPLY            = 0x2f
+	ETHTOOL_MSG_TSCONFIG_SET_REPLY            = 0x30
+	ETHTOOL_MSG_KERNEL_MAX                    = 0x30
 	ETHTOOL_FLAG_COMPACT_BITSETS              = 0x1
 	ETHTOOL_FLAG_OMIT_REPLY                   = 0x2
 	ETHTOOL_FLAG_STATS                        = 0x4
@@ -3949,7 +4020,12 @@
 	ETHTOOL_A_RINGS_TCP_DATA_SPLIT            = 0xb
 	ETHTOOL_A_RINGS_CQE_SIZE                  = 0xc
 	ETHTOOL_A_RINGS_TX_PUSH                   = 0xd
-	ETHTOOL_A_RINGS_MAX                       = 0x10
+	ETHTOOL_A_RINGS_RX_PUSH                   = 0xe
+	ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN           = 0xf
+	ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX       = 0x10
+	ETHTOOL_A_RINGS_HDS_THRESH                = 0x11
+	ETHTOOL_A_RINGS_HDS_THRESH_MAX            = 0x12
+	ETHTOOL_A_RINGS_MAX                       = 0x12
 	ETHTOOL_A_CHANNELS_UNSPEC                 = 0x0
 	ETHTOOL_A_CHANNELS_HEADER                 = 0x1
 	ETHTOOL_A_CHANNELS_RX_MAX                 = 0x2
@@ -4015,7 +4091,9 @@
 	ETHTOOL_A_TSINFO_TX_TYPES                 = 0x3
 	ETHTOOL_A_TSINFO_RX_FILTERS               = 0x4
 	ETHTOOL_A_TSINFO_PHC_INDEX                = 0x5
-	ETHTOOL_A_TSINFO_MAX                      = 0x6
+	ETHTOOL_A_TSINFO_STATS                    = 0x6
+	ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER        = 0x7
+	ETHTOOL_A_TSINFO_MAX                      = 0x9
 	ETHTOOL_A_CABLE_TEST_UNSPEC               = 0x0
 	ETHTOOL_A_CABLE_TEST_HEADER               = 0x1
 	ETHTOOL_A_CABLE_TEST_MAX                  = 0x1
@@ -4101,6 +4179,19 @@
 	ETHTOOL_A_TUNNEL_INFO_MAX                 = 0x2
 )
 
+const (
+	TCP_V4_FLOW    = 0x1
+	UDP_V4_FLOW    = 0x2
+	TCP_V6_FLOW    = 0x5
+	UDP_V6_FLOW    = 0x6
+	ESP_V4_FLOW    = 0xa
+	ESP_V6_FLOW    = 0xc
+	IP_USER_FLOW   = 0xd
+	IPV6_USER_FLOW = 0xe
+	IPV6_FLOW      = 0x11
+	ETHER_FLOW     = 0x12
+)
+
 const SPEED_UNKNOWN = -0x1
 
 type EthtoolDrvinfo struct {
@@ -4613,6 +4704,7 @@
 	NL80211_ATTR_AKM_SUITES                                 = 0x4c
 	NL80211_ATTR_AP_ISOLATE                                 = 0x60
 	NL80211_ATTR_AP_SETTINGS_FLAGS                          = 0x135
+	NL80211_ATTR_ASSOC_SPP_AMSDU                            = 0x14a
 	NL80211_ATTR_AUTH_DATA                                  = 0x9c
 	NL80211_ATTR_AUTH_TYPE                                  = 0x35
 	NL80211_ATTR_BANDS                                      = 0xef
@@ -4623,6 +4715,7 @@
 	NL80211_ATTR_BSS_BASIC_RATES                            = 0x24
 	NL80211_ATTR_BSS                                        = 0x2f
 	NL80211_ATTR_BSS_CTS_PROT                               = 0x1c
+	NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA                  = 0x147
 	NL80211_ATTR_BSS_HT_OPMODE                              = 0x6d
 	NL80211_ATTR_BSSID                                      = 0xf5
 	NL80211_ATTR_BSS_SELECT                                 = 0xe3
@@ -4682,6 +4775,7 @@
 	NL80211_ATTR_DTIM_PERIOD                                = 0xd
 	NL80211_ATTR_DURATION                                   = 0x57
 	NL80211_ATTR_EHT_CAPABILITY                             = 0x136
+	NL80211_ATTR_EMA_RNR_ELEMS                              = 0x145
 	NL80211_ATTR_EML_CAPABILITY                             = 0x13d
 	NL80211_ATTR_EXT_CAPA                                   = 0xa9
 	NL80211_ATTR_EXT_CAPA_MASK                              = 0xaa
@@ -4717,6 +4811,7 @@
 	NL80211_ATTR_HIDDEN_SSID                                = 0x7e
 	NL80211_ATTR_HT_CAPABILITY                              = 0x1f
 	NL80211_ATTR_HT_CAPABILITY_MASK                         = 0x94
+	NL80211_ATTR_HW_TIMESTAMP_ENABLED                       = 0x144
 	NL80211_ATTR_IE_ASSOC_RESP                              = 0x80
 	NL80211_ATTR_IE                                         = 0x2a
 	NL80211_ATTR_IE_PROBE_RESP                              = 0x7f
@@ -4747,9 +4842,10 @@
 	NL80211_ATTR_MAC_HINT                                   = 0xc8
 	NL80211_ATTR_MAC_MASK                                   = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA                           = 0xca
-	NL80211_ATTR_MAX                                        = 0x14c
+	NL80211_ATTR_MAX                                        = 0x151
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION                     = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS                           = 0xce
+	NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS                     = 0x143
 	NL80211_ATTR_MAX_MATCH_SETS                             = 0x85
 	NL80211_ATTR_MAX_NUM_AKM_SUITES                         = 0x13c
 	NL80211_ATTR_MAX_NUM_PMKIDS                             = 0x56
@@ -4774,9 +4870,12 @@
 	NL80211_ATTR_MGMT_SUBTYPE                               = 0x29
 	NL80211_ATTR_MLD_ADDR                                   = 0x13a
 	NL80211_ATTR_MLD_CAPA_AND_OPS                           = 0x13e
+	NL80211_ATTR_MLO_LINK_DISABLED                          = 0x146
 	NL80211_ATTR_MLO_LINK_ID                                = 0x139
 	NL80211_ATTR_MLO_LINKS                                  = 0x138
 	NL80211_ATTR_MLO_SUPPORT                                = 0x13b
+	NL80211_ATTR_MLO_TTLM_DLINK                             = 0x148
+	NL80211_ATTR_MLO_TTLM_ULINK                             = 0x149
 	NL80211_ATTR_MNTR_FLAGS                                 = 0x17
 	NL80211_ATTR_MPATH_INFO                                 = 0x1b
 	NL80211_ATTR_MPATH_NEXT_HOP                             = 0x1a
@@ -4809,12 +4908,14 @@
 	NL80211_ATTR_PORT_AUTHORIZED                            = 0x103
 	NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN                    = 0x5
 	NL80211_ATTR_POWER_RULE_MAX_EIRP                        = 0x6
+	NL80211_ATTR_POWER_RULE_PSD                             = 0x8
 	NL80211_ATTR_PREV_BSSID                                 = 0x4f
 	NL80211_ATTR_PRIVACY                                    = 0x46
 	NL80211_ATTR_PROBE_RESP                                 = 0x91
 	NL80211_ATTR_PROBE_RESP_OFFLOAD                         = 0x90
 	NL80211_ATTR_PROTOCOL_FEATURES                          = 0xad
 	NL80211_ATTR_PS_STATE                                   = 0x5d
+	NL80211_ATTR_PUNCT_BITMAP                               = 0x142
 	NL80211_ATTR_QOS_MAP                                    = 0xc7
 	NL80211_ATTR_RADAR_BACKGROUND                           = 0x134
 	NL80211_ATTR_RADAR_EVENT                                = 0xa8
@@ -4943,7 +5044,9 @@
 	NL80211_ATTR_WIPHY_FREQ                                 = 0x26
 	NL80211_ATTR_WIPHY_FREQ_HINT                            = 0xc9
 	NL80211_ATTR_WIPHY_FREQ_OFFSET                          = 0x122
+	NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS               = 0x14c
 	NL80211_ATTR_WIPHY_NAME                                 = 0x2
+	NL80211_ATTR_WIPHY_RADIOS                               = 0x14b
 	NL80211_ATTR_WIPHY_RETRY_LONG                           = 0x3e
 	NL80211_ATTR_WIPHY_RETRY_SHORT                          = 0x3d
 	NL80211_ATTR_WIPHY_RTS_THRESHOLD                        = 0x40
@@ -4978,6 +5081,8 @@
 	NL80211_BAND_ATTR_IFTYPE_DATA                           = 0x9
 	NL80211_BAND_ATTR_MAX                                   = 0xd
 	NL80211_BAND_ATTR_RATES                                 = 0x2
+	NL80211_BAND_ATTR_S1G_CAPA                              = 0xd
+	NL80211_BAND_ATTR_S1G_MCS_NSS_SET                       = 0xc
 	NL80211_BAND_ATTR_VHT_CAPA                              = 0x8
 	NL80211_BAND_ATTR_VHT_MCS_SET                           = 0x7
 	NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC                    = 0x8
@@ -5001,6 +5106,10 @@
 	NL80211_BSS_BEACON_INTERVAL                             = 0x4
 	NL80211_BSS_BEACON_TSF                                  = 0xd
 	NL80211_BSS_BSSID                                       = 0x1
+	NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH                = 0x2
+	NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY                  = 0x1
+	NL80211_BSS_CANNOT_USE_REASONS                          = 0x18
+	NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH                 = 0x2
 	NL80211_BSS_CAPABILITY                                  = 0x5
 	NL80211_BSS_CHAIN_SIGNAL                                = 0x13
 	NL80211_BSS_CHAN_WIDTH_10                               = 0x1
@@ -5032,6 +5141,9 @@
 	NL80211_BSS_STATUS                                      = 0x9
 	NL80211_BSS_STATUS_IBSS_JOINED                          = 0x2
 	NL80211_BSS_TSF                                         = 0x3
+	NL80211_BSS_USE_FOR                                     = 0x17
+	NL80211_BSS_USE_FOR_MLD_LINK                            = 0x2
+	NL80211_BSS_USE_FOR_NORMAL                              = 0x1
 	NL80211_CHAN_HT20                                       = 0x1
 	NL80211_CHAN_HT40MINUS                                  = 0x2
 	NL80211_CHAN_HT40PLUS                                   = 0x3
@@ -5117,7 +5229,8 @@
 	NL80211_CMD_LEAVE_IBSS                                  = 0x2c
 	NL80211_CMD_LEAVE_MESH                                  = 0x45
 	NL80211_CMD_LEAVE_OCB                                   = 0x6d
-	NL80211_CMD_MAX                                         = 0x9b
+	NL80211_CMD_LINKS_REMOVED                               = 0x9a
+	NL80211_CMD_MAX                                         = 0x9d
 	NL80211_CMD_MICHAEL_MIC_FAILURE                         = 0x29
 	NL80211_CMD_MODIFY_LINK_STA                             = 0x97
 	NL80211_CMD_NAN_MATCH                                   = 0x78
@@ -5161,6 +5274,7 @@
 	NL80211_CMD_SET_COALESCE                                = 0x65
 	NL80211_CMD_SET_CQM                                     = 0x3f
 	NL80211_CMD_SET_FILS_AAD                                = 0x92
+	NL80211_CMD_SET_HW_TIMESTAMP                            = 0x99
 	NL80211_CMD_SET_INTERFACE                               = 0x6
 	NL80211_CMD_SET_KEY                                     = 0xa
 	NL80211_CMD_SET_MAC_ACL                                 = 0x5d
@@ -5180,6 +5294,7 @@
 	NL80211_CMD_SET_SAR_SPECS                               = 0x8c
 	NL80211_CMD_SET_STATION                                 = 0x12
 	NL80211_CMD_SET_TID_CONFIG                              = 0x89
+	NL80211_CMD_SET_TID_TO_LINK_MAPPING                     = 0x9b
 	NL80211_CMD_SET_TX_BITRATE_MASK                         = 0x39
 	NL80211_CMD_SET_WDS_PEER                                = 0x42
 	NL80211_CMD_SET_WIPHY                                   = 0x2
@@ -5247,6 +5362,7 @@
 	NL80211_EXT_FEATURE_AIRTIME_FAIRNESS                    = 0x21
 	NL80211_EXT_FEATURE_AP_PMKSA_CACHING                    = 0x22
 	NL80211_EXT_FEATURE_AQL                                 = 0x28
+	NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA           = 0x40
 	NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT            = 0x2e
 	NL80211_EXT_FEATURE_BEACON_PROTECTION                   = 0x29
 	NL80211_EXT_FEATURE_BEACON_RATE_HE                      = 0x36
@@ -5262,6 +5378,7 @@
 	NL80211_EXT_FEATURE_CQM_RSSI_LIST                       = 0xd
 	NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT             = 0x1b
 	NL80211_EXT_FEATURE_DEL_IBSS_STA                        = 0x2c
+	NL80211_EXT_FEATURE_DFS_CONCURRENT                      = 0x43
 	NL80211_EXT_FEATURE_DFS_OFFLOAD                         = 0x19
 	NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER                = 0x20
 	NL80211_EXT_FEATURE_EXT_KEY_ID                          = 0x24
@@ -5281,9 +5398,12 @@
 	NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION  = 0x14
 	NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE          = 0x13
 	NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION        = 0x31
+	NL80211_EXT_FEATURE_OWE_OFFLOAD_AP                      = 0x42
+	NL80211_EXT_FEATURE_OWE_OFFLOAD                         = 0x41
 	NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE                 = 0x3d
 	NL80211_EXT_FEATURE_PROTECTED_TWT                       = 0x2b
 	NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE         = 0x39
+	NL80211_EXT_FEATURE_PUNCT                               = 0x3e
 	NL80211_EXT_FEATURE_RADAR_BACKGROUND                    = 0x3c
 	NL80211_EXT_FEATURE_RRM                                 = 0x1
 	NL80211_EXT_FEATURE_SAE_OFFLOAD_AP                      = 0x33
@@ -5295,8 +5415,10 @@
 	NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23
 	NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI            = 0xc
 	NL80211_EXT_FEATURE_SECURE_LTF                          = 0x37
+	NL80211_EXT_FEATURE_SECURE_NAN                          = 0x3f
 	NL80211_EXT_FEATURE_SECURE_RTT                          = 0x38
 	NL80211_EXT_FEATURE_SET_SCAN_DWELL                      = 0x5
+	NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT                   = 0x44
 	NL80211_EXT_FEATURE_STA_TX_PWR                          = 0x25
 	NL80211_EXT_FEATURE_TXQS                                = 0x1c
 	NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP              = 0x35
@@ -5343,7 +5465,10 @@
 	NL80211_FREQUENCY_ATTR_2MHZ                             = 0x16
 	NL80211_FREQUENCY_ATTR_4MHZ                             = 0x17
 	NL80211_FREQUENCY_ATTR_8MHZ                             = 0x18
+	NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP                = 0x21
+	NL80211_FREQUENCY_ATTR_CAN_MONITOR                      = 0x20
 	NL80211_FREQUENCY_ATTR_DFS_CAC_TIME                     = 0xd
+	NL80211_FREQUENCY_ATTR_DFS_CONCURRENT                   = 0x1d
 	NL80211_FREQUENCY_ATTR_DFS_STATE                        = 0x7
 	NL80211_FREQUENCY_ATTR_DFS_TIME                         = 0x8
 	NL80211_FREQUENCY_ATTR_DISABLED                         = 0x2
@@ -5351,12 +5476,14 @@
 	NL80211_FREQUENCY_ATTR_GO_CONCURRENT                    = 0xf
 	NL80211_FREQUENCY_ATTR_INDOOR_ONLY                      = 0xe
 	NL80211_FREQUENCY_ATTR_IR_CONCURRENT                    = 0xf
-	NL80211_FREQUENCY_ATTR_MAX                              = 0x21
+	NL80211_FREQUENCY_ATTR_MAX                              = 0x22
 	NL80211_FREQUENCY_ATTR_MAX_TX_POWER                     = 0x6
 	NL80211_FREQUENCY_ATTR_NO_10MHZ                         = 0x11
 	NL80211_FREQUENCY_ATTR_NO_160MHZ                        = 0xc
 	NL80211_FREQUENCY_ATTR_NO_20MHZ                         = 0x10
 	NL80211_FREQUENCY_ATTR_NO_320MHZ                        = 0x1a
+	NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT               = 0x1f
+	NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT               = 0x1e
 	NL80211_FREQUENCY_ATTR_NO_80MHZ                         = 0xb
 	NL80211_FREQUENCY_ATTR_NO_EHT                           = 0x1b
 	NL80211_FREQUENCY_ATTR_NO_HE                            = 0x13
@@ -5364,8 +5491,11 @@
 	NL80211_FREQUENCY_ATTR_NO_HT40_PLUS                     = 0xa
 	NL80211_FREQUENCY_ATTR_NO_IBSS                          = 0x3
 	NL80211_FREQUENCY_ATTR_NO_IR                            = 0x3
+	NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT                = 0x1f
+	NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT                = 0x1e
 	NL80211_FREQUENCY_ATTR_OFFSET                           = 0x14
 	NL80211_FREQUENCY_ATTR_PASSIVE_SCAN                     = 0x3
+	NL80211_FREQUENCY_ATTR_PSD                              = 0x1c
 	NL80211_FREQUENCY_ATTR_RADAR                            = 0x5
 	NL80211_FREQUENCY_ATTR_WMM                              = 0x12
 	NL80211_FTM_RESP_ATTR_CIVICLOC                          = 0x3
@@ -5430,6 +5560,7 @@
 	NL80211_IFTYPE_STATION                                  = 0x2
 	NL80211_IFTYPE_UNSPECIFIED                              = 0x0
 	NL80211_IFTYPE_WDS                                      = 0x5
+	NL80211_KCK_EXT_LEN_32                                  = 0x20
 	NL80211_KCK_EXT_LEN                                     = 0x18
 	NL80211_KCK_LEN                                         = 0x10
 	NL80211_KEK_EXT_LEN                                     = 0x20
@@ -5458,9 +5589,10 @@
 	NL80211_MAX_SUPP_HT_RATES                               = 0x4d
 	NL80211_MAX_SUPP_RATES                                  = 0x20
 	NL80211_MAX_SUPP_REG_RULES                              = 0x80
+	NL80211_MAX_SUPP_SELECTORS                              = 0x80
 	NL80211_MBSSID_CONFIG_ATTR_EMA                          = 0x5
 	NL80211_MBSSID_CONFIG_ATTR_INDEX                        = 0x3
-	NL80211_MBSSID_CONFIG_ATTR_MAX                          = 0x5
+	NL80211_MBSSID_CONFIG_ATTR_MAX                          = 0x6
 	NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY  = 0x2
 	NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES               = 0x1
 	NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX                   = 0x4
@@ -5519,7 +5651,7 @@
 	NL80211_MNTR_FLAG_CONTROL                               = 0x3
 	NL80211_MNTR_FLAG_COOK_FRAMES                           = 0x5
 	NL80211_MNTR_FLAG_FCSFAIL                               = 0x1
-	NL80211_MNTR_FLAG_MAX                                   = 0x6
+	NL80211_MNTR_FLAG_MAX                                   = 0x7
 	NL80211_MNTR_FLAG_OTHER_BSS                             = 0x4
 	NL80211_MNTR_FLAG_PLCPFAIL                              = 0x2
 	NL80211_MPATH_FLAG_ACTIVE                               = 0x1
@@ -5703,11 +5835,16 @@
 	NL80211_RADAR_PRE_CAC_EXPIRED                           = 0x4
 	NL80211_RATE_INFO_10_MHZ_WIDTH                          = 0xb
 	NL80211_RATE_INFO_160_MHZ_WIDTH                         = 0xa
+	NL80211_RATE_INFO_16_MHZ_WIDTH                          = 0x1d
+	NL80211_RATE_INFO_1_MHZ_WIDTH                           = 0x19
+	NL80211_RATE_INFO_2_MHZ_WIDTH                           = 0x1a
 	NL80211_RATE_INFO_320_MHZ_WIDTH                         = 0x12
 	NL80211_RATE_INFO_40_MHZ_WIDTH                          = 0x3
+	NL80211_RATE_INFO_4_MHZ_WIDTH                           = 0x1b
 	NL80211_RATE_INFO_5_MHZ_WIDTH                           = 0xc
 	NL80211_RATE_INFO_80_MHZ_WIDTH                          = 0x8
 	NL80211_RATE_INFO_80P80_MHZ_WIDTH                       = 0x9
+	NL80211_RATE_INFO_8_MHZ_WIDTH                           = 0x1c
 	NL80211_RATE_INFO_BITRATE32                             = 0x5
 	NL80211_RATE_INFO_BITRATE                               = 0x1
 	NL80211_RATE_INFO_EHT_GI_0_8                            = 0x0
@@ -5753,6 +5890,8 @@
 	NL80211_RATE_INFO_HE_RU_ALLOC                           = 0x11
 	NL80211_RATE_INFO_MAX                                   = 0x1d
 	NL80211_RATE_INFO_MCS                                   = 0x2
+	NL80211_RATE_INFO_S1G_MCS                               = 0x17
+	NL80211_RATE_INFO_S1G_NSS                               = 0x18
 	NL80211_RATE_INFO_SHORT_GI                              = 0x4
 	NL80211_RATE_INFO_VHT_MCS                               = 0x6
 	NL80211_RATE_INFO_VHT_NSS                               = 0x7
@@ -5770,14 +5909,19 @@
 	NL80211_REKEY_DATA_KEK                                  = 0x1
 	NL80211_REKEY_DATA_REPLAY_CTR                           = 0x3
 	NL80211_REPLAY_CTR_LEN                                  = 0x8
+	NL80211_RRF_ALLOW_6GHZ_VLP_AP                           = 0x1000000
 	NL80211_RRF_AUTO_BW                                     = 0x800
 	NL80211_RRF_DFS                                         = 0x10
+	NL80211_RRF_DFS_CONCURRENT                              = 0x200000
 	NL80211_RRF_GO_CONCURRENT                               = 0x1000
 	NL80211_RRF_IR_CONCURRENT                               = 0x1000
 	NL80211_RRF_NO_160MHZ                                   = 0x10000
 	NL80211_RRF_NO_320MHZ                                   = 0x40000
+	NL80211_RRF_NO_6GHZ_AFC_CLIENT                          = 0x800000
+	NL80211_RRF_NO_6GHZ_VLP_CLIENT                          = 0x400000
 	NL80211_RRF_NO_80MHZ                                    = 0x8000
 	NL80211_RRF_NO_CCK                                      = 0x2
+	NL80211_RRF_NO_EHT                                      = 0x80000
 	NL80211_RRF_NO_HE                                       = 0x20000
 	NL80211_RRF_NO_HT40                                     = 0x6000
 	NL80211_RRF_NO_HT40MINUS                                = 0x2000
@@ -5788,7 +5932,10 @@
 	NL80211_RRF_NO_IR                                       = 0x80
 	NL80211_RRF_NO_OFDM                                     = 0x1
 	NL80211_RRF_NO_OUTDOOR                                  = 0x8
+	NL80211_RRF_NO_UHB_AFC_CLIENT                           = 0x800000
+	NL80211_RRF_NO_UHB_VLP_CLIENT                           = 0x400000
 	NL80211_RRF_PASSIVE_SCAN                                = 0x80
+	NL80211_RRF_PSD                                         = 0x100000
 	NL80211_RRF_PTMP_ONLY                                   = 0x40
 	NL80211_RRF_PTP_ONLY                                    = 0x20
 	NL80211_RXMGMT_FLAG_ANSWERED                            = 0x1
@@ -5849,6 +5996,7 @@
 	NL80211_STA_FLAG_MAX_OLD_API                            = 0x6
 	NL80211_STA_FLAG_MFP                                    = 0x4
 	NL80211_STA_FLAG_SHORT_PREAMBLE                         = 0x2
+	NL80211_STA_FLAG_SPP_AMSDU                              = 0x8
 	NL80211_STA_FLAG_TDLS_PEER                              = 0x6
 	NL80211_STA_FLAG_WME                                    = 0x3
 	NL80211_STA_INFO_ACK_SIGNAL_AVG                         = 0x23
@@ -6007,6 +6155,13 @@
 	NL80211_VHT_CAPABILITY_LEN                              = 0xc
 	NL80211_VHT_NSS_MAX                                     = 0x8
 	NL80211_WIPHY_NAME_MAXLEN                               = 0x40
+	NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE                     = 0x2
+	NL80211_WIPHY_RADIO_ATTR_INDEX                          = 0x1
+	NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION          = 0x3
+	NL80211_WIPHY_RADIO_ATTR_MAX                            = 0x4
+	NL80211_WIPHY_RADIO_FREQ_ATTR_END                       = 0x2
+	NL80211_WIPHY_RADIO_FREQ_ATTR_MAX                       = 0x2
+	NL80211_WIPHY_RADIO_FREQ_ATTR_START                     = 0x1
 	NL80211_WMMR_AIFSN                                      = 0x3
 	NL80211_WMMR_CW_MAX                                     = 0x2
 	NL80211_WMMR_CW_MIN                                     = 0x1
@@ -6038,6 +6193,7 @@
 	NL80211_WOWLAN_TRIG_PKT_PATTERN                         = 0x4
 	NL80211_WOWLAN_TRIG_RFKILL_RELEASE                      = 0x9
 	NL80211_WOWLAN_TRIG_TCP_CONNECTION                      = 0xe
+	NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC         = 0x14
 	NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211                    = 0xa
 	NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN                = 0xb
 	NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023                     = 0xc
@@ -6174,3 +6330,5 @@
 	Family   uint8
 	Protocol uint8
 }
+
+const RTM_NEWNVLAN = 0x70
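Two of the constant changes in this ztypes_linux.go hunk are coupled: SOF_TIMESTAMPING_LAST moves from 0x20000 to 0x40000 and SOF_TIMESTAMPING_MASK widens from 0x3ffff to 0x7ffff, preserving the kernel's invariant that the mask covers every flag bit up to and including the last one. A throwaway consistency check, assuming only the unix package itself:

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// mask == (last << 1) - 1 holds exactly when the mask covers every
	// defined SOF_TIMESTAMPING_* bit: 0x7ffff == (0x40000 << 1) - 1.
	ok := unix.SOF_TIMESTAMPING_MASK == (unix.SOF_TIMESTAMPING_LAST<<1)-1
	fmt.Printf("last=%#x mask=%#x consistent=%v\n",
		unix.SOF_TIMESTAMPING_LAST, unix.SOF_TIMESTAMPING_MASK, ok)
}
```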
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index fd402da..485f2d3 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -282,7 +282,7 @@
 	Ac_exitcode               uint32
 	Ac_flag                   uint8
 	Ac_nice                   uint8
-	_                         [4]byte
+	_                         [6]byte
 	Cpu_count                 uint64
 	Cpu_delay_total           uint64
 	Blkio_count               uint64
@@ -338,6 +338,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint32
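This hunk (repeated below for every architecture) appends per-resource delay minima and maxima to the generated Taskstats struct, alongside the existing totals reported by the kernel's delay accounting. A sketch of how the new fields might be read, assuming ts was already filled in by a taskstats generic-netlink query (the query machinery itself is out of scope here):

```go
import (
	"fmt"

	"golang.org/x/sys/unix"
)

// reportDelays prints a few of the new min/max delay counters. All delay
// values are nanoseconds accumulated by the kernel's delay accounting.
func reportDelays(ts *unix.Taskstats) {
	fmt.Printf("cpu delay:   total=%dns max=%dns min=%dns over %d samples\n",
		ts.Cpu_delay_total, ts.Cpu_delay_max, ts.Cpu_delay_min, ts.Cpu_count)
	fmt.Printf("blkio delay: total=%dns max=%dns min=%dns\n",
		ts.Blkio_delay_total, ts.Blkio_delay_max, ts.Blkio_delay_min)
}
```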
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index eb7a5e1..ecbd1ad 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -351,6 +351,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d78ac10..02f0463 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -91,7 +91,7 @@
 	Gid     uint32
 	Rdev    uint64
 	_       uint16
-	_       [4]byte
+	_       [6]byte
 	Size    int64
 	Blksize int32
 	_       [4]byte
@@ -273,7 +273,7 @@
 	Ac_exitcode               uint32
 	Ac_flag                   uint8
 	Ac_nice                   uint8
-	_                         [4]byte
+	_                         [6]byte
 	Cpu_count                 uint64
 	Cpu_delay_total           uint64
 	Blkio_count               uint64
@@ -329,6 +329,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index cd06d47..6f4d400 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -330,6 +330,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2f28fe2..cd532cf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -331,6 +331,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 71d6cac..4133620 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,7 +278,7 @@
 	Ac_exitcode               uint32
 	Ac_flag                   uint8
 	Ac_nice                   uint8
-	_                         [4]byte
+	_                         [6]byte
 	Cpu_count                 uint64
 	Cpu_delay_total           uint64
 	Blkio_count               uint64
@@ -334,6 +334,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 8596d45..eaa37eb 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -333,6 +333,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index cd60ea1..98ae6a1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -333,6 +333,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index b0ae420..cae1961 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,7 +278,7 @@
 	Ac_exitcode               uint32
 	Ac_flag                   uint8
 	Ac_nice                   uint8
-	_                         [4]byte
+	_                         [6]byte
 	Cpu_count                 uint64
 	Cpu_delay_total           uint64
 	Blkio_count               uint64
@@ -334,6 +334,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 8359728..6ce3b4e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@
 	Gid     uint32
 	Rdev    uint64
 	_       uint16
-	_       [4]byte
+	_       [6]byte
 	Size    int64
 	Blksize int32
 	_       [4]byte
@@ -285,7 +285,7 @@
 	Ac_exitcode               uint32
 	Ac_flag                   uint8
 	Ac_nice                   uint8
-	_                         [4]byte
+	_                         [6]byte
 	Cpu_count                 uint64
 	Cpu_delay_total           uint64
 	Blkio_count               uint64
@@ -341,6 +341,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 69eb6a5..c7429c6 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -340,6 +340,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5f583cb..4bf4baf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -340,6 +340,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index ad05b51..e9709d7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -358,6 +358,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cf3ce90..fb44268 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -353,6 +353,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 590b567..9c38265 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -335,6 +335,22 @@
 	Wpcopy_delay_total        uint64
 	Irq_count                 uint64
 	Irq_delay_total           uint64
+	Cpu_delay_max             uint64
+	Cpu_delay_min             uint64
+	Blkio_delay_max           uint64
+	Blkio_delay_min           uint64
+	Swapin_delay_max          uint64
+	Swapin_delay_min          uint64
+	Freepages_delay_max       uint64
+	Freepages_delay_min       uint64
+	Thrashing_delay_max       uint64
+	Thrashing_delay_min       uint64
+	Compact_delay_max         uint64
+	Compact_delay_min         uint64
+	Wpcopy_delay_max          uint64
+	Wpcopy_delay_min          uint64
+	Irq_delay_max             uint64
+	Irq_delay_min             uint64
 }
 
 type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/windows/aliases.go b/vendor/golang.org/x/sys/windows/aliases.go
new file mode 100644
index 0000000..16f9056
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/aliases.go
@@ -0,0 +1,12 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package windows
+
+import "syscall"
+
+type Errno = syscall.Errno
+type SysProcAttr = syscall.SysProcAttr
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
new file mode 100644
index 0000000..3ca814f
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -0,0 +1,415 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+// We need to use LoadLibrary and GetProcAddress from the Go runtime, because
+// these symbols are loaded by the system linker and are required to
+// dynamically load additional symbols. Note that in the Go runtime, these
+// return syscall.Handle and syscall.Errno, but these are the same, in fact,
+// as windows.Handle and windows.Errno, and we intend to keep these the same.
+
+//go:linkname syscall_loadlibrary syscall.loadlibrary
+func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno)
+
+//go:linkname syscall_getprocaddress syscall.getprocaddress
+func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno)
+
+// DLLError describes reasons for DLL load failures.
+type DLLError struct {
+	Err     error
+	ObjName string
+	Msg     string
+}
+
+func (e *DLLError) Error() string { return e.Msg }
+
+func (e *DLLError) Unwrap() error { return e.Err }
+
+// A DLL implements access to a single DLL.
+type DLL struct {
+	Name   string
+	Handle Handle
+}
+
+// LoadDLL loads the named DLL file into memory.
+//
+// Warning: using LoadDLL without an absolute path name is subject to
+// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
+// or use [LoadLibraryEx] directly.
+func LoadDLL(name string) (dll *DLL, err error) {
+	namep, err := UTF16PtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	h, e := syscall_loadlibrary(namep)
+	if e != 0 {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to load " + name + ": " + e.Error(),
+		}
+	}
+	d := &DLL{
+		Name:   name,
+		Handle: h,
+	}
+	return d, nil
+}
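A sketch of the safe pattern the warning above recommends: pin a system DLL to the Windows System directory instead of trusting the default search order (the procedure name is illustrative):

```go
import "golang.org/x/sys/windows"

var (
	// NewLazySystemDLL restricts the search to the System directory,
	// defeating the preloading attack LoadDLL is vulnerable to.
	modadvapi32       = windows.NewLazySystemDLL("advapi32.dll")
	procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW")
)
```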
+
+// MustLoadDLL is like LoadDLL but panics if the load operation fails.
+func MustLoadDLL(name string) *DLL {
+	d, e := LoadDLL(name)
+	if e != nil {
+		panic(e)
+	}
+	return d
+}
+
+// FindProc searches DLL d for a procedure named name and returns *Proc
+// if found. It returns an error if the search fails.
+func (d *DLL) FindProc(name string) (proc *Proc, err error) {
+	namep, err := BytePtrFromString(name)
+	if err != nil {
+		return nil, err
+	}
+	a, e := syscall_getprocaddress(d.Handle, namep)
+	if e != 0 {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(),
+		}
+	}
+	p := &Proc{
+		Dll:  d,
+		Name: name,
+		addr: a,
+	}
+	return p, nil
+}
+
+// MustFindProc is like FindProc but panics if search fails.
+func (d *DLL) MustFindProc(name string) *Proc {
+	p, e := d.FindProc(name)
+	if e != nil {
+		panic(e)
+	}
+	return p
+}
+
+// FindProcByOrdinal searches DLL d for a procedure by ordinal and returns *Proc
+// if found. It returns an error if the search fails.
+func (d *DLL) FindProcByOrdinal(ordinal uintptr) (proc *Proc, err error) {
+	a, e := GetProcAddressByOrdinal(d.Handle, ordinal)
+	name := "#" + itoa(int(ordinal))
+	if e != nil {
+		return nil, &DLLError{
+			Err:     e,
+			ObjName: name,
+			Msg:     "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(),
+		}
+	}
+	p := &Proc{
+		Dll:  d,
+		Name: name,
+		addr: a,
+	}
+	return p, nil
+}
+
+// MustFindProcByOrdinal is like FindProcByOrdinal but panics if search fails.
+func (d *DLL) MustFindProcByOrdinal(ordinal uintptr) *Proc {
+	p, e := d.FindProcByOrdinal(ordinal)
+	if e != nil {
+		panic(e)
+	}
+	return p
+}
+
+// Release unloads DLL d from memory.
+func (d *DLL) Release() (err error) {
+	return FreeLibrary(d.Handle)
+}
+
+// A Proc implements access to a procedure inside a DLL.
+type Proc struct {
+	Dll  *DLL
+	Name string
+	addr uintptr
+}
+
+// Addr returns the address of the procedure represented by p.
+// The return value can be passed to Syscall to run the procedure.
+func (p *Proc) Addr() uintptr {
+	return p.addr
+}
+
+//go:uintptrescapes
+
+// Call executes procedure p with arguments a. It will panic if more than 15
+// arguments are supplied.
+//
+// The returned error is always non-nil, constructed from the result of GetLastError.
+// Callers must inspect the primary return value to decide whether an error occurred
+// (according to the semantics of the specific function being called) before consulting
+// the error. The error will be guaranteed to contain windows.Errno.
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
+	switch len(a) {
+	case 0:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0)
+	case 1:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0)
+	case 2:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0)
+	case 3:
+		return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2])
+	case 4:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0)
+	case 5:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0)
+	case 6:
+		return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5])
+	case 7:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0)
+	case 8:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0)
+	case 9:
+		return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8])
+	case 10:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0)
+	case 11:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0)
+	case 12:
+		return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11])
+	case 13:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0)
+	case 14:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0)
+	case 15:
+		return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14])
+	default:
+		panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".")
+	}
+}
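The doc comment above is easy to misread: lastErr is non-nil even when the call succeeds, so the primary return value must be inspected first. A minimal usage sketch against kernel32!Beep, which reports failure with a zero return value:

```go
import (
	"log"

	"golang.org/x/sys/windows"
)

func beepOnce() {
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	beep := kernel32.NewProc("Beep")
	// lastErr is always non-nil; it only carries meaning when r1 == 0.
	r1, _, lastErr := beep.Call(750 /* frequency, Hz */, 300 /* duration, ms */)
	if r1 == 0 {
		log.Printf("Beep failed: %v", lastErr)
	}
}
```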
+
+// A LazyDLL implements access to a single DLL.
+// It will delay the load of the DLL until the first
+// call to its Handle method or to one of its
+// LazyProcs' Addr methods.
+type LazyDLL struct {
+	Name string
+
+	// System determines whether the DLL must be loaded from the
+	// Windows System directory, bypassing the normal DLL search
+	// path.
+	System bool
+
+	mu  sync.Mutex
+	dll *DLL // non-nil once DLL is loaded
+}
+
+// Load loads DLL file d.Name into memory. It returns an error if the load fails.
+// Load will not try to load the DLL again if it is already loaded into memory.
+func (d *LazyDLL) Load() error {
+	// Non-racy version of:
+	// if d.dll != nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil {
+		return nil
+	}
+	d.mu.Lock()
+	defer d.mu.Unlock()
+	if d.dll != nil {
+		return nil
+	}
+
+	// kernel32.dll is special, since it's where LoadLibraryEx comes from.
+	// The kernel already special-cases its name, so it's always
+	// loaded from system32.
+	var dll *DLL
+	var err error
+	if d.Name == "kernel32.dll" {
+		dll, err = LoadDLL(d.Name)
+	} else {
+		dll, err = loadLibraryEx(d.Name, d.System)
+	}
+	if err != nil {
+		return err
+	}
+
+	// Non-racy version of:
+	// d.dll = dll
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll))
+	return nil
+}
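Load pairs atomic.LoadPointer with atomic.StorePointer so the lock-free fast path never races the slow path's write; this is the standard double-checked locking shape. A generic sketch of the same pattern, with a hypothetical resource type standing in for *DLL:

```go
import (
	"sync"
	"sync/atomic"
	"unsafe"
)

type resource struct{} // stand-in for the lazily created value

func newResource() *resource { return &resource{} } // hypothetical constructor

type lazy struct {
	mu  sync.Mutex
	val *resource // nil until initialized; written only under mu
}

func (l *lazy) get() *resource {
	// Fast path: an atomic load lets initialized callers skip the mutex
	// without racing the atomic store below.
	if p := atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&l.val))); p != nil {
		return (*resource)(p)
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.val == nil {
		atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&l.val)),
			unsafe.Pointer(newResource()))
	}
	return l.val
}
```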
+
+// mustLoad is like Load but panics if the load fails.
+func (d *LazyDLL) mustLoad() {
+	e := d.Load()
+	if e != nil {
+		panic(e)
+	}
+}
+
+// Handle returns d's module handle.
+func (d *LazyDLL) Handle() uintptr {
+	d.mustLoad()
+	return uintptr(d.dll.Handle)
+}
+
+// NewProc returns a LazyProc for accessing the named procedure in the DLL d.
+func (d *LazyDLL) NewProc(name string) *LazyProc {
+	return &LazyProc{l: d, Name: name}
+}
+
+// NewLazyDLL creates a new LazyDLL associated with a DLL file.
+//
+// Warning: using NewLazyDLL without an absolute path name is subject to
+// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
+func NewLazyDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name}
+}
+
+// NewLazySystemDLL is like NewLazyDLL, but will only
+// search the Windows System directory for the DLL if name is
+// a base name (like "advapi32.dll").
+func NewLazySystemDLL(name string) *LazyDLL {
+	return &LazyDLL{Name: name, System: true}
+}
+
+// A LazyProc implements access to a procedure inside a LazyDLL.
+// It delays the lookup until the Addr method is called.
+type LazyProc struct {
+	Name string
+
+	mu   sync.Mutex
+	l    *LazyDLL
+	proc *Proc
+}
+
+// Find searches the DLL for the procedure named p.Name. It returns
+// an error if the search fails. Find will not repeat the search if the
+// procedure has already been found and loaded into memory.
+func (p *LazyProc) Find() error {
+	// Non-racy version of:
+	// if p.proc == nil {
+	if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil {
+		p.mu.Lock()
+		defer p.mu.Unlock()
+		if p.proc == nil {
+			e := p.l.Load()
+			if e != nil {
+				return e
+			}
+			proc, e := p.l.dll.FindProc(p.Name)
+			if e != nil {
+				return e
+			}
+			// Non-racy version of:
+			// p.proc = proc
+			atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc))
+		}
+	}
+	return nil
+}
+
+// mustFind is like Find but panics if the search fails.
+func (p *LazyProc) mustFind() {
+	e := p.Find()
+	if e != nil {
+		panic(e)
+	}
+}
+
+// Addr returns the address of the procedure represented by p.
+// The return value can be passed to Syscall to run the procedure.
+// It will panic if the procedure cannot be found.
+func (p *LazyProc) Addr() uintptr {
+	p.mustFind()
+	return p.proc.Addr()
+}
+
+//go:uintptrescapes
+
+// Call executes procedure p with arguments a. It will panic if more than 15
+// arguments are supplied. It will also panic if the procedure cannot be found.
+//
+// The returned error is always non-nil, constructed from the result of GetLastError.
+// Callers must inspect the primary return value to decide whether an error occurred
+// (according to the semantics of the specific function being called) before consulting
+// the error. The error will be guaranteed to contain windows.Errno.
+func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) {
+	p.mustFind()
+	return p.proc.Call(a...)
+}
+
+var canDoSearchSystem32Once struct {
+	sync.Once
+	v bool
+}
+
+func initCanDoSearchSystem32() {
+	// https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says:
+	// "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows
+	// Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on
+	// systems that have KB2533623 installed. To determine whether the
+	// flags are available, use GetProcAddress to get the address of the
+	// AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories
+	// function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_*
+	// flags can be used with LoadLibraryEx."
+	canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil)
+}
+
+func canDoSearchSystem32() bool {
+	canDoSearchSystem32Once.Do(initCanDoSearchSystem32)
+	return canDoSearchSystem32Once.v
+}
+
+func isBaseName(name string) bool {
+	for _, c := range name {
+		if c == ':' || c == '/' || c == '\\' {
+			return false
+		}
+	}
+	return true
+}
+
+// loadLibraryEx wraps the Windows LoadLibraryEx function.
+//
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx
+//
+// If name is not an absolute path, LoadLibraryEx searches for the DLL
+// in a variety of automatic locations unless constrained by flags.
+// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx
+func loadLibraryEx(name string, system bool) (*DLL, error) {
+	loadDLL := name
+	var flags uintptr
+	if system {
+		if canDoSearchSystem32() {
+			flags = LOAD_LIBRARY_SEARCH_SYSTEM32
+		} else if isBaseName(name) {
+			// Windows XP or an unpatched Windows machine
+			// trying to load "foo.dll" out of the system
+			// folder, but LoadLibraryEx doesn't support
+			// that yet on their system, so emulate it.
+			systemdir, err := GetSystemDirectory()
+			if err != nil {
+				return nil, err
+			}
+			loadDLL = systemdir + "\\" + name
+		}
+	}
+	h, err := LoadLibraryEx(loadDLL, 0, flags)
+	if err != nil {
+		return nil, err
+	}
+	return &DLL{Name: name, Handle: h}, nil
+}
diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go
new file mode 100644
index 0000000..d4577a4
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/env_windows.go
@@ -0,0 +1,57 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Windows environment variables.
+
+package windows
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func Getenv(key string) (value string, found bool) {
+	return syscall.Getenv(key)
+}
+
+func Setenv(key, value string) error {
+	return syscall.Setenv(key, value)
+}
+
+func Clearenv() {
+	syscall.Clearenv()
+}
+
+func Environ() []string {
+	return syscall.Environ()
+}
+
+// Environ returns a default environment associated with the token, rather
+// than the current process. If inheritExisting is true, then this environment
+// also inherits the environment of the current process.
+func (token Token) Environ(inheritExisting bool) (env []string, err error) {
+	var block *uint16
+	err = CreateEnvironmentBlock(&block, token, inheritExisting)
+	if err != nil {
+		return nil, err
+	}
+	defer DestroyEnvironmentBlock(block)
+	size := unsafe.Sizeof(*block)
+	for *block != 0 {
+		// find NUL terminator
+		end := unsafe.Pointer(block)
+		for *(*uint16)(end) != 0 {
+			end = unsafe.Add(end, size)
+		}
+
+		entry := unsafe.Slice(block, (uintptr(end)-uintptr(unsafe.Pointer(block)))/size)
+		env = append(env, UTF16ToString(entry))
+		block = (*uint16)(unsafe.Add(end, size))
+	}
+	return env, nil
+}
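+
+// Usage sketch (assumes a token obtained elsewhere, e.g. via
+// OpenProcessToken; illustrative only):
+//
+//	env, err := token.Environ(true)
+//	if err == nil {
+//		for _, kv := range env {
+//			_ = kv // each entry has the form "KEY=value"
+//		}
+//	}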
+
+func Unsetenv(key string) error {
+	return syscall.Unsetenv(key)
+}
diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go
new file mode 100644
index 0000000..6c36695
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/eventlog.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package windows
+
+const (
+	EVENTLOG_SUCCESS          = 0
+	EVENTLOG_ERROR_TYPE       = 1
+	EVENTLOG_WARNING_TYPE     = 2
+	EVENTLOG_INFORMATION_TYPE = 4
+	EVENTLOG_AUDIT_SUCCESS    = 8
+	EVENTLOG_AUDIT_FAILURE    = 16
+)
+
+//sys	RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW
+//sys	DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource
+//sys	ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW
diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go
new file mode 100644
index 0000000..9cabbb6
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/exec_windows.go
@@ -0,0 +1,248 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Fork, exec, wait, etc.
+
+package windows
+
+import (
+	errorspkg "errors"
+	"unsafe"
+)
+
+// EscapeArg rewrites command line argument s as prescribed
+// in http://msdn.microsoft.com/en-us/library/ms880421.
+// This function returns "" (2 double quotes) if s is empty.
+// Alternatively, these transformations are done:
+//   - every backslash (\) is doubled, but only if immediately
+//     followed by a double quote (");
+//   - every double quote (") is escaped by a backslash (\);
+//   - finally, s is wrapped with double quotes (arg -> "arg"),
+//     but only if there is a space or tab inside s.
+func EscapeArg(s string) string {
+	if len(s) == 0 {
+		return `""`
+	}
+	n := len(s)
+	hasSpace := false
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"', '\\':
+			n++
+		case ' ', '\t':
+			hasSpace = true
+		}
+	}
+	if hasSpace {
+		n += 2 // Reserve space for quotes.
+	}
+	if n == len(s) {
+		return s
+	}
+
+	qs := make([]byte, n)
+	j := 0
+	if hasSpace {
+		qs[j] = '"'
+		j++
+	}
+	slashes := 0
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		default:
+			slashes = 0
+			qs[j] = s[i]
+		case '\\':
+			slashes++
+			qs[j] = s[i]
+		case '"':
+			for ; slashes > 0; slashes-- {
+				qs[j] = '\\'
+				j++
+			}
+			qs[j] = '\\'
+			j++
+			qs[j] = s[i]
+		}
+		j++
+	}
+	if hasSpace {
+		for ; slashes > 0; slashes-- {
+			qs[j] = '\\'
+			j++
+		}
+		qs[j] = '"'
+		j++
+	}
+	return string(qs[:j])
+}
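+
+// For example (sketch): EscapeArg(`C:\dir with spaces\`) yields
+// `"C:\dir with spaces\\"`, while EscapeArg(`plain`) is returned unchanged.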
+
+// ComposeCommandLine escapes and joins the given arguments suitable for use as a Windows command line,
+// in CreateProcess's CommandLine argument, CreateService/ChangeServiceConfig's BinaryPathName argument,
+// or any program that uses CommandLineToArgv.
+func ComposeCommandLine(args []string) string {
+	if len(args) == 0 {
+		return ""
+	}
+
+	// Per https://learn.microsoft.com/en-us/windows/win32/api/shellapi/nf-shellapi-commandlinetoargvw:
+	// “This function accepts command lines that contain a program name; the
+	// program name can be enclosed in quotation marks or not.”
+	//
+	// Unfortunately, it provides no means of escaping interior quotation marks
+	// within that program name, and we have no way to report them here.
+	prog := args[0]
+	mustQuote := len(prog) == 0
+	for i := 0; i < len(prog); i++ {
+		c := prog[i]
+		if c <= ' ' || (c == '"' && i == 0) {
+			// Force quotes for not only the ASCII space and tab as described in the
+			// MSDN article, but also ASCII control characters.
+			// The documentation for CommandLineToArgvW doesn't say what happens when
+			// the first argument is not a valid program name, but it empirically
+			// seems to drop unquoted control characters.
+			mustQuote = true
+			break
+		}
+	}
+	var commandLine []byte
+	if mustQuote {
+		commandLine = make([]byte, 0, len(prog)+2)
+		commandLine = append(commandLine, '"')
+		for i := 0; i < len(prog); i++ {
+			c := prog[i]
+			if c == '"' {
+				// This quote would interfere with our surrounding quotes.
+				// We have no way to report an error, so just strip out
+				// the offending character instead.
+				continue
+			}
+			commandLine = append(commandLine, c)
+		}
+		commandLine = append(commandLine, '"')
+	} else {
+		if len(args) == 1 {
+			// args[0] is a valid command line representing itself.
+			// No need to allocate a new slice or string for it.
+			return prog
+		}
+		commandLine = []byte(prog)
+	}
+
+	for _, arg := range args[1:] {
+		commandLine = append(commandLine, ' ')
+		// TODO(bcmills): since we're already appending to a slice, it would be nice
+		// to avoid the intermediate allocations of EscapeArg.
+		// Perhaps we can factor out an appendEscapedArg function.
+		commandLine = append(commandLine, EscapeArg(arg)...)
+	}
+	return string(commandLine)
+}
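+
+// Sketch (paths are illustrative):
+//
+//	windows.ComposeCommandLine([]string{`C:\Program Files\app.exe`, `a b`})
+//	// yields `"C:\Program Files\app.exe" "a b"`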
+
+// DecomposeCommandLine breaks apart its argument command line into unescaped parts using CommandLineToArgv,
+// as gathered from GetCommandLine, QUERY_SERVICE_CONFIG's BinaryPathName argument, or elsewhere that
+// command lines are passed around.
+// DecomposeCommandLine returns an error if commandLine contains NUL.
+func DecomposeCommandLine(commandLine string) ([]string, error) {
+	if len(commandLine) == 0 {
+		return []string{}, nil
+	}
+	utf16CommandLine, err := UTF16FromString(commandLine)
+	if err != nil {
+		return nil, errorspkg.New("string with NUL passed to DecomposeCommandLine")
+	}
+	var argc int32
+	argv, err := commandLineToArgv(&utf16CommandLine[0], &argc)
+	if err != nil {
+		return nil, err
+	}
+	defer LocalFree(Handle(unsafe.Pointer(argv)))
+
+	var args []string
+	for _, p := range unsafe.Slice(argv, argc) {
+		args = append(args, UTF16PtrToString(p))
+	}
+	return args, nil
+}
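+
+// Sketch: DecomposeCommandLine is the approximate inverse of
+// ComposeCommandLine (values illustrative):
+//
+//	args, err := windows.DecomposeCommandLine(`"C:\app.exe" -v "x y"`)
+//	// args == []string{`C:\app.exe`, `-v`, `x y`}, err == nil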
+
+// CommandLineToArgv parses a Unicode command line string and sets
+// argc to the number of parsed arguments.
+//
+// The returned memory should be freed using a single call to LocalFree.
+//
+// Note that although the return type of CommandLineToArgv indicates 8192
+// entries of up to 8192 characters each, the actual count of parsed arguments
+// may exceed 8192, and the documentation for CommandLineToArgvW does not mention
+// any bound on the lengths of the individual argument strings.
+// (See https://go.dev/issue/63236.)
+func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) {
+	argp, err := commandLineToArgv(cmd, argc)
+	argv = (*[8192]*[8192]uint16)(unsafe.Pointer(argp))
+	return argv, err
+}
+
+func CloseOnExec(fd Handle) {
+	SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0)
+}
+
+// FullPath retrieves the full path of the specified file.
+func FullPath(name string) (path string, err error) {
+	p, err := UTF16PtrFromString(name)
+	if err != nil {
+		return "", err
+	}
+	n := uint32(100)
+	for {
+		buf := make([]uint16, n)
+		n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil)
+		if err != nil {
+			return "", err
+		}
+		if n <= uint32(len(buf)) {
+			return UTF16ToString(buf[:n]), nil
+		}
+	}
+}
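+
+// Sketch: the buffer is grown and the call retried until the reported
+// length fits (the input path is illustrative):
+//
+//	p, err := windows.FullPath(`.\sub\..\file.txt`)
+//	// p is the absolute form of the path, resolved against the
+//	// current working directory.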
+
+// NewProcThreadAttributeList allocates a new ProcThreadAttributeListContainer, with the requested maximum number of attributes.
+func NewProcThreadAttributeList(maxAttrCount uint32) (*ProcThreadAttributeListContainer, error) {
+	var size uintptr
+	err := initializeProcThreadAttributeList(nil, maxAttrCount, 0, &size)
+	if err != ERROR_INSUFFICIENT_BUFFER {
+		if err == nil {
+			return nil, errorspkg.New("unable to query buffer size from InitializeProcThreadAttributeList")
+		}
+		return nil, err
+	}
+	alloc, err := LocalAlloc(LMEM_FIXED, uint32(size))
+	if err != nil {
+		return nil, err
+	}
+	// size is guaranteed to be ≥1 by InitializeProcThreadAttributeList.
+	al := &ProcThreadAttributeListContainer{data: (*ProcThreadAttributeList)(unsafe.Pointer(alloc))}
+	err = initializeProcThreadAttributeList(al.data, maxAttrCount, 0, &size)
+	if err != nil {
+		return nil, err
+	}
+	return al, err
+}
+
+// Update modifies the ProcThreadAttributeList using UpdateProcThreadAttribute.
+func (al *ProcThreadAttributeListContainer) Update(attribute uintptr, value unsafe.Pointer, size uintptr) error {
+	al.pointers = append(al.pointers, value)
+	return updateProcThreadAttribute(al.data, 0, attribute, value, size, nil, nil)
+}
+
+// Delete frees ProcThreadAttributeList's resources.
+func (al *ProcThreadAttributeListContainer) Delete() {
+	deleteProcThreadAttributeList(al.data)
+	LocalFree(Handle(unsafe.Pointer(al.data)))
+	al.data = nil
+	al.pointers = nil
+}
+
+// List returns the actual ProcThreadAttributeList to be passed to StartupInfoEx.
+func (al *ProcThreadAttributeListContainer) List() *ProcThreadAttributeList {
+	return al.data
+}
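+
+// Typical lifecycle sketch (the attribute and parentHandle are
+// illustrative, not prescribed by this file):
+//
+//	al, err := windows.NewProcThreadAttributeList(1)
+//	if err == nil {
+//		defer al.Delete()
+//		_ = al.Update(windows.PROC_THREAD_ATTRIBUTE_PARENT_PROCESS,
+//			unsafe.Pointer(&parentHandle), unsafe.Sizeof(parentHandle))
+//		// Pass al.List() in a StartupInfoEx to CreateProcess.
+//	}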
diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go
new file mode 100644
index 0000000..6dc0920
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/memory_windows.go
@@ -0,0 +1,48 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+const (
+	MEM_COMMIT      = 0x00001000
+	MEM_RESERVE     = 0x00002000
+	MEM_DECOMMIT    = 0x00004000
+	MEM_RELEASE     = 0x00008000
+	MEM_RESET       = 0x00080000
+	MEM_TOP_DOWN    = 0x00100000
+	MEM_WRITE_WATCH = 0x00200000
+	MEM_PHYSICAL    = 0x00400000
+	MEM_RESET_UNDO  = 0x01000000
+	MEM_LARGE_PAGES = 0x20000000
+
+	PAGE_NOACCESS          = 0x00000001
+	PAGE_READONLY          = 0x00000002
+	PAGE_READWRITE         = 0x00000004
+	PAGE_WRITECOPY         = 0x00000008
+	PAGE_EXECUTE           = 0x00000010
+	PAGE_EXECUTE_READ      = 0x00000020
+	PAGE_EXECUTE_READWRITE = 0x00000040
+	PAGE_EXECUTE_WRITECOPY = 0x00000080
+	PAGE_GUARD             = 0x00000100
+	PAGE_NOCACHE           = 0x00000200
+	PAGE_WRITECOMBINE      = 0x00000400
+	PAGE_TARGETS_INVALID   = 0x40000000
+	PAGE_TARGETS_NO_UPDATE = 0x40000000
+
+	QUOTA_LIMITS_HARDWS_MIN_DISABLE = 0x00000002
+	QUOTA_LIMITS_HARDWS_MIN_ENABLE  = 0x00000001
+	QUOTA_LIMITS_HARDWS_MAX_DISABLE = 0x00000008
+	QUOTA_LIMITS_HARDWS_MAX_ENABLE  = 0x00000004
+)
+
+type MemoryBasicInformation struct {
+	BaseAddress       uintptr
+	AllocationBase    uintptr
+	AllocationProtect uint32
+	PartitionId       uint16
+	RegionSize        uintptr
+	State             uint32
+	Protect           uint32
+	Type              uint32
+}
diff --git a/vendor/golang.org/x/sys/windows/mkerrors.bash b/vendor/golang.org/x/sys/windows/mkerrors.bash
new file mode 100644
index 0000000..58e0188
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/mkerrors.bash
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Copyright 2019 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+set -e
+shopt -s nullglob
+
+winerror="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/winerror.h | sort -Vr | head -n 1)"
+[[ -n $winerror ]] || { echo "Unable to find winerror.h" >&2; exit 1; }
+ntstatus="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/shared/ntstatus.h | sort -Vr | head -n 1)"
+[[ -n $ntstatus ]] || { echo "Unable to find ntstatus.h" >&2; exit 1; }
+
+declare -A errors
+
+{
+	echo "// Code generated by 'mkerrors.bash'; DO NOT EDIT."
+	echo
+	echo "package windows"
+	echo "import \"syscall\""
+	echo "const ("
+
+	while read -r line; do
+		unset vtype
+		if [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?([A-Z][A-Z0-9_]+k?)\)? ]]; then
+			key="${BASH_REMATCH[1]}"
+			value="${BASH_REMATCH[3]}"
+		elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +([A-Z0-9_]+\()?((0x)?[0-9A-Fa-f]+)L?\)? ]]; then
+			key="${BASH_REMATCH[1]}"
+			value="${BASH_REMATCH[3]}"
+			vtype="${BASH_REMATCH[2]}"
+		elif [[ $line =~ ^#define\ +([A-Z0-9_]+k?)\ +\(\(([A-Z]+)\)((0x)?[0-9A-Fa-f]+)L?\) ]]; then
+			key="${BASH_REMATCH[1]}"
+			value="${BASH_REMATCH[3]}"
+			vtype="${BASH_REMATCH[2]}"
+		else
+			continue
+		fi
+		[[ -n $key && -n $value ]] || continue
+		[[ -z ${errors["$key"]} ]] || continue
+		errors["$key"]="$value"
+		if [[ -v vtype ]]; then
+			if [[ $key == FACILITY_* || $key == NO_ERROR ]]; then
+				vtype=""
+			elif [[ $vtype == *HANDLE* || $vtype == *HRESULT* ]]; then
+				vtype="Handle"
+			else
+				vtype="syscall.Errno"
+			fi
+			last_vtype="$vtype"
+		else
+			vtype=""
+			if [[ $last_vtype == Handle && $value == NO_ERROR ]]; then
+				value="S_OK"
+			elif [[ $last_vtype == syscall.Errno && $value == NO_ERROR ]]; then
+				value="ERROR_SUCCESS"
+			fi
+		fi
+
+		echo "$key $vtype = $value"
+	done < "$winerror"
+
+	while read -r line; do
+		[[ $line =~ ^#define\ (STATUS_[^\s]+)\ +\(\(NTSTATUS\)((0x)?[0-9a-fA-F]+)L?\) ]] || continue
+		echo "${BASH_REMATCH[1]} NTStatus = ${BASH_REMATCH[2]}"
+	done < "$ntstatus"
+
+	echo ")"
+} | gofmt > "zerrors_windows.go"
diff --git a/vendor/golang.org/x/sys/windows/mkknownfolderids.bash b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash
new file mode 100644
index 0000000..ab8924e
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/mkknownfolderids.bash
@@ -0,0 +1,27 @@
+#!/bin/bash
+
+# Copyright 2019 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+set -e
+shopt -s nullglob
+
+knownfolders="$(printf '%s\n' "/mnt/c/Program Files (x86)/Windows Kits/"/*/Include/*/um/KnownFolders.h | sort -Vr | head -n 1)"
+[[ -n $knownfolders ]] || { echo "Unable to find KnownFolders.h" >&2; exit 1; }
+
+{
+	echo "// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT."
+	echo
+	echo "package windows"
+	echo "type KNOWNFOLDERID GUID"
+	echo "var ("
+	while read -r line; do
+		[[ $line =~ DEFINE_KNOWN_FOLDER\((FOLDERID_[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+),[\t\ ]*(0x[^,]+)\) ]] || continue
+		printf "%s = &KNOWNFOLDERID{0x%08x, 0x%04x, 0x%04x, [8]byte{0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x}}\n" \
+			"${BASH_REMATCH[1]}" $(( "${BASH_REMATCH[2]}" )) $(( "${BASH_REMATCH[3]}" )) $(( "${BASH_REMATCH[4]}" )) \
+			$(( "${BASH_REMATCH[5]}" )) $(( "${BASH_REMATCH[6]}" )) $(( "${BASH_REMATCH[7]}" )) $(( "${BASH_REMATCH[8]}" )) \
+			$(( "${BASH_REMATCH[9]}" )) $(( "${BASH_REMATCH[10]}" )) $(( "${BASH_REMATCH[11]}" )) $(( "${BASH_REMATCH[12]}" ))
+	done < "$knownfolders"
+	echo ")"
+} | gofmt > "zknownfolderids_windows.go"
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go
new file mode 100644
index 0000000..dbcdb09
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package windows
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go setupapi_windows.go
diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go
new file mode 100644
index 0000000..0f1bdc3
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/race.go
@@ -0,0 +1,30 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows && race
+
+package windows
+
+import (
+	"runtime"
+	"unsafe"
+)
+
+const raceenabled = true
+
+func raceAcquire(addr unsafe.Pointer) {
+	runtime.RaceAcquire(addr)
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+	runtime.RaceReleaseMerge(addr)
+}
+
+func raceReadRange(addr unsafe.Pointer, len int) {
+	runtime.RaceReadRange(addr, len)
+}
+
+func raceWriteRange(addr unsafe.Pointer, len int) {
+	runtime.RaceWriteRange(addr, len)
+}
diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go
new file mode 100644
index 0000000..0c78da7
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/race0.go
@@ -0,0 +1,25 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows && !race
+
+package windows
+
+import (
+	"unsafe"
+)
+
+const raceenabled = false
+
+func raceAcquire(addr unsafe.Pointer) {
+}
+
+func raceReleaseMerge(addr unsafe.Pointer) {
+}
+
+func raceReadRange(addr unsafe.Pointer, len int) {
+}
+
+func raceWriteRange(addr unsafe.Pointer, len int) {
+}
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
new file mode 100644
index 0000000..a8b0364
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -0,0 +1,1497 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	NameUnknown          = 0
+	NameFullyQualifiedDN = 1
+	NameSamCompatible    = 2
+	NameDisplay          = 3
+	NameUniqueId         = 6
+	NameCanonical        = 7
+	NameUserPrincipal    = 8
+	NameCanonicalEx      = 9
+	NameServicePrincipal = 10
+	NameDnsDomain        = 12
+)
+
+// This function returns a 1-byte BOOLEAN rather than the 4-byte BOOL.
+// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx
+//sys	TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW
+//sys	GetUserNameEx(nameFormat uint32, nameBuffer *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW
+
+// TranslateAccountName converts a directory service
+// object name from one format to another.
+func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) {
+	u, e := UTF16PtrFromString(username)
+	if e != nil {
+		return "", e
+	}
+	n := uint32(50)
+	for {
+		b := make([]uint16, n)
+		e = TranslateName(u, from, to, &b[0], &n)
+		if e == nil {
+			return UTF16ToString(b[:n]), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", e
+		}
+		if n <= uint32(len(b)) {
+			return "", e
+		}
+	}
+}
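+
+// Sketch (account names and formats are illustrative):
+//
+//	upn, err := windows.TranslateAccountName(
+//		`DOMAIN\user`, windows.NameSamCompatible, windows.NameUserPrincipal, 50)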
+
+const (
+	// do not reorder
+	NetSetupUnknownStatus = iota
+	NetSetupUnjoined
+	NetSetupWorkgroupName
+	NetSetupDomainName
+)
+
+type UserInfo10 struct {
+	Name       *uint16
+	Comment    *uint16
+	UsrComment *uint16
+	FullName   *uint16
+}
+
+//sys	NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo
+//sys	NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation
+//sys	NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree
+//sys   NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) = netapi32.NetUserEnum
+
+const (
+	// do not reorder
+	SidTypeUser = 1 + iota
+	SidTypeGroup
+	SidTypeDomain
+	SidTypeAlias
+	SidTypeWellKnownGroup
+	SidTypeDeletedAccount
+	SidTypeInvalid
+	SidTypeUnknown
+	SidTypeComputer
+	SidTypeLabel
+)
+
+type SidIdentifierAuthority struct {
+	Value [6]byte
+}
+
+var (
+	SECURITY_NULL_SID_AUTHORITY        = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}}
+	SECURITY_WORLD_SID_AUTHORITY       = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}}
+	SECURITY_LOCAL_SID_AUTHORITY       = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}}
+	SECURITY_CREATOR_SID_AUTHORITY     = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}}
+	SECURITY_NON_UNIQUE_AUTHORITY      = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}}
+	SECURITY_NT_AUTHORITY              = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}}
+	SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}}
+)
+
+const (
+	SECURITY_NULL_RID                   = 0
+	SECURITY_WORLD_RID                  = 0
+	SECURITY_LOCAL_RID                  = 0
+	SECURITY_CREATOR_OWNER_RID          = 0
+	SECURITY_CREATOR_GROUP_RID          = 1
+	SECURITY_DIALUP_RID                 = 1
+	SECURITY_NETWORK_RID                = 2
+	SECURITY_BATCH_RID                  = 3
+	SECURITY_INTERACTIVE_RID            = 4
+	SECURITY_LOGON_IDS_RID              = 5
+	SECURITY_SERVICE_RID                = 6
+	SECURITY_LOCAL_SYSTEM_RID           = 18
+	SECURITY_BUILTIN_DOMAIN_RID         = 32
+	SECURITY_PRINCIPAL_SELF_RID         = 10
+	SECURITY_CREATOR_OWNER_SERVER_RID   = 0x2
+	SECURITY_CREATOR_GROUP_SERVER_RID   = 0x3
+	SECURITY_LOGON_IDS_RID_COUNT        = 0x3
+	SECURITY_ANONYMOUS_LOGON_RID        = 0x7
+	SECURITY_PROXY_RID                  = 0x8
+	SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9
+	SECURITY_SERVER_LOGON_RID           = SECURITY_ENTERPRISE_CONTROLLERS_RID
+	SECURITY_AUTHENTICATED_USER_RID     = 0xb
+	SECURITY_RESTRICTED_CODE_RID        = 0xc
+	SECURITY_NT_NON_UNIQUE_RID          = 0x15
+)
+
+// Predefined domain-relative RIDs for local groups.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx
+const (
+	DOMAIN_ALIAS_RID_ADMINS                         = 0x220
+	DOMAIN_ALIAS_RID_USERS                          = 0x221
+	DOMAIN_ALIAS_RID_GUESTS                         = 0x222
+	DOMAIN_ALIAS_RID_POWER_USERS                    = 0x223
+	DOMAIN_ALIAS_RID_ACCOUNT_OPS                    = 0x224
+	DOMAIN_ALIAS_RID_SYSTEM_OPS                     = 0x225
+	DOMAIN_ALIAS_RID_PRINT_OPS                      = 0x226
+	DOMAIN_ALIAS_RID_BACKUP_OPS                     = 0x227
+	DOMAIN_ALIAS_RID_REPLICATOR                     = 0x228
+	DOMAIN_ALIAS_RID_RAS_SERVERS                    = 0x229
+	DOMAIN_ALIAS_RID_PREW2KCOMPACCESS               = 0x22a
+	DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS           = 0x22b
+	DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS      = 0x22c
+	DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d
+	DOMAIN_ALIAS_RID_MONITORING_USERS               = 0x22e
+	DOMAIN_ALIAS_RID_LOGGING_USERS                  = 0x22f
+	DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS            = 0x230
+	DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS             = 0x231
+	DOMAIN_ALIAS_RID_DCOM_USERS                     = 0x232
+	DOMAIN_ALIAS_RID_IUSERS                         = 0x238
+	DOMAIN_ALIAS_RID_CRYPTO_OPERATORS               = 0x239
+	DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP     = 0x23b
+	DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c
+	DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP        = 0x23d
+	DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP      = 0x23e
+)
+
+//sys	LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW
+//sys	LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW
+//sys	ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys	ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW
+//sys	GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid
+//sys	CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid
+//sys	AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid
+//sys	createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) = advapi32.CreateWellKnownSid
+//sys	isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) = advapi32.IsWellKnownSid
+//sys	FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid
+//sys	EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid
+//sys	getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) = advapi32.GetSidIdentifierAuthority
+//sys	getSidSubAuthorityCount(sid *SID) (count *uint8) = advapi32.GetSidSubAuthorityCount
+//sys	getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) = advapi32.GetSidSubAuthority
+//sys	isValidSid(sid *SID) (isValid bool) = advapi32.IsValidSid
+
+// The security identifier (SID) structure is a variable-length
+// structure used to uniquely identify users or groups.
+type SID struct{}
+
+// StringToSid converts a string-format security identifier
+// SID into a valid, functional SID.
+func StringToSid(s string) (*SID, error) {
+	var sid *SID
+	p, e := UTF16PtrFromString(s)
+	if e != nil {
+		return nil, e
+	}
+	e = ConvertStringSidToSid(p, &sid)
+	if e != nil {
+		return nil, e
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(sid)))
+	return sid.Copy()
+}
+
+// LookupSID retrieves a security identifier SID for the account
+// and the name of the domain on which the account was found.
+// The system parameter specifies the target computer to search.
+func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) {
+	if len(account) == 0 {
+		return nil, "", 0, syscall.EINVAL
+	}
+	acc, e := UTF16PtrFromString(account)
+	if e != nil {
+		return nil, "", 0, e
+	}
+	var sys *uint16
+	if len(system) > 0 {
+		sys, e = UTF16PtrFromString(system)
+		if e != nil {
+			return nil, "", 0, e
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]byte, n)
+		db := make([]uint16, dn)
+		sid = (*SID)(unsafe.Pointer(&b[0]))
+		e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType)
+		if e == nil {
+			return sid, UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, "", 0, e
+		}
+	}
+}
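+
+// Sketch (the account name is illustrative):
+//
+//	sid, domain, accType, err := windows.LookupSID("", "Administrator")
+//	// sid.String() is of the form "S-1-5-21-...-500" on success.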
+
+// String converts SID to a string format suitable for display, storage, or transmission.
+func (sid *SID) String() string {
+	var s *uint16
+	e := ConvertSidToStringSid(sid, &s)
+	if e != nil {
+		return ""
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(s)))
+	return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:])
+}
+
+// Len returns the length, in bytes, of a valid security identifier SID.
+func (sid *SID) Len() int {
+	return int(GetLengthSid(sid))
+}
+
+// Copy creates a duplicate of security identifier SID.
+func (sid *SID) Copy() (*SID, error) {
+	b := make([]byte, sid.Len())
+	sid2 := (*SID)(unsafe.Pointer(&b[0]))
+	e := CopySid(uint32(len(b)), sid2, sid)
+	if e != nil {
+		return nil, e
+	}
+	return sid2, nil
+}
+
+// IdentifierAuthority returns the identifier authority of the SID.
+func (sid *SID) IdentifierAuthority() SidIdentifierAuthority {
+	return *getSidIdentifierAuthority(sid)
+}
+
+// SubAuthorityCount returns the number of sub-authorities in the SID.
+func (sid *SID) SubAuthorityCount() uint8 {
+	return *getSidSubAuthorityCount(sid)
+}
+
+// SubAuthority returns the sub-authority of the SID as specified by
+// the index, which must be less than sid.SubAuthorityCount().
+func (sid *SID) SubAuthority(idx uint32) uint32 {
+	if idx >= uint32(sid.SubAuthorityCount()) {
+		panic("sub-authority index out of range")
+	}
+	return *getSidSubAuthority(sid, idx)
+}
+
+// IsValid returns whether the SID has a valid revision and length.
+func (sid *SID) IsValid() bool {
+	return isValidSid(sid)
+}
+
+// Equals compares two SIDs for equality.
+func (sid *SID) Equals(sid2 *SID) bool {
+	return EqualSid(sid, sid2)
+}
+
+// IsWellKnown determines whether the SID matches the well-known sidType.
+func (sid *SID) IsWellKnown(sidType WELL_KNOWN_SID_TYPE) bool {
+	return isWellKnownSid(sid, sidType)
+}
+
+// LookupAccount retrieves the name of the account for this SID
+// and the name of the first domain on which this SID is found.
+// The system parameter specifies the target computer to search.
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) {
+	var sys *uint16
+	if len(system) > 0 {
+		sys, err = UTF16PtrFromString(system)
+		if err != nil {
+			return "", "", 0, err
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]uint16, n)
+		db := make([]uint16, dn)
+		e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType)
+		if e == nil {
+			return UTF16ToString(b), UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return "", "", 0, e
+		}
+	}
+}
+
+// Various types of pre-specified SIDs that can be synthesized and compared at runtime.
+type WELL_KNOWN_SID_TYPE uint32
+
+const (
+	WinNullSid                                    = 0
+	WinWorldSid                                   = 1
+	WinLocalSid                                   = 2
+	WinCreatorOwnerSid                            = 3
+	WinCreatorGroupSid                            = 4
+	WinCreatorOwnerServerSid                      = 5
+	WinCreatorGroupServerSid                      = 6
+	WinNtAuthoritySid                             = 7
+	WinDialupSid                                  = 8
+	WinNetworkSid                                 = 9
+	WinBatchSid                                   = 10
+	WinInteractiveSid                             = 11
+	WinServiceSid                                 = 12
+	WinAnonymousSid                               = 13
+	WinProxySid                                   = 14
+	WinEnterpriseControllersSid                   = 15
+	WinSelfSid                                    = 16
+	WinAuthenticatedUserSid                       = 17
+	WinRestrictedCodeSid                          = 18
+	WinTerminalServerSid                          = 19
+	WinRemoteLogonIdSid                           = 20
+	WinLogonIdsSid                                = 21
+	WinLocalSystemSid                             = 22
+	WinLocalServiceSid                            = 23
+	WinNetworkServiceSid                          = 24
+	WinBuiltinDomainSid                           = 25
+	WinBuiltinAdministratorsSid                   = 26
+	WinBuiltinUsersSid                            = 27
+	WinBuiltinGuestsSid                           = 28
+	WinBuiltinPowerUsersSid                       = 29
+	WinBuiltinAccountOperatorsSid                 = 30
+	WinBuiltinSystemOperatorsSid                  = 31
+	WinBuiltinPrintOperatorsSid                   = 32
+	WinBuiltinBackupOperatorsSid                  = 33
+	WinBuiltinReplicatorSid                       = 34
+	WinBuiltinPreWindows2000CompatibleAccessSid   = 35
+	WinBuiltinRemoteDesktopUsersSid               = 36
+	WinBuiltinNetworkConfigurationOperatorsSid    = 37
+	WinAccountAdministratorSid                    = 38
+	WinAccountGuestSid                            = 39
+	WinAccountKrbtgtSid                           = 40
+	WinAccountDomainAdminsSid                     = 41
+	WinAccountDomainUsersSid                      = 42
+	WinAccountDomainGuestsSid                     = 43
+	WinAccountComputersSid                        = 44
+	WinAccountControllersSid                      = 45
+	WinAccountCertAdminsSid                       = 46
+	WinAccountSchemaAdminsSid                     = 47
+	WinAccountEnterpriseAdminsSid                 = 48
+	WinAccountPolicyAdminsSid                     = 49
+	WinAccountRasAndIasServersSid                 = 50
+	WinNTLMAuthenticationSid                      = 51
+	WinDigestAuthenticationSid                    = 52
+	WinSChannelAuthenticationSid                  = 53
+	WinThisOrganizationSid                        = 54
+	WinOtherOrganizationSid                       = 55
+	WinBuiltinIncomingForestTrustBuildersSid      = 56
+	WinBuiltinPerfMonitoringUsersSid              = 57
+	WinBuiltinPerfLoggingUsersSid                 = 58
+	WinBuiltinAuthorizationAccessSid              = 59
+	WinBuiltinTerminalServerLicenseServersSid     = 60
+	WinBuiltinDCOMUsersSid                        = 61
+	WinBuiltinIUsersSid                           = 62
+	WinIUserSid                                   = 63
+	WinBuiltinCryptoOperatorsSid                  = 64
+	WinUntrustedLabelSid                          = 65
+	WinLowLabelSid                                = 66
+	WinMediumLabelSid                             = 67
+	WinHighLabelSid                               = 68
+	WinSystemLabelSid                             = 69
+	WinWriteRestrictedCodeSid                     = 70
+	WinCreatorOwnerRightsSid                      = 71
+	WinCacheablePrincipalsGroupSid                = 72
+	WinNonCacheablePrincipalsGroupSid             = 73
+	WinEnterpriseReadonlyControllersSid           = 74
+	WinAccountReadonlyControllersSid              = 75
+	WinBuiltinEventLogReadersGroup                = 76
+	WinNewEnterpriseReadonlyControllersSid        = 77
+	WinBuiltinCertSvcDComAccessGroup              = 78
+	WinMediumPlusLabelSid                         = 79
+	WinLocalLogonSid                              = 80
+	WinConsoleLogonSid                            = 81
+	WinThisOrganizationCertificateSid             = 82
+	WinApplicationPackageAuthoritySid             = 83
+	WinBuiltinAnyPackageSid                       = 84
+	WinCapabilityInternetClientSid                = 85
+	WinCapabilityInternetClientServerSid          = 86
+	WinCapabilityPrivateNetworkClientServerSid    = 87
+	WinCapabilityPicturesLibrarySid               = 88
+	WinCapabilityVideosLibrarySid                 = 89
+	WinCapabilityMusicLibrarySid                  = 90
+	WinCapabilityDocumentsLibrarySid              = 91
+	WinCapabilitySharedUserCertificatesSid        = 92
+	WinCapabilityEnterpriseAuthenticationSid      = 93
+	WinCapabilityRemovableStorageSid              = 94
+	WinBuiltinRDSRemoteAccessServersSid           = 95
+	WinBuiltinRDSEndpointServersSid               = 96
+	WinBuiltinRDSManagementServersSid             = 97
+	WinUserModeDriversSid                         = 98
+	WinBuiltinHyperVAdminsSid                     = 99
+	WinAccountCloneableControllersSid             = 100
+	WinBuiltinAccessControlAssistanceOperatorsSid = 101
+	WinBuiltinRemoteManagementUsersSid            = 102
+	WinAuthenticationAuthorityAssertedSid         = 103
+	WinAuthenticationServiceAssertedSid           = 104
+	WinLocalAccountSid                            = 105
+	WinLocalAccountAndAdministratorSid            = 106
+	WinAccountProtectedUsersSid                   = 107
+	WinCapabilityAppointmentsSid                  = 108
+	WinCapabilityContactsSid                      = 109
+	WinAccountDefaultSystemManagedSid             = 110
+	WinBuiltinDefaultSystemManagedGroupSid        = 111
+	WinBuiltinStorageReplicaAdminsSid             = 112
+	WinAccountKeyAdminsSid                        = 113
+	WinAccountEnterpriseKeyAdminsSid              = 114
+	WinAuthenticationKeyTrustSid                  = 115
+	WinAuthenticationKeyPropertyMFASid            = 116
+	WinAuthenticationKeyPropertyAttestationSid    = 117
+	WinAuthenticationFreshKeyAuthSid              = 118
+	WinBuiltinDeviceOwnersSid                     = 119
+)
+
+// CreateWellKnownSid creates a SID for a well-known predefined alias,
+// generally using the constants of the form Win*Sid, for the local machine.
+func CreateWellKnownSid(sidType WELL_KNOWN_SID_TYPE) (*SID, error) {
+	return CreateWellKnownDomainSid(sidType, nil)
+}
+
+// CreateWellKnownDomainSid creates a SID for a well-known predefined alias,
+// generally using the constants of the form Win*Sid, for the domain
+// specified by the domainSid parameter.
+func CreateWellKnownDomainSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID) (*SID, error) {
+	n := uint32(50)
+	for {
+		b := make([]byte, n)
+		sid := (*SID)(unsafe.Pointer(&b[0]))
+		err := createWellKnownSid(sidType, domainSid, sid, &n)
+		if err == nil {
+			return sid, nil
+		}
+		if err != ERROR_INSUFFICIENT_BUFFER {
+			return nil, err
+		}
+		if n <= uint32(len(b)) {
+			return nil, err
+		}
+	}
+}
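+
+// Sketch: synthesizing the local BUILTIN\Administrators SID:
+//
+//	admins, err := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
+//	// admins.String() == "S-1-5-32-544" on success.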
+
+const (
+	// do not reorder
+	TOKEN_ASSIGN_PRIMARY = 1 << iota
+	TOKEN_DUPLICATE
+	TOKEN_IMPERSONATE
+	TOKEN_QUERY
+	TOKEN_QUERY_SOURCE
+	TOKEN_ADJUST_PRIVILEGES
+	TOKEN_ADJUST_GROUPS
+	TOKEN_ADJUST_DEFAULT
+	TOKEN_ADJUST_SESSIONID
+
+	TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |
+		TOKEN_ASSIGN_PRIMARY |
+		TOKEN_DUPLICATE |
+		TOKEN_IMPERSONATE |
+		TOKEN_QUERY |
+		TOKEN_QUERY_SOURCE |
+		TOKEN_ADJUST_PRIVILEGES |
+		TOKEN_ADJUST_GROUPS |
+		TOKEN_ADJUST_DEFAULT |
+		TOKEN_ADJUST_SESSIONID
+	TOKEN_READ  = STANDARD_RIGHTS_READ | TOKEN_QUERY
+	TOKEN_WRITE = STANDARD_RIGHTS_WRITE |
+		TOKEN_ADJUST_PRIVILEGES |
+		TOKEN_ADJUST_GROUPS |
+		TOKEN_ADJUST_DEFAULT
+	TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
+)
+
+const (
+	// do not reorder
+	TokenUser = 1 + iota
+	TokenGroups
+	TokenPrivileges
+	TokenOwner
+	TokenPrimaryGroup
+	TokenDefaultDacl
+	TokenSource
+	TokenType
+	TokenImpersonationLevel
+	TokenStatistics
+	TokenRestrictedSids
+	TokenSessionId
+	TokenGroupsAndPrivileges
+	TokenSessionReference
+	TokenSandBoxInert
+	TokenAuditPolicy
+	TokenOrigin
+	TokenElevationType
+	TokenLinkedToken
+	TokenElevation
+	TokenHasRestrictions
+	TokenAccessInformation
+	TokenVirtualizationAllowed
+	TokenVirtualizationEnabled
+	TokenIntegrityLevel
+	TokenUIAccess
+	TokenMandatoryPolicy
+	TokenLogonSid
+	MaxTokenInfoClass
+)
+
+// Group attributes inside Tokengroups.Groups[i].Attributes
+const (
+	SE_GROUP_MANDATORY          = 0x00000001
+	SE_GROUP_ENABLED_BY_DEFAULT = 0x00000002
+	SE_GROUP_ENABLED            = 0x00000004
+	SE_GROUP_OWNER              = 0x00000008
+	SE_GROUP_USE_FOR_DENY_ONLY  = 0x00000010
+	SE_GROUP_INTEGRITY          = 0x00000020
+	SE_GROUP_INTEGRITY_ENABLED  = 0x00000040
+	SE_GROUP_LOGON_ID           = 0xC0000000
+	SE_GROUP_RESOURCE           = 0x20000000
+	SE_GROUP_VALID_ATTRIBUTES   = SE_GROUP_MANDATORY | SE_GROUP_ENABLED_BY_DEFAULT | SE_GROUP_ENABLED | SE_GROUP_OWNER | SE_GROUP_USE_FOR_DENY_ONLY | SE_GROUP_LOGON_ID | SE_GROUP_RESOURCE | SE_GROUP_INTEGRITY | SE_GROUP_INTEGRITY_ENABLED
+)
+
+// Privilege attributes
+const (
+	SE_PRIVILEGE_ENABLED_BY_DEFAULT = 0x00000001
+	SE_PRIVILEGE_ENABLED            = 0x00000002
+	SE_PRIVILEGE_REMOVED            = 0x00000004
+	SE_PRIVILEGE_USED_FOR_ACCESS    = 0x80000000
+	SE_PRIVILEGE_VALID_ATTRIBUTES   = SE_PRIVILEGE_ENABLED_BY_DEFAULT | SE_PRIVILEGE_ENABLED | SE_PRIVILEGE_REMOVED | SE_PRIVILEGE_USED_FOR_ACCESS
+)
+
+// Token types
+const (
+	TokenPrimary       = 1
+	TokenImpersonation = 2
+)
+
+// Impersonation levels
+const (
+	SecurityAnonymous      = 0
+	SecurityIdentification = 1
+	SecurityImpersonation  = 2
+	SecurityDelegation     = 3
+)
+
+type LUID struct {
+	LowPart  uint32
+	HighPart int32
+}
+
+type LUIDAndAttributes struct {
+	Luid       LUID
+	Attributes uint32
+}
+
+type SIDAndAttributes struct {
+	Sid        *SID
+	Attributes uint32
+}
+
+type Tokenuser struct {
+	User SIDAndAttributes
+}
+
+type Tokenprimarygroup struct {
+	PrimaryGroup *SID
+}
+
+type Tokengroups struct {
+	GroupCount uint32
+	Groups     [1]SIDAndAttributes // Use AllGroups() for iterating.
+}
+
+// AllGroups returns a slice that can be used to iterate over the groups in g.
+func (g *Tokengroups) AllGroups() []SIDAndAttributes {
+	return (*[(1 << 28) - 1]SIDAndAttributes)(unsafe.Pointer(&g.Groups[0]))[:g.GroupCount:g.GroupCount]
+}
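+
+// Sketch (token obtained elsewhere; illustrative only):
+//
+//	groups, err := token.GetTokenGroups()
+//	if err == nil {
+//		for _, g := range groups.AllGroups() {
+//			_ = g.Sid.String()
+//		}
+//	}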
+
+type Tokenprivileges struct {
+	PrivilegeCount uint32
+	Privileges     [1]LUIDAndAttributes // Use AllPrivileges() for iterating.
+}
+
+// AllPrivileges returns a slice that can be used to iterate over the privileges in p.
+func (p *Tokenprivileges) AllPrivileges() []LUIDAndAttributes {
+	return (*[(1 << 27) - 1]LUIDAndAttributes)(unsafe.Pointer(&p.Privileges[0]))[:p.PrivilegeCount:p.PrivilegeCount]
+}
+
+type Tokenmandatorylabel struct {
+	Label SIDAndAttributes
+}
+
+func (tml *Tokenmandatorylabel) Size() uint32 {
+	return uint32(unsafe.Sizeof(Tokenmandatorylabel{})) + GetLengthSid(tml.Label.Sid)
+}
+
+// Authorization Functions
+//sys	checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership
+//sys	isTokenRestricted(tokenHandle Token) (ret bool, err error) [!failretval] = advapi32.IsTokenRestricted
+//sys	OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
+//sys	OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken
+//sys	ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf
+//sys	RevertToSelf() (err error) = advapi32.RevertToSelf
+//sys	SetThreadToken(thread *Handle, token Token) (err error) = advapi32.SetThreadToken
+//sys	LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) = advapi32.LookupPrivilegeValueW
+//sys	AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) = advapi32.AdjustTokenPrivileges
+//sys	AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) = advapi32.AdjustTokenGroups
+//sys	GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
+//sys	SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) = advapi32.SetTokenInformation
+//sys	DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx
+//sys	GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW
+//sys	getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW
+//sys	getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW
+//sys	getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW
+
+// An access token contains the security information for a logon session.
+// The system creates an access token when a user logs on, and every
+// process executed on behalf of the user has a copy of the token.
+// The token identifies the user, the user's groups, and the user's
+// privileges. The system uses the token to control access to securable
+// objects and to control the ability of the user to perform various
+// system-related operations on the local computer.
+type Token Handle
+
+// OpenCurrentProcessToken opens an access token associated with the current
+// process with TOKEN_QUERY access. It is a real token that needs to be closed.
+//
+// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...)
+// with the desired access instead, or use GetCurrentProcessToken for a
+// TOKEN_QUERY token.
+func OpenCurrentProcessToken() (Token, error) {
+	var token Token
+	err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token)
+	return token, err
+}
+
+// GetCurrentProcessToken returns the access token associated with
+// the current process. It is a pseudo token that does not need
+// to be closed.
+func GetCurrentProcessToken() Token {
+	return Token(^uintptr(4 - 1))
+}
+
+// GetCurrentThreadToken returns the access token associated with
+// the current thread. It is a pseudo token that does not need
+// to be closed.
+func GetCurrentThreadToken() Token {
+	return Token(^uintptr(5 - 1))
+}
+
+// GetCurrentThreadEffectiveToken returns the effective access token
+// associated with the current thread. It is a pseudo token that does
+// not need to be closed.
+func GetCurrentThreadEffectiveToken() Token {
+	return Token(^uintptr(6 - 1))
+}
+
+// Close releases access to the access token.
+func (t Token) Close() error {
+	return CloseHandle(Handle(t))
+}
+
+// getInfo retrieves a specified type of information about an access token.
+func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) {
+	n := uint32(initSize)
+	for {
+		b := make([]byte, n)
+		e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
+		if e == nil {
+			return unsafe.Pointer(&b[0]), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, e
+		}
+	}
+}
+
+// GetTokenUser retrieves the user account information of access token t.
+func (t Token) GetTokenUser() (*Tokenuser, error) {
+	i, e := t.getInfo(TokenUser, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokenuser)(i), nil
+}
+
+// GetTokenGroups retrieves group accounts associated with access token t.
+func (t Token) GetTokenGroups() (*Tokengroups, error) {
+	i, e := t.getInfo(TokenGroups, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokengroups)(i), nil
+}
+
+// GetTokenPrimaryGroup retrieves the primary group information of access
+// token t: a pointer to a SID structure representing a group that will
+// become the primary group of any objects created by a process using this
+// access token.
+func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) {
+	i, e := t.getInfo(TokenPrimaryGroup, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokenprimarygroup)(i), nil
+}
+
+// GetUserProfileDirectory retrieves the path to the root directory of the
+// profile of the user associated with access token t.
+func (t Token) GetUserProfileDirectory() (string, error) {
+	n := uint32(100)
+	for {
+		b := make([]uint16, n)
+		e := GetUserProfileDirectory(t, &b[0], &n)
+		if e == nil {
+			return UTF16ToString(b), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", e
+		}
+		if n <= uint32(len(b)) {
+			return "", e
+		}
+	}
+}
+
+// IsElevated returns whether the token is elevated from a UAC perspective.
+func (token Token) IsElevated() bool {
+	var isElevated uint32
+	var outLen uint32
+	err := GetTokenInformation(token, TokenElevation, (*byte)(unsafe.Pointer(&isElevated)), uint32(unsafe.Sizeof(isElevated)), &outLen)
+	if err != nil {
+		return false
+	}
+	return outLen == uint32(unsafe.Sizeof(isElevated)) && isElevated != 0
+}
+
+// GetLinkedToken returns the linked token, which may be an elevated UAC token.
+func (token Token) GetLinkedToken() (Token, error) {
+	var linkedToken Token
+	var outLen uint32
+	err := GetTokenInformation(token, TokenLinkedToken, (*byte)(unsafe.Pointer(&linkedToken)), uint32(unsafe.Sizeof(linkedToken)), &outLen)
+	if err != nil {
+		return Token(0), err
+	}
+	return linkedToken, nil
+}
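+
+// Sketch: a filtered (non-elevated) token can be used to reach its
+// elevated counterpart (illustrative only; the linked token must be
+// closed by the caller):
+//
+//	t := windows.GetCurrentProcessToken()
+//	if !t.IsElevated() {
+//		if linked, err := t.GetLinkedToken(); err == nil {
+//			defer linked.Close()
+//			// inspect linked ...
+//		}
+//	}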
+
+// GetSystemDirectory retrieves the path to the current location of the
+// system directory, which is typically, though not always, `C:\Windows\System32`.
+func GetSystemDirectory() (string, error) {
+	n := uint32(MAX_PATH)
+	for {
+		b := make([]uint16, n)
+		l, e := getSystemDirectory(&b[0], n)
+		if e != nil {
+			return "", e
+		}
+		if l <= n {
+			return UTF16ToString(b[:l]), nil
+		}
+		n = l
+	}
+}
+
+// GetWindowsDirectory retrieves the path to the current location of the
+// Windows directory, which is typically, though not always, `C:\Windows`.
+// This may be a private user directory when the application is running
+// under a terminal server.
+func GetWindowsDirectory() (string, error) {
+	n := uint32(MAX_PATH)
+	for {
+		b := make([]uint16, n)
+		l, e := getWindowsDirectory(&b[0], n)
+		if e != nil {
+			return "", e
+		}
+		if l <= n {
+			return UTF16ToString(b[:l]), nil
+		}
+		n = l
+	}
+}
+
+// GetSystemWindowsDirectory retrieves the path to the current location of
+// the Windows directory, which is typically, though not always, `C:\Windows`.
+func GetSystemWindowsDirectory() (string, error) {
+	n := uint32(MAX_PATH)
+	for {
+		b := make([]uint16, n)
+		l, e := getSystemWindowsDirectory(&b[0], n)
+		if e != nil {
+			return "", e
+		}
+		if l <= n {
+			return UTF16ToString(b[:l]), nil
+		}
+		n = l
+	}
+}
+
+// IsMember reports whether the access token t is a member of the provided SID.
+func (t Token) IsMember(sid *SID) (bool, error) {
+	var b int32
+	if e := checkTokenMembership(t, sid, &b); e != nil {
+		return false, e
+	}
+	return b != 0, nil
+}
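+
+// Sketch (illustrative; per the Windows docs, CheckTokenMembership expects
+// an impersonation token, so a primary token may need duplicating first):
+//
+//	admins, _ := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
+//	isAdmin, err := token.IsMember(admins)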
+
+// IsRestricted reports whether the access token t is a restricted token.
+func (t Token) IsRestricted() (isRestricted bool, err error) {
+	isRestricted, err = isTokenRestricted(t)
+	if !isRestricted && err == syscall.EINVAL {
+		// If err is EINVAL, the underlying call returned ERROR_SUCCESS,
+		// indicating a non-restricted token.
+		err = nil
+	}
+	return
+}
+
+const (
+	WTS_CONSOLE_CONNECT        = 0x1
+	WTS_CONSOLE_DISCONNECT     = 0x2
+	WTS_REMOTE_CONNECT         = 0x3
+	WTS_REMOTE_DISCONNECT      = 0x4
+	WTS_SESSION_LOGON          = 0x5
+	WTS_SESSION_LOGOFF         = 0x6
+	WTS_SESSION_LOCK           = 0x7
+	WTS_SESSION_UNLOCK         = 0x8
+	WTS_SESSION_REMOTE_CONTROL = 0x9
+	WTS_SESSION_CREATE         = 0xa
+	WTS_SESSION_TERMINATE      = 0xb
+)
+
+const (
+	WTSActive       = 0
+	WTSConnected    = 1
+	WTSConnectQuery = 2
+	WTSShadow       = 3
+	WTSDisconnected = 4
+	WTSIdle         = 5
+	WTSListen       = 6
+	WTSReset        = 7
+	WTSDown         = 8
+	WTSInit         = 9
+)
+
+type WTSSESSION_NOTIFICATION struct {
+	Size      uint32
+	SessionID uint32
+}
+
+type WTS_SESSION_INFO struct {
+	SessionID         uint32
+	WindowStationName *uint16
+	State             uint32
+}
+
+//sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken
+//sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW
+//sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory
+//sys WTSGetActiveConsoleSessionId() (sessionID uint32)
+
+type ACL struct {
+	aclRevision byte
+	sbz1        byte
+	aclSize     uint16
+	AceCount    uint16
+	sbz2        uint16
+}
+
+type SECURITY_DESCRIPTOR struct {
+	revision byte
+	sbz1     byte
+	control  SECURITY_DESCRIPTOR_CONTROL
+	owner    *SID
+	group    *SID
+	sacl     *ACL
+	dacl     *ACL
+}
+
+type SECURITY_QUALITY_OF_SERVICE struct {
+	Length              uint32
+	ImpersonationLevel  uint32
+	ContextTrackingMode byte
+	EffectiveOnly       byte
+}
+
+// Constants for the ContextTrackingMode field of SECURITY_QUALITY_OF_SERVICE.
+const (
+	SECURITY_STATIC_TRACKING  = 0
+	SECURITY_DYNAMIC_TRACKING = 1
+)
+
+type SecurityAttributes struct {
+	Length             uint32
+	SecurityDescriptor *SECURITY_DESCRIPTOR
+	InheritHandle      uint32
+}
+
+type SE_OBJECT_TYPE uint32
+
+// Constants for type SE_OBJECT_TYPE
+const (
+	SE_UNKNOWN_OBJECT_TYPE     = 0
+	SE_FILE_OBJECT             = 1
+	SE_SERVICE                 = 2
+	SE_PRINTER                 = 3
+	SE_REGISTRY_KEY            = 4
+	SE_LMSHARE                 = 5
+	SE_KERNEL_OBJECT           = 6
+	SE_WINDOW_OBJECT           = 7
+	SE_DS_OBJECT               = 8
+	SE_DS_OBJECT_ALL           = 9
+	SE_PROVIDER_DEFINED_OBJECT = 10
+	SE_WMIGUID_OBJECT          = 11
+	SE_REGISTRY_WOW64_32KEY    = 12
+	SE_REGISTRY_WOW64_64KEY    = 13
+)
+
+type SECURITY_INFORMATION uint32
+
+// Constants for type SECURITY_INFORMATION
+const (
+	OWNER_SECURITY_INFORMATION            = 0x00000001
+	GROUP_SECURITY_INFORMATION            = 0x00000002
+	DACL_SECURITY_INFORMATION             = 0x00000004
+	SACL_SECURITY_INFORMATION             = 0x00000008
+	LABEL_SECURITY_INFORMATION            = 0x00000010
+	ATTRIBUTE_SECURITY_INFORMATION        = 0x00000020
+	SCOPE_SECURITY_INFORMATION            = 0x00000040
+	BACKUP_SECURITY_INFORMATION           = 0x00010000
+	PROTECTED_DACL_SECURITY_INFORMATION   = 0x80000000
+	PROTECTED_SACL_SECURITY_INFORMATION   = 0x40000000
+	UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000
+	UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000
+)
+
+type SECURITY_DESCRIPTOR_CONTROL uint16
+
+// Constants for type SECURITY_DESCRIPTOR_CONTROL
+const (
+	SE_OWNER_DEFAULTED       = 0x0001
+	SE_GROUP_DEFAULTED       = 0x0002
+	SE_DACL_PRESENT          = 0x0004
+	SE_DACL_DEFAULTED        = 0x0008
+	SE_SACL_PRESENT          = 0x0010
+	SE_SACL_DEFAULTED        = 0x0020
+	SE_DACL_AUTO_INHERIT_REQ = 0x0100
+	SE_SACL_AUTO_INHERIT_REQ = 0x0200
+	SE_DACL_AUTO_INHERITED   = 0x0400
+	SE_SACL_AUTO_INHERITED   = 0x0800
+	SE_DACL_PROTECTED        = 0x1000
+	SE_SACL_PROTECTED        = 0x2000
+	SE_RM_CONTROL_VALID      = 0x4000
+	SE_SELF_RELATIVE         = 0x8000
+)
+
+type ACCESS_MASK uint32
+
+// Constants for type ACCESS_MASK
+const (
+	DELETE                   = 0x00010000
+	READ_CONTROL             = 0x00020000
+	WRITE_DAC                = 0x00040000
+	WRITE_OWNER              = 0x00080000
+	SYNCHRONIZE              = 0x00100000
+	STANDARD_RIGHTS_REQUIRED = 0x000F0000
+	STANDARD_RIGHTS_READ     = READ_CONTROL
+	STANDARD_RIGHTS_WRITE    = READ_CONTROL
+	STANDARD_RIGHTS_EXECUTE  = READ_CONTROL
+	STANDARD_RIGHTS_ALL      = 0x001F0000
+	SPECIFIC_RIGHTS_ALL      = 0x0000FFFF
+	ACCESS_SYSTEM_SECURITY   = 0x01000000
+	MAXIMUM_ALLOWED          = 0x02000000
+	GENERIC_READ             = 0x80000000
+	GENERIC_WRITE            = 0x40000000
+	GENERIC_EXECUTE          = 0x20000000
+	GENERIC_ALL              = 0x10000000
+)
+
+type ACCESS_MODE uint32
+
+// Constants for type ACCESS_MODE
+const (
+	NOT_USED_ACCESS   = 0
+	GRANT_ACCESS      = 1
+	SET_ACCESS        = 2
+	DENY_ACCESS       = 3
+	REVOKE_ACCESS     = 4
+	SET_AUDIT_SUCCESS = 5
+	SET_AUDIT_FAILURE = 6
+)
+
+// Constants for AceFlags and Inheritance fields
+const (
+	NO_INHERITANCE                     = 0x0
+	SUB_OBJECTS_ONLY_INHERIT           = 0x1
+	SUB_CONTAINERS_ONLY_INHERIT        = 0x2
+	SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3
+	INHERIT_NO_PROPAGATE               = 0x4
+	INHERIT_ONLY                       = 0x8
+	INHERITED_ACCESS_ENTRY             = 0x10
+	INHERITED_PARENT                   = 0x10000000
+	INHERITED_GRANDPARENT              = 0x20000000
+	OBJECT_INHERIT_ACE                 = 0x1
+	CONTAINER_INHERIT_ACE              = 0x2
+	NO_PROPAGATE_INHERIT_ACE           = 0x4
+	INHERIT_ONLY_ACE                   = 0x8
+	INHERITED_ACE                      = 0x10
+	VALID_INHERIT_FLAGS                = 0x1F
+)
+
+type MULTIPLE_TRUSTEE_OPERATION uint32
+
+// Constants for MULTIPLE_TRUSTEE_OPERATION
+const (
+	NO_MULTIPLE_TRUSTEE    = 0
+	TRUSTEE_IS_IMPERSONATE = 1
+)
+
+type TRUSTEE_FORM uint32
+
+// Constants for TRUSTEE_FORM
+const (
+	TRUSTEE_IS_SID              = 0
+	TRUSTEE_IS_NAME             = 1
+	TRUSTEE_BAD_FORM            = 2
+	TRUSTEE_IS_OBJECTS_AND_SID  = 3
+	TRUSTEE_IS_OBJECTS_AND_NAME = 4
+)
+
+type TRUSTEE_TYPE uint32
+
+// Constants for TRUSTEE_TYPE
+const (
+	TRUSTEE_IS_UNKNOWN          = 0
+	TRUSTEE_IS_USER             = 1
+	TRUSTEE_IS_GROUP            = 2
+	TRUSTEE_IS_DOMAIN           = 3
+	TRUSTEE_IS_ALIAS            = 4
+	TRUSTEE_IS_WELL_KNOWN_GROUP = 5
+	TRUSTEE_IS_DELETED          = 6
+	TRUSTEE_IS_INVALID          = 7
+	TRUSTEE_IS_COMPUTER         = 8
+)
+
+// Constants for ObjectsPresent field
+const (
+	ACE_OBJECT_TYPE_PRESENT           = 0x1
+	ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2
+)
+
+type EXPLICIT_ACCESS struct {
+	AccessPermissions ACCESS_MASK
+	AccessMode        ACCESS_MODE
+	Inheritance       uint32
+	Trustee           TRUSTEE
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+type ACE_HEADER struct {
+	AceType  uint8
+	AceFlags uint8
+	AceSize  uint16
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace
+type ACCESS_ALLOWED_ACE struct {
+	Header   ACE_HEADER
+	Mask     ACCESS_MASK
+	SidStart uint32
+}
+
+const (
+	// Constants for AceType
+	// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+	ACCESS_ALLOWED_ACE_TYPE = 0
+	ACCESS_DENIED_ACE_TYPE  = 1
+)
+
+// This type is the union inside TRUSTEE and must be created using one of the TrusteeValueFrom* functions.
+type TrusteeValue uintptr
+
+func TrusteeValueFromString(str string) TrusteeValue {
+	return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str)))
+}
+func TrusteeValueFromSID(sid *SID) TrusteeValue {
+	return TrusteeValue(unsafe.Pointer(sid))
+}
+func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue {
+	return TrusteeValue(unsafe.Pointer(objectsAndSid))
+}
+func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue {
+	return TrusteeValue(unsafe.Pointer(objectsAndName))
+}
+
+type TRUSTEE struct {
+	MultipleTrustee          *TRUSTEE
+	MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION
+	TrusteeForm              TRUSTEE_FORM
+	TrusteeType              TRUSTEE_TYPE
+	TrusteeValue             TrusteeValue
+}
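+
+// Illustrative sketch (not part of the API): a TRUSTEE identifying a SID can
+// be built with TrusteeValueFromSID, assuming sid is a valid *SID:
+//
+//	t := TRUSTEE{
+//		TrusteeForm:  TRUSTEE_IS_SID,
+//		TrusteeType:  TRUSTEE_IS_WELL_KNOWN_GROUP,
+//		TrusteeValue: TrusteeValueFromSID(sid),
+//	}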
+
+type OBJECTS_AND_SID struct {
+	ObjectsPresent          uint32
+	ObjectTypeGuid          GUID
+	InheritedObjectTypeGuid GUID
+	Sid                     *SID
+}
+
+type OBJECTS_AND_NAME struct {
+	ObjectsPresent          uint32
+	ObjectType              SE_OBJECT_TYPE
+	ObjectTypeName          *uint16
+	InheritedObjectTypeName *uint16
+	Name                    *uint16
+}
+
+//sys	getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo
+//sys	SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetSecurityInfo
+//sys	getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW
+//sys	SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW
+//sys	SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) = advapi32.SetKernelObjectSecurity
+
+//sys	buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW
+//sys	initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = advapi32.InitializeSecurityDescriptor
+
+//sys	getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl
+//sys	getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl
+//sys	getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl
+//sys	getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner
+//sys	getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup
+//sys	getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength
+//sys	getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = advapi32.GetSecurityDescriptorRMControl
+//sys	isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor
+
+//sys	setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl
+//sys	setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl
+//sys	setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl
+//sys	setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner
+//sys	setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup
+//sys	setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl
+
+//sys	convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW
+//sys	convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW
+
+//sys	makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD
+//sys	makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD
+
+//sys	setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW
+//sys	GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) = advapi32.GetAce
+
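+// GetAce's index-based API allows walking an ACL's ACEs. An illustrative
+// sketch (assuming acl is non-nil and every ACE is an ACCESS_ALLOWED_ACE;
+// the loop stops at the first out-of-range index, which GetAce reports as
+// an error):
+//
+//	var ace *ACCESS_ALLOWED_ACE
+//	for i := uint32(0); GetAce(acl, i, &ace) == nil; i++ {
+//		_ = ace.Mask // inspect the access mask of each ACE
+//	}
+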
+// Control returns the security descriptor control bits.
+func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) {
+	err = getSecurityDescriptorControl(sd, &control, &revision)
+	return
+}
+
+// SetControl sets the security descriptor control bits.
+func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error {
+	return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet)
+}
+
+// RMControl returns the security descriptor resource manager control bits.
+func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) {
+	err = getSecurityDescriptorRMControl(sd, &control)
+	return
+}
+
+// SetRMControl sets the security descriptor resource manager control bits.
+func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) {
+	setSecurityDescriptorRMControl(sd, &rmControl)
+}
+
+// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil
+// if the security descriptor carries a NULL DACL, which grants full access to everyone. If the DACL does not
+// exist, err returns ERROR_OBJECT_NOT_FOUND.
+func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) {
+	var present bool
+	err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted)
+	if !present {
+		err = ERROR_OBJECT_NOT_FOUND
+	}
+	return
+}
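+
+// A caller can distinguish "no DACL" from "NULL DACL" like so (illustrative
+// sketch, assuming sd is a valid *SECURITY_DESCRIPTOR):
+//
+//	dacl, _, err := sd.DACL()
+//	switch {
+//	case err == ERROR_OBJECT_NOT_FOUND:
+//		// The descriptor has no DACL at all.
+//	case err == nil && dacl == nil:
+//		// NULL DACL: full access for everyone.
+//	}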
+
+// SetDACL sets the absolute security descriptor DACL.
+func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error {
+	return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted)
+}
+
+// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil
+// if the security descriptor carries a NULL SACL, meaning no auditing takes place. If the SACL does not
+// exist, err returns ERROR_OBJECT_NOT_FOUND.
+func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) {
+	var present bool
+	err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted)
+	if !present {
+		err = ERROR_OBJECT_NOT_FOUND
+	}
+	return
+}
+
+// SetSACL sets the absolute security descriptor SACL.
+func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error {
+	return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted)
+}
+
+// Owner returns the security descriptor owner and whether it was defaulted.
+func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) {
+	err = getSecurityDescriptorOwner(sd, &owner, &defaulted)
+	return
+}
+
+// SetOwner sets the absolute security descriptor owner.
+func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error {
+	return setSecurityDescriptorOwner(absoluteSD, owner, defaulted)
+}
+
+// Group returns the security descriptor group and whether it was defaulted.
+func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) {
+	err = getSecurityDescriptorGroup(sd, &group, &defaulted)
+	return
+}
+
+// SetGroup sets the absolute security descriptor group.
+func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error {
+	return setSecurityDescriptorGroup(absoluteSD, group, defaulted)
+}
+
+// Length returns the length of the security descriptor.
+func (sd *SECURITY_DESCRIPTOR) Length() uint32 {
+	return getSecurityDescriptorLength(sd)
+}
+
+// IsValid returns whether the security descriptor is valid.
+func (sd *SECURITY_DESCRIPTOR) IsValid() bool {
+	return isValidSecurityDescriptor(sd)
+}
+
+// String returns the SDDL form of the security descriptor. Because it satisfies fmt.Stringer, a
+// *SECURITY_DESCRIPTOR can be used directly with %v formatting directives.
+func (sd *SECURITY_DESCRIPTOR) String() string {
+	var sddl *uint16
+	err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil)
+	if err != nil {
+		return ""
+	}
+	defer LocalFree(Handle(unsafe.Pointer(sddl)))
+	return UTF16PtrToString(sddl)
+}
+
+// ToAbsolute converts a self-relative security descriptor into an absolute one.
+func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) {
+	control, _, err := selfRelativeSD.Control()
+	if err != nil {
+		return
+	}
+	if control&SE_SELF_RELATIVE == 0 {
+		err = ERROR_INVALID_PARAMETER
+		return
+	}
+	var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32
+	err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize,
+		nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize)
+	switch err {
+	case ERROR_INSUFFICIENT_BUFFER:
+	case nil:
+		// The probe call above is expected to fail with ERROR_INSUFFICIENT_BUFFER; plain success is unexpected.
+		return nil, ERROR_INTERNAL_ERROR
+	default:
+		return nil, err
+	}
+	if absoluteSDSize > 0 {
+		absoluteSD = new(SECURITY_DESCRIPTOR)
+		if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) {
+			panic("sizeof(SECURITY_DESCRIPTOR) too small")
+		}
+	}
+	var (
+		dacl  *ACL
+		sacl  *ACL
+		owner *SID
+		group *SID
+	)
+	if daclSize > 0 {
+		dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize))))
+	}
+	if saclSize > 0 {
+		sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize))))
+	}
+	if ownerSize > 0 {
+		owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize))))
+	}
+	if groupSize > 0 {
+		group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize))))
+	}
+	// We call into Windows via makeAbsoluteSD, which sets up
+	// pointers within absoluteSD that point to other chunks of memory
+	// we pass into makeAbsoluteSD, and that happens outside the view of the GC.
+	// We therefore take some care here to then verify the pointers are as we expect
+	// and set them explicitly in view of the GC. See https://go.dev/issue/73199.
+	// TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575.
+	err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize,
+		dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize)
+	if err != nil {
+		// Don't return absoluteSD, which might be partially initialized.
+		return nil, err
+	}
+	// Before using any fields, verify absoluteSD is in the format we expect according to Windows.
+	// See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors
+	absControl, _, err := absoluteSD.Control()
+	if err != nil {
+		panic("absoluteSD: " + err.Error())
+	}
+	if absControl&SE_SELF_RELATIVE != 0 {
+		panic("absoluteSD not in absolute format")
+	}
+	if absoluteSD.dacl != dacl {
+		panic("dacl pointer mismatch")
+	}
+	if absoluteSD.sacl != sacl {
+		panic("sacl pointer mismatch")
+	}
+	if absoluteSD.owner != owner {
+		panic("owner pointer mismatch")
+	}
+	if absoluteSD.group != group {
+		panic("group pointer mismatch")
+	}
+	absoluteSD.dacl = dacl
+	absoluteSD.sacl = sacl
+	absoluteSD.owner = owner
+	absoluteSD.group = group
+
+	return
+}
+
+// ToSelfRelative converts an absolute security descriptor into a self-relative one.
+func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) {
+	control, _, err := absoluteSD.Control()
+	if err != nil {
+		return
+	}
+	if control&SE_SELF_RELATIVE != 0 {
+		err = ERROR_INVALID_PARAMETER
+		return
+	}
+	var selfRelativeSDSize uint32
+	err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize)
+	switch err {
+	case ERROR_INSUFFICIENT_BUFFER:
+	case nil:
+		// The probe call above is expected to fail with ERROR_INSUFFICIENT_BUFFER; plain success is unexpected.
+		return nil, ERROR_INTERNAL_ERROR
+	default:
+		return nil, err
+	}
+	if selfRelativeSDSize > 0 {
+		selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(unsafe.SliceData(make([]byte, selfRelativeSDSize))))
+	}
+	err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize)
+	return
+}
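+
+// The two conversions are inverses, so a round trip is possible (illustrative
+// sketch, error handling elided):
+//
+//	absSD, _ := sd.ToAbsolute()        // self-relative -> absolute
+//	relSD, _ := absSD.ToSelfRelative() // absolute -> self-relative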
+
+func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR {
+	sdLen := int(selfRelativeSD.Length())
+	const min = int(unsafe.Sizeof(SECURITY_DESCRIPTOR{}))
+	if sdLen < min {
+		sdLen = min
+	}
+
+	src := unsafe.Slice((*byte)(unsafe.Pointer(selfRelativeSD)), sdLen)
+	// SECURITY_DESCRIPTOR has pointers in it, which means checkptr expects for it to
+	// be aligned properly. When we're copying a Windows-allocated struct to a
+	// Go-allocated one, make sure that the Go allocation is aligned to the
+	// pointer size.
+	const psize = int(unsafe.Sizeof(uintptr(0)))
+	alloc := make([]uintptr, (sdLen+psize-1)/psize)
+	dst := unsafe.Slice((*byte)(unsafe.Pointer(&alloc[0])), sdLen)
+	copy(dst, src)
+	return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&dst[0]))
+}
+
+// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a
+// self-relative security descriptor object allocated on the Go heap.
+func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) {
+	var winHeapSD *SECURITY_DESCRIPTOR
+	err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil)
+	if err != nil {
+		return
+	}
+	defer LocalFree(Handle(unsafe.Pointer(winHeapSD)))
+	return winHeapSD.copySelfRelativeSecurityDescriptor(), nil
+}
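+
+// For example, an SDDL string granting GENERIC_ALL to the local SYSTEM account
+// can be parsed like so (illustrative sketch):
+//
+//	sd, err := SecurityDescriptorFromString("D:(A;;GA;;;SY)")
+//	if err == nil {
+//		_ = sd.String() // round-trips the descriptor back to SDDL
+//	}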
+
+// GetSecurityInfo queries the security information for a given handle and returns the self-relative security
+// descriptor result on the Go heap.
+func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) {
+	var winHeapSD *SECURITY_DESCRIPTOR
+	err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD)
+	if err != nil {
+		return
+	}
+	defer LocalFree(Handle(unsafe.Pointer(winHeapSD)))
+	return winHeapSD.copySelfRelativeSecurityDescriptor(), nil
+}
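+
+// An illustrative sketch querying a file handle's owner (assumes h is an open
+// file Handle, and uses the SE_FILE_OBJECT and OWNER_SECURITY_INFORMATION
+// constants declared elsewhere in this package):
+//
+//	sd, err := GetSecurityInfo(h, SE_FILE_OBJECT, OWNER_SECURITY_INFORMATION)
+//	if err == nil {
+//		owner, _, _ := sd.Owner()
+//		_ = owner // the owning *SID
+//	}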
+
+// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security
+// descriptor result on the Go heap.
+func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) {
+	var winHeapSD *SECURITY_DESCRIPTOR
+	err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD)
+	if err != nil {
+		return
+	}
+	defer LocalFree(Handle(unsafe.Pointer(winHeapSD)))
+	return winHeapSD.copySelfRelativeSecurityDescriptor(), nil
+}
+
+// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and
+// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor
+// result on the Go heap.
+func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) {
+	var winHeapSD *SECURITY_DESCRIPTOR
+	var winHeapSDSize uint32
+	var firstAccessEntry *EXPLICIT_ACCESS
+	if len(accessEntries) > 0 {
+		firstAccessEntry = &accessEntries[0]
+	}
+	var firstAuditEntry *EXPLICIT_ACCESS
+	if len(auditEntries) > 0 {
+		firstAuditEntry = &auditEntries[0]
+	}
+	err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD)
+	if err != nil {
+		return
+	}
+	defer LocalFree(Handle(unsafe.Pointer(winHeapSD)))
+	return winHeapSD.copySelfRelativeSecurityDescriptor(), nil
+}
+
+// NewSecurityDescriptor creates and initializes a new absolute security descriptor.
+func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) {
+	absoluteSD = &SECURITY_DESCRIPTOR{}
+	err = initializeSecurityDescriptor(absoluteSD, 1)
+	return
+}
+
+// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL.
+// Both explicitEntries and mergedACL are optional and can be nil.
+func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) {
+	var firstExplicitEntry *EXPLICIT_ACCESS
+	if len(explicitEntries) > 0 {
+		firstExplicitEntry = &explicitEntries[0]
+	}
+	var winHeapACL *ACL
+	err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL)
+	if err != nil {
+		return
+	}
+	defer LocalFree(Handle(unsafe.Pointer(winHeapACL)))
+	aclBytes := make([]byte, winHeapACL.aclSize)
+	copy(aclBytes, unsafe.Slice((*byte)(unsafe.Pointer(winHeapACL)), len(aclBytes)))
+	return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil
+}
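+
+// An illustrative sketch tying these pieces together: grant GENERIC_READ to
+// Everyone in a fresh absolute security descriptor (error handling elided;
+// CreateWellKnownSid and WinWorldSid are declared elsewhere in this package):
+//
+//	everyone, _ := CreateWellKnownSid(WinWorldSid)
+//	acl, _ := ACLFromEntries([]EXPLICIT_ACCESS{{
+//		AccessPermissions: GENERIC_READ,
+//		AccessMode:        GRANT_ACCESS,
+//		Inheritance:       NO_INHERITANCE,
+//		Trustee: TRUSTEE{
+//			TrusteeForm:  TRUSTEE_IS_SID,
+//			TrusteeType:  TRUSTEE_IS_WELL_KNOWN_GROUP,
+//			TrusteeValue: TrusteeValueFromSID(everyone),
+//		},
+//	}}, nil)
+//	absSD, _ := NewSecurityDescriptor()
+//	_ = absSD.SetDACL(acl, true, false)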
diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go
new file mode 100644
index 0000000..a9dc630
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/service.go
@@ -0,0 +1,257 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package windows
+
+const (
+	SC_MANAGER_CONNECT            = 1
+	SC_MANAGER_CREATE_SERVICE     = 2
+	SC_MANAGER_ENUMERATE_SERVICE  = 4
+	SC_MANAGER_LOCK               = 8
+	SC_MANAGER_QUERY_LOCK_STATUS  = 16
+	SC_MANAGER_MODIFY_BOOT_CONFIG = 32
+	SC_MANAGER_ALL_ACCESS         = 0xf003f
+)
+
+const (
+	SERVICE_KERNEL_DRIVER       = 1
+	SERVICE_FILE_SYSTEM_DRIVER  = 2
+	SERVICE_ADAPTER             = 4
+	SERVICE_RECOGNIZER_DRIVER   = 8
+	SERVICE_WIN32_OWN_PROCESS   = 16
+	SERVICE_WIN32_SHARE_PROCESS = 32
+	SERVICE_WIN32               = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS
+	SERVICE_INTERACTIVE_PROCESS = 256
+	SERVICE_DRIVER              = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER
+	SERVICE_TYPE_ALL            = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS
+
+	SERVICE_BOOT_START   = 0
+	SERVICE_SYSTEM_START = 1
+	SERVICE_AUTO_START   = 2
+	SERVICE_DEMAND_START = 3
+	SERVICE_DISABLED     = 4
+
+	SERVICE_ERROR_IGNORE   = 0
+	SERVICE_ERROR_NORMAL   = 1
+	SERVICE_ERROR_SEVERE   = 2
+	SERVICE_ERROR_CRITICAL = 3
+
+	SC_STATUS_PROCESS_INFO = 0
+
+	SC_ACTION_NONE        = 0
+	SC_ACTION_RESTART     = 1
+	SC_ACTION_REBOOT      = 2
+	SC_ACTION_RUN_COMMAND = 3
+
+	SERVICE_STOPPED          = 1
+	SERVICE_START_PENDING    = 2
+	SERVICE_STOP_PENDING     = 3
+	SERVICE_RUNNING          = 4
+	SERVICE_CONTINUE_PENDING = 5
+	SERVICE_PAUSE_PENDING    = 6
+	SERVICE_PAUSED           = 7
+	SERVICE_NO_CHANGE        = 0xffffffff
+
+	SERVICE_ACCEPT_STOP                  = 1
+	SERVICE_ACCEPT_PAUSE_CONTINUE        = 2
+	SERVICE_ACCEPT_SHUTDOWN              = 4
+	SERVICE_ACCEPT_PARAMCHANGE           = 8
+	SERVICE_ACCEPT_NETBINDCHANGE         = 16
+	SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32
+	SERVICE_ACCEPT_POWEREVENT            = 64
+	SERVICE_ACCEPT_SESSIONCHANGE         = 128
+	SERVICE_ACCEPT_PRESHUTDOWN           = 256
+
+	SERVICE_CONTROL_STOP                  = 1
+	SERVICE_CONTROL_PAUSE                 = 2
+	SERVICE_CONTROL_CONTINUE              = 3
+	SERVICE_CONTROL_INTERROGATE           = 4
+	SERVICE_CONTROL_SHUTDOWN              = 5
+	SERVICE_CONTROL_PARAMCHANGE           = 6
+	SERVICE_CONTROL_NETBINDADD            = 7
+	SERVICE_CONTROL_NETBINDREMOVE         = 8
+	SERVICE_CONTROL_NETBINDENABLE         = 9
+	SERVICE_CONTROL_NETBINDDISABLE        = 10
+	SERVICE_CONTROL_DEVICEEVENT           = 11
+	SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12
+	SERVICE_CONTROL_POWEREVENT            = 13
+	SERVICE_CONTROL_SESSIONCHANGE         = 14
+	SERVICE_CONTROL_PRESHUTDOWN           = 15
+
+	SERVICE_ACTIVE    = 1
+	SERVICE_INACTIVE  = 2
+	SERVICE_STATE_ALL = 3
+
+	SERVICE_QUERY_CONFIG         = 1
+	SERVICE_CHANGE_CONFIG        = 2
+	SERVICE_QUERY_STATUS         = 4
+	SERVICE_ENUMERATE_DEPENDENTS = 8
+	SERVICE_START                = 16
+	SERVICE_STOP                 = 32
+	SERVICE_PAUSE_CONTINUE       = 64
+	SERVICE_INTERROGATE          = 128
+	SERVICE_USER_DEFINED_CONTROL = 256
+	SERVICE_ALL_ACCESS           = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL
+
+	SERVICE_RUNS_IN_SYSTEM_PROCESS = 1
+
+	SERVICE_CONFIG_DESCRIPTION              = 1
+	SERVICE_CONFIG_FAILURE_ACTIONS          = 2
+	SERVICE_CONFIG_DELAYED_AUTO_START_INFO  = 3
+	SERVICE_CONFIG_FAILURE_ACTIONS_FLAG     = 4
+	SERVICE_CONFIG_SERVICE_SID_INFO         = 5
+	SERVICE_CONFIG_REQUIRED_PRIVILEGES_INFO = 6
+	SERVICE_CONFIG_PRESHUTDOWN_INFO         = 7
+	SERVICE_CONFIG_TRIGGER_INFO             = 8
+	SERVICE_CONFIG_PREFERRED_NODE           = 9
+	SERVICE_CONFIG_LAUNCH_PROTECTED         = 12
+
+	SERVICE_SID_TYPE_NONE         = 0
+	SERVICE_SID_TYPE_UNRESTRICTED = 1
+	SERVICE_SID_TYPE_RESTRICTED   = 2 | SERVICE_SID_TYPE_UNRESTRICTED
+
+	SC_ENUM_PROCESS_INFO = 0
+
+	SERVICE_NOTIFY_STATUS_CHANGE    = 2
+	SERVICE_NOTIFY_STOPPED          = 0x00000001
+	SERVICE_NOTIFY_START_PENDING    = 0x00000002
+	SERVICE_NOTIFY_STOP_PENDING     = 0x00000004
+	SERVICE_NOTIFY_RUNNING          = 0x00000008
+	SERVICE_NOTIFY_CONTINUE_PENDING = 0x00000010
+	SERVICE_NOTIFY_PAUSE_PENDING    = 0x00000020
+	SERVICE_NOTIFY_PAUSED           = 0x00000040
+	SERVICE_NOTIFY_CREATED          = 0x00000080
+	SERVICE_NOTIFY_DELETED          = 0x00000100
+	SERVICE_NOTIFY_DELETE_PENDING   = 0x00000200
+
+	SC_EVENT_DATABASE_CHANGE = 0
+	SC_EVENT_PROPERTY_CHANGE = 1
+	SC_EVENT_STATUS_CHANGE   = 2
+
+	SERVICE_START_REASON_DEMAND             = 0x00000001
+	SERVICE_START_REASON_AUTO               = 0x00000002
+	SERVICE_START_REASON_TRIGGER            = 0x00000004
+	SERVICE_START_REASON_RESTART_ON_FAILURE = 0x00000008
+	SERVICE_START_REASON_DELAYEDAUTO        = 0x00000010
+
+	SERVICE_DYNAMIC_INFORMATION_LEVEL_START_REASON = 1
+)
+
+type ENUM_SERVICE_STATUS struct {
+	ServiceName   *uint16
+	DisplayName   *uint16
+	ServiceStatus SERVICE_STATUS
+}
+
+type SERVICE_STATUS struct {
+	ServiceType             uint32
+	CurrentState            uint32
+	ControlsAccepted        uint32
+	Win32ExitCode           uint32
+	ServiceSpecificExitCode uint32
+	CheckPoint              uint32
+	WaitHint                uint32
+}
+
+type SERVICE_TABLE_ENTRY struct {
+	ServiceName *uint16
+	ServiceProc uintptr
+}
+
+type QUERY_SERVICE_CONFIG struct {
+	ServiceType      uint32
+	StartType        uint32
+	ErrorControl     uint32
+	BinaryPathName   *uint16
+	LoadOrderGroup   *uint16
+	TagId            uint32
+	Dependencies     *uint16
+	ServiceStartName *uint16
+	DisplayName      *uint16
+}
+
+type SERVICE_DESCRIPTION struct {
+	Description *uint16
+}
+
+type SERVICE_DELAYED_AUTO_START_INFO struct {
+	IsDelayedAutoStartUp uint32
+}
+
+type SERVICE_STATUS_PROCESS struct {
+	ServiceType             uint32
+	CurrentState            uint32
+	ControlsAccepted        uint32
+	Win32ExitCode           uint32
+	ServiceSpecificExitCode uint32
+	CheckPoint              uint32
+	WaitHint                uint32
+	ProcessId               uint32
+	ServiceFlags            uint32
+}
+
+type ENUM_SERVICE_STATUS_PROCESS struct {
+	ServiceName          *uint16
+	DisplayName          *uint16
+	ServiceStatusProcess SERVICE_STATUS_PROCESS
+}
+
+type SERVICE_NOTIFY struct {
+	Version               uint32
+	NotifyCallback        uintptr
+	Context               uintptr
+	NotificationStatus    uint32
+	ServiceStatus         SERVICE_STATUS_PROCESS
+	NotificationTriggered uint32
+	ServiceNames          *uint16
+}
+
+type SERVICE_FAILURE_ACTIONS struct {
+	ResetPeriod  uint32
+	RebootMsg    *uint16
+	Command      *uint16
+	ActionsCount uint32
+	Actions      *SC_ACTION
+}
+
+type SERVICE_FAILURE_ACTIONS_FLAG struct {
+	FailureActionsOnNonCrashFailures int32
+}
+
+type SC_ACTION struct {
+	Type  uint32
+	Delay uint32
+}
+
+type QUERY_SERVICE_LOCK_STATUS struct {
+	IsLocked     uint32
+	LockOwner    *uint16
+	LockDuration uint32
+}
+
+//sys	OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW
+//sys	CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle
+//sys	CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW
+//sys	OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW
+//sys	DeleteService(service Handle) (err error) = advapi32.DeleteService
+//sys	StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW
+//sys	QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus
+//sys	QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceLockStatusW
+//sys	ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService
+//sys	StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW
+//sys	SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus
+//sys	ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW
+//sys	QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW
+//sys	ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W
+//sys	QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W
+//sys	EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW
+//sys	QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx
+//sys	NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW
+//sys	SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications?
+//sys	UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications?
+//sys	RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) = advapi32.RegisterServiceCtrlHandlerExW
+//sys	QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) = advapi32.QueryServiceDynamicInformation?
+//sys	EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) = advapi32.EnumDependentServicesW
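+
+// An illustrative sketch (assumes a service named "Spooler" exists): connect
+// to the service control manager, open the service read-only, and query its
+// current state. Error handling elided.
+//
+//	mgr, _ := OpenSCManager(nil, nil, SC_MANAGER_CONNECT)
+//	defer CloseServiceHandle(mgr)
+//	name, _ := UTF16PtrFromString("Spooler")
+//	svc, _ := OpenService(mgr, name, SERVICE_QUERY_STATUS)
+//	defer CloseServiceHandle(svc)
+//	var status SERVICE_STATUS
+//	_ = QueryServiceStatus(svc, &status)
+//	running := status.CurrentState == SERVICE_RUNNING
+//	_ = running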
diff --git a/vendor/golang.org/x/sys/windows/setupapi_windows.go b/vendor/golang.org/x/sys/windows/setupapi_windows.go
new file mode 100644
index 0000000..f812648
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/setupapi_windows.go
@@ -0,0 +1,1425 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"runtime"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+// This file contains functions that wrap SetupAPI.dll and CfgMgr32.dll,
+// core system functions for managing hardware devices, drivers, and the PnP tree.
+// Information about these APIs can be found at:
+//     https://docs.microsoft.com/en-us/windows-hardware/drivers/install/setupapi
+//     https://docs.microsoft.com/en-us/windows/win32/devinst/cfgmgr32-
+
+const (
+	ERROR_EXPECTED_SECTION_NAME                  Errno = 0x20000000 | 0xC0000000 | 0
+	ERROR_BAD_SECTION_NAME_LINE                  Errno = 0x20000000 | 0xC0000000 | 1
+	ERROR_SECTION_NAME_TOO_LONG                  Errno = 0x20000000 | 0xC0000000 | 2
+	ERROR_GENERAL_SYNTAX                         Errno = 0x20000000 | 0xC0000000 | 3
+	ERROR_WRONG_INF_STYLE                        Errno = 0x20000000 | 0xC0000000 | 0x100
+	ERROR_SECTION_NOT_FOUND                      Errno = 0x20000000 | 0xC0000000 | 0x101
+	ERROR_LINE_NOT_FOUND                         Errno = 0x20000000 | 0xC0000000 | 0x102
+	ERROR_NO_BACKUP                              Errno = 0x20000000 | 0xC0000000 | 0x103
+	ERROR_NO_ASSOCIATED_CLASS                    Errno = 0x20000000 | 0xC0000000 | 0x200
+	ERROR_CLASS_MISMATCH                         Errno = 0x20000000 | 0xC0000000 | 0x201
+	ERROR_DUPLICATE_FOUND                        Errno = 0x20000000 | 0xC0000000 | 0x202
+	ERROR_NO_DRIVER_SELECTED                     Errno = 0x20000000 | 0xC0000000 | 0x203
+	ERROR_KEY_DOES_NOT_EXIST                     Errno = 0x20000000 | 0xC0000000 | 0x204
+	ERROR_INVALID_DEVINST_NAME                   Errno = 0x20000000 | 0xC0000000 | 0x205
+	ERROR_INVALID_CLASS                          Errno = 0x20000000 | 0xC0000000 | 0x206
+	ERROR_DEVINST_ALREADY_EXISTS                 Errno = 0x20000000 | 0xC0000000 | 0x207
+	ERROR_DEVINFO_NOT_REGISTERED                 Errno = 0x20000000 | 0xC0000000 | 0x208
+	ERROR_INVALID_REG_PROPERTY                   Errno = 0x20000000 | 0xC0000000 | 0x209
+	ERROR_NO_INF                                 Errno = 0x20000000 | 0xC0000000 | 0x20A
+	ERROR_NO_SUCH_DEVINST                        Errno = 0x20000000 | 0xC0000000 | 0x20B
+	ERROR_CANT_LOAD_CLASS_ICON                   Errno = 0x20000000 | 0xC0000000 | 0x20C
+	ERROR_INVALID_CLASS_INSTALLER                Errno = 0x20000000 | 0xC0000000 | 0x20D
+	ERROR_DI_DO_DEFAULT                          Errno = 0x20000000 | 0xC0000000 | 0x20E
+	ERROR_DI_NOFILECOPY                          Errno = 0x20000000 | 0xC0000000 | 0x20F
+	ERROR_INVALID_HWPROFILE                      Errno = 0x20000000 | 0xC0000000 | 0x210
+	ERROR_NO_DEVICE_SELECTED                     Errno = 0x20000000 | 0xC0000000 | 0x211
+	ERROR_DEVINFO_LIST_LOCKED                    Errno = 0x20000000 | 0xC0000000 | 0x212
+	ERROR_DEVINFO_DATA_LOCKED                    Errno = 0x20000000 | 0xC0000000 | 0x213
+	ERROR_DI_BAD_PATH                            Errno = 0x20000000 | 0xC0000000 | 0x214
+	ERROR_NO_CLASSINSTALL_PARAMS                 Errno = 0x20000000 | 0xC0000000 | 0x215
+	ERROR_FILEQUEUE_LOCKED                       Errno = 0x20000000 | 0xC0000000 | 0x216
+	ERROR_BAD_SERVICE_INSTALLSECT                Errno = 0x20000000 | 0xC0000000 | 0x217
+	ERROR_NO_CLASS_DRIVER_LIST                   Errno = 0x20000000 | 0xC0000000 | 0x218
+	ERROR_NO_ASSOCIATED_SERVICE                  Errno = 0x20000000 | 0xC0000000 | 0x219
+	ERROR_NO_DEFAULT_DEVICE_INTERFACE            Errno = 0x20000000 | 0xC0000000 | 0x21A
+	ERROR_DEVICE_INTERFACE_ACTIVE                Errno = 0x20000000 | 0xC0000000 | 0x21B
+	ERROR_DEVICE_INTERFACE_REMOVED               Errno = 0x20000000 | 0xC0000000 | 0x21C
+	ERROR_BAD_INTERFACE_INSTALLSECT              Errno = 0x20000000 | 0xC0000000 | 0x21D
+	ERROR_NO_SUCH_INTERFACE_CLASS                Errno = 0x20000000 | 0xC0000000 | 0x21E
+	ERROR_INVALID_REFERENCE_STRING               Errno = 0x20000000 | 0xC0000000 | 0x21F
+	ERROR_INVALID_MACHINENAME                    Errno = 0x20000000 | 0xC0000000 | 0x220
+	ERROR_REMOTE_COMM_FAILURE                    Errno = 0x20000000 | 0xC0000000 | 0x221
+	ERROR_MACHINE_UNAVAILABLE                    Errno = 0x20000000 | 0xC0000000 | 0x222
+	ERROR_NO_CONFIGMGR_SERVICES                  Errno = 0x20000000 | 0xC0000000 | 0x223
+	ERROR_INVALID_PROPPAGE_PROVIDER              Errno = 0x20000000 | 0xC0000000 | 0x224
+	ERROR_NO_SUCH_DEVICE_INTERFACE               Errno = 0x20000000 | 0xC0000000 | 0x225
+	ERROR_DI_POSTPROCESSING_REQUIRED             Errno = 0x20000000 | 0xC0000000 | 0x226
+	ERROR_INVALID_COINSTALLER                    Errno = 0x20000000 | 0xC0000000 | 0x227
+	ERROR_NO_COMPAT_DRIVERS                      Errno = 0x20000000 | 0xC0000000 | 0x228
+	ERROR_NO_DEVICE_ICON                         Errno = 0x20000000 | 0xC0000000 | 0x229
+	ERROR_INVALID_INF_LOGCONFIG                  Errno = 0x20000000 | 0xC0000000 | 0x22A
+	ERROR_DI_DONT_INSTALL                        Errno = 0x20000000 | 0xC0000000 | 0x22B
+	ERROR_INVALID_FILTER_DRIVER                  Errno = 0x20000000 | 0xC0000000 | 0x22C
+	ERROR_NON_WINDOWS_NT_DRIVER                  Errno = 0x20000000 | 0xC0000000 | 0x22D
+	ERROR_NON_WINDOWS_DRIVER                     Errno = 0x20000000 | 0xC0000000 | 0x22E
+	ERROR_NO_CATALOG_FOR_OEM_INF                 Errno = 0x20000000 | 0xC0000000 | 0x22F
+	ERROR_DEVINSTALL_QUEUE_NONNATIVE             Errno = 0x20000000 | 0xC0000000 | 0x230
+	ERROR_NOT_DISABLEABLE                        Errno = 0x20000000 | 0xC0000000 | 0x231
+	ERROR_CANT_REMOVE_DEVINST                    Errno = 0x20000000 | 0xC0000000 | 0x232
+	ERROR_INVALID_TARGET                         Errno = 0x20000000 | 0xC0000000 | 0x233
+	ERROR_DRIVER_NONNATIVE                       Errno = 0x20000000 | 0xC0000000 | 0x234
+	ERROR_IN_WOW64                               Errno = 0x20000000 | 0xC0000000 | 0x235
+	ERROR_SET_SYSTEM_RESTORE_POINT               Errno = 0x20000000 | 0xC0000000 | 0x236
+	ERROR_SCE_DISABLED                           Errno = 0x20000000 | 0xC0000000 | 0x238
+	ERROR_UNKNOWN_EXCEPTION                      Errno = 0x20000000 | 0xC0000000 | 0x239
+	ERROR_PNP_REGISTRY_ERROR                     Errno = 0x20000000 | 0xC0000000 | 0x23A
+	ERROR_REMOTE_REQUEST_UNSUPPORTED             Errno = 0x20000000 | 0xC0000000 | 0x23B
+	ERROR_NOT_AN_INSTALLED_OEM_INF               Errno = 0x20000000 | 0xC0000000 | 0x23C
+	ERROR_INF_IN_USE_BY_DEVICES                  Errno = 0x20000000 | 0xC0000000 | 0x23D
+	ERROR_DI_FUNCTION_OBSOLETE                   Errno = 0x20000000 | 0xC0000000 | 0x23E
+	ERROR_NO_AUTHENTICODE_CATALOG                Errno = 0x20000000 | 0xC0000000 | 0x23F
+	ERROR_AUTHENTICODE_DISALLOWED                Errno = 0x20000000 | 0xC0000000 | 0x240
+	ERROR_AUTHENTICODE_TRUSTED_PUBLISHER         Errno = 0x20000000 | 0xC0000000 | 0x241
+	ERROR_AUTHENTICODE_TRUST_NOT_ESTABLISHED     Errno = 0x20000000 | 0xC0000000 | 0x242
+	ERROR_AUTHENTICODE_PUBLISHER_NOT_TRUSTED     Errno = 0x20000000 | 0xC0000000 | 0x243
+	ERROR_SIGNATURE_OSATTRIBUTE_MISMATCH         Errno = 0x20000000 | 0xC0000000 | 0x244
+	ERROR_ONLY_VALIDATE_VIA_AUTHENTICODE         Errno = 0x20000000 | 0xC0000000 | 0x245
+	ERROR_DEVICE_INSTALLER_NOT_READY             Errno = 0x20000000 | 0xC0000000 | 0x246
+	ERROR_DRIVER_STORE_ADD_FAILED                Errno = 0x20000000 | 0xC0000000 | 0x247
+	ERROR_DEVICE_INSTALL_BLOCKED                 Errno = 0x20000000 | 0xC0000000 | 0x248
+	ERROR_DRIVER_INSTALL_BLOCKED                 Errno = 0x20000000 | 0xC0000000 | 0x249
+	ERROR_WRONG_INF_TYPE                         Errno = 0x20000000 | 0xC0000000 | 0x24A
+	ERROR_FILE_HASH_NOT_IN_CATALOG               Errno = 0x20000000 | 0xC0000000 | 0x24B
+	ERROR_DRIVER_STORE_DELETE_FAILED             Errno = 0x20000000 | 0xC0000000 | 0x24C
+	ERROR_UNRECOVERABLE_STACK_OVERFLOW           Errno = 0x20000000 | 0xC0000000 | 0x300
+	EXCEPTION_SPAPI_UNRECOVERABLE_STACK_OVERFLOW Errno = ERROR_UNRECOVERABLE_STACK_OVERFLOW
+	ERROR_NO_DEFAULT_INTERFACE_DEVICE            Errno = ERROR_NO_DEFAULT_DEVICE_INTERFACE
+	ERROR_INTERFACE_DEVICE_ACTIVE                Errno = ERROR_DEVICE_INTERFACE_ACTIVE
+	ERROR_INTERFACE_DEVICE_REMOVED               Errno = ERROR_DEVICE_INTERFACE_REMOVED
+	ERROR_NO_SUCH_INTERFACE_DEVICE               Errno = ERROR_NO_SUCH_DEVICE_INTERFACE
+)
+
+const (
+	MAX_DEVICE_ID_LEN   = 200
+	MAX_DEVNODE_ID_LEN  = MAX_DEVICE_ID_LEN
+	MAX_GUID_STRING_LEN = 39 // 38 chars + null terminator
+	MAX_CLASS_NAME_LEN  = 32
+	MAX_PROFILE_LEN     = 80
+	MAX_CONFIG_VALUE    = 9999
+	MAX_INSTANCE_VALUE  = 9999
+	CONFIGMG_VERSION    = 0x0400
+)
+
+// Maximum string length constants
+const (
+	LINE_LEN                    = 256  // Windows 9x-compatible maximum for displayable strings coming from a device INF.
+	MAX_INF_STRING_LENGTH       = 4096 // Actual maximum size of an INF string (including string substitutions).
+	MAX_INF_SECTION_NAME_LENGTH = 255  // For Windows 9x compatibility, INF section names should be constrained to 32 characters.
+	MAX_TITLE_LEN               = 60
+	MAX_INSTRUCTION_LEN         = 256
+	MAX_LABEL_LEN               = 30
+	MAX_SERVICE_NAME_LEN        = 256
+	MAX_SUBTITLE_LEN            = 256
+)
+
+const (
+	// SP_MAX_MACHINENAME_LENGTH defines maximum length of a machine name in the format expected by ConfigMgr32 CM_Connect_Machine (i.e., "\\\\MachineName\0").
+	SP_MAX_MACHINENAME_LENGTH = MAX_PATH + 3
+)
+
+// HSPFILEQ is the type of a setup file queue handle.
+type HSPFILEQ uintptr
+
+// DevInfo holds a reference to a device information set.
+type DevInfo Handle
+
+// DEVINST is a device instance handle recognized by the cfgmgr32 APIs.
+type DEVINST uint32
+
+// DevInfoData is a device information structure (references a device instance that is a member of a device information set)
+type DevInfoData struct {
+	size      uint32
+	ClassGUID GUID
+	DevInst   DEVINST
+	_         uintptr
+}
+
+// DevInfoListDetailData is a structure for detailed information on a device information set (used for SetupDiGetDeviceInfoListDetail, which supersedes the functionality of SetupDiGetDeviceInfoListClass).
+type DevInfoListDetailData struct {
+	size                uint32 // Use unsafeSizeOf method
+	ClassGUID           GUID
+	RemoteMachineHandle Handle
+	remoteMachineName   [SP_MAX_MACHINENAME_LENGTH]uint16
+}
+
+func (*DevInfoListDetailData) unsafeSizeOf() uint32 {
+	if unsafe.Sizeof(uintptr(0)) == 4 {
+		// Windows declares this with pshpack1.h
+		return uint32(unsafe.Offsetof(DevInfoListDetailData{}.remoteMachineName) + unsafe.Sizeof(DevInfoListDetailData{}.remoteMachineName))
+	}
+	return uint32(unsafe.Sizeof(DevInfoListDetailData{}))
+}
+
+func (data *DevInfoListDetailData) RemoteMachineName() string {
+	return UTF16ToString(data.remoteMachineName[:])
+}
+
+func (data *DevInfoListDetailData) SetRemoteMachineName(remoteMachineName string) error {
+	str, err := UTF16FromString(remoteMachineName)
+	if err != nil {
+		return err
+	}
+	copy(data.remoteMachineName[:], str)
+	return nil
+}
+
+// DI_FUNCTION is the function type for device installer requests.
+type DI_FUNCTION uint32
+
+const (
+	DIF_SELECTDEVICE                   DI_FUNCTION = 0x00000001
+	DIF_INSTALLDEVICE                  DI_FUNCTION = 0x00000002
+	DIF_ASSIGNRESOURCES                DI_FUNCTION = 0x00000003
+	DIF_PROPERTIES                     DI_FUNCTION = 0x00000004
+	DIF_REMOVE                         DI_FUNCTION = 0x00000005
+	DIF_FIRSTTIMESETUP                 DI_FUNCTION = 0x00000006
+	DIF_FOUNDDEVICE                    DI_FUNCTION = 0x00000007
+	DIF_SELECTCLASSDRIVERS             DI_FUNCTION = 0x00000008
+	DIF_VALIDATECLASSDRIVERS           DI_FUNCTION = 0x00000009
+	DIF_INSTALLCLASSDRIVERS            DI_FUNCTION = 0x0000000A
+	DIF_CALCDISKSPACE                  DI_FUNCTION = 0x0000000B
+	DIF_DESTROYPRIVATEDATA             DI_FUNCTION = 0x0000000C
+	DIF_VALIDATEDRIVER                 DI_FUNCTION = 0x0000000D
+	DIF_DETECT                         DI_FUNCTION = 0x0000000F
+	DIF_INSTALLWIZARD                  DI_FUNCTION = 0x00000010
+	DIF_DESTROYWIZARDDATA              DI_FUNCTION = 0x00000011
+	DIF_PROPERTYCHANGE                 DI_FUNCTION = 0x00000012
+	DIF_ENABLECLASS                    DI_FUNCTION = 0x00000013
+	DIF_DETECTVERIFY                   DI_FUNCTION = 0x00000014
+	DIF_INSTALLDEVICEFILES             DI_FUNCTION = 0x00000015
+	DIF_UNREMOVE                       DI_FUNCTION = 0x00000016
+	DIF_SELECTBESTCOMPATDRV            DI_FUNCTION = 0x00000017
+	DIF_ALLOW_INSTALL                  DI_FUNCTION = 0x00000018
+	DIF_REGISTERDEVICE                 DI_FUNCTION = 0x00000019
+	DIF_NEWDEVICEWIZARD_PRESELECT      DI_FUNCTION = 0x0000001A
+	DIF_NEWDEVICEWIZARD_SELECT         DI_FUNCTION = 0x0000001B
+	DIF_NEWDEVICEWIZARD_PREANALYZE     DI_FUNCTION = 0x0000001C
+	DIF_NEWDEVICEWIZARD_POSTANALYZE    DI_FUNCTION = 0x0000001D
+	DIF_NEWDEVICEWIZARD_FINISHINSTALL  DI_FUNCTION = 0x0000001E
+	DIF_INSTALLINTERFACES              DI_FUNCTION = 0x00000020
+	DIF_DETECTCANCEL                   DI_FUNCTION = 0x00000021
+	DIF_REGISTER_COINSTALLERS          DI_FUNCTION = 0x00000022
+	DIF_ADDPROPERTYPAGE_ADVANCED       DI_FUNCTION = 0x00000023
+	DIF_ADDPROPERTYPAGE_BASIC          DI_FUNCTION = 0x00000024
+	DIF_TROUBLESHOOTER                 DI_FUNCTION = 0x00000026
+	DIF_POWERMESSAGEWAKE               DI_FUNCTION = 0x00000027
+	DIF_ADDREMOTEPROPERTYPAGE_ADVANCED DI_FUNCTION = 0x00000028
+	DIF_UPDATEDRIVER_UI                DI_FUNCTION = 0x00000029
+	DIF_FINISHINSTALL_ACTION           DI_FUNCTION = 0x0000002A
+)
+
+// DevInstallParams is the device installation parameters structure (associated with a particular device information element, or globally with a device information set).
+type DevInstallParams struct {
+	size                     uint32
+	Flags                    DI_FLAGS
+	FlagsEx                  DI_FLAGSEX
+	hwndParent               uintptr
+	InstallMsgHandler        uintptr
+	InstallMsgHandlerContext uintptr
+	FileQueue                HSPFILEQ
+	_                        uintptr
+	_                        uint32
+	driverPath               [MAX_PATH]uint16
+}
+
+func (params *DevInstallParams) DriverPath() string {
+	return UTF16ToString(params.driverPath[:])
+}
+
+func (params *DevInstallParams) SetDriverPath(driverPath string) error {
+	str, err := UTF16FromString(driverPath)
+	if err != nil {
+		return err
+	}
+	copy(params.driverPath[:], str)
+	return nil
+}
+
+// DI_FLAGS holds SP_DEVINSTALL_PARAMS.Flags values.
+type DI_FLAGS uint32
+
+const (
+	// Flags for choosing a device
+	DI_SHOWOEM       DI_FLAGS = 0x00000001 // support Other... button
+	DI_SHOWCOMPAT    DI_FLAGS = 0x00000002 // show compatibility list
+	DI_SHOWCLASS     DI_FLAGS = 0x00000004 // show class list
+	DI_SHOWALL       DI_FLAGS = 0x00000007 // both class & compat list shown
+	DI_NOVCP         DI_FLAGS = 0x00000008 // don't create a new copy queue--use caller-supplied FileQueue
+	DI_DIDCOMPAT     DI_FLAGS = 0x00000010 // Searched for compatible devices
+	DI_DIDCLASS      DI_FLAGS = 0x00000020 // Searched for class devices
+	DI_AUTOASSIGNRES DI_FLAGS = 0x00000040 // No UI for resources if possible
+
+	// Flags returned by DiInstallDevice to indicate need to reboot/restart
+	DI_NEEDRESTART DI_FLAGS = 0x00000080 // Reboot required to take effect
+	DI_NEEDREBOOT  DI_FLAGS = 0x00000100 // same as DI_NEEDRESTART
+
+	// Flags for device installation
+	DI_NOBROWSE DI_FLAGS = 0x00000200 // no Browse... in InsertDisk
+
+	// Flags set by DiBuildDriverInfoList
+	DI_MULTMFGS DI_FLAGS = 0x00000400 // Set if multiple manufacturers in class driver list
+
+	// Flag indicates that device is disabled
+	DI_DISABLED DI_FLAGS = 0x00000800 // Set if device disabled
+
+	// Flags for Device/Class Properties
+	DI_GENERALPAGE_ADDED  DI_FLAGS = 0x00001000
+	DI_RESOURCEPAGE_ADDED DI_FLAGS = 0x00002000
+
+	// Flag to indicate the setting properties for this Device (or class) caused a change so the Dev Mgr UI probably needs to be updated.
+	DI_PROPERTIES_CHANGE DI_FLAGS = 0x00004000
+
+	// Flag to indicate that the sorting from the INF file should be used.
+	DI_INF_IS_SORTED DI_FLAGS = 0x00008000
+
+	// Flag to indicate that only the INF specified by SP_DEVINSTALL_PARAMS.DriverPath should be searched.
+	DI_ENUMSINGLEINF DI_FLAGS = 0x00010000
+
+	// Flag that prevents ConfigMgr from removing/re-enumerating devices during device
+	// registration, installation, and deletion.
+	DI_DONOTCALLCONFIGMG DI_FLAGS = 0x00020000
+
+	// The following flag can be used to install a device disabled
+	DI_INSTALLDISABLED DI_FLAGS = 0x00040000
+
+	// Flag that causes SetupDiBuildDriverInfoList to build a device's compatible driver
+	// list from its existing class driver list, instead of the normal INF search.
+	DI_COMPAT_FROM_CLASS DI_FLAGS = 0x00080000
+
+	// This flag is set if the Class Install params should be used.
+	DI_CLASSINSTALLPARAMS DI_FLAGS = 0x00100000
+
+	// This flag is set if the caller of DiCallClassInstaller does NOT want the internal default action performed if the Class installer returns ERROR_DI_DO_DEFAULT.
+	DI_NODI_DEFAULTACTION DI_FLAGS = 0x00200000
+
+	// Flags for device installation
+	DI_QUIETINSTALL        DI_FLAGS = 0x00800000 // don't confuse the user with questions or excess info
+	DI_NOFILECOPY          DI_FLAGS = 0x01000000 // No file Copy necessary
+	DI_FORCECOPY           DI_FLAGS = 0x02000000 // Force files to be copied from install path
+	DI_DRIVERPAGE_ADDED    DI_FLAGS = 0x04000000 // Prop provider added Driver page.
+	DI_USECI_SELECTSTRINGS DI_FLAGS = 0x08000000 // Use Class Installer Provided strings in the Select Device Dlg
+	DI_OVERRIDE_INFFLAGS   DI_FLAGS = 0x10000000 // Override INF flags
+	DI_PROPS_NOCHANGEUSAGE DI_FLAGS = 0x20000000 // No Enable/Disable in General Props
+
+	DI_NOSELECTICONS DI_FLAGS = 0x40000000 // No small icons in select device dialogs
+
+	DI_NOWRITE_IDS DI_FLAGS = 0x80000000 // Don't write HW & Compat IDs on install
+)
+
+// DI_FLAGSEX holds SP_DEVINSTALL_PARAMS.FlagsEx values.
+type DI_FLAGSEX uint32
+
+const (
+	DI_FLAGSEX_CI_FAILED                DI_FLAGSEX = 0x00000004 // Failed to Load/Call class installer
+	DI_FLAGSEX_FINISHINSTALL_ACTION     DI_FLAGSEX = 0x00000008 // Class/co-installer wants to get a DIF_FINISH_INSTALL action in client context.
+	DI_FLAGSEX_DIDINFOLIST              DI_FLAGSEX = 0x00000010 // Did the Class Info List
+	DI_FLAGSEX_DIDCOMPATINFO            DI_FLAGSEX = 0x00000020 // Did the Compat Info List
+	DI_FLAGSEX_FILTERCLASSES            DI_FLAGSEX = 0x00000040
+	DI_FLAGSEX_SETFAILEDINSTALL         DI_FLAGSEX = 0x00000080
+	DI_FLAGSEX_DEVICECHANGE             DI_FLAGSEX = 0x00000100
+	DI_FLAGSEX_ALWAYSWRITEIDS           DI_FLAGSEX = 0x00000200
+	DI_FLAGSEX_PROPCHANGE_PENDING       DI_FLAGSEX = 0x00000400 // One or more device property sheets have had changes made to them, and need to have a DIF_PROPERTYCHANGE occur.
+	DI_FLAGSEX_ALLOWEXCLUDEDDRVS        DI_FLAGSEX = 0x00000800
+	DI_FLAGSEX_NOUIONQUERYREMOVE        DI_FLAGSEX = 0x00001000
+	DI_FLAGSEX_USECLASSFORCOMPAT        DI_FLAGSEX = 0x00002000 // Use the device's class when building compat drv list. (Ignored if DI_COMPAT_FROM_CLASS flag is specified.)
+	DI_FLAGSEX_NO_DRVREG_MODIFY         DI_FLAGSEX = 0x00008000 // Don't run AddReg and DelReg for device's software (driver) key.
+	DI_FLAGSEX_IN_SYSTEM_SETUP          DI_FLAGSEX = 0x00010000 // Installation is occurring during initial system setup.
+	DI_FLAGSEX_INET_DRIVER              DI_FLAGSEX = 0x00020000 // Driver came from Windows Update
+	DI_FLAGSEX_APPENDDRIVERLIST         DI_FLAGSEX = 0x00040000 // Cause SetupDiBuildDriverInfoList to append a new driver list to an existing list.
+	DI_FLAGSEX_PREINSTALLBACKUP         DI_FLAGSEX = 0x00080000 // not used
+	DI_FLAGSEX_BACKUPONREPLACE          DI_FLAGSEX = 0x00100000 // not used
+	DI_FLAGSEX_DRIVERLIST_FROM_URL      DI_FLAGSEX = 0x00200000 // build driver list from INF(s) retrieved from URL specified in SP_DEVINSTALL_PARAMS.DriverPath (empty string means Windows Update website)
+	DI_FLAGSEX_EXCLUDE_OLD_INET_DRIVERS DI_FLAGSEX = 0x00800000 // Don't include old Internet drivers when building a driver list. Ignored on Windows Vista and later.
+	DI_FLAGSEX_POWERPAGE_ADDED          DI_FLAGSEX = 0x01000000 // class installer added their own power page
+	DI_FLAGSEX_FILTERSIMILARDRIVERS     DI_FLAGSEX = 0x02000000 // only include similar drivers in class list
+	DI_FLAGSEX_INSTALLEDDRIVER          DI_FLAGSEX = 0x04000000 // only add the installed driver to the class or compat driver list.  Used in calls to SetupDiBuildDriverInfoList
+	DI_FLAGSEX_NO_CLASSLIST_NODE_MERGE  DI_FLAGSEX = 0x08000000 // Don't remove identical driver nodes from the class list
+	DI_FLAGSEX_ALTPLATFORM_DRVSEARCH    DI_FLAGSEX = 0x10000000 // Build driver list based on alternate platform information specified in associated file queue
+	DI_FLAGSEX_RESTART_DEVICE_ONLY      DI_FLAGSEX = 0x20000000 // only restart the device drivers are being installed on as opposed to restarting all devices using those drivers.
+	DI_FLAGSEX_RECURSIVESEARCH          DI_FLAGSEX = 0x40000000 // Tell SetupDiBuildDriverInfoList to do a recursive search
+	DI_FLAGSEX_SEARCH_PUBLISHED_INFS    DI_FLAGSEX = 0x80000000 // Tell SetupDiBuildDriverInfoList to do a "published INF" search
+)
+
+// ClassInstallHeader is the first member of any class install parameters structure. It contains the device installation request code that defines the format of the rest of the install parameters structure.
+type ClassInstallHeader struct {
+	size            uint32
+	InstallFunction DI_FUNCTION
+}
+
+func MakeClassInstallHeader(installFunction DI_FUNCTION) *ClassInstallHeader {
+	hdr := &ClassInstallHeader{InstallFunction: installFunction}
+	hdr.size = uint32(unsafe.Sizeof(*hdr))
+	return hdr
+}
+
+// DICS_STATE specifies values indicating a change in a device's state
+type DICS_STATE uint32
+
+const (
+	DICS_ENABLE     DICS_STATE = 0x00000001 // The device is being enabled.
+	DICS_DISABLE    DICS_STATE = 0x00000002 // The device is being disabled.
+	DICS_PROPCHANGE DICS_STATE = 0x00000003 // The properties of the device have changed.
+	DICS_START      DICS_STATE = 0x00000004 // The device is being started (if the request is for the currently active hardware profile).
+	DICS_STOP       DICS_STATE = 0x00000005 // The device is being stopped. The driver stack will be unloaded and the CSCONFIGFLAG_DO_NOT_START flag will be set for the device.
+)
+
+// DICS_FLAG specifies the scope of a device property change
+type DICS_FLAG uint32
+
+const (
+	DICS_FLAG_GLOBAL         DICS_FLAG = 0x00000001 // make change in all hardware profiles
+	DICS_FLAG_CONFIGSPECIFIC DICS_FLAG = 0x00000002 // make change in specified profile only
+	DICS_FLAG_CONFIGGENERAL  DICS_FLAG = 0x00000004 // 1 or more hardware profile-specific changes to follow (obsolete)
+)
+
+// PropChangeParams is a structure corresponding to a DIF_PROPERTYCHANGE install function.
+type PropChangeParams struct {
+	ClassInstallHeader ClassInstallHeader
+	StateChange        DICS_STATE
+	Scope              DICS_FLAG
+	HwProfile          uint32
+}
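+
+// For example, a DIF_PROPERTYCHANGE request that disables a device in all
+// hardware profiles could be sketched as:
+//
+//	params := PropChangeParams{
+//		ClassInstallHeader: *MakeClassInstallHeader(DIF_PROPERTYCHANGE),
+//		StateChange:        DICS_DISABLE,
+//		Scope:              DICS_FLAG_GLOBAL,
+//	}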
+
+// DI_REMOVEDEVICE specifies the scope of the device removal
+type DI_REMOVEDEVICE uint32
+
+const (
+	DI_REMOVEDEVICE_GLOBAL         DI_REMOVEDEVICE = 0x00000001 // Make this change in all hardware profiles. Remove information about the device from the registry.
+	DI_REMOVEDEVICE_CONFIGSPECIFIC DI_REMOVEDEVICE = 0x00000002 // Make this change to only the hardware profile specified by HwProfile. This flag only applies to root-enumerated devices. When Windows removes the device from the last hardware profile in which it was configured, Windows performs a global removal.
+)
+
+// RemoveDeviceParams is a structure corresponding to a DIF_REMOVE install function.
+type RemoveDeviceParams struct {
+	ClassInstallHeader ClassInstallHeader
+	Scope              DI_REMOVEDEVICE
+	HwProfile          uint32
+}
+
+// DrvInfoData is the driver information structure (a member of a driver info list that may be associated with a particular device instance, or globally with a device information set).
+type DrvInfoData struct {
+	size          uint32
+	DriverType    uint32
+	_             uintptr
+	description   [LINE_LEN]uint16
+	mfgName       [LINE_LEN]uint16
+	providerName  [LINE_LEN]uint16
+	DriverDate    Filetime
+	DriverVersion uint64
+}
+
+func (data *DrvInfoData) Description() string {
+	return UTF16ToString(data.description[:])
+}
+
+func (data *DrvInfoData) SetDescription(description string) error {
+	str, err := UTF16FromString(description)
+	if err != nil {
+		return err
+	}
+	copy(data.description[:], str)
+	return nil
+}
+
+func (data *DrvInfoData) MfgName() string {
+	return UTF16ToString(data.mfgName[:])
+}
+
+func (data *DrvInfoData) SetMfgName(mfgName string) error {
+	str, err := UTF16FromString(mfgName)
+	if err != nil {
+		return err
+	}
+	copy(data.mfgName[:], str)
+	return nil
+}
+
+func (data *DrvInfoData) ProviderName() string {
+	return UTF16ToString(data.providerName[:])
+}
+
+func (data *DrvInfoData) SetProviderName(providerName string) error {
+	str, err := UTF16FromString(providerName)
+	if err != nil {
+		return err
+	}
+	copy(data.providerName[:], str)
+	return nil
+}
+
+// IsNewer reports whether the DrvInfoData date and version are newer than the supplied parameters.
+func (data *DrvInfoData) IsNewer(driverDate Filetime, driverVersion uint64) bool {
+	if data.DriverDate.HighDateTime > driverDate.HighDateTime {
+		return true
+	}
+	if data.DriverDate.HighDateTime < driverDate.HighDateTime {
+		return false
+	}
+
+	if data.DriverDate.LowDateTime > driverDate.LowDateTime {
+		return true
+	}
+	if data.DriverDate.LowDateTime < driverDate.LowDateTime {
+		return false
+	}
+
+	if data.DriverVersion > driverVersion {
+		return true
+	}
+	if data.DriverVersion < driverVersion {
+		return false
+	}
+
+	return false
+}
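+
+// Illustrative usage when picking between two candidate drivers:
+//
+//	if candidate.IsNewer(current.DriverDate, current.DriverVersion) {
+//		// prefer candidate over current
+//	}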
+
+// DrvInfoDetailData is the driver information details structure (it provides detailed information about a particular driver information structure).
+type DrvInfoDetailData struct {
+	size            uint32 // Use unsafeSizeOf method
+	InfDate         Filetime
+	compatIDsOffset uint32
+	compatIDsLength uint32
+	_               uintptr
+	sectionName     [LINE_LEN]uint16
+	infFileName     [MAX_PATH]uint16
+	drvDescription  [LINE_LEN]uint16
+	hardwareID      [1]uint16
+}
+
+func (*DrvInfoDetailData) unsafeSizeOf() uint32 {
+	if unsafe.Sizeof(uintptr(0)) == 4 {
+		// Windows declares this with pshpack1.h
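+		// on 32-bit, so the struct carries no trailing alignment padding
+		// there; report the packed size (offset of the trailing hardwareID
+		// member plus its size) rather than unsafe.Sizeof, which includes
+		// Go's padding.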
+		return uint32(unsafe.Offsetof(DrvInfoDetailData{}.hardwareID) + unsafe.Sizeof(DrvInfoDetailData{}.hardwareID))
+	}
+	return uint32(unsafe.Sizeof(DrvInfoDetailData{}))
+}
+
+func (data *DrvInfoDetailData) SectionName() string {
+	return UTF16ToString(data.sectionName[:])
+}
+
+func (data *DrvInfoDetailData) InfFileName() string {
+	return UTF16ToString(data.infFileName[:])
+}
+
+func (data *DrvInfoDetailData) DrvDescription() string {
+	return UTF16ToString(data.drvDescription[:])
+}
+
+func (data *DrvInfoDetailData) HardwareID() string {
+	if data.compatIDsOffset > 1 {
+		bufW := data.getBuf()
+		return UTF16ToString(bufW[:wcslen(bufW)])
+	}
+
+	return ""
+}
+
+func (data *DrvInfoDetailData) CompatIDs() []string {
+	a := make([]string, 0)
+
+	if data.compatIDsLength > 0 {
+		bufW := data.getBuf()
+		bufW = bufW[data.compatIDsOffset : data.compatIDsOffset+data.compatIDsLength]
+		for i := 0; i < len(bufW); {
+			j := i + wcslen(bufW[i:])
+			if i < j {
+				a = append(a, UTF16ToString(bufW[i:j]))
+			}
+			i = j + 1
+		}
+	}
+
+	return a
+}
+
+func (data *DrvInfoDetailData) getBuf() []uint16 {
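+	// Reinterpret the variable-length buffer that trails the fixed-size
+	// header (starting at hardwareID) as a []uint16 covering the remaining
+	// bytes reported in size.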
+	len := (data.size - uint32(unsafe.Offsetof(data.hardwareID))) / 2
+	sl := struct {
+		addr *uint16
+		len  int
+		cap  int
+	}{&data.hardwareID[0], int(len), int(len)}
+	return *(*[]uint16)(unsafe.Pointer(&sl))
+}
+
+// IsCompatible method tests if given hardware ID matches the driver or is listed on the compatible ID list.
+func (data *DrvInfoDetailData) IsCompatible(hwid string) bool {
+	hwidLC := strings.ToLower(hwid)
+	if strings.ToLower(data.HardwareID()) == hwidLC {
+		return true
+	}
+	a := data.CompatIDs()
+	for i := range a {
+		if strings.ToLower(a[i]) == hwidLC {
+			return true
+		}
+	}
+
+	return false
+}
+
+// DICD flags control SetupDiCreateDeviceInfo
+type DICD uint32
+
+const (
+	DICD_GENERATE_ID       DICD = 0x00000001
+	DICD_INHERIT_CLASSDRVS DICD = 0x00000002
+)
+
+// SUOI flags control SetupUninstallOEMInf
+type SUOI uint32
+
+const (
+	SUOI_FORCEDELETE SUOI = 0x0001
+)
+
+// SPDIT flags to distinguish between class drivers and
+// device drivers. (Passed in 'DriverType' parameter of
+// driver information list APIs)
+type SPDIT uint32
+
+const (
+	SPDIT_NODRIVER     SPDIT = 0x00000000
+	SPDIT_CLASSDRIVER  SPDIT = 0x00000001
+	SPDIT_COMPATDRIVER SPDIT = 0x00000002
+)
+
+// DIGCF flags control what is included in the device information set built by SetupDiGetClassDevs
+type DIGCF uint32
+
+const (
+	DIGCF_DEFAULT         DIGCF = 0x00000001 // only valid with DIGCF_DEVICEINTERFACE
+	DIGCF_PRESENT         DIGCF = 0x00000002
+	DIGCF_ALLCLASSES      DIGCF = 0x00000004
+	DIGCF_PROFILE         DIGCF = 0x00000008
+	DIGCF_DEVICEINTERFACE DIGCF = 0x00000010
+)
+
+// DIREG specifies values for SetupDiCreateDevRegKey, SetupDiOpenDevRegKey, and SetupDiDeleteDevRegKey.
+type DIREG uint32
+
+const (
+	DIREG_DEV  DIREG = 0x00000001 // Open/Create/Delete device key
+	DIREG_DRV  DIREG = 0x00000002 // Open/Create/Delete driver key
+	DIREG_BOTH DIREG = 0x00000004 // Delete both driver and device key
+)
+
+// SPDRP specifies device registry property codes
+// (Codes marked as read-only (R) may only be used for
+// SetupDiGetDeviceRegistryProperty)
+//
+// These values should cover the same set of registry properties
+// as defined by the CM_DRP codes in cfgmgr32.h.
+//
+// Note that SPDRP codes are zero based while CM_DRP codes are one based!
+type SPDRP uint32
+
+const (
+	SPDRP_DEVICEDESC                  SPDRP = 0x00000000 // DeviceDesc (R/W)
+	SPDRP_HARDWAREID                  SPDRP = 0x00000001 // HardwareID (R/W)
+	SPDRP_COMPATIBLEIDS               SPDRP = 0x00000002 // CompatibleIDs (R/W)
+	SPDRP_SERVICE                     SPDRP = 0x00000004 // Service (R/W)
+	SPDRP_CLASS                       SPDRP = 0x00000007 // Class (R--tied to ClassGUID)
+	SPDRP_CLASSGUID                   SPDRP = 0x00000008 // ClassGUID (R/W)
+	SPDRP_DRIVER                      SPDRP = 0x00000009 // Driver (R/W)
+	SPDRP_CONFIGFLAGS                 SPDRP = 0x0000000A // ConfigFlags (R/W)
+	SPDRP_MFG                         SPDRP = 0x0000000B // Mfg (R/W)
+	SPDRP_FRIENDLYNAME                SPDRP = 0x0000000C // FriendlyName (R/W)
+	SPDRP_LOCATION_INFORMATION        SPDRP = 0x0000000D // LocationInformation (R/W)
+	SPDRP_PHYSICAL_DEVICE_OBJECT_NAME SPDRP = 0x0000000E // PhysicalDeviceObjectName (R)
+	SPDRP_CAPABILITIES                SPDRP = 0x0000000F // Capabilities (R)
+	SPDRP_UI_NUMBER                   SPDRP = 0x00000010 // UiNumber (R)
+	SPDRP_UPPERFILTERS                SPDRP = 0x00000011 // UpperFilters (R/W)
+	SPDRP_LOWERFILTERS                SPDRP = 0x00000012 // LowerFilters (R/W)
+	SPDRP_BUSTYPEGUID                 SPDRP = 0x00000013 // BusTypeGUID (R)
+	SPDRP_LEGACYBUSTYPE               SPDRP = 0x00000014 // LegacyBusType (R)
+	SPDRP_BUSNUMBER                   SPDRP = 0x00000015 // BusNumber (R)
+	SPDRP_ENUMERATOR_NAME             SPDRP = 0x00000016 // Enumerator Name (R)
+	SPDRP_SECURITY                    SPDRP = 0x00000017 // Security (R/W, binary form)
+	SPDRP_SECURITY_SDS                SPDRP = 0x00000018 // Security (W, SDS form)
+	SPDRP_DEVTYPE                     SPDRP = 0x00000019 // Device Type (R/W)
+	SPDRP_EXCLUSIVE                   SPDRP = 0x0000001A // Device is exclusive-access (R/W)
+	SPDRP_CHARACTERISTICS             SPDRP = 0x0000001B // Device Characteristics (R/W)
+	SPDRP_ADDRESS                     SPDRP = 0x0000001C // Device Address (R)
+	SPDRP_UI_NUMBER_DESC_FORMAT       SPDRP = 0x0000001D // UiNumberDescFormat (R/W)
+	SPDRP_DEVICE_POWER_DATA           SPDRP = 0x0000001E // Device Power Data (R)
+	SPDRP_REMOVAL_POLICY              SPDRP = 0x0000001F // Removal Policy (R)
+	SPDRP_REMOVAL_POLICY_HW_DEFAULT   SPDRP = 0x00000020 // Hardware Removal Policy (R)
+	SPDRP_REMOVAL_POLICY_OVERRIDE     SPDRP = 0x00000021 // Removal Policy Override (R/W)
+	SPDRP_INSTALL_STATE               SPDRP = 0x00000022 // Device Install State (R)
+	SPDRP_LOCATION_PATHS              SPDRP = 0x00000023 // Device Location Paths (R)
+	SPDRP_BASE_CONTAINERID            SPDRP = 0x00000024 // Base ContainerID (R)
+
+	SPDRP_MAXIMUM_PROPERTY SPDRP = 0x00000025 // Upper bound on ordinals
+)
+
+// DEVPROPTYPE represents the property-data-type identifier that specifies the
+// data type of a device property value in the unified device property model.
+type DEVPROPTYPE uint32
+
+const (
+	DEVPROP_TYPEMOD_ARRAY DEVPROPTYPE = 0x00001000
+	DEVPROP_TYPEMOD_LIST  DEVPROPTYPE = 0x00002000
+
+	DEVPROP_TYPE_EMPTY                      DEVPROPTYPE = 0x00000000
+	DEVPROP_TYPE_NULL                       DEVPROPTYPE = 0x00000001
+	DEVPROP_TYPE_SBYTE                      DEVPROPTYPE = 0x00000002
+	DEVPROP_TYPE_BYTE                       DEVPROPTYPE = 0x00000003
+	DEVPROP_TYPE_INT16                      DEVPROPTYPE = 0x00000004
+	DEVPROP_TYPE_UINT16                     DEVPROPTYPE = 0x00000005
+	DEVPROP_TYPE_INT32                      DEVPROPTYPE = 0x00000006
+	DEVPROP_TYPE_UINT32                     DEVPROPTYPE = 0x00000007
+	DEVPROP_TYPE_INT64                      DEVPROPTYPE = 0x00000008
+	DEVPROP_TYPE_UINT64                     DEVPROPTYPE = 0x00000009
+	DEVPROP_TYPE_FLOAT                      DEVPROPTYPE = 0x0000000A
+	DEVPROP_TYPE_DOUBLE                     DEVPROPTYPE = 0x0000000B
+	DEVPROP_TYPE_DECIMAL                    DEVPROPTYPE = 0x0000000C
+	DEVPROP_TYPE_GUID                       DEVPROPTYPE = 0x0000000D
+	DEVPROP_TYPE_CURRENCY                   DEVPROPTYPE = 0x0000000E
+	DEVPROP_TYPE_DATE                       DEVPROPTYPE = 0x0000000F
+	DEVPROP_TYPE_FILETIME                   DEVPROPTYPE = 0x00000010
+	DEVPROP_TYPE_BOOLEAN                    DEVPROPTYPE = 0x00000011
+	DEVPROP_TYPE_STRING                     DEVPROPTYPE = 0x00000012
+	DEVPROP_TYPE_STRING_LIST                DEVPROPTYPE = DEVPROP_TYPE_STRING | DEVPROP_TYPEMOD_LIST
+	DEVPROP_TYPE_SECURITY_DESCRIPTOR        DEVPROPTYPE = 0x00000013
+	DEVPROP_TYPE_SECURITY_DESCRIPTOR_STRING DEVPROPTYPE = 0x00000014
+	DEVPROP_TYPE_DEVPROPKEY                 DEVPROPTYPE = 0x00000015
+	DEVPROP_TYPE_DEVPROPTYPE                DEVPROPTYPE = 0x00000016
+	DEVPROP_TYPE_BINARY                     DEVPROPTYPE = DEVPROP_TYPE_BYTE | DEVPROP_TYPEMOD_ARRAY
+	DEVPROP_TYPE_ERROR                      DEVPROPTYPE = 0x00000017
+	DEVPROP_TYPE_NTSTATUS                   DEVPROPTYPE = 0x00000018
+	DEVPROP_TYPE_STRING_INDIRECT            DEVPROPTYPE = 0x00000019
+
+	MAX_DEVPROP_TYPE    DEVPROPTYPE = 0x00000019
+	MAX_DEVPROP_TYPEMOD DEVPROPTYPE = 0x00002000
+
+	DEVPROP_MASK_TYPE    DEVPROPTYPE = 0x00000FFF
+	DEVPROP_MASK_TYPEMOD DEVPROPTYPE = 0x0000F000
+)
+
+// DEVPROPGUID specifies a property category.
+type DEVPROPGUID GUID
+
+// DEVPROPID uniquely identifies the property within the property category.
+type DEVPROPID uint32
+
+const DEVPROPID_FIRST_USABLE DEVPROPID = 2
+
+// DEVPROPKEY represents a device property key for a device property in the
+// unified device property model.
+type DEVPROPKEY struct {
+	FmtID DEVPROPGUID
+	PID   DEVPROPID
+}
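+
+// For illustration, a well-known key from devpkey.h can be declared as
+// follows; devpkeyDeviceFriendlyName is a local example name, and the
+// GUID/PID pair is believed to match DEVPKEY_Device_FriendlyName but should
+// be verified against devpkey.h before use:
+//
+//	var devpkeyDeviceFriendlyName = DEVPROPKEY{
+//		FmtID: DEVPROPGUID{
+//			Data1: 0xa45c254e, Data2: 0xdf1c, Data3: 0x4efd,
+//			Data4: [8]byte{0x80, 0x20, 0x67, 0xd1, 0x46, 0xa8, 0x50, 0xe0},
+//		},
+//		PID: 14,
+//	}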
+
+// CONFIGRET is a return value or error code from cfgmgr32 APIs
+type CONFIGRET uint32
+
+func (ret CONFIGRET) Error() string {
+	if win32Error, ok := ret.Unwrap().(Errno); ok {
+		return fmt.Sprintf("%s (CfgMgr error: 0x%08x)", win32Error.Error(), uint32(ret))
+	}
+	return fmt.Sprintf("CfgMgr error: 0x%08x", uint32(ret))
+}
+
+func (ret CONFIGRET) Win32Error(defaultError Errno) Errno {
+	return cm_MapCrToWin32Err(ret, defaultError)
+}
+
+func (ret CONFIGRET) Unwrap() error {
+	const noMatch = Errno(^uintptr(0))
+	win32Error := ret.Win32Error(noMatch)
+	if win32Error == noMatch {
+		return nil
+	}
+	return win32Error
+}
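+
+// Because Unwrap exposes the mapped Win32 error, CONFIGRET values compose
+// with the standard errors package. A minimal sketch (devInst is assumed to
+// be a DEVINST obtained elsewhere):
+//
+//	var status, problem uint32
+//	err := CM_Get_DevNode_Status(&status, &problem, devInst, 0)
+//	var errno Errno
+//	if errors.As(err, &errno) {
+//		// errno holds the mapped Win32 error code
+//	}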
+
+const (
+	CR_SUCCESS                  CONFIGRET = 0x00000000
+	CR_DEFAULT                  CONFIGRET = 0x00000001
+	CR_OUT_OF_MEMORY            CONFIGRET = 0x00000002
+	CR_INVALID_POINTER          CONFIGRET = 0x00000003
+	CR_INVALID_FLAG             CONFIGRET = 0x00000004
+	CR_INVALID_DEVNODE          CONFIGRET = 0x00000005
+	CR_INVALID_DEVINST                    = CR_INVALID_DEVNODE
+	CR_INVALID_RES_DES          CONFIGRET = 0x00000006
+	CR_INVALID_LOG_CONF         CONFIGRET = 0x00000007
+	CR_INVALID_ARBITRATOR       CONFIGRET = 0x00000008
+	CR_INVALID_NODELIST         CONFIGRET = 0x00000009
+	CR_DEVNODE_HAS_REQS         CONFIGRET = 0x0000000A
+	CR_DEVINST_HAS_REQS                   = CR_DEVNODE_HAS_REQS
+	CR_INVALID_RESOURCEID       CONFIGRET = 0x0000000B
+	CR_DLVXD_NOT_FOUND          CONFIGRET = 0x0000000C
+	CR_NO_SUCH_DEVNODE          CONFIGRET = 0x0000000D
+	CR_NO_SUCH_DEVINST                    = CR_NO_SUCH_DEVNODE
+	CR_NO_MORE_LOG_CONF         CONFIGRET = 0x0000000E
+	CR_NO_MORE_RES_DES          CONFIGRET = 0x0000000F
+	CR_ALREADY_SUCH_DEVNODE     CONFIGRET = 0x00000010
+	CR_ALREADY_SUCH_DEVINST               = CR_ALREADY_SUCH_DEVNODE
+	CR_INVALID_RANGE_LIST       CONFIGRET = 0x00000011
+	CR_INVALID_RANGE            CONFIGRET = 0x00000012
+	CR_FAILURE                  CONFIGRET = 0x00000013
+	CR_NO_SUCH_LOGICAL_DEV      CONFIGRET = 0x00000014
+	CR_CREATE_BLOCKED           CONFIGRET = 0x00000015
+	CR_NOT_SYSTEM_VM            CONFIGRET = 0x00000016
+	CR_REMOVE_VETOED            CONFIGRET = 0x00000017
+	CR_APM_VETOED               CONFIGRET = 0x00000018
+	CR_INVALID_LOAD_TYPE        CONFIGRET = 0x00000019
+	CR_BUFFER_SMALL             CONFIGRET = 0x0000001A
+	CR_NO_ARBITRATOR            CONFIGRET = 0x0000001B
+	CR_NO_REGISTRY_HANDLE       CONFIGRET = 0x0000001C
+	CR_REGISTRY_ERROR           CONFIGRET = 0x0000001D
+	CR_INVALID_DEVICE_ID        CONFIGRET = 0x0000001E
+	CR_INVALID_DATA             CONFIGRET = 0x0000001F
+	CR_INVALID_API              CONFIGRET = 0x00000020
+	CR_DEVLOADER_NOT_READY      CONFIGRET = 0x00000021
+	CR_NEED_RESTART             CONFIGRET = 0x00000022
+	CR_NO_MORE_HW_PROFILES      CONFIGRET = 0x00000023
+	CR_DEVICE_NOT_THERE         CONFIGRET = 0x00000024
+	CR_NO_SUCH_VALUE            CONFIGRET = 0x00000025
+	CR_WRONG_TYPE               CONFIGRET = 0x00000026
+	CR_INVALID_PRIORITY         CONFIGRET = 0x00000027
+	CR_NOT_DISABLEABLE          CONFIGRET = 0x00000028
+	CR_FREE_RESOURCES           CONFIGRET = 0x00000029
+	CR_QUERY_VETOED             CONFIGRET = 0x0000002A
+	CR_CANT_SHARE_IRQ           CONFIGRET = 0x0000002B
+	CR_NO_DEPENDENT             CONFIGRET = 0x0000002C
+	CR_SAME_RESOURCES           CONFIGRET = 0x0000002D
+	CR_NO_SUCH_REGISTRY_KEY     CONFIGRET = 0x0000002E
+	CR_INVALID_MACHINENAME      CONFIGRET = 0x0000002F
+	CR_REMOTE_COMM_FAILURE      CONFIGRET = 0x00000030
+	CR_MACHINE_UNAVAILABLE      CONFIGRET = 0x00000031
+	CR_NO_CM_SERVICES           CONFIGRET = 0x00000032
+	CR_ACCESS_DENIED            CONFIGRET = 0x00000033
+	CR_CALL_NOT_IMPLEMENTED     CONFIGRET = 0x00000034
+	CR_INVALID_PROPERTY         CONFIGRET = 0x00000035
+	CR_DEVICE_INTERFACE_ACTIVE  CONFIGRET = 0x00000036
+	CR_NO_SUCH_DEVICE_INTERFACE CONFIGRET = 0x00000037
+	CR_INVALID_REFERENCE_STRING CONFIGRET = 0x00000038
+	CR_INVALID_CONFLICT_LIST    CONFIGRET = 0x00000039
+	CR_INVALID_INDEX            CONFIGRET = 0x0000003A
+	CR_INVALID_STRUCTURE_SIZE   CONFIGRET = 0x0000003B
+	NUM_CR_RESULTS              CONFIGRET = 0x0000003C
+)
+
+const (
+	CM_GET_DEVICE_INTERFACE_LIST_PRESENT     = 0 // only currently 'live' device interfaces
+	CM_GET_DEVICE_INTERFACE_LIST_ALL_DEVICES = 1 // all registered device interfaces, live or not
+)
+
+const (
+	DN_ROOT_ENUMERATED       = 0x00000001        // Was enumerated by ROOT
+	DN_DRIVER_LOADED         = 0x00000002        // Has Register_Device_Driver
+	DN_ENUM_LOADED           = 0x00000004        // Has Register_Enumerator
+	DN_STARTED               = 0x00000008        // Is currently configured
+	DN_MANUAL                = 0x00000010        // Manually installed
+	DN_NEED_TO_ENUM          = 0x00000020        // May need reenumeration
+	DN_NOT_FIRST_TIME        = 0x00000040        // Has received a config
+	DN_HARDWARE_ENUM         = 0x00000080        // Enum generates hardware ID
+	DN_LIAR                  = 0x00000100        // Lied about being able to reconfigure once
+	DN_HAS_MARK              = 0x00000200        // Not CM_Create_DevInst lately
+	DN_HAS_PROBLEM           = 0x00000400        // Need device installer
+	DN_FILTERED              = 0x00000800        // Is filtered
+	DN_MOVED                 = 0x00001000        // Has been moved
+	DN_DISABLEABLE           = 0x00002000        // Can be disabled
+	DN_REMOVABLE             = 0x00004000        // Can be removed
+	DN_PRIVATE_PROBLEM       = 0x00008000        // Has a private problem
+	DN_MF_PARENT             = 0x00010000        // Multi function parent
+	DN_MF_CHILD              = 0x00020000        // Multi function child
+	DN_WILL_BE_REMOVED       = 0x00040000        // DevInst is being removed
+	DN_NOT_FIRST_TIMEE       = 0x00080000        // Has received a config enumerate
+	DN_STOP_FREE_RES         = 0x00100000        // When child is stopped, free resources
+	DN_REBAL_CANDIDATE       = 0x00200000        // Don't skip during rebalance
+	DN_BAD_PARTIAL           = 0x00400000        // This devnode's log_confs do not have the same resources
+	DN_NT_ENUMERATOR         = 0x00800000        // This devnode is an NT enumerator
+	DN_NT_DRIVER             = 0x01000000        // This devnode is an NT driver
+	DN_NEEDS_LOCKING         = 0x02000000        // Devnode needs locking during resume processing
+	DN_ARM_WAKEUP            = 0x04000000        // Devnode can be the wakeup device
+	DN_APM_ENUMERATOR        = 0x08000000        // APM aware enumerator
+	DN_APM_DRIVER            = 0x10000000        // APM aware driver
+	DN_SILENT_INSTALL        = 0x20000000        // Silent install
+	DN_NO_SHOW_IN_DM         = 0x40000000        // No show in device manager
+	DN_BOOT_LOG_PROB         = 0x80000000        // Had a problem during preassignment of boot log conf
+	DN_NEED_RESTART          = DN_LIAR           // System needs to be restarted for this Devnode to work properly
+	DN_DRIVER_BLOCKED        = DN_NOT_FIRST_TIME // One or more drivers are blocked from loading for this Devnode
+	DN_LEGACY_DRIVER         = DN_MOVED          // This device is using a legacy driver
+	DN_CHILD_WITH_INVALID_ID = DN_HAS_MARK       // One or more children have invalid IDs
+	DN_DEVICE_DISCONNECTED   = DN_NEEDS_LOCKING  // The function driver for a device reported that the device is not connected.  Typically this means a wireless device is out of range.
+	DN_QUERY_REMOVE_PENDING  = DN_MF_PARENT      // Device is part of a set of related devices collectively pending query-removal
+	DN_QUERY_REMOVE_ACTIVE   = DN_MF_CHILD       // Device is actively engaged in a query-remove IRP
+	DN_CHANGEABLE_FLAGS      = DN_NOT_FIRST_TIME | DN_HARDWARE_ENUM | DN_HAS_MARK | DN_DISABLEABLE | DN_REMOVABLE | DN_MF_CHILD | DN_MF_PARENT | DN_NOT_FIRST_TIMEE | DN_STOP_FREE_RES | DN_REBAL_CANDIDATE | DN_NT_ENUMERATOR | DN_NT_DRIVER | DN_SILENT_INSTALL | DN_NO_SHOW_IN_DM
+)
+
+//sys	setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiCreateDeviceInfoListExW
+
+// SetupDiCreateDeviceInfoListEx function creates an empty device information set on a remote or a local computer and optionally associates the set with a device setup class.
+func SetupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName string) (deviceInfoSet DevInfo, err error) {
+	var machineNameUTF16 *uint16
+	if machineName != "" {
+		machineNameUTF16, err = UTF16PtrFromString(machineName)
+		if err != nil {
+			return
+		}
+	}
+	return setupDiCreateDeviceInfoListEx(classGUID, hwndParent, machineNameUTF16, 0)
+}
+
+//sys	setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) = setupapi.SetupDiGetDeviceInfoListDetailW
+
+// SetupDiGetDeviceInfoListDetail function retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name.
+func SetupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo) (deviceInfoSetDetailData *DevInfoListDetailData, err error) {
+	data := &DevInfoListDetailData{}
+	data.size = data.unsafeSizeOf()
+
+	return data, setupDiGetDeviceInfoListDetail(deviceInfoSet, data)
+}
+
+// DeviceInfoListDetail method retrieves information associated with a device information set including the class GUID, remote computer handle, and remote computer name.
+func (deviceInfoSet DevInfo) DeviceInfoListDetail() (*DevInfoListDetailData, error) {
+	return SetupDiGetDeviceInfoListDetail(deviceInfoSet)
+}
+
+//sys	setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCreateDeviceInfoW
+
+// SetupDiCreateDeviceInfo function creates a new device information element and adds it as a new member to the specified device information set.
+func SetupDiCreateDeviceInfo(deviceInfoSet DevInfo, deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (deviceInfoData *DevInfoData, err error) {
+	deviceNameUTF16, err := UTF16PtrFromString(deviceName)
+	if err != nil {
+		return
+	}
+
+	var deviceDescriptionUTF16 *uint16
+	if deviceDescription != "" {
+		deviceDescriptionUTF16, err = UTF16PtrFromString(deviceDescription)
+		if err != nil {
+			return
+		}
+	}
+
+	data := &DevInfoData{}
+	data.size = uint32(unsafe.Sizeof(*data))
+
+	return data, setupDiCreateDeviceInfo(deviceInfoSet, deviceNameUTF16, classGUID, deviceDescriptionUTF16, hwndParent, creationFlags, data)
+}
+
+// CreateDeviceInfo method creates a new device information element and adds it as a new member to the specified device information set.
+func (deviceInfoSet DevInfo) CreateDeviceInfo(deviceName string, classGUID *GUID, deviceDescription string, hwndParent uintptr, creationFlags DICD) (*DevInfoData, error) {
+	return SetupDiCreateDeviceInfo(deviceInfoSet, deviceName, classGUID, deviceDescription, hwndParent, creationFlags)
+}
+
+//sys	setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiEnumDeviceInfo
+
+// SetupDiEnumDeviceInfo function returns a DevInfoData structure that specifies a device information element in a device information set.
+func SetupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex int) (*DevInfoData, error) {
+	data := &DevInfoData{}
+	data.size = uint32(unsafe.Sizeof(*data))
+
+	return data, setupDiEnumDeviceInfo(deviceInfoSet, uint32(memberIndex), data)
+}
+
+// EnumDeviceInfo method returns a DevInfoData structure that specifies a device information element in a device information set.
+func (deviceInfoSet DevInfo) EnumDeviceInfo(memberIndex int) (*DevInfoData, error) {
+	return SetupDiEnumDeviceInfo(deviceInfoSet, memberIndex)
+}
+
+// SetupDiDestroyDeviceInfoList function deletes a device information set and frees all associated memory.
+//sys	SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiDestroyDeviceInfoList
+
+// Close method deletes a device information set and frees all associated memory.
+func (deviceInfoSet DevInfo) Close() error {
+	return SetupDiDestroyDeviceInfoList(deviceInfoSet)
+}
+
+//sys	SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiBuildDriverInfoList
+
+// BuildDriverInfoList method builds a list of drivers that is associated with a specific device or with the global class driver list for a device information set.
+func (deviceInfoSet DevInfo) BuildDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error {
+	return SetupDiBuildDriverInfoList(deviceInfoSet, deviceInfoData, driverType)
+}
+
+//sys	SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) = setupapi.SetupDiCancelDriverInfoSearch
+
+// CancelDriverInfoSearch method cancels a driver list search that is currently in progress in a different thread.
+func (deviceInfoSet DevInfo) CancelDriverInfoSearch() error {
+	return SetupDiCancelDriverInfoSearch(deviceInfoSet)
+}
+
+//sys	setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiEnumDriverInfoW
+
+// SetupDiEnumDriverInfo function enumerates the members of a driver list.
+func SetupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) {
+	data := &DrvInfoData{}
+	data.size = uint32(unsafe.Sizeof(*data))
+
+	return data, setupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, uint32(memberIndex), data)
+}
+
+// EnumDriverInfo method enumerates the members of a driver list.
+func (deviceInfoSet DevInfo) EnumDriverInfo(deviceInfoData *DevInfoData, driverType SPDIT, memberIndex int) (*DrvInfoData, error) {
+	return SetupDiEnumDriverInfo(deviceInfoSet, deviceInfoData, driverType, memberIndex)
+}
+
+//sys	setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiGetSelectedDriverW
+
+// SetupDiGetSelectedDriver function retrieves the selected driver for a device information set or a particular device information element.
+func SetupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DrvInfoData, error) {
+	data := &DrvInfoData{}
+	data.size = uint32(unsafe.Sizeof(*data))
+
+	return data, setupDiGetSelectedDriver(deviceInfoSet, deviceInfoData, data)
+}
+
+// SelectedDriver method retrieves the selected driver for a device information set or a particular device information element.
+func (deviceInfoSet DevInfo) SelectedDriver(deviceInfoData *DevInfoData) (*DrvInfoData, error) {
+	return SetupDiGetSelectedDriver(deviceInfoSet, deviceInfoData)
+}
+
+//sys	SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) = setupapi.SetupDiSetSelectedDriverW
+
+// SetSelectedDriver method sets, or resets, the selected driver for a device information element or the selected class driver for a device information set.
+func (deviceInfoSet DevInfo) SetSelectedDriver(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) error {
+	return SetupDiSetSelectedDriver(deviceInfoSet, deviceInfoData, driverInfoData)
+}
+
+//sys	setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDriverInfoDetailW
+
+// SetupDiGetDriverInfoDetail function retrieves driver information detail for a device information set or a particular device information element in the device information set.
+func SetupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) {
+	reqSize := uint32(2048)
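+	// Grow-and-retry: when the buffer is too small, the call fails with
+	// ERROR_INSUFFICIENT_BUFFER and writes the required size into reqSize,
+	// so the next iteration allocates a large-enough buffer. The other
+	// variable-size getters below follow the same pattern.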
+	for {
+		buf := make([]byte, reqSize)
+		data := (*DrvInfoDetailData)(unsafe.Pointer(&buf[0]))
+		data.size = data.unsafeSizeOf()
+		err := setupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData, data, uint32(len(buf)), &reqSize)
+		if err == ERROR_INSUFFICIENT_BUFFER {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		data.size = reqSize
+		return data, nil
+	}
+}
+
+// DriverInfoDetail method retrieves driver information detail for a device information set or a particular device information element in the device information set.
+func (deviceInfoSet DevInfo) DriverInfoDetail(deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (*DrvInfoDetailData, error) {
+	return SetupDiGetDriverInfoDetail(deviceInfoSet, deviceInfoData, driverInfoData)
+}
+
+//sys	SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) = setupapi.SetupDiDestroyDriverInfoList
+
+// DestroyDriverInfoList method deletes a driver list.
+func (deviceInfoSet DevInfo) DestroyDriverInfoList(deviceInfoData *DevInfoData, driverType SPDIT) error {
+	return SetupDiDestroyDriverInfoList(deviceInfoSet, deviceInfoData, driverType)
+}
+
+//sys	setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) [failretval==DevInfo(InvalidHandle)] = setupapi.SetupDiGetClassDevsExW
+
+// SetupDiGetClassDevsEx function returns a handle to a device information set that contains requested device information elements for a local or a remote computer.
+func SetupDiGetClassDevsEx(classGUID *GUID, enumerator string, hwndParent uintptr, flags DIGCF, deviceInfoSet DevInfo, machineName string) (handle DevInfo, err error) {
+	var enumeratorUTF16 *uint16
+	if enumerator != "" {
+		enumeratorUTF16, err = UTF16PtrFromString(enumerator)
+		if err != nil {
+			return
+		}
+	}
+	var machineNameUTF16 *uint16
+	if machineName != "" {
+		machineNameUTF16, err = UTF16PtrFromString(machineName)
+		if err != nil {
+			return
+		}
+	}
+	return setupDiGetClassDevsEx(classGUID, enumeratorUTF16, hwndParent, flags, deviceInfoSet, machineNameUTF16, 0)
+}
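+
+// A minimal enumeration sketch using the functions above (flags chosen for
+// illustration; error handling abbreviated):
+//
+//	devInfo, err := SetupDiGetClassDevsEx(nil, "", 0, DIGCF_PRESENT|DIGCF_ALLCLASSES, 0, "")
+//	if err != nil {
+//		return err
+//	}
+//	defer devInfo.Close()
+//	for i := 0; ; i++ {
+//		devInfoData, err := devInfo.EnumDeviceInfo(i)
+//		if err != nil {
+//			break // enumeration ends with ERROR_NO_MORE_ITEMS
+//		}
+//		id, _ := devInfo.DeviceInstanceID(devInfoData)
+//		fmt.Println(id)
+//	}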
+
+// SetupDiCallClassInstaller function calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code).
+//sys	SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiCallClassInstaller
+
+// CallClassInstaller method calls the appropriate class installer, and any registered co-installers, with the specified installation request (DIF code).
+func (deviceInfoSet DevInfo) CallClassInstaller(installFunction DI_FUNCTION, deviceInfoData *DevInfoData) error {
+	return SetupDiCallClassInstaller(installFunction, deviceInfoSet, deviceInfoData)
+}
+
+// SetupDiOpenDevRegKey function opens a registry key for device-specific configuration information.
+//sys	SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) [failretval==InvalidHandle] = setupapi.SetupDiOpenDevRegKey
+
+// OpenDevRegKey method opens a registry key for device-specific configuration information.
+func (deviceInfoSet DevInfo) OpenDevRegKey(DeviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (Handle, error) {
+	return SetupDiOpenDevRegKey(deviceInfoSet, DeviceInfoData, Scope, HwProfile, KeyType, samDesired)
+}
+
+//sys	setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) = setupapi.SetupDiGetDevicePropertyW
+
+// SetupDiGetDeviceProperty function retrieves a specified device instance property.
+func SetupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY) (value interface{}, err error) {
+	reqSize := uint32(256)
+	for {
+		var dataType DEVPROPTYPE
+		buf := make([]byte, reqSize)
+		err = setupDiGetDeviceProperty(deviceInfoSet, deviceInfoData, propertyKey, &dataType, &buf[0], uint32(len(buf)), &reqSize, 0)
+		if err == ERROR_INSUFFICIENT_BUFFER {
+			continue
+		}
+		if err != nil {
+			return
+		}
+		switch dataType {
+		case DEVPROP_TYPE_STRING:
+			ret := UTF16ToString(bufToUTF16(buf))
+			runtime.KeepAlive(buf)
+			return ret, nil
+		}
+		return nil, errors.New("unimplemented property type")
+	}
+}
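+
+// Sketch of reading a device property with the example key shown earlier;
+// note that only DEVPROP_TYPE_STRING values are decoded by this function,
+// anything else returns an error:
+//
+//	val, err := SetupDiGetDeviceProperty(devInfo, devInfoData, &devpkeyDeviceFriendlyName)
+//	if err == nil {
+//		if name, ok := val.(string); ok {
+//			fmt.Println(name)
+//		}
+//	}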
+
+//sys	setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceRegistryPropertyW
+
+// SetupDiGetDeviceRegistryProperty function retrieves a specified Plug and Play device property.
+func SetupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP) (value interface{}, err error) {
+	reqSize := uint32(256)
+	for {
+		var dataType uint32
+		buf := make([]byte, reqSize)
+		err = setupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &dataType, &buf[0], uint32(len(buf)), &reqSize)
+		if err == ERROR_INSUFFICIENT_BUFFER {
+			continue
+		}
+		if err != nil {
+			return
+		}
+		return getRegistryValue(buf[:reqSize], dataType)
+	}
+}
+
+func getRegistryValue(buf []byte, dataType uint32) (interface{}, error) {
+	switch dataType {
+	case REG_SZ:
+		ret := UTF16ToString(bufToUTF16(buf))
+		runtime.KeepAlive(buf)
+		return ret, nil
+	case REG_EXPAND_SZ:
+		value := UTF16ToString(bufToUTF16(buf))
+		if value == "" {
+			return "", nil
+		}
+		p, err := syscall.UTF16PtrFromString(value)
+		if err != nil {
+			return "", err
+		}
+		ret := make([]uint16, 100)
+		for {
+			n, err := ExpandEnvironmentStrings(p, &ret[0], uint32(len(ret)))
+			if err != nil {
+				return "", err
+			}
+			if n <= uint32(len(ret)) {
+				return UTF16ToString(ret[:n]), nil
+			}
+			ret = make([]uint16, n)
+		}
+	case REG_BINARY:
+		return buf, nil
+	case REG_DWORD_LITTLE_ENDIAN:
+		return binary.LittleEndian.Uint32(buf), nil
+	case REG_DWORD_BIG_ENDIAN:
+		return binary.BigEndian.Uint32(buf), nil
+	case REG_MULTI_SZ:
+		bufW := bufToUTF16(buf)
+		a := []string{}
+		for i := 0; i < len(bufW); {
+			j := i + wcslen(bufW[i:])
+			if i < j {
+				a = append(a, UTF16ToString(bufW[i:j]))
+			}
+			i = j + 1
+		}
+		runtime.KeepAlive(buf)
+		return a, nil
+	case REG_QWORD_LITTLE_ENDIAN:
+		return binary.LittleEndian.Uint64(buf), nil
+	default:
+		return nil, fmt.Errorf("Unsupported registry value type: %v", dataType)
+	}
+}
+
+// bufToUTF16 function reinterprets a []byte buffer as []uint16
+func bufToUTF16(buf []byte) []uint16 {
+	sl := struct {
+		addr *uint16
+		len  int
+		cap  int
+	}{(*uint16)(unsafe.Pointer(&buf[0])), len(buf) / 2, cap(buf) / 2}
+	return *(*[]uint16)(unsafe.Pointer(&sl))
+}
+
+// utf16ToBuf function reinterprets a []uint16 buffer as []byte
+func utf16ToBuf(buf []uint16) []byte {
+	sl := struct {
+		addr *byte
+		len  int
+		cap  int
+	}{(*byte)(unsafe.Pointer(&buf[0])), len(buf) * 2, cap(buf) * 2}
+	return *(*[]byte)(unsafe.Pointer(&sl))
+}
+
+func wcslen(str []uint16) int {
+	for i := 0; i < len(str); i++ {
+		if str[i] == 0 {
+			return i
+		}
+	}
+	return len(str)
+}
+
+// DeviceRegistryProperty method retrieves a specified Plug and Play device property.
+func (deviceInfoSet DevInfo) DeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP) (interface{}, error) {
+	return SetupDiGetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property)
+}
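+
+// Sketch of reading a registry-backed property; the dynamic type of the
+// result depends on the registry value type (string for REG_SZ, []string for
+// REG_MULTI_SZ, uint32 for REG_DWORD, and so on):
+//
+//	desc, err := devInfo.DeviceRegistryProperty(devInfoData, SPDRP_DEVICEDESC)
+//	if err != nil {
+//		return err
+//	}
+//	if s, ok := desc.(string); ok {
+//		fmt.Println(s)
+//	}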
+
+//sys	setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) = setupapi.SetupDiSetDeviceRegistryPropertyW
+
+// SetupDiSetDeviceRegistryProperty function sets a Plug and Play device property for a device.
+func SetupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error {
+	return setupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, &propertyBuffers[0], uint32(len(propertyBuffers)))
+}
+
+// SetDeviceRegistryProperty function sets a Plug and Play device property for a device.
+func (deviceInfoSet DevInfo) SetDeviceRegistryProperty(deviceInfoData *DevInfoData, property SPDRP, propertyBuffers []byte) error {
+	return SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, propertyBuffers)
+}
+
+// SetDeviceRegistryPropertyString method sets a Plug and Play device property string for a device.
+func (deviceInfoSet DevInfo) SetDeviceRegistryPropertyString(deviceInfoData *DevInfoData, property SPDRP, str string) error {
+	str16, err := UTF16FromString(str)
+	if err != nil {
+		return err
+	}
+	err = SetupDiSetDeviceRegistryProperty(deviceInfoSet, deviceInfoData, property, utf16ToBuf(append(str16, 0)))
+	runtime.KeepAlive(str16)
+	return err
+}
+
+//sys	setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiGetDeviceInstallParamsW
+
+// SetupDiGetDeviceInstallParams function retrieves device installation parameters for a device information set or a particular device information element.
+func SetupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (*DevInstallParams, error) {
+	params := &DevInstallParams{}
+	params.size = uint32(unsafe.Sizeof(*params))
+
+	return params, setupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData, params)
+}
+
+// DeviceInstallParams method retrieves device installation parameters for a device information set or a particular device information element.
+func (deviceInfoSet DevInfo) DeviceInstallParams(deviceInfoData *DevInfoData) (*DevInstallParams, error) {
+	return SetupDiGetDeviceInstallParams(deviceInfoSet, deviceInfoData)
+}
+
+//sys	setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) = setupapi.SetupDiGetDeviceInstanceIdW
+
+// SetupDiGetDeviceInstanceId function retrieves the instance ID of the device.
+func SetupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (string, error) {
+	reqSize := uint32(1024)
+	for {
+		buf := make([]uint16, reqSize)
+		err := setupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData, &buf[0], uint32(len(buf)), &reqSize)
+		if err == ERROR_INSUFFICIENT_BUFFER {
+			continue
+		}
+		if err != nil {
+			return "", err
+		}
+		return UTF16ToString(buf), nil
+	}
+}
+
+// DeviceInstanceID method retrieves the instance ID of the device.
+func (deviceInfoSet DevInfo) DeviceInstanceID(deviceInfoData *DevInfoData) (string, error) {
+	return SetupDiGetDeviceInstanceId(deviceInfoSet, deviceInfoData)
+}
+
+// SetupDiGetClassInstallParams function retrieves class installation parameters for a device information set or a particular device information element.
+//sys	SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) = setupapi.SetupDiGetClassInstallParamsW
+
+// ClassInstallParams method retrieves class installation parameters for a device information set or a particular device information element.
+func (deviceInfoSet DevInfo) ClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) error {
+	return SetupDiGetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize, requiredSize)
+}
+
+//sys	SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) = setupapi.SetupDiSetDeviceInstallParamsW
+
+// SetDeviceInstallParams method sets device installation parameters for a device information set or a particular device information element.
+func (deviceInfoSet DevInfo) SetDeviceInstallParams(deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) error {
+	return SetupDiSetDeviceInstallParams(deviceInfoSet, deviceInfoData, deviceInstallParams)
+}
+
+// SetupDiSetClassInstallParams function sets or clears class install parameters for a device information set or a particular device information element.
+//sys	SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) = setupapi.SetupDiSetClassInstallParamsW
+
+// SetClassInstallParams method sets or clears class install parameters for a device information set or a particular device information element.
+func (deviceInfoSet DevInfo) SetClassInstallParams(deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) error {
+	return SetupDiSetClassInstallParams(deviceInfoSet, deviceInfoData, classInstallParams, classInstallParamsSize)
+}
+
+//sys	setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassNameFromGuidExW
+
+// SetupDiClassNameFromGuidEx function retrieves the class name associated with a class GUID. The class can be installed on a local or remote computer.
+func SetupDiClassNameFromGuidEx(classGUID *GUID, machineName string) (className string, err error) {
+	var classNameUTF16 [MAX_CLASS_NAME_LEN]uint16
+
+	var machineNameUTF16 *uint16
+	if machineName != "" {
+		machineNameUTF16, err = UTF16PtrFromString(machineName)
+		if err != nil {
+			return
+		}
+	}
+
+	err = setupDiClassNameFromGuidEx(classGUID, &classNameUTF16[0], MAX_CLASS_NAME_LEN, nil, machineNameUTF16, 0)
+	if err != nil {
+		return
+	}
+
+	className = UTF16ToString(classNameUTF16[:])
+	return
+}
+
+//sys	setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) = setupapi.SetupDiClassGuidsFromNameExW
+
+// SetupDiClassGuidsFromNameEx function retrieves the GUIDs associated with the specified class name. The resulting list contains the classes currently installed on a local or remote computer.
+func SetupDiClassGuidsFromNameEx(className string, machineName string) ([]GUID, error) {
+	classNameUTF16, err := UTF16PtrFromString(className)
+	if err != nil {
+		return nil, err
+	}
+
+	var machineNameUTF16 *uint16
+	if machineName != "" {
+		machineNameUTF16, err = UTF16PtrFromString(machineName)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	reqSize := uint32(4)
+	for {
+		buf := make([]GUID, reqSize)
+		err = setupDiClassGuidsFromNameEx(classNameUTF16, &buf[0], uint32(len(buf)), &reqSize, machineNameUTF16, 0)
+		if err == ERROR_INSUFFICIENT_BUFFER {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		return buf[:reqSize], nil
+	}
+}
+
+//sys	setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiGetSelectedDevice
+
+// SetupDiGetSelectedDevice function retrieves the selected device information element in a device information set.
+func SetupDiGetSelectedDevice(deviceInfoSet DevInfo) (*DevInfoData, error) {
+	data := &DevInfoData{}
+	data.size = uint32(unsafe.Sizeof(*data))
+
+	return data, setupDiGetSelectedDevice(deviceInfoSet, data)
+}
+
+// SelectedDevice method retrieves the selected device information element in a device information set.
+func (deviceInfoSet DevInfo) SelectedDevice() (*DevInfoData, error) {
+	return SetupDiGetSelectedDevice(deviceInfoSet)
+}
+
+// SetupDiSetSelectedDevice function sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard.
+//sys	SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) = setupapi.SetupDiSetSelectedDevice
+
+// SetSelectedDevice method sets a device information element as the selected member of a device information set. This function is typically used by an installation wizard.
+func (deviceInfoSet DevInfo) SetSelectedDevice(deviceInfoData *DevInfoData) error {
+	return SetupDiSetSelectedDevice(deviceInfoSet, deviceInfoData)
+}
+
+//sys	setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) = setupapi.SetupUninstallOEMInfW
+
+// SetupUninstallOEMInf uninstalls the specified driver.
+func SetupUninstallOEMInf(infFileName string, flags SUOI) error {
+	infFileName16, err := UTF16PtrFromString(infFileName)
+	if err != nil {
+		return err
+	}
+	return setupUninstallOEMInf(infFileName16, flags, 0)
+}
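+
+// A usage sketch; "oem42.inf" is a hypothetical published INF name in the
+// driver store, used here only for illustration:
+//
+//	if err := SetupUninstallOEMInf("oem42.inf", SUOI_FORCEDELETE); err != nil {
+//		return err
+//	}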
+
+//sys cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) = CfgMgr32.CM_MapCrToWin32Err
+
+//sys cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_List_SizeW
+//sys cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_Device_Interface_ListW
+
+func CM_Get_Device_Interface_List(deviceID string, interfaceClass *GUID, flags uint32) ([]string, error) {
+	deviceID16, err := UTF16PtrFromString(deviceID)
+	if err != nil {
+		return nil, err
+	}
+	var buf []uint16
+	var buflen uint32
+	for {
+		if ret := cm_Get_Device_Interface_List_Size(&buflen, interfaceClass, deviceID16, flags); ret != CR_SUCCESS {
+			return nil, ret
+		}
+		buf = make([]uint16, buflen)
+		if ret := cm_Get_Device_Interface_List(interfaceClass, deviceID16, &buf[0], buflen, flags); ret == CR_SUCCESS {
+			break
+		} else if ret != CR_BUFFER_SMALL {
+			return nil, ret
+		}
+	}
+	var interfaces []string
+	for i := 0; i < len(buf); {
+		j := i + wcslen(buf[i:])
+		if i < j {
+			interfaces = append(interfaces, UTF16ToString(buf[i:j]))
+		}
+		i = j + 1
+	}
+	if interfaces == nil {
+		return nil, ERROR_NO_SUCH_DEVICE_INTERFACE
+	}
+	return interfaces, nil
+}
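+
+// A minimal sketch; interfaceGUID and deviceInstanceID are placeholders for
+// a real device interface class GUID and a device instance ID (for example,
+// one returned by DeviceInstanceID above):
+//
+//	ifaces, err := CM_Get_Device_Interface_List(deviceInstanceID, &interfaceGUID, CM_GET_DEVICE_INTERFACE_LIST_PRESENT)
+//	if err != nil {
+//		return err
+//	}
+//	for _, path := range ifaces {
+//		fmt.Println(path)
+//	}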
+
+//sys cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) = CfgMgr32.CM_Get_DevNode_Status
+
+func CM_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) error {
+	ret := cm_Get_DevNode_Status(status, problemNumber, devInst, flags)
+	if ret == CR_SUCCESS {
+		return nil
+	}
+	return ret
+}
diff --git a/vendor/golang.org/x/sys/windows/str.go b/vendor/golang.org/x/sys/windows/str.go
new file mode 100644
index 0000000..6a4f9ce
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/str.go
@@ -0,0 +1,22 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package windows
+
+func itoa(val int) string { // do it here rather than with fmt to avoid dependency
+	if val < 0 {
+		return "-" + itoa(-val)
+	}
+	var buf [32]byte // big enough for int64
+	i := len(buf) - 1
+	for val >= 10 {
+		buf[i] = byte(val%10 + '0')
+		i--
+		val /= 10
+	}
+	buf[i] = byte(val + '0')
+	return string(buf[i:])
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go
new file mode 100644
index 0000000..e85ed6b
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/syscall.go
@@ -0,0 +1,104 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package windows contains an interface to the low-level operating system
+// primitives. OS details vary depending on the underlying system, and
+// by default, godoc will display the OS-specific documentation for the current
+// system. If you want godoc to display syscall documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+//
+// The primary use of this package is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net".  Use
+// those packages rather than this one if you can.
+//
+// For details of the functions and data types in this package consult
+// the manuals for the appropriate operating system.
+//
+// These calls return err == nil to indicate success; otherwise
+// err represents an operating system error describing the failure and
+// holds a value of type syscall.Errno.
+package windows // import "golang.org/x/sys/windows"
+
+import (
+	"bytes"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+// ByteSliceFromString returns a NUL-terminated slice of bytes
+// containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func ByteSliceFromString(s string) ([]byte, error) {
+	if strings.IndexByte(s, 0) != -1 {
+		return nil, syscall.EINVAL
+	}
+	a := make([]byte, len(s)+1)
+	copy(a, s)
+	return a, nil
+}
+
+// BytePtrFromString returns a pointer to a NUL-terminated array of
+// bytes containing the text of s. If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func BytePtrFromString(s string) (*byte, error) {
+	a, err := ByteSliceFromString(s)
+	if err != nil {
+		return nil, err
+	}
+	return &a[0], nil
+}
+
+// ByteSliceToString returns a string form of the text represented by the slice s, with a terminating NUL and any
+// bytes after the NUL removed.
+func ByteSliceToString(s []byte) string {
+	if i := bytes.IndexByte(s, 0); i != -1 {
+		s = s[:i]
+	}
+	return string(s)
+}
+
+// BytePtrToString takes a pointer to a sequence of text and returns the corresponding string.
+// If the pointer is nil, it returns the empty string. It assumes that the text sequence is terminated
+// at a zero byte; if the zero byte is not present, the program may crash.
+func BytePtrToString(p *byte) string {
+	if p == nil {
+		return ""
+	}
+	if *p == 0 {
+		return ""
+	}
+
+	// Find NUL terminator.
+	n := 0
+	for ptr := unsafe.Pointer(p); *(*byte)(ptr) != 0; n++ {
+		ptr = unsafe.Pointer(uintptr(ptr) + 1)
+	}
+
+	return string(unsafe.Slice(p, n))
+}
+
+// Single-word zero for use when we need a valid pointer to 0 bytes.
+// See mksyscall.pl.
+var _zero uintptr
+
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+	return int64(ts.Sec), int64(ts.Nsec)
+}
+
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+	return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+func (ts *Timespec) Nano() int64 {
+	return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+func (tv *Timeval) Nano() int64 {
+	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
new file mode 100644
index 0000000..bd51337
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -0,0 +1,1936 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Windows system calls.
+
+package windows
+
+import (
+	errorspkg "errors"
+	"fmt"
+	"runtime"
+	"sync"
+	"syscall"
+	"time"
+	"unicode/utf16"
+	"unsafe"
+)
+
+type (
+	Handle uintptr
+	HWND   uintptr
+)
+
+const (
+	InvalidHandle = ^Handle(0)
+	InvalidHWND   = ^HWND(0)
+
+	// Flags for DefineDosDevice.
+	DDD_EXACT_MATCH_ON_REMOVE = 0x00000004
+	DDD_NO_BROADCAST_SYSTEM   = 0x00000008
+	DDD_RAW_TARGET_PATH       = 0x00000001
+	DDD_REMOVE_DEFINITION     = 0x00000002
+
+	// Return values for GetDriveType.
+	DRIVE_UNKNOWN     = 0
+	DRIVE_NO_ROOT_DIR = 1
+	DRIVE_REMOVABLE   = 2
+	DRIVE_FIXED       = 3
+	DRIVE_REMOTE      = 4
+	DRIVE_CDROM       = 5
+	DRIVE_RAMDISK     = 6
+
+	// File system flags from GetVolumeInformation and GetVolumeInformationByHandle.
+	FILE_CASE_SENSITIVE_SEARCH        = 0x00000001
+	FILE_CASE_PRESERVED_NAMES         = 0x00000002
+	FILE_FILE_COMPRESSION             = 0x00000010
+	FILE_DAX_VOLUME                   = 0x20000000
+	FILE_NAMED_STREAMS                = 0x00040000
+	FILE_PERSISTENT_ACLS              = 0x00000008
+	FILE_READ_ONLY_VOLUME             = 0x00080000
+	FILE_SEQUENTIAL_WRITE_ONCE        = 0x00100000
+	FILE_SUPPORTS_ENCRYPTION          = 0x00020000
+	FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000
+	FILE_SUPPORTS_HARD_LINKS          = 0x00400000
+	FILE_SUPPORTS_OBJECT_IDS          = 0x00010000
+	FILE_SUPPORTS_OPEN_BY_FILE_ID     = 0x01000000
+	FILE_SUPPORTS_REPARSE_POINTS      = 0x00000080
+	FILE_SUPPORTS_SPARSE_FILES        = 0x00000040
+	FILE_SUPPORTS_TRANSACTIONS        = 0x00200000
+	FILE_SUPPORTS_USN_JOURNAL         = 0x02000000
+	FILE_UNICODE_ON_DISK              = 0x00000004
+	FILE_VOLUME_IS_COMPRESSED         = 0x00008000
+	FILE_VOLUME_QUOTAS                = 0x00000020
+
+	// Flags for LockFileEx.
+	LOCKFILE_FAIL_IMMEDIATELY = 0x00000001
+	LOCKFILE_EXCLUSIVE_LOCK   = 0x00000002
+
+	// Return value of SleepEx and other APC functions
+	WAIT_IO_COMPLETION = 0x000000C0
+)
+
+// StringToUTF16 is deprecated. Use UTF16FromString instead.
+// If s contains a NUL byte this function panics instead of
+// returning an error.
+func StringToUTF16(s string) []uint16 {
+	a, err := UTF16FromString(s)
+	if err != nil {
+		panic("windows: string with NUL passed to StringToUTF16")
+	}
+	return a
+}
+
+// UTF16FromString returns the UTF-16 encoding of the UTF-8 string
+// s, with a terminating NUL added. If s contains a NUL byte at any
+// location, it returns (nil, syscall.EINVAL).
+func UTF16FromString(s string) ([]uint16, error) {
+	return syscall.UTF16FromString(s)
+}
+
+// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s,
+// with a terminating NUL and any bytes after the NUL removed.
+func UTF16ToString(s []uint16) string {
+	return syscall.UTF16ToString(s)
+}
+
+// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead.
+// If s contains a NUL byte this function panics instead of
+// returning an error.
+func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] }
+
+// UTF16PtrFromString returns pointer to the UTF-16 encoding of
+// the UTF-8 string s, with a terminating NUL added. If s
+// contains a NUL byte at any location, it returns (nil, syscall.EINVAL).
+func UTF16PtrFromString(s string) (*uint16, error) {
+	a, err := UTF16FromString(s)
+	if err != nil {
+		return nil, err
+	}
+	return &a[0], nil
+}
+
+// UTF16PtrToString takes a pointer to a UTF-16 sequence and returns the corresponding UTF-8 encoded string.
+// If the pointer is nil, it returns the empty string. It assumes that the UTF-16 sequence is terminated
+// at a zero word; if the zero word is not present, the program may crash.
+func UTF16PtrToString(p *uint16) string {
+	if p == nil {
+		return ""
+	}
+	if *p == 0 {
+		return ""
+	}
+
+	// Find NUL terminator.
+	n := 0
+	for ptr := unsafe.Pointer(p); *(*uint16)(ptr) != 0; n++ {
+		ptr = unsafe.Pointer(uintptr(ptr) + unsafe.Sizeof(*p))
+	}
+	return UTF16ToString(unsafe.Slice(p, n))
+}
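+
+// Round-trip sketch for the UTF-16 helpers above:
+//
+//	p, err := UTF16PtrFromString("C:\\Windows")
+//	if err != nil {
+//		return err
+//	}
+//	s := UTF16PtrToString(p) // s == "C:\Windows"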
+
+func Getpagesize() int { return 4096 }
+
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+func NewCallback(fn interface{}) uintptr {
+	return syscall.NewCallback(fn)
+}
+
+// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+// The argument is expected to be a function with one uintptr-sized result. The function must not have arguments with size larger than the size of uintptr.
+func NewCallbackCDecl(fn interface{}) uintptr {
+	return syscall.NewCallbackCDecl(fn)
+}
+
+// windows api calls
+
+//sys	GetLastError() (lasterr error)
+//sys	LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW
+//sys	LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW
+//sys	FreeLibrary(handle Handle) (err error)
+//sys	GetProcAddress(module Handle, procname string) (proc uintptr, err error)
+//sys	GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW
+//sys	GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW
+//sys	SetDefaultDllDirectories(directoryFlags uint32) (err error)
+//sys	AddDllDirectory(path *uint16) (cookie uintptr, err error) = kernel32.AddDllDirectory
+//sys	RemoveDllDirectory(cookie uintptr) (err error) = kernel32.RemoveDllDirectory
+//sys	SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW
+//sys	GetVersion() (ver uint32, err error)
+//sys	FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW
+//sys	ExitProcess(exitcode uint32)
+//sys	IsWow64Process(handle Handle, isWow64 *bool) (err error) = IsWow64Process
+//sys	IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) = IsWow64Process2?
+//sys	CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW
+//sys	CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error)  [failretval==InvalidHandle] = CreateNamedPipeW
+//sys	ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error)
+//sys	DisconnectNamedPipe(pipe Handle) (err error)
+//sys   GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error)
+//sys   GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error)
+//sys	GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error)
+//sys	GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW
+//sys	SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState
+//sys	readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = ReadFile
+//sys	writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) = WriteFile
+//sys	GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error)
+//sys	SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff]
+//sys	CloseHandle(handle Handle) (err error)
+//sys	GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle]
+//sys	SetStdHandle(stdhandle uint32, handle Handle) (err error)
+//sys	findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW
+//sys	findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW
+//sys	FindClose(handle Handle) (err error)
+//sys	GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error)
+//sys	GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error)
+//sys	SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error)
+//sys	GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW
+//sys	SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW
+//sys	CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW
+//sys	RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW
+//sys	DeleteFile(path *uint16) (err error) = DeleteFileW
+//sys	MoveFile(from *uint16, to *uint16) (err error) = MoveFileW
+//sys	MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW
+//sys	LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error)
+//sys	UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error)
+//sys	GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW
+//sys	GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW
+//sys	SetEndOfFile(handle Handle) (err error)
+//sys	SetFileValidData(handle Handle, validDataLength int64) (err error)
+//sys	GetSystemTimeAsFileTime(time *Filetime)
+//sys	GetSystemTimePreciseAsFileTime(time *Filetime)
+//sys	GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff]
+//sys	CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error)
+//sys	GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error)
+//sys	PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error)
+//sys	CancelIo(s Handle) (err error)
+//sys	CancelIoEx(s Handle, o *Overlapped) (err error)
+//sys	CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW
+//sys	CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = advapi32.CreateProcessAsUserW
+//sys   initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) = InitializeProcThreadAttributeList
+//sys   deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) = DeleteProcThreadAttributeList
+//sys   updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) = UpdateProcThreadAttribute
+//sys	OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error)
+//sys	ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW
+//sys	GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) = user32.GetWindowThreadProcessId
+//sys	LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) [failretval==0] = user32.LoadKeyboardLayoutW
+//sys	UnloadKeyboardLayout(hkl Handle) (err error) = user32.UnloadKeyboardLayout
+//sys	GetKeyboardLayout(tid uint32) (hkl Handle) = user32.GetKeyboardLayout
+//sys	ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) = user32.ToUnicodeEx
+//sys	GetShellWindow() (shellWindow HWND) = user32.GetShellWindow
+//sys	MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW
+//sys	ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx
+//sys	shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath
+//sys	TerminateProcess(handle Handle, exitcode uint32) (err error)
+//sys	GetExitCodeProcess(handle Handle, exitcode *uint32) (err error)
+//sys	getStartupInfo(startupInfo *StartupInfo) = GetStartupInfoW
+//sys	GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error)
+//sys	DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error)
+//sys	WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff]
+//sys	waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] = WaitForMultipleObjects
+//sys	GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW
+//sys	CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error)
+//sys	GetFileType(filehandle Handle) (n uint32, err error)
+//sys	CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW
+//sys	CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext
+//sys	CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom
+//sys	GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW
+//sys	FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW
+//sys	GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW
+//sys	SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW
+//sys	ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
+//sys	CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) = userenv.CreateEnvironmentBlock
+//sys	DestroyEnvironmentBlock(block *uint16) (err error) = userenv.DestroyEnvironmentBlock
+//sys	getTickCount64() (ms uint64) = kernel32.GetTickCount64
+//sys   GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
+//sys	SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error)
+//sys	GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW
+//sys	SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW
+//sys	GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW
+//sys	GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW
+//sys	commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW
+//sys	LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0]
+//sys	LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error)
+//sys	SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error)
+//sys	FlushFileBuffers(handle Handle) (err error)
+//sys	GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW
+//sys	GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW
+//sys	GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW
+//sys	GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) = kernel32.GetFinalPathNameByHandleW
+//sys	CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateFileMappingW
+//sys	MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error)
+//sys	UnmapViewOfFile(addr uintptr) (err error)
+//sys	FlushViewOfFile(addr uintptr, length uintptr) (err error)
+//sys	VirtualLock(addr uintptr, length uintptr) (err error)
+//sys	VirtualUnlock(addr uintptr, length uintptr) (err error)
+//sys	VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc
+//sys	VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree
+//sys	VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect
+//sys	VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) = kernel32.VirtualProtectEx
+//sys	VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery
+//sys	VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQueryEx
+//sys	ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) = kernel32.ReadProcessMemory
+//sys	WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) = kernel32.WriteProcessMemory
+//sys	TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytesPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile
+//sys	ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW
+//sys	FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.FindFirstChangeNotificationW
+//sys	FindNextChangeNotification(handle Handle) (err error)
+//sys	FindCloseChangeNotification(handle Handle) (err error)
+//sys	CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW
+//sys	CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore
+//sys	CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore
+//sys	CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore
+//sys	CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore
+//sys	CertDeleteCertificateFromStore(certContext *CertContext) (err error) = crypt32.CertDeleteCertificateFromStore
+//sys	CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) = crypt32.CertDuplicateCertificateContext
+//sys	PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) = crypt32.PFXImportCertStore
+//sys	CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain
+//sys	CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain
+//sys	CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext
+//sys	CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext
+//sys	CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy
+//sys	CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) = crypt32.CertGetNameStringW
+//sys	CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) = crypt32.CertFindExtension
+//sys   CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) [failretval==nil] = crypt32.CertFindCertificateInStore
+//sys   CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) [failretval==nil] = crypt32.CertFindChainInStore
+//sys   CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) = crypt32.CryptAcquireCertificatePrivateKey
+//sys	CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) = crypt32.CryptQueryObject
+//sys	CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) = crypt32.CryptDecodeObject
+//sys	CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptProtectData
+//sys	CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) = crypt32.CryptUnprotectData
+//sys	WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) = wintrust.WinVerifyTrustEx
+//sys	RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW
+//sys	RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey
+//sys	RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW
+//sys	RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW
+//sys	RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW
+//sys	RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue
+//sys	GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId
+//sys	ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId
+//sys	ClosePseudoConsole(console Handle) = kernel32.ClosePseudoConsole
+//sys	createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) = kernel32.CreatePseudoConsole
+//sys	GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode
+//sys	SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode
+//sys	GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo
+//sys	setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition
+//sys	GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP
+//sys	GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP
+//sys	SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP
+//sys	SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
+//sys	WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
+//sys	ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys	GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+//sys	FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
+//sys	resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
+//sys	CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
+//sys	Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
+//sys	Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32NextW
+//sys	Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW
+//sys	Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW
+//sys	Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error)
+//sys	Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error)
+//sys	DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error)
+// This function returns a 1-byte BOOLEAN rather than the 4-byte BOOL.
+//sys	CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW
+//sys	CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW
+//sys	GetCurrentThreadId() (id uint32)
+//sys	CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventW
+//sys	CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateEventExW
+//sys	OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW
+//sys	SetEvent(event Handle) (err error) = kernel32.SetEvent
+//sys	ResetEvent(event Handle) (err error) = kernel32.ResetEvent
+//sys	PulseEvent(event Handle) (err error) = kernel32.PulseEvent
+//sys	CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexW
+//sys	CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) [failretval == 0 || e1 == ERROR_ALREADY_EXISTS] = kernel32.CreateMutexExW
+//sys	OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW
+//sys	ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex
+//sys	SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx
+//sys	CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW
+//sys	AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject
+//sys	TerminateJobObject(job Handle, exitCode uint32) (err error) = kernel32.TerminateJobObject
+//sys	SetErrorMode(mode uint32) (ret uint32) = kernel32.SetErrorMode
+//sys	ResumeThread(thread Handle) (ret uint32, err error) [failretval==0xffffffff] = kernel32.ResumeThread
+//sys	SetPriorityClass(process Handle, priorityClass uint32) (err error) = kernel32.SetPriorityClass
+//sys	GetPriorityClass(process Handle) (ret uint32, err error) = kernel32.GetPriorityClass
+//sys	QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) = kernel32.QueryInformationJobObject
+//sys	SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error)
+//sys	GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error)
+//sys	GetProcessId(process Handle) (id uint32, err error)
+//sys	QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) = kernel32.QueryFullProcessImageNameW
+//sys	OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error)
+//sys	SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost
+//sys	GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32)
+//sys	SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error)
+//sys	ClearCommBreak(handle Handle) (err error)
+//sys	ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error)
+//sys	EscapeCommFunction(handle Handle, dwFunc uint32) (err error)
+//sys	GetCommState(handle Handle, lpDCB *DCB) (err error)
+//sys	GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error)
+//sys	GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
+//sys	PurgeComm(handle Handle, dwFlags uint32) (err error)
+//sys	SetCommBreak(handle Handle) (err error)
+//sys	SetCommMask(handle Handle, dwEvtMask uint32) (err error)
+//sys	SetCommState(handle Handle, lpDCB *DCB) (err error)
+//sys	SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error)
+//sys	SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error)
+//sys	WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error)
+//sys	GetActiveProcessorCount(groupNumber uint16) (ret uint32)
+//sys	GetMaximumProcessorCount(groupNumber uint16) (ret uint32)
+//sys	EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows
+//sys	EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) = user32.EnumChildWindows
+//sys	GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) = user32.GetClassNameW
+//sys	GetDesktopWindow() (hwnd HWND) = user32.GetDesktopWindow
+//sys	GetForegroundWindow() (hwnd HWND) = user32.GetForegroundWindow
+//sys	IsWindow(hwnd HWND) (isWindow bool) = user32.IsWindow
+//sys	IsWindowUnicode(hwnd HWND) (isUnicode bool) = user32.IsWindowUnicode
+//sys	IsWindowVisible(hwnd HWND) (isVisible bool) = user32.IsWindowVisible
+//sys	GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) = user32.GetGUIThreadInfo
+//sys	GetLargePageMinimum() (size uintptr)
+
+// Volume Management Functions
+//sys	DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW
+//sys	DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW
+//sys	FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW
+//sys	FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW
+//sys	FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW
+//sys	FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW
+//sys	FindVolumeClose(findVolume Handle) (err error)
+//sys	FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error)
+//sys	GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW
+//sys	GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW
+//sys	GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0]
+//sys	GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW
+//sys	GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW
+//sys	GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW
+//sys	GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW
+//sys	GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW
+//sys	GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW
+//sys	QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW
+//sys	SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW
+//sys	SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW
+//sys	InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW
+//sys	SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters
+//sys	GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters
+//sys	clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString
+//sys	stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2
+//sys	coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid
+//sys	CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree
+//sys	CoInitializeEx(reserved uintptr, coInit uint32) (ret error) = ole32.CoInitializeEx
+//sys	CoUninitialize() = ole32.CoUninitialize
+//sys	CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) = ole32.CoGetObject
+//sys	getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetProcessPreferredUILanguages
+//sys	getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetThreadPreferredUILanguages
+//sys	getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetUserPreferredUILanguages
+//sys	getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) = kernel32.GetSystemPreferredUILanguages
+//sys	findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) = kernel32.FindResourceW
+//sys	SizeofResource(module Handle, resInfo Handle) (size uint32, err error) = kernel32.SizeofResource
+//sys	LoadResource(module Handle, resInfo Handle) (resData Handle, err error) = kernel32.LoadResource
+//sys	LockResource(resData Handle) (addr uintptr, err error) = kernel32.LockResource
+
+// Version APIs
+//sys	GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) = version.GetFileVersionInfoSizeW
+//sys	GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) = version.GetFileVersionInfoW
+//sys	VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) = version.VerQueryValueW
+
+// Process Status API (PSAPI)
+//sys	enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) = psapi.EnumProcesses
+//sys	EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) = psapi.EnumProcessModules
+//sys	EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) = psapi.EnumProcessModulesEx
+//sys	GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) = psapi.GetModuleInformation
+//sys	GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) = psapi.GetModuleFileNameExW
+//sys	GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) = psapi.GetModuleBaseNameW
+//sys   QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) = psapi.QueryWorkingSetEx
+
+// NT Native APIs
+//sys	rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) = ntdll.RtlNtStatusToDosErrorNoTeb
+//sys	rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) = ntdll.RtlGetVersion
+//sys	rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers
+//sys	RtlGetCurrentPeb() (peb *PEB) = ntdll.RtlGetCurrentPeb
+//sys	RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) = ntdll.RtlInitUnicodeString
+//sys	RtlInitString(destinationString *NTString, sourceString *byte) = ntdll.RtlInitString
+//sys	NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) = ntdll.NtCreateFile
+//sys	NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) = ntdll.NtCreateNamedPipeFile
+//sys	NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) = ntdll.NtSetInformationFile
+//sys	RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToNtPathName_U_WithStatus
+//sys	RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) = ntdll.RtlDosPathNameToRelativeNtPathName_U_WithStatus
+//sys	RtlDefaultNpAcl(acl **ACL) (ntstatus error) = ntdll.RtlDefaultNpAcl
+//sys	NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQueryInformationProcess
+//sys	NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) = ntdll.NtSetInformationProcess
+//sys	NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) = ntdll.NtQuerySystemInformation
+//sys	NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) = ntdll.NtSetSystemInformation
+//sys	RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) = ntdll.RtlAddFunctionTable
+//sys	RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) = ntdll.RtlDeleteFunctionTable
+
+// Desktop Window Manager API (Dwmapi)
+//sys	DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmGetWindowAttribute
+//sys	DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) = dwmapi.DwmSetWindowAttribute
+
+// Windows Multimedia API
+//sys	TimeBeginPeriod(period uint32) (err error) [failretval != 0] = winmm.timeBeginPeriod
+//sys	TimeEndPeriod(period uint32) (err error) [failretval != 0] = winmm.timeEndPeriod
+
+// syscall interface implementation for other packages
+
+// GetCurrentProcess returns the handle for the current process.
+// It is a pseudo handle that does not need to be closed.
+// The returned error is always nil.
+//
+// Deprecated: use CurrentProcess for the same Handle without the nil
+// error.
+func GetCurrentProcess() (Handle, error) {
+	return CurrentProcess(), nil
+}
+
+// CurrentProcess returns the handle for the current process.
+// It is a pseudo handle that does not need to be closed.
+func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) }
+
+// GetCurrentThread returns the handle for the current thread.
+// It is a pseudo handle that does not need to be closed.
+// The returned error is always nil.
+//
+// Deprecated: use CurrentThread for the same Handle without the nil
+// error.
+func GetCurrentThread() (Handle, error) {
+	return CurrentThread(), nil
+}
+
+// CurrentThread returns the handle for the current thread.
+// It is a pseudo handle that does not need to be closed.
+func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) }
+
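+// Both pseudo handle values follow the Win32 contract: CurrentProcess is
+// (HANDLE)-1 and CurrentThread is (HANDLE)-2, and neither is meaningful
+// outside the calling process. A minimal sketch of converting the thread
+// pseudo handle into a real handle via DuplicateHandle:
+//
+//	var real Handle
+//	err := DuplicateHandle(CurrentProcess(), CurrentThread(),
+//		CurrentProcess(), &real, 0, false, DUPLICATE_SAME_ACCESS)
+//	if err == nil {
+//		defer CloseHandle(real)
+//	}
+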
+// GetProcAddressByOrdinal retrieves the address of the exported
+// function from module by ordinal.
+func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0)
+	proc = uintptr(r0)
+	if proc == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
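+// A minimal sketch of resolving an export by ordinal; the library name and
+// ordinal below are hypothetical:
+//
+//	mod, err := LoadLibrary("example.dll")
+//	if err == nil {
+//		defer FreeLibrary(mod)
+//		proc, err := GetProcAddressByOrdinal(mod, 1)
+//		_, _ = proc, err
+//	}
+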
+func Exit(code int) { ExitProcess(uint32(code)) }
+
+func makeInheritSa() *SecurityAttributes {
+	var sa SecurityAttributes
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	return &sa
+}
+
+func Open(path string, mode int, perm uint32) (fd Handle, err error) {
+	if len(path) == 0 {
+		return InvalidHandle, ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return InvalidHandle, err
+	}
+	var access uint32
+	switch mode & (O_RDONLY | O_WRONLY | O_RDWR) {
+	case O_RDONLY:
+		access = GENERIC_READ
+	case O_WRONLY:
+		access = GENERIC_WRITE
+	case O_RDWR:
+		access = GENERIC_READ | GENERIC_WRITE
+	}
+	if mode&O_CREAT != 0 {
+		access |= GENERIC_WRITE
+	}
+	if mode&O_APPEND != 0 {
+		access &^= GENERIC_WRITE
+		access |= FILE_APPEND_DATA
+	}
+	sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE)
+	var sa *SecurityAttributes
+	if mode&O_CLOEXEC == 0 {
+		sa = makeInheritSa()
+	}
+	var createmode uint32
+	switch {
+	case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL):
+		createmode = CREATE_NEW
+	case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC):
+		createmode = CREATE_ALWAYS
+	case mode&O_CREAT == O_CREAT:
+		createmode = OPEN_ALWAYS
+	case mode&O_TRUNC == O_TRUNC:
+		createmode = TRUNCATE_EXISTING
+	default:
+		createmode = OPEN_EXISTING
+	}
+	var attrs uint32 = FILE_ATTRIBUTE_NORMAL
+	if perm&S_IWRITE == 0 {
+		attrs = FILE_ATTRIBUTE_READONLY
+	}
+	h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0)
+	return h, e
+}
+
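+// Open maps the POSIX-style flags onto CreateFile arguments, e.g.
+// O_CREAT|O_EXCL becomes CREATE_NEW and O_APPEND trades GENERIC_WRITE for
+// FILE_APPEND_DATA. A minimal sketch with a hypothetical path:
+//
+//	fd, err := Open(`C:\temp\example.log`, O_WRONLY|O_CREAT|O_APPEND, 0666)
+//	if err == nil {
+//		defer Close(fd)
+//	}
+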
+func Read(fd Handle, p []byte) (n int, err error) {
+	var done uint32
+	e := ReadFile(fd, p, &done, nil)
+	if e != nil {
+		if e == ERROR_BROKEN_PIPE {
+			// NOTE(brainman): work around the fact that ERROR_BROKEN_PIPE is returned when reading EOF from stdin
+			return 0, nil
+		}
+		return 0, e
+	}
+	return int(done), nil
+}
+
+func Write(fd Handle, p []byte) (n int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	var done uint32
+	e := WriteFile(fd, p, &done, nil)
+	if e != nil {
+		return 0, e
+	}
+	return int(done), nil
+}
+
+func ReadFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error {
+	err := readFile(fd, p, done, overlapped)
+	if raceenabled {
+		if *done > 0 {
+			raceWriteRange(unsafe.Pointer(&p[0]), int(*done))
+		}
+		raceAcquire(unsafe.Pointer(&ioSync))
+	}
+	return err
+}
+
+func WriteFile(fd Handle, p []byte, done *uint32, overlapped *Overlapped) error {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	err := writeFile(fd, p, done, overlapped)
+	if raceenabled && *done > 0 {
+		raceReadRange(unsafe.Pointer(&p[0]), int(*done))
+	}
+	return err
+}
+
+var ioSync int64
+
+func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) {
+	var w uint32
+	switch whence {
+	case 0:
+		w = FILE_BEGIN
+	case 1:
+		w = FILE_CURRENT
+	case 2:
+		w = FILE_END
+	}
+	hi := int32(offset >> 32)
+	lo := int32(offset)
+	// Use GetFileType to check for a pipe; pipes cannot seek.
+	ft, _ := GetFileType(fd)
+	if ft == FILE_TYPE_PIPE {
+		return 0, syscall.EPIPE
+	}
+	rlo, e := SetFilePointer(fd, lo, &hi, w)
+	if e != nil {
+		return 0, e
+	}
+	return int64(hi)<<32 + int64(rlo), nil
+}
+
+func Close(fd Handle) (err error) {
+	return CloseHandle(fd)
+}
+
+var (
+	Stdin  = getStdHandle(STD_INPUT_HANDLE)
+	Stdout = getStdHandle(STD_OUTPUT_HANDLE)
+	Stderr = getStdHandle(STD_ERROR_HANDLE)
+)
+
+func getStdHandle(stdhandle uint32) (fd Handle) {
+	r, _ := GetStdHandle(stdhandle)
+	return r
+}
+
+const ImplementsGetwd = true
+
+func Getwd() (wd string, err error) {
+	b := make([]uint16, 300)
+	n, e := GetCurrentDirectory(uint32(len(b)), &b[0])
+	if e != nil {
+		return "", e
+	}
+	return string(utf16.Decode(b[0:n])), nil
+}
+
+func Chdir(path string) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return SetCurrentDirectory(pathp)
+}
+
+func Mkdir(path string, mode uint32) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return CreateDirectory(pathp, nil)
+}
+
+func Rmdir(path string) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return RemoveDirectory(pathp)
+}
+
+func Unlink(path string) (err error) {
+	pathp, err := UTF16PtrFromString(path)
+	if err != nil {
+		return err
+	}
+	return DeleteFile(pathp)
+}
+
+func Rename(oldpath, newpath string) (err error) {
+	from, err := UTF16PtrFromString(oldpath)
+	if err != nil {
+		return err
+	}
+	to, err := UTF16PtrFromString(newpath)
+	if err != nil {
+		return err
+	}
+	return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING)
+}
+
+func ComputerName() (name string, err error) {
+	var n uint32 = MAX_COMPUTERNAME_LENGTH + 1
+	b := make([]uint16, n)
+	e := GetComputerName(&b[0], &n)
+	if e != nil {
+		return "", e
+	}
+	return string(utf16.Decode(b[0:n])), nil
+}
+
+func DurationSinceBoot() time.Duration {
+	return time.Duration(getTickCount64()) * time.Millisecond
+}
+
+func Ftruncate(fd Handle, length int64) (err error) {
+	type _FILE_END_OF_FILE_INFO struct {
+		EndOfFile int64
+	}
+	var info _FILE_END_OF_FILE_INFO
+	info.EndOfFile = length
+	return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
+}
+
+func Gettimeofday(tv *Timeval) (err error) {
+	var ft Filetime
+	GetSystemTimeAsFileTime(&ft)
+	*tv = NsecToTimeval(ft.Nanoseconds())
+	return nil
+}
+
+func Pipe(p []Handle) (err error) {
+	if len(p) != 2 {
+		return syscall.EINVAL
+	}
+	var r, w Handle
+	e := CreatePipe(&r, &w, makeInheritSa(), 0)
+	if e != nil {
+		return e
+	}
+	p[0] = r
+	p[1] = w
+	return nil
+}
+
+func Utimes(path string, tv []Timeval) (err error) {
+	if len(tv) != 2 {
+		return syscall.EINVAL
+	}
+	pathp, e := UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := CreateFile(pathp,
+		FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil,
+		OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer CloseHandle(h)
+	a := NsecToFiletime(tv[0].Nanoseconds())
+	w := NsecToFiletime(tv[1].Nanoseconds())
+	return SetFileTime(h, nil, &a, &w)
+}
+
+func UtimesNano(path string, ts []Timespec) (err error) {
+	if len(ts) != 2 {
+		return syscall.EINVAL
+	}
+	pathp, e := UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := CreateFile(pathp,
+		FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil,
+		OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer CloseHandle(h)
+	a := NsecToFiletime(TimespecToNsec(ts[0]))
+	w := NsecToFiletime(TimespecToNsec(ts[1]))
+	return SetFileTime(h, nil, &a, &w)
+}
+
+func Fsync(fd Handle) (err error) {
+	return FlushFileBuffers(fd)
+}
+
+func Chmod(path string, mode uint32) (err error) {
+	p, e := UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	attrs, e := GetFileAttributes(p)
+	if e != nil {
+		return e
+	}
+	if mode&S_IWRITE != 0 {
+		attrs &^= FILE_ATTRIBUTE_READONLY
+	} else {
+		attrs |= FILE_ATTRIBUTE_READONLY
+	}
+	return SetFileAttributes(p, attrs)
+}
+
+func LoadGetSystemTimePreciseAsFileTime() error {
+	return procGetSystemTimePreciseAsFileTime.Find()
+}
+
+func LoadCancelIoEx() error {
+	return procCancelIoEx.Find()
+}
+
+func LoadSetFileCompletionNotificationModes() error {
+	return procSetFileCompletionNotificationModes.Find()
+}
+
+func WaitForMultipleObjects(handles []Handle, waitAll bool, waitMilliseconds uint32) (event uint32, err error) {
+	// Every other win32 array API takes arguments as "pointer, count", except for this function. So we
+	// can't declare it as a usual [] type, because mksyscall will use the opposite order. We therefore
+	// trivially stub this ourselves.
+
+	var handlePtr *Handle
+	if len(handles) > 0 {
+		handlePtr = &handles[0]
+	}
+	return waitForMultipleObjects(uint32(len(handles)), uintptr(unsafe.Pointer(handlePtr)), waitAll, waitMilliseconds)
+}
+
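+// A minimal usage sketch, assuming h1 and h2 are valid event handles:
+//
+//	ev, err := WaitForMultipleObjects([]Handle{h1, h2}, false, INFINITE)
+//	if err == nil && ev == WAIT_OBJECT_0 {
+//		// h1 was signaled first.
+//	}
+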
+// net api calls
+
+const socket_error = uintptr(^uint32(0))
+
+//sys	WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup
+//sys	WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup
+//sys	WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl
+//sys	WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceBeginW
+//sys	WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceNextW
+//sys	WSALookupServiceEnd(handle Handle) (err error) [failretval==socket_error] = ws2_32.WSALookupServiceEnd
+//sys	socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket
+//sys	sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) [failretval==socket_error] = ws2_32.sendto
+//sys	recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) [failretval==-1] = ws2_32.recvfrom
+//sys	Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt
+//sys	Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt
+//sys	bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind
+//sys	connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect
+//sys	getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname
+//sys	getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername
+//sys	listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen
+//sys	shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown
+//sys	Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket
+//sys	AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx
+//sys	GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs
+//sys	WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv
+//sys	WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend
+//sys	WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32,  from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
+//sys	WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32,  overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
+//sys	WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW
+//sys	WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW
+//sys	GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
+//sys	GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
+//sys	Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
+//sys	GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname
+//sys	DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W
+//sys	DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree
+//sys	DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W
+//sys	GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW
+//sys	FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW
+//sys	GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry
+//sys	GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo
+//sys	SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes
+//sys	WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW
+//sys	WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult
+//sys	GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses
+//sys	GetACP() (acp uint32) = kernel32.GetACP
+//sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
+//sys	getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
+//sys   GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys   GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys   NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys   NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
+//sys   CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
+
+// For testing: clients can set this flag to force
+// creation of IPv6 sockets to return EAFNOSUPPORT.
+var SocketDisableIPv6 bool
+
+type RawSockaddrInet4 struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]uint8
+}
+
+type RawSockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type RawSockaddr struct {
+	Family uint16
+	Data   [14]int8
+}
+
+type RawSockaddrAny struct {
+	Addr RawSockaddr
+	Pad  [100]int8
+}
+
+type Sockaddr interface {
+	sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs
+}
+
+type SockaddrInet4 struct {
+	Port int
+	Addr [4]byte
+	raw  RawSockaddrInet4
+}
+
+func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) {
+	if sa.Port < 0 || sa.Port > 0xFFFF {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_INET
+	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+	p[0] = byte(sa.Port >> 8)
+	p[1] = byte(sa.Port)
+	sa.raw.Addr = sa.Addr
+	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
+}
+
+type SockaddrInet6 struct {
+	Port   int
+	ZoneId uint32
+	Addr   [16]byte
+	raw    RawSockaddrInet6
+}
+
+func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) {
+	if sa.Port < 0 || sa.Port > 0xFFFF {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_INET6
+	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
+	p[0] = byte(sa.Port >> 8)
+	p[1] = byte(sa.Port)
+	sa.raw.Scope_id = sa.ZoneId
+	sa.raw.Addr = sa.Addr
+	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
+}
+
+type RawSockaddrUnix struct {
+	Family uint16
+	Path   [UNIX_PATH_MAX]int8
+}
+
+type SockaddrUnix struct {
+	Name string
+	raw  RawSockaddrUnix
+}
+
+func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) {
+	name := sa.Name
+	n := len(name)
+	if n > len(sa.raw.Path) {
+		return nil, 0, syscall.EINVAL
+	}
+	if n == len(sa.raw.Path) && name[0] != '@' {
+		return nil, 0, syscall.EINVAL
+	}
+	sa.raw.Family = AF_UNIX
+	for i := 0; i < n; i++ {
+		sa.raw.Path[i] = int8(name[i])
+	}
+	// length is family (uint16), name, NUL.
+	sl := int32(2)
+	if n > 0 {
+		sl += int32(n) + 1
+	}
+	if sa.raw.Path[0] == '@' || (sa.raw.Path[0] == 0 && sl > 3) {
+		// Check sl > 3 so we don't change unnamed socket behavior.
+		sa.raw.Path[0] = 0
+		// Don't count trailing NUL for abstract address.
+		sl--
+	}
+
+	return unsafe.Pointer(&sa.raw), sl, nil
+}
+
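+// Worked example of the length computation above: for the abstract name
+// "@svc" (n == 4), sl starts at 2 for the family, grows to 2+4+1 == 7 for
+// the name plus trailing NUL, and the abstract-address branch then drops
+// the NUL again, yielding sl == 6.
+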
+type RawSockaddrBth struct {
+	AddressFamily  [2]byte
+	BtAddr         [8]byte
+	ServiceClassId [16]byte
+	Port           [4]byte
+}
+
+type SockaddrBth struct {
+	BtAddr         uint64
+	ServiceClassId GUID
+	Port           uint32
+
+	raw RawSockaddrBth
+}
+
+func (sa *SockaddrBth) sockaddr() (unsafe.Pointer, int32, error) {
+	family := AF_BTH
+	sa.raw = RawSockaddrBth{
+		AddressFamily:  *(*[2]byte)(unsafe.Pointer(&family)),
+		BtAddr:         *(*[8]byte)(unsafe.Pointer(&sa.BtAddr)),
+		Port:           *(*[4]byte)(unsafe.Pointer(&sa.Port)),
+		ServiceClassId: *(*[16]byte)(unsafe.Pointer(&sa.ServiceClassId)),
+	}
+	return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil
+}
+
+func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) {
+	switch rsa.Addr.Family {
+	case AF_UNIX:
+		pp := (*RawSockaddrUnix)(unsafe.Pointer(rsa))
+		sa := new(SockaddrUnix)
+		if pp.Path[0] == 0 {
+			// "Abstract" Unix domain socket.
+			// Rewrite leading NUL as @ for textual display.
+			// (This is the standard convention.)
+			// Overwriting the buffer in place is not
+			// ideal, but the callers below don't care.
+			pp.Path[0] = '@'
+		}
+
+		// Assume path ends at NUL.
+		// This is not technically the Linux semantics for
+		// abstract Unix domain sockets--they are supposed
+		// to be uninterpreted fixed-size binary blobs--but
+		// everyone uses this convention.
+		n := 0
+		for n < len(pp.Path) && pp.Path[n] != 0 {
+			n++
+		}
+		sa.Name = string(unsafe.Slice((*byte)(unsafe.Pointer(&pp.Path[0])), n))
+		return sa, nil
+
+	case AF_INET:
+		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
+		sa := new(SockaddrInet4)
+		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+		sa.Port = int(p[0])<<8 + int(p[1])
+		sa.Addr = pp.Addr
+		return sa, nil
+
+	case AF_INET6:
+		pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
+		sa := new(SockaddrInet6)
+		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
+		sa.Port = int(p[0])<<8 + int(p[1])
+		sa.ZoneId = pp.Scope_id
+		sa.Addr = pp.Addr
+		return sa, nil
+	}
+	return nil, syscall.EAFNOSUPPORT
+}
+
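+// A minimal sketch of consuming the returned Sockaddr, e.g. via Getsockname
+// (defined below); fd is assumed to be a bound or connected socket Handle:
+//
+//	sa, err := Getsockname(fd)
+//	if err == nil {
+//		switch sa := sa.(type) {
+//		case *SockaddrInet4:
+//			_ = sa.Port // port in host byte order
+//		case *SockaddrInet6:
+//			_ = sa.ZoneId
+//		}
+//	}
+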
+func Socket(domain, typ, proto int) (fd Handle, err error) {
+	if domain == AF_INET6 && SocketDisableIPv6 {
+		return InvalidHandle, syscall.EAFNOSUPPORT
+	}
+	return socket(int32(domain), int32(typ), int32(proto))
+}
+
+func SetsockoptInt(fd Handle, level, opt int, value int) (err error) {
+	v := int32(value)
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v)))
+}
+
+func Bind(fd Handle, sa Sockaddr) (err error) {
+	ptr, n, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return bind(fd, ptr, n)
+}
+
+func Connect(fd Handle, sa Sockaddr) (err error) {
+	ptr, n, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return connect(fd, ptr, n)
+}
+
+func GetBestInterfaceEx(sa Sockaddr, pdwBestIfIndex *uint32) (err error) {
+	ptr, _, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return getBestInterfaceEx(ptr, pdwBestIfIndex)
+}
+
+func Getsockname(fd Handle) (sa Sockaddr, err error) {
+	var rsa RawSockaddrAny
+	l := int32(unsafe.Sizeof(rsa))
+	if err = getsockname(fd, &rsa, &l); err != nil {
+		return
+	}
+	return rsa.Sockaddr()
+}
+
+func Getpeername(fd Handle) (sa Sockaddr, err error) {
+	var rsa RawSockaddrAny
+	l := int32(unsafe.Sizeof(rsa))
+	if err = getpeername(fd, &rsa, &l); err != nil {
+		return
+	}
+	return rsa.Sockaddr()
+}
+
+func Listen(s Handle, n int) (err error) {
+	return listen(s, int32(n))
+}
+
+func Shutdown(fd Handle, how int) (err error) {
+	return shutdown(fd, int32(how))
+}
+
+func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) {
+	var rsa unsafe.Pointer
+	var l int32
+	if to != nil {
+		rsa, l, err = to.sockaddr()
+		if err != nil {
+			return err
+		}
+	}
+	return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine)
+}
+
+func LoadGetAddrInfo() error {
+	return procGetAddrInfoW.Find()
+}
+
+var connectExFunc struct {
+	once sync.Once
+	addr uintptr
+	err  error
+}
+
+func LoadConnectEx() error {
+	connectExFunc.once.Do(func() {
+		var s Handle
+		s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
+		if connectExFunc.err != nil {
+			return
+		}
+		defer CloseHandle(s)
+		var n uint32
+		connectExFunc.err = WSAIoctl(s,
+			SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&WSAID_CONNECTEX)),
+			uint32(unsafe.Sizeof(WSAID_CONNECTEX)),
+			(*byte)(unsafe.Pointer(&connectExFunc.addr)),
+			uint32(unsafe.Sizeof(connectExFunc.addr)),
+			&n, nil, 0)
+	})
+	return connectExFunc.err
+}
+
+func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
+func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error {
+	err := LoadConnectEx()
+	if err != nil {
+		return errorspkg.New("failed to find ConnectEx: " + err.Error())
+	}
+	ptr, n, err := sa.sockaddr()
+	if err != nil {
+		return err
+	}
+	return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped)
+}
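+
+// For illustration, a minimal ConnectEx sketch (the address and port are
+// placeholders; error handling is elided). ConnectEx operates on a socket
+// that has already been bound, and completion is delivered through the
+// Overlapped structure:
+//
+//	s, _ := Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)
+//	_ = Bind(s, &SockaddrInet4{}) // ConnectEx requires a bound socket
+//	var o Overlapped
+//	// A nil send buffer transmits no data with the connect.
+//	err := ConnectEx(s, &SockaddrInet4{Port: 80, Addr: [4]byte{192, 0, 2, 1}}, nil, 0, nil, &o)
+//	// err is typically ERROR_IO_PENDING until the overlapped I/O completes.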
+
+var sendRecvMsgFunc struct {
+	once     sync.Once
+	sendAddr uintptr
+	recvAddr uintptr
+	err      error
+}
+
+func loadWSASendRecvMsg() error {
+	sendRecvMsgFunc.once.Do(func() {
+		var s Handle
+		s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)
+		if sendRecvMsgFunc.err != nil {
+			return
+		}
+		defer CloseHandle(s)
+		var n uint32
+		sendRecvMsgFunc.err = WSAIoctl(s,
+			SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)),
+			uint32(unsafe.Sizeof(WSAID_WSARECVMSG)),
+			(*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)),
+			uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)),
+			&n, nil, 0)
+		if sendRecvMsgFunc.err != nil {
+			return
+		}
+		sendRecvMsgFunc.err = WSAIoctl(s,
+			SIO_GET_EXTENSION_FUNCTION_POINTER,
+			(*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)),
+			uint32(unsafe.Sizeof(WSAID_WSASENDMSG)),
+			(*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)),
+			uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)),
+			&n, nil, 0)
+	})
+	return sendRecvMsgFunc.err
+}
+
+func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error {
+	err := loadWSASendRecvMsg()
+	if err != nil {
+		return err
+	}
+	r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return err
+}
+
+func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error {
+	err := loadWSASendRecvMsg()
+	if err != nil {
+		return err
+	}
+	r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0)
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return err
+}
+
+// Invented structures to support what package os expects.
+type Rusage struct {
+	CreationTime Filetime
+	ExitTime     Filetime
+	KernelTime   Filetime
+	UserTime     Filetime
+}
+
+type WaitStatus struct {
+	ExitCode uint32
+}
+
+func (w WaitStatus) Exited() bool { return true }
+
+func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) }
+
+func (w WaitStatus) Signal() Signal { return -1 }
+
+func (w WaitStatus) CoreDump() bool { return false }
+
+func (w WaitStatus) Stopped() bool { return false }
+
+func (w WaitStatus) Continued() bool { return false }
+
+func (w WaitStatus) StopSignal() Signal { return -1 }
+
+func (w WaitStatus) Signaled() bool { return false }
+
+func (w WaitStatus) TrapCause() int { return -1 }
+
+// Timespec is an invented structure on Windows, but here for
+// consistency with the corresponding package for other operating systems.
+type Timespec struct {
+	Sec  int64
+	Nsec int64
+}
+
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+func NsecToTimespec(nsec int64) (ts Timespec) {
+	ts.Sec = nsec / 1e9
+	ts.Nsec = nsec % 1e9
+	return
+}
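+
+// For illustration, the two helpers are inverses for non-negative inputs:
+//
+//	ts := NsecToTimespec(1500000001) // Timespec{Sec: 1, Nsec: 500000001}
+//	_ = TimespecToNsec(ts)           // 1500000001 again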
+
+// TODO(brainman): fix all needed for net
+
+func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS }
+
+func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) {
+	var rsa RawSockaddrAny
+	l := int32(unsafe.Sizeof(rsa))
+	n32, err := recvfrom(fd, p, int32(flags), &rsa, &l)
+	n = int(n32)
+	if err != nil {
+		return
+	}
+	from, err = rsa.Sockaddr()
+	return
+}
+
+func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) {
+	ptr, l, err := to.sockaddr()
+	if err != nil {
+		return err
+	}
+	return sendto(fd, p, int32(flags), ptr, l)
+}
+
+func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS }
+
+// The Linger struct is wrong, but we only noticed after Go 1;
+// sysLinger is the real system call structure.
+
+// BUG(brainman): The definition of Linger is not appropriate for direct use
+// with Setsockopt and Getsockopt.
+// Use SetsockoptLinger instead.
+
+type Linger struct {
+	Onoff  int32
+	Linger int32
+}
+
+type sysLinger struct {
+	Onoff  uint16
+	Linger uint16
+}
+
+type IPMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type IPv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+func GetsockoptInt(fd Handle, level, opt int) (int, error) {
+	v := int32(0)
+	l := int32(unsafe.Sizeof(v))
+	err := Getsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), &l)
+	return int(v), err
+}
+
+func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) {
+	sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)}
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys)))
+}
+
+func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) {
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4)
+}
+
+func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) {
+	return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq)))
+}
+
+func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) {
+	return syscall.EWINDOWS
+}
+
+func EnumProcesses(processIds []uint32, bytesReturned *uint32) error {
+	// The EnumProcesses syscall expects the size parameter in bytes, but the code
+	// generated with mksyscall uses the length of the processIds slice instead.
+	// This wrapper fixes the discrepancy.
+	var p *uint32
+	if len(processIds) > 0 {
+		p = &processIds[0]
+	}
+	size := uint32(len(processIds) * 4)
+	return enumProcesses(p, size, bytesReturned)
+}
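+
+// For illustration, a minimal sketch of the usual call pattern: grow the
+// buffer until the syscall reports fewer bytes than the buffer can hold,
+// which indicates that every PID fit:
+//
+//	pids := make([]uint32, 256)
+//	var ret uint32
+//	for {
+//		if err := EnumProcesses(pids, &ret); err != nil {
+//			break
+//		}
+//		if int(ret) < len(pids)*4 {
+//			pids = pids[:ret/4] // ret is in bytes; 4 bytes per PID
+//			break
+//		}
+//		pids = make([]uint32, len(pids)*2)
+//	}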
+
+func Getpid() (pid int) { return int(GetCurrentProcessId()) }
+
+func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) {
+	// NOTE(rsc): The Win32finddata struct is wrong for the system call:
+	// the two paths are each one uint16 short. Use the correct struct,
+	// a win32finddata1, and then copy the results out.
+	// There is no loss of expressivity here, because the final
+	// uint16, if it is used, is supposed to be a NUL, and Go doesn't need that.
+	// For Go 1.1, we might avoid the allocation of win32finddata1 here
+	// by adding a final Bug [2]uint16 field to the struct and then
+	// adjusting the fields in the result directly.
+	var data1 win32finddata1
+	handle, err = findFirstFile1(name, &data1)
+	if err == nil {
+		copyFindData(data, &data1)
+	}
+	return
+}
+
+func FindNextFile(handle Handle, data *Win32finddata) (err error) {
+	var data1 win32finddata1
+	err = findNextFile1(handle, &data1)
+	if err == nil {
+		copyFindData(data, &data1)
+	}
+	return
+}
+
+func getProcessEntry(pid int) (*ProcessEntry32, error) {
+	snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer CloseHandle(snapshot)
+	var procEntry ProcessEntry32
+	procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+	if err = Process32First(snapshot, &procEntry); err != nil {
+		return nil, err
+	}
+	for {
+		if procEntry.ProcessID == uint32(pid) {
+			return &procEntry, nil
+		}
+		err = Process32Next(snapshot, &procEntry)
+		if err != nil {
+			return nil, err
+		}
+	}
+}
+
+func Getppid() (ppid int) {
+	pe, err := getProcessEntry(Getpid())
+	if err != nil {
+		return -1
+	}
+	return int(pe.ParentProcessID)
+}
+
+// TODO(brainman): fix all needed for os
+func Fchdir(fd Handle) (err error)             { return syscall.EWINDOWS }
+func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS }
+func Symlink(path, link string) (err error)    { return syscall.EWINDOWS }
+
+func Fchmod(fd Handle, mode uint32) (err error)        { return syscall.EWINDOWS }
+func Chown(path string, uid int, gid int) (err error)  { return syscall.EWINDOWS }
+func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS }
+func Fchown(fd Handle, uid int, gid int) (err error)   { return syscall.EWINDOWS }
+
+func Getuid() (uid int)                  { return -1 }
+func Geteuid() (euid int)                { return -1 }
+func Getgid() (gid int)                  { return -1 }
+func Getegid() (egid int)                { return -1 }
+func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS }
+
+type Signal int
+
+func (s Signal) Signal() {}
+
+func (s Signal) String() string {
+	if 0 <= s && int(s) < len(signals) {
+		str := signals[s]
+		if str != "" {
+			return str
+		}
+	}
+	return "signal " + itoa(int(s))
+}
+
+func LoadCreateSymbolicLink() error {
+	return procCreateSymbolicLinkW.Find()
+}
+
+// Readlink returns the destination of the named symbolic link.
+func Readlink(path string, buf []byte) (n int, err error) {
+	fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING,
+		FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if err != nil {
+		return -1, err
+	}
+	defer CloseHandle(fd)
+
+	rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
+	var bytesReturned uint32
+	err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil)
+	if err != nil {
+		return -1, err
+	}
+
+	rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
+	var s string
+	switch rdb.ReparseTag {
+	case IO_REPARSE_TAG_SYMLINK:
+		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
+		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
+		// PrintNameOffset and PrintNameLength are byte counts into PathBuffer.
+		s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameOffset+data.PrintNameLength)/2])
+	case IO_REPARSE_TAG_MOUNT_POINT:
+		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
+		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
+		s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameOffset+data.PrintNameLength)/2])
+	default:
+		// The path is not a symlink or junction but another type of
+		// reparse point.
+		return -1, syscall.ENOENT
+	}
+	n = copy(buf, []byte(s))
+
+	return n, nil
+}
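+
+// For illustration, a minimal sketch (the path is a placeholder): Readlink
+// fills a caller-supplied buffer and returns the number of bytes copied:
+//
+//	buf := make([]byte, MAX_PATH)
+//	n, err := Readlink(`C:\some\link`, buf)
+//	if err == nil {
+//		_ = string(buf[:n]) // destination of the symlink
+//	}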
+
+// GUIDFromString parses a string in the form of
+// "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}" into a GUID.
+func GUIDFromString(str string) (GUID, error) {
+	guid := GUID{}
+	str16, err := syscall.UTF16PtrFromString(str)
+	if err != nil {
+		return guid, err
+	}
+	err = clsidFromString(str16, &guid)
+	if err != nil {
+		return guid, err
+	}
+	return guid, nil
+}
+
+// GenerateGUID creates a new random GUID.
+func GenerateGUID() (GUID, error) {
+	guid := GUID{}
+	err := coCreateGuid(&guid)
+	if err != nil {
+		return guid, err
+	}
+	return guid, nil
+}
+
+// String returns the canonical string form of the GUID,
+// in the form of "{XXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}".
+func (guid GUID) String() string {
+	var str [100]uint16
+	chars := stringFromGUID2(&guid, &str[0], int32(len(str)))
+	if chars <= 1 {
+		return ""
+	}
+	return string(utf16.Decode(str[:chars-1]))
+}
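+
+// For illustration, parsing and re-printing round-trips the canonical braced
+// form (the GUID value is an arbitrary placeholder):
+//
+//	g, err := GUIDFromString("{F0E1D2C3-B4A5-9687-7869-5A4B3C2D1E0F}")
+//	if err == nil {
+//		_ = g.String() // "{F0E1D2C3-B4A5-9687-7869-5A4B3C2D1E0F}"
+//	}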
+
+// KnownFolderPath returns a well-known folder path for the current user, specified by one of
+// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag.
+func KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) {
+	return Token(0).KnownFolderPath(folderID, flags)
+}
+
+// KnownFolderPath returns a well-known folder path for the user token, specified by one of
+// the FOLDERID_ constants, and chosen and optionally created based on a KF_ flag.
+func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, error) {
+	var p *uint16
+	err := shGetKnownFolderPath(folderID, flags, t, &p)
+	if err != nil {
+		return "", err
+	}
+	defer CoTaskMemFree(unsafe.Pointer(p))
+	return UTF16PtrToString(p), nil
+}
+
+// RtlGetVersion returns the version of the underlying operating system, ignoring
+// manifest semantics but is affected by the application compatibility layer.
+func RtlGetVersion() *OsVersionInfoEx {
+	info := &OsVersionInfoEx{}
+	info.osVersionInfoSize = uint32(unsafe.Sizeof(*info))
+	// According to documentation, this function always succeeds.
+	// The function doesn't even check the validity of the
+	// osVersionInfoSize member. Disassembling ntdll.dll indicates
+	// that the documentation is indeed correct about that.
+	_ = rtlGetVersion(info)
+	return info
+}
+
+// RtlGetNtVersionNumbers returns the version of the underlying operating system,
+// ignoring manifest semantics and the application compatibility layer.
+func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) {
+	rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber)
+	buildNumber &= 0xffff
+	return
+}
+
+// GetProcessPreferredUILanguages retrieves the process preferred UI languages.
+func GetProcessPreferredUILanguages(flags uint32) ([]string, error) {
+	return getUILanguages(flags, getProcessPreferredUILanguages)
+}
+
+// GetThreadPreferredUILanguages retrieves the thread preferred UI languages for the current thread.
+func GetThreadPreferredUILanguages(flags uint32) ([]string, error) {
+	return getUILanguages(flags, getThreadPreferredUILanguages)
+}
+
+// GetUserPreferredUILanguages retrieves information about the user preferred UI languages.
+func GetUserPreferredUILanguages(flags uint32) ([]string, error) {
+	return getUILanguages(flags, getUserPreferredUILanguages)
+}
+
+// GetSystemPreferredUILanguages retrieves the system preferred UI languages.
+func GetSystemPreferredUILanguages(flags uint32) ([]string, error) {
+	return getUILanguages(flags, getSystemPreferredUILanguages)
+}
+
+func getUILanguages(flags uint32, f func(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) error) ([]string, error) {
+	size := uint32(128)
+	for {
+		var numLanguages uint32
+		buf := make([]uint16, size)
+		err := f(flags, &numLanguages, &buf[0], &size)
+		if err == ERROR_INSUFFICIENT_BUFFER {
+			continue
+		}
+		if err != nil {
+			return nil, err
+		}
+		buf = buf[:size]
+		if numLanguages == 0 || len(buf) == 0 { // GetProcessPreferredUILanguages may return numLanguages==0 with "\0\0"
+			return []string{}, nil
+		}
+		if buf[len(buf)-1] == 0 {
+			buf = buf[:len(buf)-1] // remove terminating null
+		}
+		languages := make([]string, 0, numLanguages)
+		from := 0
+		for i, c := range buf {
+			if c == 0 {
+				languages = append(languages, string(utf16.Decode(buf[from:i])))
+				from = i + 1
+			}
+		}
+		return languages, nil
+	}
+}
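+
+// For illustration, a minimal sketch using the package's MUI_LANGUAGE_NAME
+// flag, which requests language names such as "en-US" rather than numeric
+// identifiers:
+//
+//	langs, err := GetUserPreferredUILanguages(MUI_LANGUAGE_NAME)
+//	if err == nil {
+//		_ = langs // e.g. []string{"en-US", "de-DE"}
+//	}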
+
+func SetConsoleCursorPosition(console Handle, position Coord) error {
+	return setConsoleCursorPosition(console, *((*uint32)(unsafe.Pointer(&position))))
+}
+
+func GetStartupInfo(startupInfo *StartupInfo) error {
+	getStartupInfo(startupInfo)
+	return nil
+}
+
+func (s NTStatus) Errno() syscall.Errno {
+	return rtlNtStatusToDosErrorNoTeb(s)
+}
+
+func langID(pri, sub uint16) uint32 { return uint32(sub)<<10 | uint32(pri) }
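+
+// For illustration, langID packs the sublanguage into the bits above the
+// primary language, so US English becomes the familiar 0x0409:
+//
+//	langID(LANG_ENGLISH, SUBLANG_ENGLISH_US) // 0x01<<10 | 0x09 = 0x0409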
+
+func (s NTStatus) Error() string {
+	b := make([]uint16, 300)
+	n, err := FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_FROM_HMODULE|FORMAT_MESSAGE_ARGUMENT_ARRAY, modntdll.Handle(), uint32(s), langID(LANG_ENGLISH, SUBLANG_ENGLISH_US), b, nil)
+	if err != nil {
+		return fmt.Sprintf("NTSTATUS 0x%08x", uint32(s))
+	}
+	// trim terminating \r and \n
+	for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- {
+	}
+	return string(utf16.Decode(b[:n]))
+}
+
+// NewNTUnicodeString returns a new NTUnicodeString structure for use with native
+// NT APIs that work over the NTUnicodeString type. Note that most Windows APIs
+// do not use NTUnicodeString, and instead UTF16PtrFromString should be used for
+// the more common *uint16 string type.
+func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
+	s16, err := UTF16FromString(s)
+	if err != nil {
+		return nil, err
+	}
+	n := uint16(len(s16) * 2)
+	return &NTUnicodeString{
+		Length:        n - 2, // subtract 2 bytes for the NULL terminator
+		MaximumLength: n,
+		Buffer:        &s16[0],
+	}, nil
+}
+
+// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
+func (s *NTUnicodeString) Slice() []uint16 {
+	// Note: this rounds the length down, if it happens
+	// to (incorrectly) be odd. Probably safer than rounding up.
+	return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2]
+}
+
+func (s *NTUnicodeString) String() string {
+	return UTF16ToString(s.Slice())
+}
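+
+// For illustration, NewNTUnicodeString followed by String recovers the
+// original Go string; Length excludes and MaximumLength includes the NUL:
+//
+//	u, _ := NewNTUnicodeString("C:\\Windows") // 10 UTF-16 units
+//	_ = u.String()      // "C:\\Windows"
+//	_ = u.Length        // 20 bytes
+//	_ = u.MaximumLength // 22 bytes, including the NUL terminator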
+
+// NewNTString returns a new NTString structure for use with native
+// NT APIs that work over the NTString type. Note that most Windows APIs
+// do not use NTString, and instead UTF16PtrFromString should be used for
+// the more common *uint16 string type.
+func NewNTString(s string) (*NTString, error) {
+	var nts NTString
+	s8, err := BytePtrFromString(s)
+	if err != nil {
+		return nil, err
+	}
+	RtlInitString(&nts, s8)
+	return &nts, nil
+}
+
+// Slice returns a byte slice that aliases the data in the NTString.
+func (s *NTString) Slice() []byte {
+	slice := unsafe.Slice(s.Buffer, s.MaximumLength)
+	return slice[:s.Length]
+}
+
+func (s *NTString) String() string {
+	return ByteSliceToString(s.Slice())
+}
+
+// FindResource resolves a resource of the given name and resource type.
+func FindResource(module Handle, name, resType ResourceIDOrString) (Handle, error) {
+	var namePtr, resTypePtr uintptr
+	var name16, resType16 *uint16
+	var err error
+	resolvePtr := func(i interface{}, keep **uint16) (uintptr, error) {
+		switch v := i.(type) {
+		case string:
+			*keep, err = UTF16PtrFromString(v)
+			if err != nil {
+				return 0, err
+			}
+			return uintptr(unsafe.Pointer(*keep)), nil
+		case ResourceID:
+			return uintptr(v), nil
+		}
+		return 0, errorspkg.New("parameter must be a ResourceID or a string")
+	}
+	namePtr, err = resolvePtr(name, &name16)
+	if err != nil {
+		return 0, err
+	}
+	resTypePtr, err = resolvePtr(resType, &resType16)
+	if err != nil {
+		return 0, err
+	}
+	resInfo, err := findResource(module, namePtr, resTypePtr)
+	runtime.KeepAlive(name16)
+	runtime.KeepAlive(resType16)
+	return resInfo, err
+}
+
+func LoadResourceData(module, resInfo Handle) (data []byte, err error) {
+	size, err := SizeofResource(module, resInfo)
+	if err != nil {
+		return
+	}
+	resData, err := LoadResource(module, resInfo)
+	if err != nil {
+		return
+	}
+	ptr, err := LockResource(resData)
+	if err != nil {
+		return
+	}
+	data = unsafe.Slice((*byte)(unsafe.Pointer(ptr)), size)
+	return
+}
+
+// PSAPI_WORKING_SET_EX_BLOCK contains extended working set information for a page.
+type PSAPI_WORKING_SET_EX_BLOCK uint64
+
+// Valid returns the validity of this page.
+// If this bit is 1, the subsequent members are valid; otherwise they should be ignored.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Valid() bool {
+	return (b & 1) == 1
+}
+
+// ShareCount is the number of processes that share this page. The maximum value of this member is 7.
+func (b PSAPI_WORKING_SET_EX_BLOCK) ShareCount() uint64 {
+	return b.intField(1, 3)
+}
+
+// Win32Protection is the memory protection attributes of the page. For a list of values, see
+// https://docs.microsoft.com/en-us/windows/win32/memory/memory-protection-constants
+func (b PSAPI_WORKING_SET_EX_BLOCK) Win32Protection() uint64 {
+	return b.intField(4, 11)
+}
+
+// Shared returns the shared status of this page.
+// If this bit is 1, the page can be shared.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Shared() bool {
+	return (b & (1 << 15)) != 0
+}
+
+// Node is the NUMA node. The maximum value of this member is 63.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Node() uint64 {
+	return b.intField(16, 6)
+}
+
+// Locked returns the locked status of this page.
+// If this bit is 1, the virtual page is locked in physical memory.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Locked() bool {
+	return (b & (1 << 22)) != 0
+}
+
+// LargePage returns the large page status of this page.
+// If this bit is 1, the page is a large page.
+func (b PSAPI_WORKING_SET_EX_BLOCK) LargePage() bool {
+	return (b & (1 << 23)) != 0
+}
+
+// Bad returns the bad status of this page.
+// If this bit is 1, the page has been reported as bad.
+func (b PSAPI_WORKING_SET_EX_BLOCK) Bad() bool {
+	return (b & (1 << 31)) != 0
+}
+
+// intField extracts an integer field in the PSAPI_WORKING_SET_EX_BLOCK union.
+func (b PSAPI_WORKING_SET_EX_BLOCK) intField(start, length int) uint64 {
+	var mask PSAPI_WORKING_SET_EX_BLOCK
+	for pos := start; pos < start+length; pos++ {
+		mask |= (1 << pos)
+	}
+
+	masked := b & mask
+	return uint64(masked >> start)
+}
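+
+// For illustration, a worked bit-field extraction: with bits 1 and 3 set,
+// ShareCount (bits 1-3) masks with 0b1110 and shifts right by 1:
+//
+//	b := PSAPI_WORKING_SET_EX_BLOCK(0b1010)
+//	_ = b.ShareCount() // (0b1010 & 0b1110) >> 1 = 0b101 = 5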
+
+// PSAPI_WORKING_SET_EX_INFORMATION contains extended working set information for a process.
+type PSAPI_WORKING_SET_EX_INFORMATION struct {
+	// The virtual address.
+	VirtualAddress Pointer
+	// A PSAPI_WORKING_SET_EX_BLOCK union that indicates the attributes of the page at VirtualAddress.
+	VirtualAttributes PSAPI_WORKING_SET_EX_BLOCK
+}
+
+// CreatePseudoConsole creates a windows pseudo console.
+func CreatePseudoConsole(size Coord, in Handle, out Handle, flags uint32, pconsole *Handle) error {
+	// We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only
+	// accept arguments that can be cast to uintptr, and Coord can't.
+	return createPseudoConsole(*((*uint32)(unsafe.Pointer(&size))), in, out, flags, pconsole)
+}
+
+// ResizePseudoConsole resizes the internal buffers of the pseudo console to the width and height specified in `size`.
+func ResizePseudoConsole(pconsole Handle, size Coord) error {
+	// We need this wrapper to manually cast Coord to uint32. The autogenerated wrappers only
+	// accept arguments that can be cast to uintptr, and Coord can't.
+	return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size))))
+}
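+
+// For illustration of the cast used above: Coord is two int16 fields, so on
+// Windows (little-endian) the pointer cast packs X into the low 16 bits and
+// Y into the high 16 bits of the resulting uint32:
+//
+//	size := Coord{X: 80, Y: 25}
+//	_ = *((*uint32)(unsafe.Pointer(&size))) // 25<<16 | 80 = 0x00190050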
+
+// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb.
+const (
+	CBR_110    = 110
+	CBR_300    = 300
+	CBR_600    = 600
+	CBR_1200   = 1200
+	CBR_2400   = 2400
+	CBR_4800   = 4800
+	CBR_9600   = 9600
+	CBR_14400  = 14400
+	CBR_19200  = 19200
+	CBR_38400  = 38400
+	CBR_57600  = 57600
+	CBR_115200 = 115200
+	CBR_128000 = 128000
+	CBR_256000 = 256000
+
+	DTR_CONTROL_DISABLE   = 0x00000000
+	DTR_CONTROL_ENABLE    = 0x00000010
+	DTR_CONTROL_HANDSHAKE = 0x00000020
+
+	RTS_CONTROL_DISABLE   = 0x00000000
+	RTS_CONTROL_ENABLE    = 0x00001000
+	RTS_CONTROL_HANDSHAKE = 0x00002000
+	RTS_CONTROL_TOGGLE    = 0x00003000
+
+	NOPARITY    = 0
+	ODDPARITY   = 1
+	EVENPARITY  = 2
+	MARKPARITY  = 3
+	SPACEPARITY = 4
+
+	ONESTOPBIT   = 0
+	ONE5STOPBITS = 1
+	TWOSTOPBITS  = 2
+)
+
+// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction.
+const (
+	SETXOFF  = 1
+	SETXON   = 2
+	SETRTS   = 3
+	CLRRTS   = 4
+	SETDTR   = 5
+	CLRDTR   = 6
+	SETBREAK = 8
+	CLRBREAK = 9
+)
+
+// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm.
+const (
+	PURGE_TXABORT = 0x0001
+	PURGE_RXABORT = 0x0002
+	PURGE_TXCLEAR = 0x0004
+	PURGE_RXCLEAR = 0x0008
+)
+
+// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask.
+const (
+	EV_RXCHAR  = 0x0001
+	EV_RXFLAG  = 0x0002
+	EV_TXEMPTY = 0x0004
+	EV_CTS     = 0x0008
+	EV_DSR     = 0x0010
+	EV_RLSD    = 0x0020
+	EV_BREAK   = 0x0040
+	EV_ERR     = 0x0080
+	EV_RING    = 0x0100
+)
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
new file mode 100644
index 0000000..358be3c
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -0,0 +1,3864 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+)
+
+// NTStatus corresponds with NTSTATUS, error values returned by ntdll.dll and
+// other native functions.
+type NTStatus uint32
+
+const (
+	// Invented values to support what package os expects.
+	O_RDONLY   = 0x00000
+	O_WRONLY   = 0x00001
+	O_RDWR     = 0x00002
+	O_CREAT    = 0x00040
+	O_EXCL     = 0x00080
+	O_NOCTTY   = 0x00100
+	O_TRUNC    = 0x00200
+	O_NONBLOCK = 0x00800
+	O_APPEND   = 0x00400
+	O_SYNC     = 0x01000
+	O_ASYNC    = 0x02000
+	O_CLOEXEC  = 0x80000
+)
+
+const (
+	// More invented values for signals
+	SIGHUP  = Signal(0x1)
+	SIGINT  = Signal(0x2)
+	SIGQUIT = Signal(0x3)
+	SIGILL  = Signal(0x4)
+	SIGTRAP = Signal(0x5)
+	SIGABRT = Signal(0x6)
+	SIGBUS  = Signal(0x7)
+	SIGFPE  = Signal(0x8)
+	SIGKILL = Signal(0x9)
+	SIGSEGV = Signal(0xb)
+	SIGPIPE = Signal(0xd)
+	SIGALRM = Signal(0xe)
+	SIGTERM = Signal(0xf)
+)
+
+var signals = [...]string{
+	1:  "hangup",
+	2:  "interrupt",
+	3:  "quit",
+	4:  "illegal instruction",
+	5:  "trace/breakpoint trap",
+	6:  "aborted",
+	7:  "bus error",
+	8:  "floating point exception",
+	9:  "killed",
+	10: "user defined signal 1",
+	11: "segmentation fault",
+	12: "user defined signal 2",
+	13: "broken pipe",
+	14: "alarm clock",
+	15: "terminated",
+}
+
+// File flags for [os.OpenFile]. The O_ prefix is used to indicate
+// that these flags are specific to the OpenFile function.
+const (
+	O_FILE_FLAG_OPEN_NO_RECALL     = FILE_FLAG_OPEN_NO_RECALL
+	O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+	O_FILE_FLAG_SESSION_AWARE      = FILE_FLAG_SESSION_AWARE
+	O_FILE_FLAG_POSIX_SEMANTICS    = FILE_FLAG_POSIX_SEMANTICS
+	O_FILE_FLAG_BACKUP_SEMANTICS   = FILE_FLAG_BACKUP_SEMANTICS
+	O_FILE_FLAG_DELETE_ON_CLOSE    = FILE_FLAG_DELETE_ON_CLOSE
+	O_FILE_FLAG_SEQUENTIAL_SCAN    = FILE_FLAG_SEQUENTIAL_SCAN
+	O_FILE_FLAG_RANDOM_ACCESS      = FILE_FLAG_RANDOM_ACCESS
+	O_FILE_FLAG_NO_BUFFERING       = FILE_FLAG_NO_BUFFERING
+	O_FILE_FLAG_OVERLAPPED         = FILE_FLAG_OVERLAPPED
+	O_FILE_FLAG_WRITE_THROUGH      = FILE_FLAG_WRITE_THROUGH
+)
+
+const (
+	FILE_READ_DATA        = 0x00000001
+	FILE_READ_ATTRIBUTES  = 0x00000080
+	FILE_READ_EA          = 0x00000008
+	FILE_WRITE_DATA       = 0x00000002
+	FILE_WRITE_ATTRIBUTES = 0x00000100
+	FILE_WRITE_EA         = 0x00000010
+	FILE_APPEND_DATA      = 0x00000004
+	FILE_EXECUTE          = 0x00000020
+
+	FILE_GENERIC_READ    = STANDARD_RIGHTS_READ | FILE_READ_DATA | FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE
+	FILE_GENERIC_WRITE   = STANDARD_RIGHTS_WRITE | FILE_WRITE_DATA | FILE_WRITE_ATTRIBUTES | FILE_WRITE_EA | FILE_APPEND_DATA | SYNCHRONIZE
+	FILE_GENERIC_EXECUTE = STANDARD_RIGHTS_EXECUTE | FILE_READ_ATTRIBUTES | FILE_EXECUTE | SYNCHRONIZE
+
+	FILE_LIST_DIRECTORY = 0x00000001
+	FILE_TRAVERSE       = 0x00000020
+
+	FILE_SHARE_READ   = 0x00000001
+	FILE_SHARE_WRITE  = 0x00000002
+	FILE_SHARE_DELETE = 0x00000004
+
+	FILE_ATTRIBUTE_READONLY              = 0x00000001
+	FILE_ATTRIBUTE_HIDDEN                = 0x00000002
+	FILE_ATTRIBUTE_SYSTEM                = 0x00000004
+	FILE_ATTRIBUTE_DIRECTORY             = 0x00000010
+	FILE_ATTRIBUTE_ARCHIVE               = 0x00000020
+	FILE_ATTRIBUTE_DEVICE                = 0x00000040
+	FILE_ATTRIBUTE_NORMAL                = 0x00000080
+	FILE_ATTRIBUTE_TEMPORARY             = 0x00000100
+	FILE_ATTRIBUTE_SPARSE_FILE           = 0x00000200
+	FILE_ATTRIBUTE_REPARSE_POINT         = 0x00000400
+	FILE_ATTRIBUTE_COMPRESSED            = 0x00000800
+	FILE_ATTRIBUTE_OFFLINE               = 0x00001000
+	FILE_ATTRIBUTE_NOT_CONTENT_INDEXED   = 0x00002000
+	FILE_ATTRIBUTE_ENCRYPTED             = 0x00004000
+	FILE_ATTRIBUTE_INTEGRITY_STREAM      = 0x00008000
+	FILE_ATTRIBUTE_VIRTUAL               = 0x00010000
+	FILE_ATTRIBUTE_NO_SCRUB_DATA         = 0x00020000
+	FILE_ATTRIBUTE_RECALL_ON_OPEN        = 0x00040000
+	FILE_ATTRIBUTE_RECALL_ON_DATA_ACCESS = 0x00400000
+
+	INVALID_FILE_ATTRIBUTES = 0xffffffff
+
+	CREATE_NEW        = 1
+	CREATE_ALWAYS     = 2
+	OPEN_EXISTING     = 3
+	OPEN_ALWAYS       = 4
+	TRUNCATE_EXISTING = 5
+
+	FILE_FLAG_OPEN_REQUIRING_OPLOCK = 0x00040000
+	FILE_FLAG_FIRST_PIPE_INSTANCE   = 0x00080000
+	FILE_FLAG_OPEN_NO_RECALL        = 0x00100000
+	FILE_FLAG_OPEN_REPARSE_POINT    = 0x00200000
+	FILE_FLAG_SESSION_AWARE         = 0x00800000
+	FILE_FLAG_POSIX_SEMANTICS       = 0x01000000
+	FILE_FLAG_BACKUP_SEMANTICS      = 0x02000000
+	FILE_FLAG_DELETE_ON_CLOSE       = 0x04000000
+	FILE_FLAG_SEQUENTIAL_SCAN       = 0x08000000
+	FILE_FLAG_RANDOM_ACCESS         = 0x10000000
+	FILE_FLAG_NO_BUFFERING          = 0x20000000
+	FILE_FLAG_OVERLAPPED            = 0x40000000
+	FILE_FLAG_WRITE_THROUGH         = 0x80000000
+
+	HANDLE_FLAG_INHERIT    = 0x00000001
+	STARTF_USESTDHANDLES   = 0x00000100
+	STARTF_USESHOWWINDOW   = 0x00000001
+	DUPLICATE_CLOSE_SOURCE = 0x00000001
+	DUPLICATE_SAME_ACCESS  = 0x00000002
+
+	STD_INPUT_HANDLE  = -10 & (1<<32 - 1)
+	STD_OUTPUT_HANDLE = -11 & (1<<32 - 1)
+	STD_ERROR_HANDLE  = -12 & (1<<32 - 1)
+
+	FILE_BEGIN   = 0
+	FILE_CURRENT = 1
+	FILE_END     = 2
+
+	LANG_ENGLISH       = 0x09
+	SUBLANG_ENGLISH_US = 0x01
+
+	FORMAT_MESSAGE_ALLOCATE_BUFFER = 256
+	FORMAT_MESSAGE_IGNORE_INSERTS  = 512
+	FORMAT_MESSAGE_FROM_STRING     = 1024
+	FORMAT_MESSAGE_FROM_HMODULE    = 2048
+	FORMAT_MESSAGE_FROM_SYSTEM     = 4096
+	FORMAT_MESSAGE_ARGUMENT_ARRAY  = 8192
+	FORMAT_MESSAGE_MAX_WIDTH_MASK  = 255
+
+	MAX_PATH      = 260
+	MAX_LONG_PATH = 32768
+
+	MAX_MODULE_NAME32 = 255
+
+	MAX_COMPUTERNAME_LENGTH = 15
+
+	MAX_DHCPV6_DUID_LENGTH = 130
+
+	MAX_DNS_SUFFIX_STRING_LENGTH = 256
+
+	TIME_ZONE_ID_UNKNOWN  = 0
+	TIME_ZONE_ID_STANDARD = 1
+
+	TIME_ZONE_ID_DAYLIGHT = 2
+	IGNORE                = 0
+	INFINITE              = 0xffffffff
+
+	WAIT_ABANDONED = 0x00000080
+	WAIT_OBJECT_0  = 0x00000000
+	WAIT_FAILED    = 0xFFFFFFFF
+
+	// Access rights for process.
+	PROCESS_ALL_ACCESS                = 0xFFFF
+	PROCESS_CREATE_PROCESS            = 0x0080
+	PROCESS_CREATE_THREAD             = 0x0002
+	PROCESS_DUP_HANDLE                = 0x0040
+	PROCESS_QUERY_INFORMATION         = 0x0400
+	PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
+	PROCESS_SET_INFORMATION           = 0x0200
+	PROCESS_SET_QUOTA                 = 0x0100
+	PROCESS_SUSPEND_RESUME            = 0x0800
+	PROCESS_TERMINATE                 = 0x0001
+	PROCESS_VM_OPERATION              = 0x0008
+	PROCESS_VM_READ                   = 0x0010
+	PROCESS_VM_WRITE                  = 0x0020
+
+	// Access rights for thread.
+	THREAD_DIRECT_IMPERSONATION      = 0x0200
+	THREAD_GET_CONTEXT               = 0x0008
+	THREAD_IMPERSONATE               = 0x0100
+	THREAD_QUERY_INFORMATION         = 0x0040
+	THREAD_QUERY_LIMITED_INFORMATION = 0x0800
+	THREAD_SET_CONTEXT               = 0x0010
+	THREAD_SET_INFORMATION           = 0x0020
+	THREAD_SET_LIMITED_INFORMATION   = 0x0400
+	THREAD_SET_THREAD_TOKEN          = 0x0080
+	THREAD_SUSPEND_RESUME            = 0x0002
+	THREAD_TERMINATE                 = 0x0001
+
+	FILE_MAP_COPY    = 0x01
+	FILE_MAP_WRITE   = 0x02
+	FILE_MAP_READ    = 0x04
+	FILE_MAP_EXECUTE = 0x20
+
+	CTRL_C_EVENT        = 0
+	CTRL_BREAK_EVENT    = 1
+	CTRL_CLOSE_EVENT    = 2
+	CTRL_LOGOFF_EVENT   = 5
+	CTRL_SHUTDOWN_EVENT = 6
+
+	// Windows reserves errors >= 1<<29 for application use.
+	APPLICATION_ERROR = 1 << 29
+)
+
+const (
+	// Process creation flags.
+	CREATE_BREAKAWAY_FROM_JOB        = 0x01000000
+	CREATE_DEFAULT_ERROR_MODE        = 0x04000000
+	CREATE_NEW_CONSOLE               = 0x00000010
+	CREATE_NEW_PROCESS_GROUP         = 0x00000200
+	CREATE_NO_WINDOW                 = 0x08000000
+	CREATE_PROTECTED_PROCESS         = 0x00040000
+	CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000
+	CREATE_SEPARATE_WOW_VDM          = 0x00000800
+	CREATE_SHARED_WOW_VDM            = 0x00001000
+	CREATE_SUSPENDED                 = 0x00000004
+	CREATE_UNICODE_ENVIRONMENT       = 0x00000400
+	DEBUG_ONLY_THIS_PROCESS          = 0x00000002
+	DEBUG_PROCESS                    = 0x00000001
+	DETACHED_PROCESS                 = 0x00000008
+	EXTENDED_STARTUPINFO_PRESENT     = 0x00080000
+	INHERIT_PARENT_AFFINITY          = 0x00010000
+)
+
+const (
+	// attributes for ProcThreadAttributeList
+	PROC_THREAD_ATTRIBUTE_PARENT_PROCESS    = 0x00020000
+	PROC_THREAD_ATTRIBUTE_HANDLE_LIST       = 0x00020002
+	PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY    = 0x00030003
+	PROC_THREAD_ATTRIBUTE_PREFERRED_NODE    = 0x00020004
+	PROC_THREAD_ATTRIBUTE_IDEAL_PROCESSOR   = 0x00030005
+	PROC_THREAD_ATTRIBUTE_MITIGATION_POLICY = 0x00020007
+	PROC_THREAD_ATTRIBUTE_UMS_THREAD        = 0x00030006
+	PROC_THREAD_ATTRIBUTE_PROTECTION_LEVEL  = 0x0002000b
+	PROC_THREAD_ATTRIBUTE_PSEUDOCONSOLE     = 0x00020016
+)
+
+const (
+	// flags for CreateToolhelp32Snapshot
+	TH32CS_SNAPHEAPLIST = 0x01
+	TH32CS_SNAPPROCESS  = 0x02
+	TH32CS_SNAPTHREAD   = 0x04
+	TH32CS_SNAPMODULE   = 0x08
+	TH32CS_SNAPMODULE32 = 0x10
+	TH32CS_SNAPALL      = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD
+	TH32CS_INHERIT      = 0x80000000
+)
+
+const (
+	// flags for EnumProcessModulesEx
+	LIST_MODULES_32BIT   = 0x01
+	LIST_MODULES_64BIT   = 0x02
+	LIST_MODULES_ALL     = 0x03
+	LIST_MODULES_DEFAULT = 0x00
+)
+
+const (
+	// filters for ReadDirectoryChangesW and FindFirstChangeNotificationW
+	FILE_NOTIFY_CHANGE_FILE_NAME   = 0x001
+	FILE_NOTIFY_CHANGE_DIR_NAME    = 0x002
+	FILE_NOTIFY_CHANGE_ATTRIBUTES  = 0x004
+	FILE_NOTIFY_CHANGE_SIZE        = 0x008
+	FILE_NOTIFY_CHANGE_LAST_WRITE  = 0x010
+	FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020
+	FILE_NOTIFY_CHANGE_CREATION    = 0x040
+	FILE_NOTIFY_CHANGE_SECURITY    = 0x100
+)
+
+const (
+	// do not reorder
+	FILE_ACTION_ADDED = iota + 1
+	FILE_ACTION_REMOVED
+	FILE_ACTION_MODIFIED
+	FILE_ACTION_RENAMED_OLD_NAME
+	FILE_ACTION_RENAMED_NEW_NAME
+)
+
+const (
+	// wincrypt.h
+	/* certenrolld_begin -- PROV_RSA_*/
+	PROV_RSA_FULL      = 1
+	PROV_RSA_SIG       = 2
+	PROV_DSS           = 3
+	PROV_FORTEZZA      = 4
+	PROV_MS_EXCHANGE   = 5
+	PROV_SSL           = 6
+	PROV_RSA_SCHANNEL  = 12
+	PROV_DSS_DH        = 13
+	PROV_EC_ECDSA_SIG  = 14
+	PROV_EC_ECNRA_SIG  = 15
+	PROV_EC_ECDSA_FULL = 16
+	PROV_EC_ECNRA_FULL = 17
+	PROV_DH_SCHANNEL   = 18
+	PROV_SPYRUS_LYNKS  = 20
+	PROV_RNG           = 21
+	PROV_INTEL_SEC     = 22
+	PROV_REPLACE_OWF   = 23
+	PROV_RSA_AES       = 24
+
+	/* dwFlags definitions for CryptAcquireContext */
+	CRYPT_VERIFYCONTEXT              = 0xF0000000
+	CRYPT_NEWKEYSET                  = 0x00000008
+	CRYPT_DELETEKEYSET               = 0x00000010
+	CRYPT_MACHINE_KEYSET             = 0x00000020
+	CRYPT_SILENT                     = 0x00000040
+	CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080
+
+	/* Flags for PFXImportCertStore */
+	CRYPT_EXPORTABLE                   = 0x00000001
+	CRYPT_USER_PROTECTED               = 0x00000002
+	CRYPT_USER_KEYSET                  = 0x00001000
+	PKCS12_PREFER_CNG_KSP              = 0x00000100
+	PKCS12_ALWAYS_CNG_KSP              = 0x00000200
+	PKCS12_ALLOW_OVERWRITE_KEY         = 0x00004000
+	PKCS12_NO_PERSIST_KEY              = 0x00008000
+	PKCS12_INCLUDE_EXTENDED_PROPERTIES = 0x00000010
+
+	/* Flags for CryptAcquireCertificatePrivateKey */
+	CRYPT_ACQUIRE_CACHE_FLAG             = 0x00000001
+	CRYPT_ACQUIRE_USE_PROV_INFO_FLAG     = 0x00000002
+	CRYPT_ACQUIRE_COMPARE_KEY_FLAG       = 0x00000004
+	CRYPT_ACQUIRE_NO_HEALING             = 0x00000008
+	CRYPT_ACQUIRE_SILENT_FLAG            = 0x00000040
+	CRYPT_ACQUIRE_WINDOW_HANDLE_FLAG     = 0x00000080
+	CRYPT_ACQUIRE_NCRYPT_KEY_FLAGS_MASK  = 0x00070000
+	CRYPT_ACQUIRE_ALLOW_NCRYPT_KEY_FLAG  = 0x00010000
+	CRYPT_ACQUIRE_PREFER_NCRYPT_KEY_FLAG = 0x00020000
+	CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG   = 0x00040000
+
+	/* pdwKeySpec for CryptAcquireCertificatePrivateKey */
+	AT_KEYEXCHANGE       = 1
+	AT_SIGNATURE         = 2
+	CERT_NCRYPT_KEY_SPEC = 0xFFFFFFFF
+
+	/* Default usage match type is AND with value zero */
+	USAGE_MATCH_TYPE_AND = 0
+	USAGE_MATCH_TYPE_OR  = 1
+
+	/* msgAndCertEncodingType values for CertOpenStore function */
+	X509_ASN_ENCODING   = 0x00000001
+	PKCS_7_ASN_ENCODING = 0x00010000
+
+	/* storeProvider values for CertOpenStore function */
+	CERT_STORE_PROV_MSG               = 1
+	CERT_STORE_PROV_MEMORY            = 2
+	CERT_STORE_PROV_FILE              = 3
+	CERT_STORE_PROV_REG               = 4
+	CERT_STORE_PROV_PKCS7             = 5
+	CERT_STORE_PROV_SERIALIZED        = 6
+	CERT_STORE_PROV_FILENAME_A        = 7
+	CERT_STORE_PROV_FILENAME_W        = 8
+	CERT_STORE_PROV_FILENAME          = CERT_STORE_PROV_FILENAME_W
+	CERT_STORE_PROV_SYSTEM_A          = 9
+	CERT_STORE_PROV_SYSTEM_W          = 10
+	CERT_STORE_PROV_SYSTEM            = CERT_STORE_PROV_SYSTEM_W
+	CERT_STORE_PROV_COLLECTION        = 11
+	CERT_STORE_PROV_SYSTEM_REGISTRY_A = 12
+	CERT_STORE_PROV_SYSTEM_REGISTRY_W = 13
+	CERT_STORE_PROV_SYSTEM_REGISTRY   = CERT_STORE_PROV_SYSTEM_REGISTRY_W
+	CERT_STORE_PROV_PHYSICAL_W        = 14
+	CERT_STORE_PROV_PHYSICAL          = CERT_STORE_PROV_PHYSICAL_W
+	CERT_STORE_PROV_SMART_CARD_W      = 15
+	CERT_STORE_PROV_SMART_CARD        = CERT_STORE_PROV_SMART_CARD_W
+	CERT_STORE_PROV_LDAP_W            = 16
+	CERT_STORE_PROV_LDAP              = CERT_STORE_PROV_LDAP_W
+	CERT_STORE_PROV_PKCS12            = 17
+
+	/* store characteristics (low WORD of flag) for CertOpenStore function */
+	CERT_STORE_NO_CRYPT_RELEASE_FLAG            = 0x00000001
+	CERT_STORE_SET_LOCALIZED_NAME_FLAG          = 0x00000002
+	CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004
+	CERT_STORE_DELETE_FLAG                      = 0x00000010
+	CERT_STORE_UNSAFE_PHYSICAL_FLAG             = 0x00000020
+	CERT_STORE_SHARE_STORE_FLAG                 = 0x00000040
+	CERT_STORE_SHARE_CONTEXT_FLAG               = 0x00000080
+	CERT_STORE_MANIFOLD_FLAG                    = 0x00000100
+	CERT_STORE_ENUM_ARCHIVED_FLAG               = 0x00000200
+	CERT_STORE_UPDATE_KEYID_FLAG                = 0x00000400
+	CERT_STORE_BACKUP_RESTORE_FLAG              = 0x00000800
+	CERT_STORE_MAXIMUM_ALLOWED_FLAG             = 0x00001000
+	CERT_STORE_CREATE_NEW_FLAG                  = 0x00002000
+	CERT_STORE_OPEN_EXISTING_FLAG               = 0x00004000
+	CERT_STORE_READONLY_FLAG                    = 0x00008000
+
+	/* store locations (high WORD of flag) for CertOpenStore function */
+	CERT_SYSTEM_STORE_CURRENT_USER               = 0x00010000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE              = 0x00020000
+	CERT_SYSTEM_STORE_CURRENT_SERVICE            = 0x00040000
+	CERT_SYSTEM_STORE_SERVICES                   = 0x00050000
+	CERT_SYSTEM_STORE_USERS                      = 0x00060000
+	CERT_SYSTEM_STORE_CURRENT_USER_GROUP_POLICY  = 0x00070000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE_GROUP_POLICY = 0x00080000
+	CERT_SYSTEM_STORE_LOCAL_MACHINE_ENTERPRISE   = 0x00090000
+	CERT_SYSTEM_STORE_UNPROTECTED_FLAG           = 0x40000000
+	CERT_SYSTEM_STORE_RELOCATE_FLAG              = 0x80000000
+
+	/* Miscellaneous high-WORD flags for CertOpenStore function */
+	CERT_REGISTRY_STORE_REMOTE_FLAG      = 0x00010000
+	CERT_REGISTRY_STORE_SERIALIZED_FLAG  = 0x00020000
+	CERT_REGISTRY_STORE_ROAMING_FLAG     = 0x00040000
+	CERT_REGISTRY_STORE_MY_IE_DIRTY_FLAG = 0x00080000
+	CERT_REGISTRY_STORE_LM_GPT_FLAG      = 0x01000000
+	CERT_REGISTRY_STORE_CLIENT_GPT_FLAG  = 0x80000000
+	CERT_FILE_STORE_COMMIT_ENABLE_FLAG   = 0x00010000
+	CERT_LDAP_STORE_SIGN_FLAG            = 0x00010000
+	CERT_LDAP_STORE_AREC_EXCLUSIVE_FLAG  = 0x00020000
+	CERT_LDAP_STORE_OPENED_FLAG          = 0x00040000
+	CERT_LDAP_STORE_UNBIND_FLAG          = 0x00080000
+
+	/* addDisposition values for CertAddCertificateContextToStore function */
+	CERT_STORE_ADD_NEW                                 = 1
+	CERT_STORE_ADD_USE_EXISTING                        = 2
+	CERT_STORE_ADD_REPLACE_EXISTING                    = 3
+	CERT_STORE_ADD_ALWAYS                              = 4
+	CERT_STORE_ADD_REPLACE_EXISTING_INHERIT_PROPERTIES = 5
+	CERT_STORE_ADD_NEWER                               = 6
+	CERT_STORE_ADD_NEWER_INHERIT_PROPERTIES            = 7
+
+	/* ErrorStatus values for CertTrustStatus struct */
+	CERT_TRUST_NO_ERROR                          = 0x00000000
+	CERT_TRUST_IS_NOT_TIME_VALID                 = 0x00000001
+	CERT_TRUST_IS_REVOKED                        = 0x00000004
+	CERT_TRUST_IS_NOT_SIGNATURE_VALID            = 0x00000008
+	CERT_TRUST_IS_NOT_VALID_FOR_USAGE            = 0x00000010
+	CERT_TRUST_IS_UNTRUSTED_ROOT                 = 0x00000020
+	CERT_TRUST_REVOCATION_STATUS_UNKNOWN         = 0x00000040
+	CERT_TRUST_IS_CYCLIC                         = 0x00000080
+	CERT_TRUST_INVALID_EXTENSION                 = 0x00000100
+	CERT_TRUST_INVALID_POLICY_CONSTRAINTS        = 0x00000200
+	CERT_TRUST_INVALID_BASIC_CONSTRAINTS         = 0x00000400
+	CERT_TRUST_INVALID_NAME_CONSTRAINTS          = 0x00000800
+	CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000
+	CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT   = 0x00002000
+	CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000
+	CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT      = 0x00008000
+	CERT_TRUST_IS_PARTIAL_CHAIN                  = 0x00010000
+	CERT_TRUST_CTL_IS_NOT_TIME_VALID             = 0x00020000
+	CERT_TRUST_CTL_IS_NOT_SIGNATURE_VALID        = 0x00040000
+	CERT_TRUST_CTL_IS_NOT_VALID_FOR_USAGE        = 0x00080000
+	CERT_TRUST_HAS_WEAK_SIGNATURE                = 0x00100000
+	CERT_TRUST_IS_OFFLINE_REVOCATION             = 0x01000000
+	CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY          = 0x02000000
+	CERT_TRUST_IS_EXPLICIT_DISTRUST              = 0x04000000
+	CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT    = 0x08000000
+
+	/* InfoStatus values for CertTrustStatus struct */
+	CERT_TRUST_HAS_EXACT_MATCH_ISSUER        = 0x00000001
+	CERT_TRUST_HAS_KEY_MATCH_ISSUER          = 0x00000002
+	CERT_TRUST_HAS_NAME_MATCH_ISSUER         = 0x00000004
+	CERT_TRUST_IS_SELF_SIGNED                = 0x00000008
+	CERT_TRUST_HAS_PREFERRED_ISSUER          = 0x00000100
+	CERT_TRUST_HAS_ISSUANCE_CHAIN_POLICY     = 0x00000400
+	CERT_TRUST_HAS_VALID_NAME_CONSTRAINTS    = 0x00000400
+	CERT_TRUST_IS_PEER_TRUSTED               = 0x00000800
+	CERT_TRUST_HAS_CRL_VALIDITY_EXTENDED     = 0x00001000
+	CERT_TRUST_IS_FROM_EXCLUSIVE_TRUST_STORE = 0x00002000
+	CERT_TRUST_IS_CA_TRUSTED                 = 0x00004000
+	CERT_TRUST_IS_COMPLEX_CHAIN              = 0x00010000
+
+	/* Certificate Information Flags */
+	CERT_INFO_VERSION_FLAG                 = 1
+	CERT_INFO_SERIAL_NUMBER_FLAG           = 2
+	CERT_INFO_SIGNATURE_ALGORITHM_FLAG     = 3
+	CERT_INFO_ISSUER_FLAG                  = 4
+	CERT_INFO_NOT_BEFORE_FLAG              = 5
+	CERT_INFO_NOT_AFTER_FLAG               = 6
+	CERT_INFO_SUBJECT_FLAG                 = 7
+	CERT_INFO_SUBJECT_PUBLIC_KEY_INFO_FLAG = 8
+	CERT_INFO_ISSUER_UNIQUE_ID_FLAG        = 9
+	CERT_INFO_SUBJECT_UNIQUE_ID_FLAG       = 10
+	CERT_INFO_EXTENSION_FLAG               = 11
+
+	/* dwFindType for CertFindCertificateInStore  */
+	CERT_COMPARE_MASK                     = 0xFFFF
+	CERT_COMPARE_SHIFT                    = 16
+	CERT_COMPARE_ANY                      = 0
+	CERT_COMPARE_SHA1_HASH                = 1
+	CERT_COMPARE_NAME                     = 2
+	CERT_COMPARE_ATTR                     = 3
+	CERT_COMPARE_MD5_HASH                 = 4
+	CERT_COMPARE_PROPERTY                 = 5
+	CERT_COMPARE_PUBLIC_KEY               = 6
+	CERT_COMPARE_HASH                     = CERT_COMPARE_SHA1_HASH
+	CERT_COMPARE_NAME_STR_A               = 7
+	CERT_COMPARE_NAME_STR_W               = 8
+	CERT_COMPARE_KEY_SPEC                 = 9
+	CERT_COMPARE_ENHKEY_USAGE             = 10
+	CERT_COMPARE_CTL_USAGE                = CERT_COMPARE_ENHKEY_USAGE
+	CERT_COMPARE_SUBJECT_CERT             = 11
+	CERT_COMPARE_ISSUER_OF                = 12
+	CERT_COMPARE_EXISTING                 = 13
+	CERT_COMPARE_SIGNATURE_HASH           = 14
+	CERT_COMPARE_KEY_IDENTIFIER           = 15
+	CERT_COMPARE_CERT_ID                  = 16
+	CERT_COMPARE_CROSS_CERT_DIST_POINTS   = 17
+	CERT_COMPARE_PUBKEY_MD5_HASH          = 18
+	CERT_COMPARE_SUBJECT_INFO_ACCESS      = 19
+	CERT_COMPARE_HASH_STR                 = 20
+	CERT_COMPARE_HAS_PRIVATE_KEY          = 21
+	CERT_FIND_ANY                         = (CERT_COMPARE_ANY << CERT_COMPARE_SHIFT)
+	CERT_FIND_SHA1_HASH                   = (CERT_COMPARE_SHA1_HASH << CERT_COMPARE_SHIFT)
+	CERT_FIND_MD5_HASH                    = (CERT_COMPARE_MD5_HASH << CERT_COMPARE_SHIFT)
+	CERT_FIND_SIGNATURE_HASH              = (CERT_COMPARE_SIGNATURE_HASH << CERT_COMPARE_SHIFT)
+	CERT_FIND_KEY_IDENTIFIER              = (CERT_COMPARE_KEY_IDENTIFIER << CERT_COMPARE_SHIFT)
+	CERT_FIND_HASH                        = CERT_FIND_SHA1_HASH
+	CERT_FIND_PROPERTY                    = (CERT_COMPARE_PROPERTY << CERT_COMPARE_SHIFT)
+	CERT_FIND_PUBLIC_KEY                  = (CERT_COMPARE_PUBLIC_KEY << CERT_COMPARE_SHIFT)
+	CERT_FIND_SUBJECT_NAME                = (CERT_COMPARE_NAME<<CERT_COMPARE_SHIFT | CERT_INFO_SUBJECT_FLAG)
+	CERT_FIND_SUBJECT_ATTR                = (CERT_COMPARE_ATTR<<CERT_COMPARE_SHIFT | CERT_INFO_SUBJECT_FLAG)
+	CERT_FIND_ISSUER_NAME                 = (CERT_COMPARE_NAME<<CERT_COMPARE_SHIFT | CERT_INFO_ISSUER_FLAG)
+	CERT_FIND_ISSUER_ATTR                 = (CERT_COMPARE_ATTR<<CERT_COMPARE_SHIFT | CERT_INFO_ISSUER_FLAG)
+	CERT_FIND_SUBJECT_STR_A               = (CERT_COMPARE_NAME_STR_A<<CERT_COMPARE_SHIFT | CERT_INFO_SUBJECT_FLAG)
+	CERT_FIND_SUBJECT_STR_W               = (CERT_COMPARE_NAME_STR_W<<CERT_COMPARE_SHIFT | CERT_INFO_SUBJECT_FLAG)
+	CERT_FIND_SUBJECT_STR                 = CERT_FIND_SUBJECT_STR_W
+	CERT_FIND_ISSUER_STR_A                = (CERT_COMPARE_NAME_STR_A<<CERT_COMPARE_SHIFT | CERT_INFO_ISSUER_FLAG)
+	CERT_FIND_ISSUER_STR_W                = (CERT_COMPARE_NAME_STR_W<<CERT_COMPARE_SHIFT | CERT_INFO_ISSUER_FLAG)
+	CERT_FIND_ISSUER_STR                  = CERT_FIND_ISSUER_STR_W
+	CERT_FIND_KEY_SPEC                    = (CERT_COMPARE_KEY_SPEC << CERT_COMPARE_SHIFT)
+	CERT_FIND_ENHKEY_USAGE                = (CERT_COMPARE_ENHKEY_USAGE << CERT_COMPARE_SHIFT)
+	CERT_FIND_CTL_USAGE                   = CERT_FIND_ENHKEY_USAGE
+	CERT_FIND_SUBJECT_CERT                = (CERT_COMPARE_SUBJECT_CERT << CERT_COMPARE_SHIFT)
+	CERT_FIND_ISSUER_OF                   = (CERT_COMPARE_ISSUER_OF << CERT_COMPARE_SHIFT)
+	CERT_FIND_EXISTING                    = (CERT_COMPARE_EXISTING << CERT_COMPARE_SHIFT)
+	CERT_FIND_CERT_ID                     = (CERT_COMPARE_CERT_ID << CERT_COMPARE_SHIFT)
+	CERT_FIND_CROSS_CERT_DIST_POINTS      = (CERT_COMPARE_CROSS_CERT_DIST_POINTS << CERT_COMPARE_SHIFT)
+	CERT_FIND_PUBKEY_MD5_HASH             = (CERT_COMPARE_PUBKEY_MD5_HASH << CERT_COMPARE_SHIFT)
+	CERT_FIND_SUBJECT_INFO_ACCESS         = (CERT_COMPARE_SUBJECT_INFO_ACCESS << CERT_COMPARE_SHIFT)
+	CERT_FIND_HASH_STR                    = (CERT_COMPARE_HASH_STR << CERT_COMPARE_SHIFT)
+	CERT_FIND_HAS_PRIVATE_KEY             = (CERT_COMPARE_HAS_PRIVATE_KEY << CERT_COMPARE_SHIFT)
+	CERT_FIND_OPTIONAL_ENHKEY_USAGE_FLAG  = 0x1
+	CERT_FIND_EXT_ONLY_ENHKEY_USAGE_FLAG  = 0x2
+	CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG = 0x4
+	CERT_FIND_NO_ENHKEY_USAGE_FLAG        = 0x8
+	CERT_FIND_OR_ENHKEY_USAGE_FLAG        = 0x10
+	CERT_FIND_VALID_ENHKEY_USAGE_FLAG     = 0x20
+	CERT_FIND_OPTIONAL_CTL_USAGE_FLAG     = CERT_FIND_OPTIONAL_ENHKEY_USAGE_FLAG
+	CERT_FIND_EXT_ONLY_CTL_USAGE_FLAG     = CERT_FIND_EXT_ONLY_ENHKEY_USAGE_FLAG
+	CERT_FIND_PROP_ONLY_CTL_USAGE_FLAG    = CERT_FIND_PROP_ONLY_ENHKEY_USAGE_FLAG
+	CERT_FIND_NO_CTL_USAGE_FLAG           = CERT_FIND_NO_ENHKEY_USAGE_FLAG
+	CERT_FIND_OR_CTL_USAGE_FLAG           = CERT_FIND_OR_ENHKEY_USAGE_FLAG
+	CERT_FIND_VALID_CTL_USAGE_FLAG        = CERT_FIND_VALID_ENHKEY_USAGE_FLAG
+
+	/* policyOID values for CertVerifyCertificateChainPolicy function */
+	CERT_CHAIN_POLICY_BASE              = 1
+	CERT_CHAIN_POLICY_AUTHENTICODE      = 2
+	CERT_CHAIN_POLICY_AUTHENTICODE_TS   = 3
+	CERT_CHAIN_POLICY_SSL               = 4
+	CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5
+	CERT_CHAIN_POLICY_NT_AUTH           = 6
+	CERT_CHAIN_POLICY_MICROSOFT_ROOT    = 7
+	CERT_CHAIN_POLICY_EV                = 8
+	CERT_CHAIN_POLICY_SSL_F12           = 9
+
+	/* flag for dwFindType CertFindChainInStore  */
+	CERT_CHAIN_FIND_BY_ISSUER = 1
+
+	/* dwFindFlags for CertFindChainInStore when dwFindType == CERT_CHAIN_FIND_BY_ISSUER */
+	CERT_CHAIN_FIND_BY_ISSUER_COMPARE_KEY_FLAG    = 0x0001
+	CERT_CHAIN_FIND_BY_ISSUER_COMPLEX_CHAIN_FLAG  = 0x0002
+	CERT_CHAIN_FIND_BY_ISSUER_CACHE_ONLY_URL_FLAG = 0x0004
+	CERT_CHAIN_FIND_BY_ISSUER_LOCAL_MACHINE_FLAG  = 0x0008
+	CERT_CHAIN_FIND_BY_ISSUER_NO_KEY_FLAG         = 0x4000
+	CERT_CHAIN_FIND_BY_ISSUER_CACHE_ONLY_FLAG     = 0x8000
+
+	/* Certificate Store close flags */
+	CERT_CLOSE_STORE_FORCE_FLAG = 0x00000001
+	CERT_CLOSE_STORE_CHECK_FLAG = 0x00000002
+
+	/* CryptQueryObject object type */
+	CERT_QUERY_OBJECT_FILE = 1
+	CERT_QUERY_OBJECT_BLOB = 2
+
+	/* CryptQueryObject content type flags */
+	CERT_QUERY_CONTENT_CERT                    = 1
+	CERT_QUERY_CONTENT_CTL                     = 2
+	CERT_QUERY_CONTENT_CRL                     = 3
+	CERT_QUERY_CONTENT_SERIALIZED_STORE        = 4
+	CERT_QUERY_CONTENT_SERIALIZED_CERT         = 5
+	CERT_QUERY_CONTENT_SERIALIZED_CTL          = 6
+	CERT_QUERY_CONTENT_SERIALIZED_CRL          = 7
+	CERT_QUERY_CONTENT_PKCS7_SIGNED            = 8
+	CERT_QUERY_CONTENT_PKCS7_UNSIGNED          = 9
+	CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED      = 10
+	CERT_QUERY_CONTENT_PKCS10                  = 11
+	CERT_QUERY_CONTENT_PFX                     = 12
+	CERT_QUERY_CONTENT_CERT_PAIR               = 13
+	CERT_QUERY_CONTENT_PFX_AND_LOAD            = 14
+	CERT_QUERY_CONTENT_FLAG_CERT               = (1 << CERT_QUERY_CONTENT_CERT)
+	CERT_QUERY_CONTENT_FLAG_CTL                = (1 << CERT_QUERY_CONTENT_CTL)
+	CERT_QUERY_CONTENT_FLAG_CRL                = (1 << CERT_QUERY_CONTENT_CRL)
+	CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE   = (1 << CERT_QUERY_CONTENT_SERIALIZED_STORE)
+	CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT    = (1 << CERT_QUERY_CONTENT_SERIALIZED_CERT)
+	CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL     = (1 << CERT_QUERY_CONTENT_SERIALIZED_CTL)
+	CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL     = (1 << CERT_QUERY_CONTENT_SERIALIZED_CRL)
+	CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED       = (1 << CERT_QUERY_CONTENT_PKCS7_SIGNED)
+	CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED     = (1 << CERT_QUERY_CONTENT_PKCS7_UNSIGNED)
+	CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED = (1 << CERT_QUERY_CONTENT_PKCS7_SIGNED_EMBED)
+	CERT_QUERY_CONTENT_FLAG_PKCS10             = (1 << CERT_QUERY_CONTENT_PKCS10)
+	CERT_QUERY_CONTENT_FLAG_PFX                = (1 << CERT_QUERY_CONTENT_PFX)
+	CERT_QUERY_CONTENT_FLAG_CERT_PAIR          = (1 << CERT_QUERY_CONTENT_CERT_PAIR)
+	CERT_QUERY_CONTENT_FLAG_PFX_AND_LOAD       = (1 << CERT_QUERY_CONTENT_PFX_AND_LOAD)
+	CERT_QUERY_CONTENT_FLAG_ALL                = (CERT_QUERY_CONTENT_FLAG_CERT | CERT_QUERY_CONTENT_FLAG_CTL | CERT_QUERY_CONTENT_FLAG_CRL | CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CTL | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CRL | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED_EMBED | CERT_QUERY_CONTENT_FLAG_PKCS10 | CERT_QUERY_CONTENT_FLAG_PFX | CERT_QUERY_CONTENT_FLAG_CERT_PAIR)
+	CERT_QUERY_CONTENT_FLAG_ALL_ISSUER_CERT    = (CERT_QUERY_CONTENT_FLAG_CERT | CERT_QUERY_CONTENT_FLAG_SERIALIZED_STORE | CERT_QUERY_CONTENT_FLAG_SERIALIZED_CERT | CERT_QUERY_CONTENT_FLAG_PKCS7_SIGNED | CERT_QUERY_CONTENT_FLAG_PKCS7_UNSIGNED)
+
+	/* CryptQueryObject format type flags */
+	CERT_QUERY_FORMAT_BINARY                     = 1
+	CERT_QUERY_FORMAT_BASE64_ENCODED             = 2
+	CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED      = 3
+	CERT_QUERY_FORMAT_FLAG_BINARY                = (1 << CERT_QUERY_FORMAT_BINARY)
+	CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED        = (1 << CERT_QUERY_FORMAT_BASE64_ENCODED)
+	CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED = (1 << CERT_QUERY_FORMAT_ASN_ASCII_HEX_ENCODED)
+	CERT_QUERY_FORMAT_FLAG_ALL                   = (CERT_QUERY_FORMAT_FLAG_BINARY | CERT_QUERY_FORMAT_FLAG_BASE64_ENCODED | CERT_QUERY_FORMAT_FLAG_ASN_ASCII_HEX_ENCODED)
+
+	/* CertGetNameString name types */
+	CERT_NAME_EMAIL_TYPE            = 1
+	CERT_NAME_RDN_TYPE              = 2
+	CERT_NAME_ATTR_TYPE             = 3
+	CERT_NAME_SIMPLE_DISPLAY_TYPE   = 4
+	CERT_NAME_FRIENDLY_DISPLAY_TYPE = 5
+	CERT_NAME_DNS_TYPE              = 6
+	CERT_NAME_URL_TYPE              = 7
+	CERT_NAME_UPN_TYPE              = 8
+
+	/* CertGetNameString flags */
+	CERT_NAME_ISSUER_FLAG              = 0x1
+	CERT_NAME_DISABLE_IE4_UTF8_FLAG    = 0x10000
+	CERT_NAME_SEARCH_ALL_NAMES_FLAG    = 0x2
+	CERT_NAME_STR_ENABLE_PUNYCODE_FLAG = 0x00200000
+
+	/* AuthType values for SSLExtraCertChainPolicyPara struct */
+	AUTHTYPE_CLIENT = 1
+	AUTHTYPE_SERVER = 2
+
+	/* Checks values for SSLExtraCertChainPolicyPara struct */
+	SECURITY_FLAG_IGNORE_REVOCATION        = 0x00000080
+	SECURITY_FLAG_IGNORE_UNKNOWN_CA        = 0x00000100
+	SECURITY_FLAG_IGNORE_WRONG_USAGE       = 0x00000200
+	SECURITY_FLAG_IGNORE_CERT_CN_INVALID   = 0x00001000
+	SECURITY_FLAG_IGNORE_CERT_DATE_INVALID = 0x00002000
+
+	/* Flags for Crypt[Un]ProtectData */
+	CRYPTPROTECT_UI_FORBIDDEN      = 0x1
+	CRYPTPROTECT_LOCAL_MACHINE     = 0x4
+	CRYPTPROTECT_CRED_SYNC         = 0x8
+	CRYPTPROTECT_AUDIT             = 0x10
+	CRYPTPROTECT_NO_RECOVERY       = 0x20
+	CRYPTPROTECT_VERIFY_PROTECTION = 0x40
+	CRYPTPROTECT_CRED_REGENERATE   = 0x80
+
+	/* Flags for CryptProtectPromptStruct */
+	CRYPTPROTECT_PROMPT_ON_UNPROTECT   = 1
+	CRYPTPROTECT_PROMPT_ON_PROTECT     = 2
+	CRYPTPROTECT_PROMPT_RESERVED       = 4
+	CRYPTPROTECT_PROMPT_STRONG         = 8
+	CRYPTPROTECT_PROMPT_REQUIRE_STRONG = 16
+)
+
+const (
+	// flags for SetErrorMode
+	SEM_FAILCRITICALERRORS     = 0x0001
+	SEM_NOALIGNMENTFAULTEXCEPT = 0x0004
+	SEM_NOGPFAULTERRORBOX      = 0x0002
+	SEM_NOOPENFILEERRORBOX     = 0x8000
+)
+
+const (
+	// Priority class.
+	ABOVE_NORMAL_PRIORITY_CLASS   = 0x00008000
+	BELOW_NORMAL_PRIORITY_CLASS   = 0x00004000
+	HIGH_PRIORITY_CLASS           = 0x00000080
+	IDLE_PRIORITY_CLASS           = 0x00000040
+	NORMAL_PRIORITY_CLASS         = 0x00000020
+	PROCESS_MODE_BACKGROUND_BEGIN = 0x00100000
+	PROCESS_MODE_BACKGROUND_END   = 0x00200000
+	REALTIME_PRIORITY_CLASS       = 0x00000100
+)
+
+/* wintrust.h constants for WinVerifyTrustEx */
+const (
+	WTD_UI_ALL    = 1
+	WTD_UI_NONE   = 2
+	WTD_UI_NOBAD  = 3
+	WTD_UI_NOGOOD = 4
+
+	WTD_REVOKE_NONE       = 0
+	WTD_REVOKE_WHOLECHAIN = 1
+
+	WTD_CHOICE_FILE    = 1
+	WTD_CHOICE_CATALOG = 2
+	WTD_CHOICE_BLOB    = 3
+	WTD_CHOICE_SIGNER  = 4
+	WTD_CHOICE_CERT    = 5
+
+	WTD_STATEACTION_IGNORE           = 0x00000000
+	WTD_STATEACTION_VERIFY           = 0x00000001
+	WTD_STATEACTION_CLOSE            = 0x00000002
+	WTD_STATEACTION_AUTO_CACHE       = 0x00000003
+	WTD_STATEACTION_AUTO_CACHE_FLUSH = 0x00000004
+
+	WTD_USE_IE4_TRUST_FLAG                  = 0x1
+	WTD_NO_IE4_CHAIN_FLAG                   = 0x2
+	WTD_NO_POLICY_USAGE_FLAG                = 0x4
+	WTD_REVOCATION_CHECK_NONE               = 0x10
+	WTD_REVOCATION_CHECK_END_CERT           = 0x20
+	WTD_REVOCATION_CHECK_CHAIN              = 0x40
+	WTD_REVOCATION_CHECK_CHAIN_EXCLUDE_ROOT = 0x80
+	WTD_SAFER_FLAG                          = 0x100
+	WTD_HASH_ONLY_FLAG                      = 0x200
+	WTD_USE_DEFAULT_OSVER_CHECK             = 0x400
+	WTD_LIFETIME_SIGNING_FLAG               = 0x800
+	WTD_CACHE_ONLY_URL_RETRIEVAL            = 0x1000
+	WTD_DISABLE_MD2_MD4                     = 0x2000
+	WTD_MOTW                                = 0x4000
+
+	WTD_UICONTEXT_EXECUTE = 0
+	WTD_UICONTEXT_INSTALL = 1
+)
+
+var (
+	OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00")
+	OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00")
+	OID_SGC_NETSCAPE        = []byte("2.16.840.1.113730.4.1\x00")
+
+	WINTRUST_ACTION_GENERIC_VERIFY_V2 = GUID{
+		Data1: 0xaac56b,
+		Data2: 0xcd44,
+		Data3: 0x11d0,
+		Data4: [8]byte{0x8c, 0xc2, 0x0, 0xc0, 0x4f, 0xc2, 0x95, 0xee},
+	}
+)
+
+// Pointer represents a pointer to an arbitrary Windows type.
+//
+// Pointer-typed fields may point to one of many different types. It's
+// up to the caller to provide a pointer to the appropriate type, cast
+// to Pointer. The caller must obey the unsafe.Pointer rules while
+// doing so.
+type Pointer *struct{}
+
+// Invented values to support what package os expects.
+type Timeval struct {
+	Sec  int32
+	Usec int32
+}
+
+func (tv *Timeval) Nanoseconds() int64 {
+	return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3
+}
+
+func NsecToTimeval(nsec int64) (tv Timeval) {
+	tv.Sec = int32(nsec / 1e9)
+	tv.Usec = int32(nsec % 1e9 / 1e3)
+	return
+}
+
+type Overlapped struct {
+	Internal     uintptr
+	InternalHigh uintptr
+	Offset       uint32
+	OffsetHigh   uint32
+	HEvent       Handle
+}
+
+type FileNotifyInformation struct {
+	NextEntryOffset uint32
+	Action          uint32
+	FileNameLength  uint32
+	FileName        uint16
+}
+
+type Filetime struct {
+	LowDateTime  uint32
+	HighDateTime uint32
+}
+
+// Nanoseconds returns Filetime ft in nanoseconds
+// since Epoch (00:00:00 UTC, January 1, 1970).
+func (ft *Filetime) Nanoseconds() int64 {
+	// 100-nanosecond intervals since January 1, 1601
+	nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime)
+	// change starting time to the Epoch (00:00:00 UTC, January 1, 1970)
+	nsec -= 116444736000000000
+	// convert into nanoseconds
+	nsec *= 100
+	return nsec
+}
+
+func NsecToFiletime(nsec int64) (ft Filetime) {
+	// convert into 100-nanosecond
+	nsec /= 100
+	// change starting time to January 1, 1601
+	nsec += 116444736000000000
+	// split into high / low
+	ft.LowDateTime = uint32(nsec & 0xffffffff)
+	ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff)
+	return ft
+}
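+
+// Usage sketch: the offset 116444736000000000 is the number of 100-nanosecond
+// intervals between the Windows epoch (1601-01-01) and the Unix epoch
+// (1970-01-01), so a Filetime round-trips cleanly through the standard
+// library's time package (the time import is assumed):
+//
+//	ft := NsecToFiletime(time.Now().UnixNano())
+//	t := time.Unix(0, ft.Nanoseconds()) // recovers the original wall-clock time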
+
+type Win32finddata struct {
+	FileAttributes    uint32
+	CreationTime      Filetime
+	LastAccessTime    Filetime
+	LastWriteTime     Filetime
+	FileSizeHigh      uint32
+	FileSizeLow       uint32
+	Reserved0         uint32
+	Reserved1         uint32
+	FileName          [MAX_PATH - 1]uint16
+	AlternateFileName [13]uint16
+}
+
+// This is the actual system call structure.
+// Win32finddata is what we committed to in Go 1.
+type win32finddata1 struct {
+	FileAttributes    uint32
+	CreationTime      Filetime
+	LastAccessTime    Filetime
+	LastWriteTime     Filetime
+	FileSizeHigh      uint32
+	FileSizeLow       uint32
+	Reserved0         uint32
+	Reserved1         uint32
+	FileName          [MAX_PATH]uint16
+	AlternateFileName [14]uint16
+
+	// The Microsoft documentation for this struct¹ describes three additional
+	// fields: dwFileType, dwCreatorType, and wFinderFlags. However, those fields
+	// are empirically only present in the macOS port of the Win32 API,² and thus
+	// not needed for binaries built for Windows.
+	//
+	// ¹ https://docs.microsoft.com/en-us/windows/win32/api/minwinbase/ns-minwinbase-win32_find_dataw
+	// ² https://golang.org/issue/42637#issuecomment-760715755.
+}
+
+func copyFindData(dst *Win32finddata, src *win32finddata1) {
+	dst.FileAttributes = src.FileAttributes
+	dst.CreationTime = src.CreationTime
+	dst.LastAccessTime = src.LastAccessTime
+	dst.LastWriteTime = src.LastWriteTime
+	dst.FileSizeHigh = src.FileSizeHigh
+	dst.FileSizeLow = src.FileSizeLow
+	dst.Reserved0 = src.Reserved0
+	dst.Reserved1 = src.Reserved1
+
+	// Src is one element bigger than dst, but its final element must be NUL,
+	// so it is safe to drop when copying.
+	copy(dst.FileName[:], src.FileName[:])
+	copy(dst.AlternateFileName[:], src.AlternateFileName[:])
+}
+
+type ByHandleFileInformation struct {
+	FileAttributes     uint32
+	CreationTime       Filetime
+	LastAccessTime     Filetime
+	LastWriteTime      Filetime
+	VolumeSerialNumber uint32
+	FileSizeHigh       uint32
+	FileSizeLow        uint32
+	NumberOfLinks      uint32
+	FileIndexHigh      uint32
+	FileIndexLow       uint32
+}
+
+const (
+	GetFileExInfoStandard = 0
+	GetFileExMaxInfoLevel = 1
+)
+
+type Win32FileAttributeData struct {
+	FileAttributes uint32
+	CreationTime   Filetime
+	LastAccessTime Filetime
+	LastWriteTime  Filetime
+	FileSizeHigh   uint32
+	FileSizeLow    uint32
+}
+
+// ShowWindow constants
+const (
+	// winuser.h
+	SW_HIDE            = 0
+	SW_NORMAL          = 1
+	SW_SHOWNORMAL      = 1
+	SW_SHOWMINIMIZED   = 2
+	SW_SHOWMAXIMIZED   = 3
+	SW_MAXIMIZE        = 3
+	SW_SHOWNOACTIVATE  = 4
+	SW_SHOW            = 5
+	SW_MINIMIZE        = 6
+	SW_SHOWMINNOACTIVE = 7
+	SW_SHOWNA          = 8
+	SW_RESTORE         = 9
+	SW_SHOWDEFAULT     = 10
+	SW_FORCEMINIMIZE   = 11
+)
+
+type StartupInfo struct {
+	Cb            uint32
+	_             *uint16
+	Desktop       *uint16
+	Title         *uint16
+	X             uint32
+	Y             uint32
+	XSize         uint32
+	YSize         uint32
+	XCountChars   uint32
+	YCountChars   uint32
+	FillAttribute uint32
+	Flags         uint32
+	ShowWindow    uint16
+	_             uint16
+	_             *byte
+	StdInput      Handle
+	StdOutput     Handle
+	StdErr        Handle
+}
+
+type StartupInfoEx struct {
+	StartupInfo
+	ProcThreadAttributeList *ProcThreadAttributeList
+}
+
+// ProcThreadAttributeList is a placeholder type to represent a PROC_THREAD_ATTRIBUTE_LIST.
+//
+// To create a *ProcThreadAttributeList, use NewProcThreadAttributeList, update
+// it with ProcThreadAttributeListContainer.Update, free its memory using
+// ProcThreadAttributeListContainer.Delete, and access the list itself using
+// ProcThreadAttributeListContainer.List.
+type ProcThreadAttributeList struct{}
+
+type ProcThreadAttributeListContainer struct {
+	data     *ProcThreadAttributeList
+	pointers []unsafe.Pointer
+}
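+
+// Usage sketch (NewProcThreadAttributeList, Update, List, and Delete are the
+// container helpers this package provides; handles is an assumed []Handle of
+// inheritable handles, and error handling is elided):
+//
+//	al, _ := NewProcThreadAttributeList(1)
+//	defer al.Delete()
+//	// e.g. limit the handles inherited by a child process:
+//	al.Update(PROC_THREAD_ATTRIBUTE_HANDLE_LIST,
+//		unsafe.Pointer(&handles[0]), uintptr(len(handles))*unsafe.Sizeof(handles[0]))
+//	si := &StartupInfoEx{ProcThreadAttributeList: al.List()}
+//	si.Cb = uint32(unsafe.Sizeof(*si))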
+
+type ProcessInformation struct {
+	Process   Handle
+	Thread    Handle
+	ProcessId uint32
+	ThreadId  uint32
+}
+
+type ProcessEntry32 struct {
+	Size            uint32
+	Usage           uint32
+	ProcessID       uint32
+	DefaultHeapID   uintptr
+	ModuleID        uint32
+	Threads         uint32
+	ParentProcessID uint32
+	PriClassBase    int32
+	Flags           uint32
+	ExeFile         [MAX_PATH]uint16
+}
+
+type ThreadEntry32 struct {
+	Size           uint32
+	Usage          uint32
+	ThreadID       uint32
+	OwnerProcessID uint32
+	BasePri        int32
+	DeltaPri       int32
+	Flags          uint32
+}
+
+type ModuleEntry32 struct {
+	Size         uint32
+	ModuleID     uint32
+	ProcessID    uint32
+	GlblcntUsage uint32
+	ProccntUsage uint32
+	ModBaseAddr  uintptr
+	ModBaseSize  uint32
+	ModuleHandle Handle
+	Module       [MAX_MODULE_NAME32 + 1]uint16
+	ExePath      [MAX_PATH]uint16
+}
+
+const SizeofModuleEntry32 = unsafe.Sizeof(ModuleEntry32{})
+
+type Systemtime struct {
+	Year         uint16
+	Month        uint16
+	DayOfWeek    uint16
+	Day          uint16
+	Hour         uint16
+	Minute       uint16
+	Second       uint16
+	Milliseconds uint16
+}
+
+type Timezoneinformation struct {
+	Bias         int32
+	StandardName [32]uint16
+	StandardDate Systemtime
+	StandardBias int32
+	DaylightName [32]uint16
+	DaylightDate Systemtime
+	DaylightBias int32
+}
+
+// Socket related.
+
+const (
+	AF_UNSPEC  = 0
+	AF_UNIX    = 1
+	AF_INET    = 2
+	AF_NETBIOS = 17
+	AF_INET6   = 23
+	AF_IRDA    = 26
+	AF_BTH     = 32
+
+	SOCK_STREAM    = 1
+	SOCK_DGRAM     = 2
+	SOCK_RAW       = 3
+	SOCK_RDM       = 4
+	SOCK_SEQPACKET = 5
+
+	IPPROTO_IP      = 0
+	IPPROTO_ICMP    = 1
+	IPPROTO_IGMP    = 2
+	BTHPROTO_RFCOMM = 3
+	IPPROTO_TCP     = 6
+	IPPROTO_UDP     = 17
+	IPPROTO_IPV6    = 41
+	IPPROTO_ICMPV6  = 58
+	IPPROTO_RM      = 113
+
+	SOL_SOCKET                = 0xffff
+	SO_REUSEADDR              = 4
+	SO_KEEPALIVE              = 8
+	SO_DONTROUTE              = 16
+	SO_BROADCAST              = 32
+	SO_LINGER                 = 128
+	SO_RCVBUF                 = 0x1002
+	SO_RCVTIMEO               = 0x1006
+	SO_SNDBUF                 = 0x1001
+	SO_UPDATE_ACCEPT_CONTEXT  = 0x700b
+	SO_UPDATE_CONNECT_CONTEXT = 0x7010
+
+	IOC_OUT                            = 0x40000000
+	IOC_IN                             = 0x80000000
+	IOC_VENDOR                         = 0x18000000
+	IOC_INOUT                          = IOC_IN | IOC_OUT
+	IOC_WS2                            = 0x08000000
+	SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6
+	SIO_KEEPALIVE_VALS                 = IOC_IN | IOC_VENDOR | 4
+	SIO_UDP_CONNRESET                  = IOC_IN | IOC_VENDOR | 12
+	SIO_UDP_NETRESET                   = IOC_IN | IOC_VENDOR | 15
+
+	// cf. http://support.microsoft.com/default.aspx?scid=kb;en-us;257460
+
+	IP_HDRINCL         = 0x2
+	IP_TOS             = 0x3
+	IP_TTL             = 0x4
+	IP_MULTICAST_IF    = 0x9
+	IP_MULTICAST_TTL   = 0xa
+	IP_MULTICAST_LOOP  = 0xb
+	IP_ADD_MEMBERSHIP  = 0xc
+	IP_DROP_MEMBERSHIP = 0xd
+	IP_PKTINFO         = 0x13
+	IP_MTU_DISCOVER    = 0x47
+
+	IPV6_V6ONLY         = 0x1b
+	IPV6_UNICAST_HOPS   = 0x4
+	IPV6_MULTICAST_IF   = 0x9
+	IPV6_MULTICAST_HOPS = 0xa
+	IPV6_MULTICAST_LOOP = 0xb
+	IPV6_JOIN_GROUP     = 0xc
+	IPV6_LEAVE_GROUP    = 0xd
+	IPV6_PKTINFO        = 0x13
+	IPV6_MTU_DISCOVER   = 0x47
+
+	MSG_OOB       = 0x1
+	MSG_PEEK      = 0x2
+	MSG_DONTROUTE = 0x4
+	MSG_WAITALL   = 0x8
+
+	MSG_TRUNC  = 0x0100
+	MSG_CTRUNC = 0x0200
+	MSG_BCAST  = 0x0400
+	MSG_MCAST  = 0x0800
+
+	SOMAXCONN = 0x7fffffff
+
+	TCP_NODELAY                    = 1
+	TCP_EXPEDITED_1122             = 2
+	TCP_KEEPALIVE                  = 3
+	TCP_MAXSEG                     = 4
+	TCP_MAXRT                      = 5
+	TCP_STDURG                     = 6
+	TCP_NOURG                      = 7
+	TCP_ATMARK                     = 8
+	TCP_NOSYNRETRIES               = 9
+	TCP_TIMESTAMPS                 = 10
+	TCP_OFFLOAD_PREFERENCE         = 11
+	TCP_CONGESTION_ALGORITHM       = 12
+	TCP_DELAY_FIN_ACK              = 13
+	TCP_MAXRTMS                    = 14
+	TCP_FASTOPEN                   = 15
+	TCP_KEEPCNT                    = 16
+	TCP_KEEPIDLE                   = TCP_KEEPALIVE
+	TCP_KEEPINTVL                  = 17
+	TCP_FAIL_CONNECT_ON_ICMP_ERROR = 18
+	TCP_ICMP_ERROR_INFO            = 19
+
+	UDP_NOCHECKSUM              = 1
+	UDP_SEND_MSG_SIZE           = 2
+	UDP_RECV_MAX_COALESCED_SIZE = 3
+	UDP_CHECKSUM_COVERAGE       = 20
+
+	UDP_COALESCED_INFO = 3
+
+	SHUT_RD   = 0
+	SHUT_WR   = 1
+	SHUT_RDWR = 2
+
+	WSADESCRIPTION_LEN = 256
+	WSASYS_STATUS_LEN  = 128
+)
+
+// enum PMTUD_STATE from ws2ipdef.h
+const (
+	IP_PMTUDISC_NOT_SET = 0
+	IP_PMTUDISC_DO      = 1
+	IP_PMTUDISC_DONT    = 2
+	IP_PMTUDISC_PROBE   = 3
+	IP_PMTUDISC_MAX     = 4
+)
+
+type WSABuf struct {
+	Len uint32
+	Buf *byte
+}
+
+type WSAMsg struct {
+	Name        *syscall.RawSockaddrAny
+	Namelen     int32
+	Buffers     *WSABuf
+	BufferCount uint32
+	Control     WSABuf
+	Flags       uint32
+}
+
+type WSACMSGHDR struct {
+	Len   uintptr
+	Level int32
+	Type  int32
+}
+
+type IN_PKTINFO struct {
+	Addr    [4]byte
+	Ifindex uint32
+}
+
+type IN6_PKTINFO struct {
+	Addr    [16]byte
+	Ifindex uint32
+}
+
+// Flags for WSASocket
+const (
+	WSA_FLAG_OVERLAPPED             = 0x01
+	WSA_FLAG_MULTIPOINT_C_ROOT      = 0x02
+	WSA_FLAG_MULTIPOINT_C_LEAF      = 0x04
+	WSA_FLAG_MULTIPOINT_D_ROOT      = 0x08
+	WSA_FLAG_MULTIPOINT_D_LEAF      = 0x10
+	WSA_FLAG_ACCESS_SYSTEM_SECURITY = 0x40
+	WSA_FLAG_NO_HANDLE_INHERIT      = 0x80
+	WSA_FLAG_REGISTERED_IO          = 0x100
+)
+
+// Invented values to support what package os expects.
+const (
+	S_IFMT   = 0x1f000
+	S_IFIFO  = 0x1000
+	S_IFCHR  = 0x2000
+	S_IFDIR  = 0x4000
+	S_IFBLK  = 0x6000
+	S_IFREG  = 0x8000
+	S_IFLNK  = 0xa000
+	S_IFSOCK = 0xc000
+	S_ISUID  = 0x800
+	S_ISGID  = 0x400
+	S_ISVTX  = 0x200
+	S_IRUSR  = 0x100
+	S_IWRITE = 0x80
+	S_IWUSR  = 0x80
+	S_IXUSR  = 0x40
+)
+
+const (
+	FILE_TYPE_CHAR    = 0x0002
+	FILE_TYPE_DISK    = 0x0001
+	FILE_TYPE_PIPE    = 0x0003
+	FILE_TYPE_REMOTE  = 0x8000
+	FILE_TYPE_UNKNOWN = 0x0000
+)
+
+type Hostent struct {
+	Name     *byte
+	Aliases  **byte
+	AddrType uint16
+	Length   uint16
+	AddrList **byte
+}
+
+type Protoent struct {
+	Name    *byte
+	Aliases **byte
+	Proto   uint16
+}
+
+const (
+	DNS_TYPE_A       = 0x0001
+	DNS_TYPE_NS      = 0x0002
+	DNS_TYPE_MD      = 0x0003
+	DNS_TYPE_MF      = 0x0004
+	DNS_TYPE_CNAME   = 0x0005
+	DNS_TYPE_SOA     = 0x0006
+	DNS_TYPE_MB      = 0x0007
+	DNS_TYPE_MG      = 0x0008
+	DNS_TYPE_MR      = 0x0009
+	DNS_TYPE_NULL    = 0x000a
+	DNS_TYPE_WKS     = 0x000b
+	DNS_TYPE_PTR     = 0x000c
+	DNS_TYPE_HINFO   = 0x000d
+	DNS_TYPE_MINFO   = 0x000e
+	DNS_TYPE_MX      = 0x000f
+	DNS_TYPE_TEXT    = 0x0010
+	DNS_TYPE_RP      = 0x0011
+	DNS_TYPE_AFSDB   = 0x0012
+	DNS_TYPE_X25     = 0x0013
+	DNS_TYPE_ISDN    = 0x0014
+	DNS_TYPE_RT      = 0x0015
+	DNS_TYPE_NSAP    = 0x0016
+	DNS_TYPE_NSAPPTR = 0x0017
+	DNS_TYPE_SIG     = 0x0018
+	DNS_TYPE_KEY     = 0x0019
+	DNS_TYPE_PX      = 0x001a
+	DNS_TYPE_GPOS    = 0x001b
+	DNS_TYPE_AAAA    = 0x001c
+	DNS_TYPE_LOC     = 0x001d
+	DNS_TYPE_NXT     = 0x001e
+	DNS_TYPE_EID     = 0x001f
+	DNS_TYPE_NIMLOC  = 0x0020
+	DNS_TYPE_SRV     = 0x0021
+	DNS_TYPE_ATMA    = 0x0022
+	DNS_TYPE_NAPTR   = 0x0023
+	DNS_TYPE_KX      = 0x0024
+	DNS_TYPE_CERT    = 0x0025
+	DNS_TYPE_A6      = 0x0026
+	DNS_TYPE_DNAME   = 0x0027
+	DNS_TYPE_SINK    = 0x0028
+	DNS_TYPE_OPT     = 0x0029
+	DNS_TYPE_DS      = 0x002B
+	DNS_TYPE_RRSIG   = 0x002E
+	DNS_TYPE_NSEC    = 0x002F
+	DNS_TYPE_DNSKEY  = 0x0030
+	DNS_TYPE_DHCID   = 0x0031
+	DNS_TYPE_UINFO   = 0x0064
+	DNS_TYPE_UID     = 0x0065
+	DNS_TYPE_GID     = 0x0066
+	DNS_TYPE_UNSPEC  = 0x0067
+	DNS_TYPE_ADDRS   = 0x00f8
+	DNS_TYPE_TKEY    = 0x00f9
+	DNS_TYPE_TSIG    = 0x00fa
+	DNS_TYPE_IXFR    = 0x00fb
+	DNS_TYPE_AXFR    = 0x00fc
+	DNS_TYPE_MAILB   = 0x00fd
+	DNS_TYPE_MAILA   = 0x00fe
+	DNS_TYPE_ALL     = 0x00ff
+	DNS_TYPE_ANY     = 0x00ff
+	DNS_TYPE_WINS    = 0xff01
+	DNS_TYPE_WINSR   = 0xff02
+	DNS_TYPE_NBSTAT  = 0xff01
+)
+
+const (
+	// flags inside DNSRecord.Dw
+	DnsSectionQuestion   = 0x0000
+	DnsSectionAnswer     = 0x0001
+	DnsSectionAuthority  = 0x0002
+	DnsSectionAdditional = 0x0003
+)
+
+const (
+	// flags of WSALookupService
+	LUP_DEEP                = 0x0001
+	LUP_CONTAINERS          = 0x0002
+	LUP_NOCONTAINERS        = 0x0004
+	LUP_NEAREST             = 0x0008
+	LUP_RETURN_NAME         = 0x0010
+	LUP_RETURN_TYPE         = 0x0020
+	LUP_RETURN_VERSION      = 0x0040
+	LUP_RETURN_COMMENT      = 0x0080
+	LUP_RETURN_ADDR         = 0x0100
+	LUP_RETURN_BLOB         = 0x0200
+	LUP_RETURN_ALIASES      = 0x0400
+	LUP_RETURN_QUERY_STRING = 0x0800
+	LUP_RETURN_ALL          = 0x0FF0
+	LUP_RES_SERVICE         = 0x8000
+
+	LUP_FLUSHCACHE    = 0x1000
+	LUP_FLUSHPREVIOUS = 0x2000
+
+	LUP_NON_AUTHORITATIVE      = 0x4000
+	LUP_SECURE                 = 0x8000
+	LUP_RETURN_PREFERRED_NAMES = 0x10000
+	LUP_DNS_ONLY               = 0x20000
+
+	LUP_ADDRCONFIG           = 0x100000
+	LUP_DUAL_ADDR            = 0x200000
+	LUP_FILESERVER           = 0x400000
+	LUP_DISABLE_IDN_ENCODING = 0x00800000
+	LUP_API_ANSI             = 0x01000000
+
+	LUP_RESOLUTION_HANDLE = 0x80000000
+)
+
+const (
+	// values of WSAQUERYSET's namespace
+	NS_ALL       = 0
+	NS_DNS       = 12
+	NS_NLA       = 15
+	NS_BTH       = 16
+	NS_EMAIL     = 37
+	NS_PNRPNAME  = 38
+	NS_PNRPCLOUD = 39
+)
+
+type DNSSRVData struct {
+	Target   *uint16
+	Priority uint16
+	Weight   uint16
+	Port     uint16
+	Pad      uint16
+}
+
+type DNSPTRData struct {
+	Host *uint16
+}
+
+type DNSMXData struct {
+	NameExchange *uint16
+	Preference   uint16
+	Pad          uint16
+}
+
+type DNSTXTData struct {
+	StringCount uint16
+	StringArray [1]*uint16
+}
+
+type DNSRecord struct {
+	Next     *DNSRecord
+	Name     *uint16
+	Type     uint16
+	Length   uint16
+	Dw       uint32
+	Ttl      uint32
+	Reserved uint32
+	Data     [40]byte
+}
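+
+// Usage sketch (DnsQuery and DnsRecordListFree are this package's wrappers
+// around dnsapi.dll; a freetype of 1 means DnsFreeRecordList, and error
+// handling is elided):
+//
+//	var rec *DNSRecord
+//	if err := DnsQuery("example.com", DNS_TYPE_A, 0, nil, &rec, nil); err == nil {
+//		defer DnsRecordListFree(rec, 1)
+//		for r := rec; r != nil; r = r.Next {
+//			// r.Type tells how to interpret the record's Data bytes.
+//		}
+//	}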
+
+const (
+	TF_DISCONNECT         = 1
+	TF_REUSE_SOCKET       = 2
+	TF_WRITE_BEHIND       = 4
+	TF_USE_DEFAULT_WORKER = 0
+	TF_USE_SYSTEM_THREAD  = 16
+	TF_USE_KERNEL_APC     = 32
+)
+
+type TransmitFileBuffers struct {
+	Head       uintptr
+	HeadLength uint32
+	Tail       uintptr
+	TailLength uint32
+}
+
+const (
+	IFF_UP           = 1
+	IFF_BROADCAST    = 2
+	IFF_LOOPBACK     = 4
+	IFF_POINTTOPOINT = 8
+	IFF_MULTICAST    = 16
+)
+
+const SIO_GET_INTERFACE_LIST = 0x4004747F
+
+// TODO(mattn): SockaddrGen is a union of sockaddr/sockaddr_in/sockaddr_in6_old;
+// the type should eventually be replaced with something more suitable.
+
+type SockaddrGen [24]byte
+
+type InterfaceInfo struct {
+	Flags            uint32
+	Address          SockaddrGen
+	BroadcastAddress SockaddrGen
+	Netmask          SockaddrGen
+}
+
+type IpAddressString struct {
+	String [16]byte
+}
+
+type IpMaskString IpAddressString
+
+type IpAddrString struct {
+	Next      *IpAddrString
+	IpAddress IpAddressString
+	IpMask    IpMaskString
+	Context   uint32
+}
+
+const MAX_ADAPTER_NAME_LENGTH = 256
+const MAX_ADAPTER_DESCRIPTION_LENGTH = 128
+const MAX_ADAPTER_ADDRESS_LENGTH = 8
+
+type IpAdapterInfo struct {
+	Next                *IpAdapterInfo
+	ComboIndex          uint32
+	AdapterName         [MAX_ADAPTER_NAME_LENGTH + 4]byte
+	Description         [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte
+	AddressLength       uint32
+	Address             [MAX_ADAPTER_ADDRESS_LENGTH]byte
+	Index               uint32
+	Type                uint32
+	DhcpEnabled         uint32
+	CurrentIpAddress    *IpAddrString
+	IpAddressList       IpAddrString
+	GatewayList         IpAddrString
+	DhcpServer          IpAddrString
+	HaveWins            bool
+	PrimaryWinsServer   IpAddrString
+	SecondaryWinsServer IpAddrString
+	LeaseObtained       int64
+	LeaseExpires        int64
+}
+
+const MAXLEN_PHYSADDR = 8
+const MAX_INTERFACE_NAME_LEN = 256
+const MAXLEN_IFDESCR = 256
+
+type MibIfRow struct {
+	Name            [MAX_INTERFACE_NAME_LEN]uint16
+	Index           uint32
+	Type            uint32
+	Mtu             uint32
+	Speed           uint32
+	PhysAddrLen     uint32
+	PhysAddr        [MAXLEN_PHYSADDR]byte
+	AdminStatus     uint32
+	OperStatus      uint32
+	LastChange      uint32
+	InOctets        uint32
+	InUcastPkts     uint32
+	InNUcastPkts    uint32
+	InDiscards      uint32
+	InErrors        uint32
+	InUnknownProtos uint32
+	OutOctets       uint32
+	OutUcastPkts    uint32
+	OutNUcastPkts   uint32
+	OutDiscards     uint32
+	OutErrors       uint32
+	OutQLen         uint32
+	DescrLen        uint32
+	Descr           [MAXLEN_IFDESCR]byte
+}
+
+type CertInfo struct {
+	Version              uint32
+	SerialNumber         CryptIntegerBlob
+	SignatureAlgorithm   CryptAlgorithmIdentifier
+	Issuer               CertNameBlob
+	NotBefore            Filetime
+	NotAfter             Filetime
+	Subject              CertNameBlob
+	SubjectPublicKeyInfo CertPublicKeyInfo
+	IssuerUniqueId       CryptBitBlob
+	SubjectUniqueId      CryptBitBlob
+	CountExtensions      uint32
+	Extensions           *CertExtension
+}
+
+type CertExtension struct {
+	ObjId    *byte
+	Critical int32
+	Value    CryptObjidBlob
+}
+
+type CryptAlgorithmIdentifier struct {
+	ObjId      *byte
+	Parameters CryptObjidBlob
+}
+
+type CertPublicKeyInfo struct {
+	Algorithm CryptAlgorithmIdentifier
+	PublicKey CryptBitBlob
+}
+
+type DataBlob struct {
+	Size uint32
+	Data *byte
+}
+type CryptIntegerBlob DataBlob
+type CryptUintBlob DataBlob
+type CryptObjidBlob DataBlob
+type CertNameBlob DataBlob
+type CertRdnValueBlob DataBlob
+type CertBlob DataBlob
+type CrlBlob DataBlob
+type CryptDataBlob DataBlob
+type CryptHashBlob DataBlob
+type CryptDigestBlob DataBlob
+type CryptDerBlob DataBlob
+type CryptAttrBlob DataBlob
+
+type CryptBitBlob struct {
+	Size       uint32
+	Data       *byte
+	UnusedBits uint32
+}
+
+type CertContext struct {
+	EncodingType uint32
+	EncodedCert  *byte
+	Length       uint32
+	CertInfo     *CertInfo
+	Store        Handle
+}
+
+type CertChainContext struct {
+	Size                       uint32
+	TrustStatus                CertTrustStatus
+	ChainCount                 uint32
+	Chains                     **CertSimpleChain
+	LowerQualityChainCount     uint32
+	LowerQualityChains         **CertChainContext
+	HasRevocationFreshnessTime uint32
+	RevocationFreshnessTime    uint32
+}
+
+type CertTrustListInfo struct {
+	// Not implemented
+}
+
+type CertSimpleChain struct {
+	Size                       uint32
+	TrustStatus                CertTrustStatus
+	NumElements                uint32
+	Elements                   **CertChainElement
+	TrustListInfo              *CertTrustListInfo
+	HasRevocationFreshnessTime uint32
+	RevocationFreshnessTime    uint32
+}
+
+type CertChainElement struct {
+	Size              uint32
+	CertContext       *CertContext
+	TrustStatus       CertTrustStatus
+	RevocationInfo    *CertRevocationInfo
+	IssuanceUsage     *CertEnhKeyUsage
+	ApplicationUsage  *CertEnhKeyUsage
+	ExtendedErrorInfo *uint16
+}
+
+type CertRevocationCrlInfo struct {
+	// Not implemented
+}
+
+type CertRevocationInfo struct {
+	Size             uint32
+	RevocationResult uint32
+	RevocationOid    *byte
+	OidSpecificInfo  Pointer
+	HasFreshnessTime uint32
+	FreshnessTime    uint32
+	CrlInfo          *CertRevocationCrlInfo
+}
+
+type CertTrustStatus struct {
+	ErrorStatus uint32
+	InfoStatus  uint32
+}
+
+type CertUsageMatch struct {
+	Type  uint32
+	Usage CertEnhKeyUsage
+}
+
+type CertEnhKeyUsage struct {
+	Length           uint32
+	UsageIdentifiers **byte
+}
+
+type CertChainPara struct {
+	Size                         uint32
+	RequestedUsage               CertUsageMatch
+	RequstedIssuancePolicy       CertUsageMatch // sic; misspelling preserved for API compatibility
+	URLRetrievalTimeout          uint32
+	CheckRevocationFreshnessTime uint32
+	RevocationFreshnessTime      uint32
+	CacheResync                  *Filetime
+}
+
+type CertChainPolicyPara struct {
+	Size            uint32
+	Flags           uint32
+	ExtraPolicyPara Pointer
+}
+
+type SSLExtraCertChainPolicyPara struct {
+	Size       uint32
+	AuthType   uint32
+	Checks     uint32
+	ServerName *uint16
+}
+
+type CertChainPolicyStatus struct {
+	Size              uint32
+	Error             uint32
+	ChainIndex        uint32
+	ElementIndex      uint32
+	ExtraPolicyStatus Pointer
+}
+
+type CertPolicyInfo struct {
+	Identifier      *byte
+	CountQualifiers uint32
+	Qualifiers      *CertPolicyQualifierInfo
+}
+
+type CertPoliciesInfo struct {
+	Count       uint32
+	PolicyInfos *CertPolicyInfo
+}
+
+type CertPolicyQualifierInfo struct {
+	// Not implemented
+}
+
+type CertStrongSignPara struct {
+	Size                      uint32
+	InfoChoice                uint32
+	InfoOrSerializedInfoOrOID unsafe.Pointer
+}
+
+type CryptProtectPromptStruct struct {
+	Size        uint32
+	PromptFlags uint32
+	App         HWND
+	Prompt      *uint16
+}
+
+type CertChainFindByIssuerPara struct {
+	Size                   uint32
+	UsageIdentifier        *byte
+	KeySpec                uint32
+	AcquirePrivateKeyFlags uint32
+	IssuerCount            uint32
+	Issuer                 Pointer
+	FindCallback           Pointer
+	FindArg                Pointer
+	IssuerChainIndex       *uint32
+	IssuerElementIndex     *uint32
+}
+
+type WinTrustData struct {
+	Size                            uint32
+	PolicyCallbackData              uintptr
+	SIPClientData                   uintptr
+	UIChoice                        uint32
+	RevocationChecks                uint32
+	UnionChoice                     uint32
+	FileOrCatalogOrBlobOrSgnrOrCert unsafe.Pointer
+	StateAction                     uint32
+	StateData                       Handle
+	URLReference                    *uint16
+	ProvFlags                       uint32
+	UIContext                       uint32
+	SignatureSettings               *WinTrustSignatureSettings
+}
+
+type WinTrustFileInfo struct {
+	Size         uint32
+	FilePath     *uint16
+	File         Handle
+	KnownSubject *GUID
+}
+
+type WinTrustSignatureSettings struct {
+	Size             uint32
+	Index            uint32
+	Flags            uint32
+	SecondarySigs    uint32
+	VerifiedSigIndex uint32
+	CryptoPolicy     *CertStrongSignPara
+}
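+
+// Usage sketch: verifying an Authenticode signature with WinVerifyTrustEx.
+// A minimal sketch: the file path is a placeholder, error handling is elided,
+// and the second call with WTD_STATEACTION_CLOSE releases StateData:
+//
+//	path, _ := UTF16PtrFromString(`C:\path\to\binary.exe`)
+//	fi := &WinTrustFileInfo{Size: uint32(unsafe.Sizeof(WinTrustFileInfo{})), FilePath: path}
+//	wd := &WinTrustData{
+//		Size:                            uint32(unsafe.Sizeof(WinTrustData{})),
+//		UIChoice:                        WTD_UI_NONE,
+//		RevocationChecks:                WTD_REVOKE_NONE,
+//		UnionChoice:                     WTD_CHOICE_FILE,
+//		FileOrCatalogOrBlobOrSgnrOrCert: unsafe.Pointer(fi),
+//		StateAction:                     WTD_STATEACTION_VERIFY,
+//	}
+//	err := WinVerifyTrustEx(0, &WINTRUST_ACTION_GENERIC_VERIFY_V2, wd) // nil => trusted
+//	wd.StateAction = WTD_STATEACTION_CLOSE
+//	WinVerifyTrustEx(0, &WINTRUST_ACTION_GENERIC_VERIFY_V2, wd)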
+
+const (
+	// do not reorder
+	HKEY_CLASSES_ROOT = 0x80000000 + iota
+	HKEY_CURRENT_USER
+	HKEY_LOCAL_MACHINE
+	HKEY_USERS
+	HKEY_PERFORMANCE_DATA
+	HKEY_CURRENT_CONFIG
+	HKEY_DYN_DATA
+
+	KEY_QUERY_VALUE        = 1
+	KEY_SET_VALUE          = 2
+	KEY_CREATE_SUB_KEY     = 4
+	KEY_ENUMERATE_SUB_KEYS = 8
+	KEY_NOTIFY             = 16
+	KEY_CREATE_LINK        = 32
+	KEY_WRITE              = 0x20006
+	KEY_EXECUTE            = 0x20019
+	KEY_READ               = 0x20019
+	KEY_WOW64_64KEY        = 0x0100
+	KEY_WOW64_32KEY        = 0x0200
+	KEY_ALL_ACCESS         = 0xf003f
+)
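+
+// Usage sketch: opening a key for reading with the HKEY_* roots and KEY_*
+// access rights above (RegOpenKeyEx and RegCloseKey are this package's
+// advapi32 wrappers; the subkey path is a placeholder):
+//
+//	sub, _ := UTF16PtrFromString(`SOFTWARE\Microsoft\Windows NT\CurrentVersion`)
+//	var k Handle
+//	if err := RegOpenKeyEx(HKEY_LOCAL_MACHINE, sub, 0, KEY_READ, &k); err == nil {
+//		defer RegCloseKey(k)
+//		// read values with RegQueryValueEx, enumerate subkeys with RegEnumKeyEx.
+//	}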
+
+const (
+	// do not reorder
+	REG_NONE = iota
+	REG_SZ
+	REG_EXPAND_SZ
+	REG_BINARY
+	REG_DWORD_LITTLE_ENDIAN
+	REG_DWORD_BIG_ENDIAN
+	REG_LINK
+	REG_MULTI_SZ
+	REG_RESOURCE_LIST
+	REG_FULL_RESOURCE_DESCRIPTOR
+	REG_RESOURCE_REQUIREMENTS_LIST
+	REG_QWORD_LITTLE_ENDIAN
+	REG_DWORD = REG_DWORD_LITTLE_ENDIAN
+	REG_QWORD = REG_QWORD_LITTLE_ENDIAN
+)
+
+const (
+	EVENT_MODIFY_STATE = 0x0002
+	EVENT_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
+
+	MUTANT_QUERY_STATE = 0x0001
+	MUTANT_ALL_ACCESS  = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE
+
+	SEMAPHORE_MODIFY_STATE = 0x0002
+	SEMAPHORE_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3
+
+	TIMER_QUERY_STATE  = 0x0001
+	TIMER_MODIFY_STATE = 0x0002
+	TIMER_ALL_ACCESS   = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE
+
+	MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE
+	MUTEX_ALL_ACCESS   = MUTANT_ALL_ACCESS
+
+	CREATE_EVENT_MANUAL_RESET  = 0x1
+	CREATE_EVENT_INITIAL_SET   = 0x2
+	CREATE_MUTEX_INITIAL_OWNER = 0x1
+)
+
+type AddrinfoW struct {
+	Flags     int32
+	Family    int32
+	Socktype  int32
+	Protocol  int32
+	Addrlen   uintptr
+	Canonname *uint16
+	Addr      uintptr
+	Next      *AddrinfoW
+}
+
+const (
+	AI_PASSIVE     = 1
+	AI_CANONNAME   = 2
+	AI_NUMERICHOST = 4
+)
+
+type GUID struct {
+	Data1 uint32
+	Data2 uint16
+	Data3 uint16
+	Data4 [8]byte
+}
+
+var WSAID_CONNECTEX = GUID{
+	0x25a207b9,
+	0xddf3,
+	0x4660,
+	[8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e},
+}
+
+var WSAID_WSASENDMSG = GUID{
+	0xa441e712,
+	0x754f,
+	0x43ca,
+	[8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d},
+}
+
+var WSAID_WSARECVMSG = GUID{
+	0xf689d7c8,
+	0x6f1f,
+	0x436b,
+	[8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22},
+}
+
+const (
+	FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
+	FILE_SKIP_SET_EVENT_ON_HANDLE        = 2
+)
+
+const (
+	WSAPROTOCOL_LEN    = 255
+	MAX_PROTOCOL_CHAIN = 7
+	BASE_PROTOCOL      = 1
+	LAYERED_PROTOCOL   = 0
+
+	XP1_CONNECTIONLESS           = 0x00000001
+	XP1_GUARANTEED_DELIVERY      = 0x00000002
+	XP1_GUARANTEED_ORDER         = 0x00000004
+	XP1_MESSAGE_ORIENTED         = 0x00000008
+	XP1_PSEUDO_STREAM            = 0x00000010
+	XP1_GRACEFUL_CLOSE           = 0x00000020
+	XP1_EXPEDITED_DATA           = 0x00000040
+	XP1_CONNECT_DATA             = 0x00000080
+	XP1_DISCONNECT_DATA          = 0x00000100
+	XP1_SUPPORT_BROADCAST        = 0x00000200
+	XP1_SUPPORT_MULTIPOINT       = 0x00000400
+	XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800
+	XP1_MULTIPOINT_DATA_PLANE    = 0x00001000
+	XP1_QOS_SUPPORTED            = 0x00002000
+	XP1_UNI_SEND                 = 0x00008000
+	XP1_UNI_RECV                 = 0x00010000
+	XP1_IFS_HANDLES              = 0x00020000
+	XP1_PARTIAL_MESSAGE          = 0x00040000
+	XP1_SAN_SUPPORT_SDP          = 0x00080000
+
+	PFL_MULTIPLE_PROTO_ENTRIES  = 0x00000001
+	PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002
+	PFL_HIDDEN                  = 0x00000004
+	PFL_MATCHES_PROTOCOL_ZERO   = 0x00000008
+	PFL_NETWORKDIRECT_PROVIDER  = 0x00000010
+)
+
+type WSAProtocolInfo struct {
+	ServiceFlags1     uint32
+	ServiceFlags2     uint32
+	ServiceFlags3     uint32
+	ServiceFlags4     uint32
+	ProviderFlags     uint32
+	ProviderId        GUID
+	CatalogEntryId    uint32
+	ProtocolChain     WSAProtocolChain
+	Version           int32
+	AddressFamily     int32
+	MaxSockAddr       int32
+	MinSockAddr       int32
+	SocketType        int32
+	Protocol          int32
+	ProtocolMaxOffset int32
+	NetworkByteOrder  int32
+	SecurityScheme    int32
+	MessageSize       uint32
+	ProviderReserved  uint32
+	ProtocolName      [WSAPROTOCOL_LEN + 1]uint16
+}
+
+type WSAProtocolChain struct {
+	ChainLen     int32
+	ChainEntries [MAX_PROTOCOL_CHAIN]uint32
+}
+
+type TCPKeepalive struct {
+	OnOff    uint32
+	Time     uint32
+	Interval uint32
+}
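+
+// Usage sketch: turning on TCP keepalives via SIO_KEEPALIVE_VALS (times are
+// in milliseconds; sock is an assumed socket Handle, error handling elided):
+//
+//	ka := TCPKeepalive{OnOff: 1, Time: 30000, Interval: 1000}
+//	var ret uint32
+//	err := WSAIoctl(sock, SIO_KEEPALIVE_VALS, (*byte)(unsafe.Pointer(&ka)),
+//		uint32(unsafe.Sizeof(ka)), nil, 0, &ret, nil, 0)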
+
+type symbolicLinkReparseBuffer struct {
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+	Flags                uint32
+	PathBuffer           [1]uint16
+}
+
+type mountPointReparseBuffer struct {
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+	PathBuffer           [1]uint16
+}
+
+type reparseDataBuffer struct {
+	ReparseTag        uint32
+	ReparseDataLength uint16
+	Reserved          uint16
+
+	// GenericReparseBuffer
+	reparseBuffer byte
+}
+
+const (
+	FSCTL_CREATE_OR_GET_OBJECT_ID             = 0x0900C0
+	FSCTL_DELETE_OBJECT_ID                    = 0x0900A0
+	FSCTL_DELETE_REPARSE_POINT                = 0x0900AC
+	FSCTL_DUPLICATE_EXTENTS_TO_FILE           = 0x098344
+	FSCTL_DUPLICATE_EXTENTS_TO_FILE_EX        = 0x0983E8
+	FSCTL_FILESYSTEM_GET_STATISTICS           = 0x090060
+	FSCTL_FILE_LEVEL_TRIM                     = 0x098208
+	FSCTL_FIND_FILES_BY_SID                   = 0x09008F
+	FSCTL_GET_COMPRESSION                     = 0x09003C
+	FSCTL_GET_INTEGRITY_INFORMATION           = 0x09027C
+	FSCTL_GET_NTFS_VOLUME_DATA                = 0x090064
+	FSCTL_GET_REFS_VOLUME_DATA                = 0x0902D8
+	FSCTL_GET_OBJECT_ID                       = 0x09009C
+	FSCTL_GET_REPARSE_POINT                   = 0x0900A8
+	FSCTL_GET_RETRIEVAL_POINTER_COUNT         = 0x09042B
+	FSCTL_GET_RETRIEVAL_POINTERS              = 0x090073
+	FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT = 0x0903D3
+	FSCTL_IS_PATHNAME_VALID                   = 0x09002C
+	FSCTL_LMR_SET_LINK_TRACKING_INFORMATION   = 0x1400EC
+	FSCTL_MARK_HANDLE                         = 0x0900FC
+	FSCTL_OFFLOAD_READ                        = 0x094264
+	FSCTL_OFFLOAD_WRITE                       = 0x098268
+	FSCTL_PIPE_PEEK                           = 0x11400C
+	FSCTL_PIPE_TRANSCEIVE                     = 0x11C017
+	FSCTL_PIPE_WAIT                           = 0x110018
+	FSCTL_QUERY_ALLOCATED_RANGES              = 0x0940CF
+	FSCTL_QUERY_FAT_BPB                       = 0x090058
+	FSCTL_QUERY_FILE_REGIONS                  = 0x090284
+	FSCTL_QUERY_ON_DISK_VOLUME_INFO           = 0x09013C
+	FSCTL_QUERY_SPARING_INFO                  = 0x090138
+	FSCTL_READ_FILE_USN_DATA                  = 0x0900EB
+	FSCTL_RECALL_FILE                         = 0x090117
+	FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT     = 0x090440
+	FSCTL_SET_COMPRESSION                     = 0x09C040
+	FSCTL_SET_DEFECT_MANAGEMENT               = 0x098134
+	FSCTL_SET_ENCRYPTION                      = 0x0900D7
+	FSCTL_SET_INTEGRITY_INFORMATION           = 0x09C280
+	FSCTL_SET_INTEGRITY_INFORMATION_EX        = 0x090380
+	FSCTL_SET_OBJECT_ID                       = 0x090098
+	FSCTL_SET_OBJECT_ID_EXTENDED              = 0x0900BC
+	FSCTL_SET_REPARSE_POINT                   = 0x0900A4
+	FSCTL_SET_SPARSE                          = 0x0900C4
+	FSCTL_SET_ZERO_DATA                       = 0x0980C8
+	FSCTL_SET_ZERO_ON_DEALLOCATION            = 0x090194
+	FSCTL_SIS_COPYFILE                        = 0x090100
+	FSCTL_WRITE_USN_CLOSE_RECORD              = 0x0900EF
+
+	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
+	IO_REPARSE_TAG_MOUNT_POINT       = 0xA0000003
+	IO_REPARSE_TAG_SYMLINK           = 0xA000000C
+	SYMBOLIC_LINK_FLAG_DIRECTORY     = 0x1
+)
+
+// FILE_ZERO_DATA_INFORMATION from winioctl.h
+type FileZeroDataInformation struct {
+	FileOffset      int64
+	BeyondFinalZero int64
+}
+
+const (
+	ComputerNameNetBIOS                   = 0
+	ComputerNameDnsHostname               = 1
+	ComputerNameDnsDomain                 = 2
+	ComputerNameDnsFullyQualified         = 3
+	ComputerNamePhysicalNetBIOS           = 4
+	ComputerNamePhysicalDnsHostname       = 5
+	ComputerNamePhysicalDnsDomain         = 6
+	ComputerNamePhysicalDnsFullyQualified = 7
+	ComputerNameMax                       = 8
+)
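+
+// Usage sketch: querying one of the name formats above with GetComputerNameEx
+// (this package's wrapper; the buffer size is an assumed upper bound):
+//
+//	n := uint32(256)
+//	buf := make([]uint16, n)
+//	if err := GetComputerNameEx(ComputerNameDnsFullyQualified, &buf[0], &n); err == nil {
+//		name := UTF16ToString(buf[:n])
+//		_ = name
+//	}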
+
+// For MessageBox()
+const (
+	MB_OK                   = 0x00000000
+	MB_OKCANCEL             = 0x00000001
+	MB_ABORTRETRYIGNORE     = 0x00000002
+	MB_YESNOCANCEL          = 0x00000003
+	MB_YESNO                = 0x00000004
+	MB_RETRYCANCEL          = 0x00000005
+	MB_CANCELTRYCONTINUE    = 0x00000006
+	MB_ICONHAND             = 0x00000010
+	MB_ICONQUESTION         = 0x00000020
+	MB_ICONEXCLAMATION      = 0x00000030
+	MB_ICONASTERISK         = 0x00000040
+	MB_USERICON             = 0x00000080
+	MB_ICONWARNING          = MB_ICONEXCLAMATION
+	MB_ICONERROR            = MB_ICONHAND
+	MB_ICONINFORMATION      = MB_ICONASTERISK
+	MB_ICONSTOP             = MB_ICONHAND
+	MB_DEFBUTTON1           = 0x00000000
+	MB_DEFBUTTON2           = 0x00000100
+	MB_DEFBUTTON3           = 0x00000200
+	MB_DEFBUTTON4           = 0x00000300
+	MB_APPLMODAL            = 0x00000000
+	MB_SYSTEMMODAL          = 0x00001000
+	MB_TASKMODAL            = 0x00002000
+	MB_HELP                 = 0x00004000
+	MB_NOFOCUS              = 0x00008000
+	MB_SETFOREGROUND        = 0x00010000
+	MB_DEFAULT_DESKTOP_ONLY = 0x00020000
+	MB_TOPMOST              = 0x00040000
+	MB_RIGHT                = 0x00080000
+	MB_RTLREADING           = 0x00100000
+	MB_SERVICE_NOTIFICATION = 0x00200000
+)
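+
+// Usage sketch (MessageBox is this package's user32 wrapper; the return value
+// identifies the button pressed, e.g. 6 for IDYES and 7 for IDNO):
+//
+//	text, _ := UTF16PtrFromString("Proceed?")
+//	title, _ := UTF16PtrFromString("Example")
+//	ret, _ := MessageBox(0, text, title, MB_YESNO|MB_ICONQUESTION)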
+
+const (
+	MOVEFILE_REPLACE_EXISTING      = 0x1
+	MOVEFILE_COPY_ALLOWED          = 0x2
+	MOVEFILE_DELAY_UNTIL_REBOOT    = 0x4
+	MOVEFILE_WRITE_THROUGH         = 0x8
+	MOVEFILE_CREATE_HARDLINK       = 0x10
+	MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20
+)
+
+// Flags for GetAdaptersAddresses, see
+// https://learn.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getadaptersaddresses.
+const (
+	GAA_FLAG_SKIP_UNICAST                = 0x1
+	GAA_FLAG_SKIP_ANYCAST                = 0x2
+	GAA_FLAG_SKIP_MULTICAST              = 0x4
+	GAA_FLAG_SKIP_DNS_SERVER             = 0x8
+	GAA_FLAG_INCLUDE_PREFIX              = 0x10
+	GAA_FLAG_SKIP_FRIENDLY_NAME          = 0x20
+	GAA_FLAG_INCLUDE_WINS_INFO           = 0x40
+	GAA_FLAG_INCLUDE_GATEWAYS            = 0x80
+	GAA_FLAG_INCLUDE_ALL_INTERFACES      = 0x100
+	GAA_FLAG_INCLUDE_ALL_COMPARTMENTS    = 0x200
+	GAA_FLAG_INCLUDE_TUNNEL_BINDINGORDER = 0x400
+)
+
+const (
+	IF_TYPE_OTHER              = 1
+	IF_TYPE_ETHERNET_CSMACD    = 6
+	IF_TYPE_ISO88025_TOKENRING = 9
+	IF_TYPE_PPP                = 23
+	IF_TYPE_SOFTWARE_LOOPBACK  = 24
+	IF_TYPE_ATM                = 37
+	IF_TYPE_IEEE80211          = 71
+	IF_TYPE_TUNNEL             = 131
+	IF_TYPE_IEEE1394           = 144
+)
+
+// Enum NL_PREFIX_ORIGIN for [IpAdapterUnicastAddress], see
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_prefix_origin
+const (
+	IpPrefixOriginOther               = 0
+	IpPrefixOriginManual              = 1
+	IpPrefixOriginWellKnown           = 2
+	IpPrefixOriginDhcp                = 3
+	IpPrefixOriginRouterAdvertisement = 4
+	IpPrefixOriginUnchanged           = 1 << 4
+)
+
+// Enum NL_SUFFIX_ORIGIN for [IpAdapterUnicastAddress], see
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_suffix_origin
+const (
+	NlsoOther                      = 0
+	NlsoManual                     = 1
+	NlsoWellKnown                  = 2
+	NlsoDhcp                       = 3
+	NlsoLinkLayerAddress           = 4
+	NlsoRandom                     = 5
+	IpSuffixOriginOther            = 0
+	IpSuffixOriginManual           = 1
+	IpSuffixOriginWellKnown        = 2
+	IpSuffixOriginDhcp             = 3
+	IpSuffixOriginLinkLayerAddress = 4
+	IpSuffixOriginRandom           = 5
+	IpSuffixOriginUnchanged        = 1 << 4
+)
+
+// Enum NL_DAD_STATE for [IpAdapterUnicastAddress], see
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_dad_state
+const (
+	NldsInvalid          = 0
+	NldsTentative        = 1
+	NldsDuplicate        = 2
+	NldsDeprecated       = 3
+	NldsPreferred        = 4
+	IpDadStateInvalid    = 0
+	IpDadStateTentative  = 1
+	IpDadStateDuplicate  = 2
+	IpDadStateDeprecated = 3
+	IpDadStatePreferred  = 4
+)
+
+type SocketAddress struct {
+	Sockaddr       *syscall.RawSockaddrAny
+	SockaddrLength int32
+}
+
+// IP returns an IPv4 or IPv6 address, or nil if the underlying SocketAddress is neither.
+func (addr *SocketAddress) IP() net.IP {
+	if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet4{}) && addr.Sockaddr.Addr.Family == AF_INET {
+		return (*RawSockaddrInet4)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
+	} else if uintptr(addr.SockaddrLength) >= unsafe.Sizeof(RawSockaddrInet6{}) && addr.Sockaddr.Addr.Family == AF_INET6 {
+		return (*RawSockaddrInet6)(unsafe.Pointer(addr.Sockaddr)).Addr[:]
+	}
+	return nil
+}
+
+type IpAdapterUnicastAddress struct {
+	Length             uint32
+	Flags              uint32
+	Next               *IpAdapterUnicastAddress
+	Address            SocketAddress
+	PrefixOrigin       int32
+	SuffixOrigin       int32
+	DadState           int32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	LeaseLifetime      uint32
+	OnLinkPrefixLength uint8
+}
+
+type IpAdapterAnycastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterAnycastAddress
+	Address SocketAddress
+}
+
+type IpAdapterMulticastAddress struct {
+	Length  uint32
+	Flags   uint32
+	Next    *IpAdapterMulticastAddress
+	Address SocketAddress
+}
+
+type IpAdapterDnsServerAdapter struct {
+	Length   uint32
+	Reserved uint32
+	Next     *IpAdapterDnsServerAdapter
+	Address  SocketAddress
+}
+
+type IpAdapterPrefix struct {
+	Length       uint32
+	Flags        uint32
+	Next         *IpAdapterPrefix
+	Address      SocketAddress
+	PrefixLength uint32
+}
+
+type IpAdapterAddresses struct {
+	Length                 uint32
+	IfIndex                uint32
+	Next                   *IpAdapterAddresses
+	AdapterName            *byte
+	FirstUnicastAddress    *IpAdapterUnicastAddress
+	FirstAnycastAddress    *IpAdapterAnycastAddress
+	FirstMulticastAddress  *IpAdapterMulticastAddress
+	FirstDnsServerAddress  *IpAdapterDnsServerAdapter
+	DnsSuffix              *uint16
+	Description            *uint16
+	FriendlyName           *uint16
+	PhysicalAddress        [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte
+	PhysicalAddressLength  uint32
+	Flags                  uint32
+	Mtu                    uint32
+	IfType                 uint32
+	OperStatus             uint32
+	Ipv6IfIndex            uint32
+	ZoneIndices            [16]uint32
+	FirstPrefix            *IpAdapterPrefix
+	TransmitLinkSpeed      uint64
+	ReceiveLinkSpeed       uint64
+	FirstWinsServerAddress *IpAdapterWinsServerAddress
+	FirstGatewayAddress    *IpAdapterGatewayAddress
+	Ipv4Metric             uint32
+	Ipv6Metric             uint32
+	Luid                   uint64
+	Dhcpv4Server           SocketAddress
+	CompartmentId          uint32
+	NetworkGuid            GUID
+	ConnectionType         uint32
+	TunnelType             uint32
+	Dhcpv6Server           SocketAddress
+	Dhcpv6ClientDuid       [MAX_DHCPV6_DUID_LENGTH]byte
+	Dhcpv6ClientDuidLength uint32
+	Dhcpv6Iaid             uint32
+	FirstDnsSuffix         *IpAdapterDNSSuffix
+}
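+
+// Usage sketch: walking the adapter list with GetAdaptersAddresses (this
+// package's iphlpapi wrapper). The initial 15 KB buffer is just a common
+// starting size; the call reports the required size through the size pointer:
+//
+//	size := uint32(15000)
+//	for {
+//		buf := make([]byte, size)
+//		aa := (*IpAdapterAddresses)(unsafe.Pointer(&buf[0]))
+//		err := GetAdaptersAddresses(AF_UNSPEC, GAA_FLAG_INCLUDE_PREFIX, 0, aa, &size)
+//		if err == ERROR_BUFFER_OVERFLOW {
+//			continue // retry with the size the call reported
+//		}
+//		if err != nil {
+//			break
+//		}
+//		for ; aa != nil; aa = aa.Next {
+//			for ua := aa.FirstUnicastAddress; ua != nil; ua = ua.Next {
+//				_ = ua.Address.IP()
+//			}
+//		}
+//		break
+//	}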
+
+type IpAdapterWinsServerAddress struct {
+	Length   uint32
+	Reserved uint32
+	Next     *IpAdapterWinsServerAddress
+	Address  SocketAddress
+}
+
+type IpAdapterGatewayAddress struct {
+	Length   uint32
+	Reserved uint32
+	Next     *IpAdapterGatewayAddress
+	Address  SocketAddress
+}
+
+type IpAdapterDNSSuffix struct {
+	Next   *IpAdapterDNSSuffix
+	String [MAX_DNS_SUFFIX_STRING_LENGTH]uint16
+}
+
+const (
+	IfOperStatusUp             = 1
+	IfOperStatusDown           = 2
+	IfOperStatusTesting        = 3
+	IfOperStatusUnknown        = 4
+	IfOperStatusDormant        = 5
+	IfOperStatusNotPresent     = 6
+	IfOperStatusLowerLayerDown = 7
+)
+
+const (
+	IF_MAX_PHYS_ADDRESS_LENGTH = 32
+	IF_MAX_STRING_SIZE         = 256
+)
+
+// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex.
+const (
+	MibIfEntryNormal                  = 0
+	MibIfEntryNormalWithoutStatistics = 2
+)
+
+// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type.
+const (
+	MibParameterNotification = 0
+	MibAddInstance           = 1
+	MibDeleteInstance        = 2
+	MibInitialNotification   = 3
+)
+
+// MibIfRow2 stores information about a particular interface. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2.
+type MibIfRow2 struct {
+	InterfaceLuid               uint64
+	InterfaceIndex              uint32
+	InterfaceGuid               GUID
+	Alias                       [IF_MAX_STRING_SIZE + 1]uint16
+	Description                 [IF_MAX_STRING_SIZE + 1]uint16
+	PhysicalAddressLength       uint32
+	PhysicalAddress             [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	PermanentPhysicalAddress    [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	Mtu                         uint32
+	Type                        uint32
+	TunnelType                  uint32
+	MediaType                   uint32
+	PhysicalMediumType          uint32
+	AccessType                  uint32
+	DirectionType               uint32
+	InterfaceAndOperStatusFlags uint8
+	OperStatus                  uint32
+	AdminStatus                 uint32
+	MediaConnectState           uint32
+	NetworkGuid                 GUID
+	ConnectionType              uint32
+	TransmitLinkSpeed           uint64
+	ReceiveLinkSpeed            uint64
+	InOctets                    uint64
+	InUcastPkts                 uint64
+	InNUcastPkts                uint64
+	InDiscards                  uint64
+	InErrors                    uint64
+	InUnknownProtos             uint64
+	InUcastOctets               uint64
+	InMulticastOctets           uint64
+	InBroadcastOctets           uint64
+	OutOctets                   uint64
+	OutUcastPkts                uint64
+	OutNUcastPkts               uint64
+	OutDiscards                 uint64
+	OutErrors                   uint64
+	OutUcastOctets              uint64
+	OutMulticastOctets          uint64
+	OutBroadcastOctets          uint64
+	OutQLen                     uint64
+}
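+
+// Usage sketch (assumes this package's GetIfEntry2Ex wrapper; the caller
+// identifies the interface by setting InterfaceLuid or InterfaceIndex before
+// the call, and idx is an assumed interface index):
+//
+//	var row MibIfRow2
+//	row.InterfaceIndex = idx
+//	err := GetIfEntry2Ex(MibIfEntryNormal, &row)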
+
+// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
+type MibUnicastIpAddressRow struct {
+	Address            RawSockaddrInet6 // SOCKADDR_INET union
+	InterfaceLuid      uint64
+	InterfaceIndex     uint32
+	PrefixOrigin       uint32
+	SuffixOrigin       uint32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	OnLinkPrefixLength uint8
+	SkipAsSource       uint8
+	DadState           uint32
+	ScopeId            uint32
+	CreationTimeStamp  Filetime
+}
+
+const ScopeLevelCount = 16
+
+// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface.
+// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row.
+type MibIpInterfaceRow struct {
+	Family                               uint16
+	InterfaceLuid                        uint64
+	InterfaceIndex                       uint32
+	MaxReassemblySize                    uint32
+	InterfaceIdentifier                  uint64
+	MinRouterAdvertisementInterval       uint32
+	MaxRouterAdvertisementInterval       uint32
+	AdvertisingEnabled                   uint8
+	ForwardingEnabled                    uint8
+	WeakHostSend                         uint8
+	WeakHostReceive                      uint8
+	UseAutomaticMetric                   uint8
+	UseNeighborUnreachabilityDetection   uint8
+	ManagedAddressConfigurationSupported uint8
+	OtherStatefulConfigurationSupported  uint8
+	AdvertiseDefaultRoute                uint8
+	RouterDiscoveryBehavior              uint32
+	DadTransmits                         uint32
+	BaseReachableTime                    uint32
+	RetransmitTime                       uint32
+	PathMtuDiscoveryTimeout              uint32
+	LinkLocalAddressBehavior             uint32
+	LinkLocalAddressTimeout              uint32
+	ZoneIndices                          [ScopeLevelCount]uint32
+	SitePrefixLength                     uint32
+	Metric                               uint32
+	NlMtu                                uint32
+	Connected                            uint8
+	SupportsWakeUpPatterns               uint8
+	SupportsNeighborDiscovery            uint8
+	SupportsRouterDiscovery              uint8
+	ReachableTime                        uint32
+	TransmitOffload                      uint32
+	ReceiveOffload                       uint32
+	DisableDefaultRoutes                 uint8
+}
+
+// Console related constants used for the mode parameter to SetConsoleMode. See
+// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
+
+const (
+	ENABLE_PROCESSED_INPUT        = 0x1
+	ENABLE_LINE_INPUT             = 0x2
+	ENABLE_ECHO_INPUT             = 0x4
+	ENABLE_WINDOW_INPUT           = 0x8
+	ENABLE_MOUSE_INPUT            = 0x10
+	ENABLE_INSERT_MODE            = 0x20
+	ENABLE_QUICK_EDIT_MODE        = 0x40
+	ENABLE_EXTENDED_FLAGS         = 0x80
+	ENABLE_AUTO_POSITION          = 0x100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200
+
+	ENABLE_PROCESSED_OUTPUT            = 0x1
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x2
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x8
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x10
+)
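+
+// Usage sketch: enabling VT100 escape-sequence processing on stdout using the
+// output-mode flags above (GetConsoleMode and SetConsoleMode are this
+// package's kernel32 wrappers):
+//
+//	var mode uint32
+//	if err := GetConsoleMode(Stdout, &mode); err == nil {
+//		SetConsoleMode(Stdout, mode|ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+//	}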
+
+// Pseudo console related constants used for the flags parameter to
+// CreatePseudoConsole. See: https://learn.microsoft.com/en-us/windows/console/createpseudoconsole
+const (
+	PSEUDOCONSOLE_INHERIT_CURSOR = 0x1
+)
+
+type Coord struct {
+	X int16
+	Y int16
+}
+
+type SmallRect struct {
+	Left   int16
+	Top    int16
+	Right  int16
+	Bottom int16
+}
+
+// Used with GetConsoleScreenBufferInfo to retrieve information about a
+// console screen buffer. See
+// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
+// for details.
+
+type ConsoleScreenBufferInfo struct {
+	Size              Coord
+	CursorPosition    Coord
+	Attributes        uint16
+	Window            SmallRect
+	MaximumWindowSize Coord
+}
+
+const UNIX_PATH_MAX = 108 // defined in afunix.h
+
+const (
+	// flags for JOBOBJECT_BASIC_LIMIT_INFORMATION.LimitFlags
+	JOB_OBJECT_LIMIT_ACTIVE_PROCESS             = 0x00000008
+	JOB_OBJECT_LIMIT_AFFINITY                   = 0x00000010
+	JOB_OBJECT_LIMIT_BREAKAWAY_OK               = 0x00000800
+	JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION = 0x00000400
+	JOB_OBJECT_LIMIT_JOB_MEMORY                 = 0x00000200
+	JOB_OBJECT_LIMIT_JOB_TIME                   = 0x00000004
+	JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE          = 0x00002000
+	JOB_OBJECT_LIMIT_PRESERVE_JOB_TIME          = 0x00000040
+	JOB_OBJECT_LIMIT_PRIORITY_CLASS             = 0x00000020
+	JOB_OBJECT_LIMIT_PROCESS_MEMORY             = 0x00000100
+	JOB_OBJECT_LIMIT_PROCESS_TIME               = 0x00000002
+	JOB_OBJECT_LIMIT_SCHEDULING_CLASS           = 0x00000080
+	JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK        = 0x00001000
+	JOB_OBJECT_LIMIT_SUBSET_AFFINITY            = 0x00004000
+	JOB_OBJECT_LIMIT_WORKINGSET                 = 0x00000001
+)
+
+type IO_COUNTERS struct {
+	ReadOperationCount  uint64
+	WriteOperationCount uint64
+	OtherOperationCount uint64
+	ReadTransferCount   uint64
+	WriteTransferCount  uint64
+	OtherTransferCount  uint64
+}
+
+type JOBOBJECT_EXTENDED_LIMIT_INFORMATION struct {
+	BasicLimitInformation JOBOBJECT_BASIC_LIMIT_INFORMATION
+	IoInfo                IO_COUNTERS
+	ProcessMemoryLimit    uintptr
+	JobMemoryLimit        uintptr
+	PeakProcessMemoryUsed uintptr
+	PeakJobMemoryUsed     uintptr
+}
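+
+// Usage sketch: a job object that kills its processes when the last job
+// handle closes (proc is an assumed process Handle; error handling elided):
+//
+//	job, _ := CreateJobObject(nil, nil)
+//	var info JOBOBJECT_EXTENDED_LIMIT_INFORMATION
+//	info.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
+//	SetInformationJobObject(job, JobObjectExtendedLimitInformation,
+//		uintptr(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
+//	AssignProcessToJobObject(job, proc)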
+
+const (
+	// UIRestrictionsClass
+	JOB_OBJECT_UILIMIT_DESKTOP          = 0x00000040
+	JOB_OBJECT_UILIMIT_DISPLAYSETTINGS  = 0x00000010
+	JOB_OBJECT_UILIMIT_EXITWINDOWS      = 0x00000080
+	JOB_OBJECT_UILIMIT_GLOBALATOMS      = 0x00000020
+	JOB_OBJECT_UILIMIT_HANDLES          = 0x00000001
+	JOB_OBJECT_UILIMIT_READCLIPBOARD    = 0x00000002
+	JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS = 0x00000008
+	JOB_OBJECT_UILIMIT_WRITECLIPBOARD   = 0x00000004
+)
+
+type JOBOBJECT_BASIC_UI_RESTRICTIONS struct {
+	UIRestrictionsClass uint32
+}
+
+const (
+	// JobObjectInformationClass for QueryInformationJobObject and SetInformationJobObject
+	JobObjectAssociateCompletionPortInformation = 7
+	JobObjectBasicAccountingInformation         = 1
+	JobObjectBasicAndIoAccountingInformation    = 8
+	JobObjectBasicLimitInformation              = 2
+	JobObjectBasicProcessIdList                 = 3
+	JobObjectBasicUIRestrictions                = 4
+	JobObjectCpuRateControlInformation          = 15
+	JobObjectEndOfJobTimeInformation            = 6
+	JobObjectExtendedLimitInformation           = 9
+	JobObjectGroupInformation                   = 11
+	JobObjectGroupInformationEx                 = 14
+	JobObjectLimitViolationInformation          = 13
+	JobObjectLimitViolationInformation2         = 34
+	JobObjectNetRateControlInformation          = 32
+	JobObjectNotificationLimitInformation       = 12
+	JobObjectNotificationLimitInformation2      = 33
+	JobObjectSecurityLimitInformation           = 5
+)
+
+const (
+	KF_FLAG_DEFAULT                          = 0x00000000
+	KF_FLAG_FORCE_APP_DATA_REDIRECTION       = 0x00080000
+	KF_FLAG_RETURN_FILTER_REDIRECTION_TARGET = 0x00040000
+	KF_FLAG_FORCE_PACKAGE_REDIRECTION        = 0x00020000
+	KF_FLAG_NO_PACKAGE_REDIRECTION           = 0x00010000
+	KF_FLAG_FORCE_APPCONTAINER_REDIRECTION   = 0x00020000
+	KF_FLAG_NO_APPCONTAINER_REDIRECTION      = 0x00010000
+	KF_FLAG_CREATE                           = 0x00008000
+	KF_FLAG_DONT_VERIFY                      = 0x00004000
+	KF_FLAG_DONT_UNEXPAND                    = 0x00002000
+	KF_FLAG_NO_ALIAS                         = 0x00001000
+	KF_FLAG_INIT                             = 0x00000800
+	KF_FLAG_DEFAULT_PATH                     = 0x00000400
+	KF_FLAG_NOT_PARENT_RELATIVE              = 0x00000200
+	KF_FLAG_SIMPLE_IDLIST                    = 0x00000100
+	KF_FLAG_ALIAS_ONLY                       = 0x80000000
+)
+
+type OsVersionInfoEx struct {
+	osVersionInfoSize uint32
+	MajorVersion      uint32
+	MinorVersion      uint32
+	BuildNumber       uint32
+	PlatformId        uint32
+	CsdVersion        [128]uint16
+	ServicePackMajor  uint16
+	ServicePackMinor  uint16
+	SuiteMask         uint16
+	ProductType       byte
+	_                 byte
+}
+
+const (
+	EWX_LOGOFF          = 0x00000000
+	EWX_SHUTDOWN        = 0x00000001
+	EWX_REBOOT          = 0x00000002
+	EWX_FORCE           = 0x00000004
+	EWX_POWEROFF        = 0x00000008
+	EWX_FORCEIFHUNG     = 0x00000010
+	EWX_QUICKRESOLVE    = 0x00000020
+	EWX_RESTARTAPPS     = 0x00000040
+	EWX_HYBRID_SHUTDOWN = 0x00400000
+	EWX_BOOTOPTIONS     = 0x01000000
+
+	SHTDN_REASON_FLAG_COMMENT_REQUIRED          = 0x01000000
+	SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000
+	SHTDN_REASON_FLAG_CLEAN_UI                  = 0x04000000
+	SHTDN_REASON_FLAG_DIRTY_UI                  = 0x08000000
+	SHTDN_REASON_FLAG_USER_DEFINED              = 0x40000000
+	SHTDN_REASON_FLAG_PLANNED                   = 0x80000000
+	SHTDN_REASON_MAJOR_OTHER                    = 0x00000000
+	SHTDN_REASON_MAJOR_NONE                     = 0x00000000
+	SHTDN_REASON_MAJOR_HARDWARE                 = 0x00010000
+	SHTDN_REASON_MAJOR_OPERATINGSYSTEM          = 0x00020000
+	SHTDN_REASON_MAJOR_SOFTWARE                 = 0x00030000
+	SHTDN_REASON_MAJOR_APPLICATION              = 0x00040000
+	SHTDN_REASON_MAJOR_SYSTEM                   = 0x00050000
+	SHTDN_REASON_MAJOR_POWER                    = 0x00060000
+	SHTDN_REASON_MAJOR_LEGACY_API               = 0x00070000
+	SHTDN_REASON_MINOR_OTHER                    = 0x00000000
+	SHTDN_REASON_MINOR_NONE                     = 0x000000ff
+	SHTDN_REASON_MINOR_MAINTENANCE              = 0x00000001
+	SHTDN_REASON_MINOR_INSTALLATION             = 0x00000002
+	SHTDN_REASON_MINOR_UPGRADE                  = 0x00000003
+	SHTDN_REASON_MINOR_RECONFIG                 = 0x00000004
+	SHTDN_REASON_MINOR_HUNG                     = 0x00000005
+	SHTDN_REASON_MINOR_UNSTABLE                 = 0x00000006
+	SHTDN_REASON_MINOR_DISK                     = 0x00000007
+	SHTDN_REASON_MINOR_PROCESSOR                = 0x00000008
+	SHTDN_REASON_MINOR_NETWORKCARD              = 0x00000009
+	SHTDN_REASON_MINOR_POWER_SUPPLY             = 0x0000000a
+	SHTDN_REASON_MINOR_CORDUNPLUGGED            = 0x0000000b
+	SHTDN_REASON_MINOR_ENVIRONMENT              = 0x0000000c
+	SHTDN_REASON_MINOR_HARDWARE_DRIVER          = 0x0000000d
+	SHTDN_REASON_MINOR_OTHERDRIVER              = 0x0000000e
+	SHTDN_REASON_MINOR_BLUESCREEN               = 0x0000000F
+	SHTDN_REASON_MINOR_SERVICEPACK              = 0x00000010
+	SHTDN_REASON_MINOR_HOTFIX                   = 0x00000011
+	SHTDN_REASON_MINOR_SECURITYFIX              = 0x00000012
+	SHTDN_REASON_MINOR_SECURITY                 = 0x00000013
+	SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY     = 0x00000014
+	SHTDN_REASON_MINOR_WMI                      = 0x00000015
+	SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL    = 0x00000016
+	SHTDN_REASON_MINOR_HOTFIX_UNINSTALL         = 0x00000017
+	SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL    = 0x00000018
+	SHTDN_REASON_MINOR_MMC                      = 0x00000019
+	SHTDN_REASON_MINOR_SYSTEMRESTORE            = 0x0000001a
+	SHTDN_REASON_MINOR_TERMSRV                  = 0x00000020
+	SHTDN_REASON_MINOR_DC_PROMOTION             = 0x00000021
+	SHTDN_REASON_MINOR_DC_DEMOTION              = 0x00000022
+	SHTDN_REASON_UNKNOWN                        = SHTDN_REASON_MINOR_NONE
+	SHTDN_REASON_LEGACY_API                     = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED
+	SHTDN_REASON_VALID_BIT_MASK                 = 0xc0ffffff
+
+	SHUTDOWN_NORETRY = 0x1
+)
+
+// Flags used for GetModuleHandleEx
+const (
+	GET_MODULE_HANDLE_EX_FLAG_PIN                = 1
+	GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2
+	GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS       = 4
+)
+
+// MUI function flag values
+const (
+	MUI_LANGUAGE_ID                    = 0x4
+	MUI_LANGUAGE_NAME                  = 0x8
+	MUI_MERGE_SYSTEM_FALLBACK          = 0x10
+	MUI_MERGE_USER_FALLBACK            = 0x20
+	MUI_UI_FALLBACK                    = MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK
+	MUI_THREAD_LANGUAGES               = 0x40
+	MUI_CONSOLE_FILTER                 = 0x100
+	MUI_COMPLEX_SCRIPT_FILTER          = 0x200
+	MUI_RESET_FILTERS                  = 0x001
+	MUI_USER_PREFERRED_UI_LANGUAGES    = 0x10
+	MUI_USE_INSTALLED_LANGUAGES        = 0x20
+	MUI_USE_SEARCH_ALL_LANGUAGES       = 0x40
+	MUI_LANG_NEUTRAL_PE_FILE           = 0x100
+	MUI_NON_LANG_NEUTRAL_FILE          = 0x200
+	MUI_MACHINE_LANGUAGE_SETTINGS      = 0x400
+	MUI_FILETYPE_NOT_LANGUAGE_NEUTRAL  = 0x001
+	MUI_FILETYPE_LANGUAGE_NEUTRAL_MAIN = 0x002
+	MUI_FILETYPE_LANGUAGE_NEUTRAL_MUI  = 0x004
+	MUI_QUERY_TYPE                     = 0x001
+	MUI_QUERY_CHECKSUM                 = 0x002
+	MUI_QUERY_LANGUAGE_NAME            = 0x004
+	MUI_QUERY_RESOURCE_TYPES           = 0x008
+	MUI_FILEINFO_VERSION               = 0x001
+
+	MUI_FULL_LANGUAGE      = 0x01
+	MUI_PARTIAL_LANGUAGE   = 0x02
+	MUI_LIP_LANGUAGE       = 0x04
+	MUI_LANGUAGE_INSTALLED = 0x20
+	MUI_LANGUAGE_LICENSED  = 0x40
+)
+
+// FILE_INFO_BY_HANDLE_CLASS constants for SetFileInformationByHandle/GetFileInformationByHandleEx
+const (
+	FileBasicInfo                  = 0
+	FileStandardInfo               = 1
+	FileNameInfo                   = 2
+	FileRenameInfo                 = 3
+	FileDispositionInfo            = 4
+	FileAllocationInfo             = 5
+	FileEndOfFileInfo              = 6
+	FileStreamInfo                 = 7
+	FileCompressionInfo            = 8
+	FileAttributeTagInfo           = 9
+	FileIdBothDirectoryInfo        = 10
+	FileIdBothDirectoryRestartInfo = 11
+	FileIoPriorityHintInfo         = 12
+	FileRemoteProtocolInfo         = 13
+	FileFullDirectoryInfo          = 14
+	FileFullDirectoryRestartInfo   = 15
+	FileStorageInfo                = 16
+	FileAlignmentInfo              = 17
+	FileIdInfo                     = 18
+	FileIdExtdDirectoryInfo        = 19
+	FileIdExtdDirectoryRestartInfo = 20
+	FileDispositionInfoEx          = 21
+	FileRenameInfoEx               = 22
+	FileCaseSensitiveInfo          = 23
+	FileNormalizedNameInfo         = 24
+)
+
+// LoadLibrary flags for determining from where to search for a DLL
+const (
+	DONT_RESOLVE_DLL_REFERENCES               = 0x1
+	LOAD_LIBRARY_AS_DATAFILE                  = 0x2
+	LOAD_WITH_ALTERED_SEARCH_PATH             = 0x8
+	LOAD_IGNORE_CODE_AUTHZ_LEVEL              = 0x10
+	LOAD_LIBRARY_AS_IMAGE_RESOURCE            = 0x20
+	LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE        = 0x40
+	LOAD_LIBRARY_REQUIRE_SIGNED_TARGET        = 0x80
+	LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR          = 0x100
+	LOAD_LIBRARY_SEARCH_APPLICATION_DIR       = 0x200
+	LOAD_LIBRARY_SEARCH_USER_DIRS             = 0x400
+	LOAD_LIBRARY_SEARCH_SYSTEM32              = 0x800
+	LOAD_LIBRARY_SEARCH_DEFAULT_DIRS          = 0x1000
+	LOAD_LIBRARY_SAFE_CURRENT_DIRS            = 0x00002000
+	LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER = 0x00004000
+	LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY      = 0x00008000
+)
+
+// RegNotifyChangeKeyValue notifyFilter flags.
+const (
+	// REG_NOTIFY_CHANGE_NAME notifies the caller if a subkey is added or deleted.
+	REG_NOTIFY_CHANGE_NAME = 0x00000001
+
+	// REG_NOTIFY_CHANGE_ATTRIBUTES notifies the caller of changes to the attributes of the key, such as the security descriptor information.
+	REG_NOTIFY_CHANGE_ATTRIBUTES = 0x00000002
+
+	// REG_NOTIFY_CHANGE_LAST_SET notifies the caller of changes to a value of the key. This can include adding or deleting a value, or changing an existing value.
+	REG_NOTIFY_CHANGE_LAST_SET = 0x00000004
+
+	// REG_NOTIFY_CHANGE_SECURITY notifies the caller of changes to the security descriptor of the key.
+	REG_NOTIFY_CHANGE_SECURITY = 0x00000008
+
+	// REG_NOTIFY_THREAD_AGNOSTIC indicates that the lifetime of the registration must not be tied to the lifetime of the thread issuing the RegNotifyChangeKeyValue call. Note: This flag value is only supported in Windows 8 and later.
+	REG_NOTIFY_THREAD_AGNOSTIC = 0x10000000
+)
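+
+// Usage sketch: blocking until a value under an open key changes (key is an
+// assumed registry Handle; the synchronous form passes a zero event handle):
+//
+//	err := RegNotifyChangeKeyValue(key, false, REG_NOTIFY_CHANGE_LAST_SET, 0, false)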
+
+type CommTimeouts struct {
+	ReadIntervalTimeout         uint32
+	ReadTotalTimeoutMultiplier  uint32
+	ReadTotalTimeoutConstant    uint32
+	WriteTotalTimeoutMultiplier uint32
+	WriteTotalTimeoutConstant   uint32
+}
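+
+// Sketch: give reads on a serial handle a fixed 100 ms budget (assuming
+// this package's SetCommTimeouts wrapper):
+//
+//	ct := CommTimeouts{ReadTotalTimeoutConstant: 100}
+//	err := SetCommTimeouts(h, &ct)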
+
+// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING.
+type NTUnicodeString struct {
+	// Note: Length and MaximumLength are in *bytes*, not uint16s.
+	// They should always be even.
+	Length        uint16
+	MaximumLength uint16
+	Buffer        *uint16
+}
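+
+// Sketch: build one from a Go string (assuming this package's
+// NewNTUnicodeString helper):
+//
+//	u, err := NewNTUnicodeString(`\??\C:\Windows`)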
+
+// NTString is an ANSI string for NT native APIs, corresponding to STRING.
+type NTString struct {
+	Length        uint16
+	MaximumLength uint16
+	Buffer        *byte
+}
+
+type LIST_ENTRY struct {
+	Flink *LIST_ENTRY
+	Blink *LIST_ENTRY
+}
+
+type RUNTIME_FUNCTION struct {
+	BeginAddress uint32
+	EndAddress   uint32
+	UnwindData   uint32
+}
+
+type LDR_DATA_TABLE_ENTRY struct {
+	reserved1          [2]uintptr
+	InMemoryOrderLinks LIST_ENTRY
+	reserved2          [2]uintptr
+	DllBase            uintptr
+	reserved3          [2]uintptr
+	FullDllName        NTUnicodeString
+	reserved4          [8]byte
+	reserved5          [3]uintptr
+	reserved6          uintptr
+	TimeDateStamp      uint32
+}
+
+type PEB_LDR_DATA struct {
+	reserved1               [8]byte
+	reserved2               [3]uintptr
+	InMemoryOrderModuleList LIST_ENTRY
+}
+
+type CURDIR struct {
+	DosPath NTUnicodeString
+	Handle  Handle
+}
+
+type RTL_DRIVE_LETTER_CURDIR struct {
+	Flags     uint16
+	Length    uint16
+	TimeStamp uint32
+	DosPath   NTString
+}
+
+type RTL_USER_PROCESS_PARAMETERS struct {
+	MaximumLength, Length uint32
+
+	Flags, DebugFlags uint32
+
+	ConsoleHandle                                Handle
+	ConsoleFlags                                 uint32
+	StandardInput, StandardOutput, StandardError Handle
+
+	CurrentDirectory CURDIR
+	DllPath          NTUnicodeString
+	ImagePathName    NTUnicodeString
+	CommandLine      NTUnicodeString
+	Environment      unsafe.Pointer
+
+	StartingX, StartingY, CountX, CountY, CountCharsX, CountCharsY, FillAttribute uint32
+
+	WindowFlags, ShowWindowFlags                     uint32
+	WindowTitle, DesktopInfo, ShellInfo, RuntimeData NTUnicodeString
+	CurrentDirectories                               [32]RTL_DRIVE_LETTER_CURDIR
+
+	EnvironmentSize, EnvironmentVersion uintptr
+
+	PackageDependencyData unsafe.Pointer
+	ProcessGroupId        uint32
+	LoaderThreads         uint32
+
+	RedirectionDllName               NTUnicodeString
+	HeapPartitionName                NTUnicodeString
+	DefaultThreadpoolCpuSetMasks     uintptr
+	DefaultThreadpoolCpuSetMaskCount uint32
+}
+
+type PEB struct {
+	reserved1              [2]byte
+	BeingDebugged          byte
+	BitField               byte
+	reserved3              uintptr
+	ImageBaseAddress       uintptr
+	Ldr                    *PEB_LDR_DATA
+	ProcessParameters      *RTL_USER_PROCESS_PARAMETERS
+	reserved4              [3]uintptr
+	AtlThunkSListPtr       uintptr
+	reserved5              uintptr
+	reserved6              uint32
+	reserved7              uintptr
+	reserved8              uint32
+	AtlThunkSListPtr32     uint32
+	reserved9              [45]uintptr
+	reserved10             [96]byte
+	PostProcessInitRoutine uintptr
+	reserved11             [128]byte
+	reserved12             [1]uintptr
+	SessionId              uint32
+}
+
+type OBJECT_ATTRIBUTES struct {
+	Length             uint32
+	RootDirectory      Handle
+	ObjectName         *NTUnicodeString
+	Attributes         uint32
+	SecurityDescriptor *SECURITY_DESCRIPTOR
+	SecurityQoS        *SECURITY_QUALITY_OF_SERVICE
+}
+
+// Values for the Attributes member of OBJECT_ATTRIBUTES.
+const (
+	OBJ_INHERIT                       = 0x00000002
+	OBJ_PERMANENT                     = 0x00000010
+	OBJ_EXCLUSIVE                     = 0x00000020
+	OBJ_CASE_INSENSITIVE              = 0x00000040
+	OBJ_OPENIF                        = 0x00000080
+	OBJ_OPENLINK                      = 0x00000100
+	OBJ_KERNEL_HANDLE                 = 0x00000200
+	OBJ_FORCE_ACCESS_CHECK            = 0x00000400
+	OBJ_IGNORE_IMPERSONATED_DEVICEMAP = 0x00000800
+	OBJ_DONT_REPARSE                  = 0x00001000
+	OBJ_VALID_ATTRIBUTES              = 0x00001FF2
+)
+
+type IO_STATUS_BLOCK struct {
+	Status      NTStatus
+	Information uintptr
+}
+
+type RTLP_CURDIR_REF struct {
+	RefCount int32
+	Handle   Handle
+}
+
+type RTL_RELATIVE_NAME struct {
+	RelativeName        NTUnicodeString
+	ContainingDirectory Handle
+	CurDirRef           *RTLP_CURDIR_REF
+}
+
+const (
+	// CreateDisposition flags for NtCreateFile and NtCreateNamedPipeFile.
+	FILE_SUPERSEDE           = 0x00000000
+	FILE_OPEN                = 0x00000001
+	FILE_CREATE              = 0x00000002
+	FILE_OPEN_IF             = 0x00000003
+	FILE_OVERWRITE           = 0x00000004
+	FILE_OVERWRITE_IF        = 0x00000005
+	FILE_MAXIMUM_DISPOSITION = 0x00000005
+
+	// CreateOptions flags for NtCreateFile and NtCreateNamedPipeFile.
+	FILE_DIRECTORY_FILE            = 0x00000001
+	FILE_WRITE_THROUGH             = 0x00000002
+	FILE_SEQUENTIAL_ONLY           = 0x00000004
+	FILE_NO_INTERMEDIATE_BUFFERING = 0x00000008
+	FILE_SYNCHRONOUS_IO_ALERT      = 0x00000010
+	FILE_SYNCHRONOUS_IO_NONALERT   = 0x00000020
+	FILE_NON_DIRECTORY_FILE        = 0x00000040
+	FILE_CREATE_TREE_CONNECTION    = 0x00000080
+	FILE_COMPLETE_IF_OPLOCKED      = 0x00000100
+	FILE_NO_EA_KNOWLEDGE           = 0x00000200
+	FILE_OPEN_REMOTE_INSTANCE      = 0x00000400
+	FILE_RANDOM_ACCESS             = 0x00000800
+	FILE_DELETE_ON_CLOSE           = 0x00001000
+	FILE_OPEN_BY_FILE_ID           = 0x00002000
+	FILE_OPEN_FOR_BACKUP_INTENT    = 0x00004000
+	FILE_NO_COMPRESSION            = 0x00008000
+	FILE_OPEN_REQUIRING_OPLOCK     = 0x00010000
+	FILE_DISALLOW_EXCLUSIVE        = 0x00020000
+	FILE_RESERVE_OPFILTER          = 0x00100000
+	FILE_OPEN_REPARSE_POINT        = 0x00200000
+	FILE_OPEN_NO_RECALL            = 0x00400000
+	FILE_OPEN_FOR_FREE_SPACE_QUERY = 0x00800000
+
+	// Parameter constants for NtCreateNamedPipeFile.
+
+	FILE_PIPE_BYTE_STREAM_TYPE = 0x00000000
+	FILE_PIPE_MESSAGE_TYPE     = 0x00000001
+
+	FILE_PIPE_ACCEPT_REMOTE_CLIENTS = 0x00000000
+	FILE_PIPE_REJECT_REMOTE_CLIENTS = 0x00000002
+
+	FILE_PIPE_TYPE_VALID_MASK = 0x00000003
+
+	FILE_PIPE_BYTE_STREAM_MODE = 0x00000000
+	FILE_PIPE_MESSAGE_MODE     = 0x00000001
+
+	FILE_PIPE_QUEUE_OPERATION    = 0x00000000
+	FILE_PIPE_COMPLETE_OPERATION = 0x00000001
+
+	FILE_PIPE_INBOUND     = 0x00000000
+	FILE_PIPE_OUTBOUND    = 0x00000001
+	FILE_PIPE_FULL_DUPLEX = 0x00000002
+
+	FILE_PIPE_DISCONNECTED_STATE = 0x00000001
+	FILE_PIPE_LISTENING_STATE    = 0x00000002
+	FILE_PIPE_CONNECTED_STATE    = 0x00000003
+	FILE_PIPE_CLOSING_STATE      = 0x00000004
+
+	FILE_PIPE_CLIENT_END = 0x00000000
+	FILE_PIPE_SERVER_END = 0x00000001
+)
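+
+// Sketch: open an existing file for synchronous reads with NtCreateFile,
+// combining the disposition and options above (assumes u is an NT-path
+// NTUnicodeString and that this package exports NtCreateFile):
+//
+//	var (
+//		h    Handle
+//		iosb IO_STATUS_BLOCK
+//	)
+//	oa := &OBJECT_ATTRIBUTES{ObjectName: u, Attributes: OBJ_CASE_INSENSITIVE}
+//	oa.Length = uint32(unsafe.Sizeof(*oa))
+//	err := NtCreateFile(&h, FILE_GENERIC_READ, oa, &iosb, nil,
+//		FILE_ATTRIBUTE_NORMAL, FILE_SHARE_READ, FILE_OPEN,
+//		FILE_SYNCHRONOUS_IO_NONALERT|FILE_NON_DIRECTORY_FILE, 0, 0)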
+
+const (
+	// FileInformationClass for NtSetInformationFile
+	FileBasicInformation                         = 4
+	FileRenameInformation                        = 10
+	FileDispositionInformation                   = 13
+	FilePositionInformation                      = 14
+	FileEndOfFileInformation                     = 20
+	FileValidDataLengthInformation               = 39
+	FileShortNameInformation                     = 40
+	FileIoPriorityHintInformation                = 43
+	FileReplaceCompletionInformation             = 61
+	FileDispositionInformationEx                 = 64
+	FileCaseSensitiveInformation                 = 71
+	FileLinkInformation                          = 72
+	FileCaseSensitiveInformationForceAccessCheck = 75
+	FileKnownFolderInformation                   = 76
+
+	// Flags for FILE_RENAME_INFORMATION
+	FILE_RENAME_REPLACE_IF_EXISTS                    = 0x00000001
+	FILE_RENAME_POSIX_SEMANTICS                      = 0x00000002
+	FILE_RENAME_SUPPRESS_PIN_STATE_INHERITANCE       = 0x00000004
+	FILE_RENAME_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008
+	FILE_RENAME_NO_INCREASE_AVAILABLE_SPACE          = 0x00000010
+	FILE_RENAME_NO_DECREASE_AVAILABLE_SPACE          = 0x00000020
+	FILE_RENAME_PRESERVE_AVAILABLE_SPACE             = 0x00000030
+	FILE_RENAME_IGNORE_READONLY_ATTRIBUTE            = 0x00000040
+	FILE_RENAME_FORCE_RESIZE_TARGET_SR               = 0x00000080
+	FILE_RENAME_FORCE_RESIZE_SOURCE_SR               = 0x00000100
+	FILE_RENAME_FORCE_RESIZE_SR                      = 0x00000180
+
+	// Flags for FILE_DISPOSITION_INFORMATION_EX
+	FILE_DISPOSITION_DO_NOT_DELETE             = 0x00000000
+	FILE_DISPOSITION_DELETE                    = 0x00000001
+	FILE_DISPOSITION_POSIX_SEMANTICS           = 0x00000002
+	FILE_DISPOSITION_FORCE_IMAGE_SECTION_CHECK = 0x00000004
+	FILE_DISPOSITION_ON_CLOSE                  = 0x00000008
+	FILE_DISPOSITION_IGNORE_READONLY_ATTRIBUTE = 0x00000010
+
+	// Flags for FILE_CASE_SENSITIVE_INFORMATION
+	FILE_CS_FLAG_CASE_SENSITIVE_DIR = 0x00000001
+
+	// Flags for FILE_LINK_INFORMATION
+	FILE_LINK_REPLACE_IF_EXISTS                    = 0x00000001
+	FILE_LINK_POSIX_SEMANTICS                      = 0x00000002
+	FILE_LINK_SUPPRESS_STORAGE_RESERVE_INHERITANCE = 0x00000008
+	FILE_LINK_NO_INCREASE_AVAILABLE_SPACE          = 0x00000010
+	FILE_LINK_NO_DECREASE_AVAILABLE_SPACE          = 0x00000020
+	FILE_LINK_PRESERVE_AVAILABLE_SPACE             = 0x00000030
+	FILE_LINK_IGNORE_READONLY_ATTRIBUTE            = 0x00000040
+	FILE_LINK_FORCE_RESIZE_TARGET_SR               = 0x00000080
+	FILE_LINK_FORCE_RESIZE_SOURCE_SR               = 0x00000100
+	FILE_LINK_FORCE_RESIZE_SR                      = 0x00000180
+)
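+
+// Sketch: POSIX-semantics delete of an open handle through
+// FILE_DISPOSITION_INFORMATION_EX (assuming this package's
+// NtSetInformationFile wrapper):
+//
+//	flags := uint32(FILE_DISPOSITION_DELETE | FILE_DISPOSITION_POSIX_SEMANTICS)
+//	var iosb IO_STATUS_BLOCK
+//	err := NtSetInformationFile(h, &iosb, (*byte)(unsafe.Pointer(&flags)),
+//		uint32(unsafe.Sizeof(flags)), FileDispositionInformationEx)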
+
+// ProcessInformationClasses for NtQueryInformationProcess and NtSetInformationProcess.
+const (
+	ProcessBasicInformation = iota
+	ProcessQuotaLimits
+	ProcessIoCounters
+	ProcessVmCounters
+	ProcessTimes
+	ProcessBasePriority
+	ProcessRaisePriority
+	ProcessDebugPort
+	ProcessExceptionPort
+	ProcessAccessToken
+	ProcessLdtInformation
+	ProcessLdtSize
+	ProcessDefaultHardErrorMode
+	ProcessIoPortHandlers
+	ProcessPooledUsageAndLimits
+	ProcessWorkingSetWatch
+	ProcessUserModeIOPL
+	ProcessEnableAlignmentFaultFixup
+	ProcessPriorityClass
+	ProcessWx86Information
+	ProcessHandleCount
+	ProcessAffinityMask
+	ProcessPriorityBoost
+	ProcessDeviceMap
+	ProcessSessionInformation
+	ProcessForegroundInformation
+	ProcessWow64Information
+	ProcessImageFileName
+	ProcessLUIDDeviceMapsEnabled
+	ProcessBreakOnTermination
+	ProcessDebugObjectHandle
+	ProcessDebugFlags
+	ProcessHandleTracing
+	ProcessIoPriority
+	ProcessExecuteFlags
+	ProcessTlsInformation
+	ProcessCookie
+	ProcessImageInformation
+	ProcessCycleTime
+	ProcessPagePriority
+	ProcessInstrumentationCallback
+	ProcessThreadStackAllocation
+	ProcessWorkingSetWatchEx
+	ProcessImageFileNameWin32
+	ProcessImageFileMapping
+	ProcessAffinityUpdateMode
+	ProcessMemoryAllocationMode
+	ProcessGroupInformation
+	ProcessTokenVirtualizationEnabled
+	ProcessConsoleHostProcess
+	ProcessWindowInformation
+	ProcessHandleInformation
+	ProcessMitigationPolicy
+	ProcessDynamicFunctionTableInformation
+	ProcessHandleCheckingMode
+	ProcessKeepAliveCount
+	ProcessRevokeFileHandles
+	ProcessWorkingSetControl
+	ProcessHandleTable
+	ProcessCheckStackExtentsMode
+	ProcessCommandLineInformation
+	ProcessProtectionInformation
+	ProcessMemoryExhaustion
+	ProcessFaultInformation
+	ProcessTelemetryIdInformation
+	ProcessCommitReleaseInformation
+	ProcessDefaultCpuSetsInformation
+	ProcessAllowedCpuSetsInformation
+	ProcessSubsystemProcess
+	ProcessJobMemoryInformation
+	ProcessInPrivate
+	ProcessRaiseUMExceptionOnInvalidHandleClose
+	ProcessIumChallengeResponse
+	ProcessChildProcessInformation
+	ProcessHighGraphicsPriorityInformation
+	ProcessSubsystemInformation
+	ProcessEnergyValues
+	ProcessActivityThrottleState
+	ProcessActivityThrottlePolicy
+	ProcessWin32kSyscallFilterInformation
+	ProcessDisableSystemAllowedCpuSets
+	ProcessWakeInformation
+	ProcessEnergyTrackingState
+	ProcessManageWritesToExecutableMemory
+	ProcessCaptureTrustletLiveDump
+	ProcessTelemetryCoverage
+	ProcessEnclaveInformation
+	ProcessEnableReadWriteVmLogging
+	ProcessUptimeInformation
+	ProcessImageSection
+	ProcessDebugAuthInformation
+	ProcessSystemResourceManagement
+	ProcessSequenceNumber
+	ProcessLoaderDetour
+	ProcessSecurityDomainInformation
+	ProcessCombineSecurityDomainsInformation
+	ProcessEnableLogging
+	ProcessLeapSecondInformation
+	ProcessFiberShadowStackAllocation
+	ProcessFreeFiberShadowStackAllocation
+	ProcessAltSystemCallInformation
+	ProcessDynamicEHContinuationTargets
+	ProcessDynamicEnforcedCetCompatibleRanges
+)
+
+type PROCESS_BASIC_INFORMATION struct {
+	ExitStatus                   NTStatus
+	PebBaseAddress               *PEB
+	AffinityMask                 uintptr
+	BasePriority                 int32
+	UniqueProcessId              uintptr
+	InheritedFromUniqueProcessId uintptr
+}
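+
+// Sketch: read the current process's PEB address (assuming this package's
+// NtQueryInformationProcess wrapper and CurrentProcess helper):
+//
+//	var pbi PROCESS_BASIC_INFORMATION
+//	var retLen uint32
+//	err := NtQueryInformationProcess(CurrentProcess(), ProcessBasicInformation,
+//		unsafe.Pointer(&pbi), uint32(unsafe.Sizeof(pbi)), &retLen)
+//	// On success, pbi.PebBaseAddress points at the PEB defined above.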
+
+type SYSTEM_PROCESS_INFORMATION struct {
+	NextEntryOffset              uint32
+	NumberOfThreads              uint32
+	WorkingSetPrivateSize        int64
+	HardFaultCount               uint32
+	NumberOfThreadsHighWatermark uint32
+	CycleTime                    uint64
+	CreateTime                   int64
+	UserTime                     int64
+	KernelTime                   int64
+	ImageName                    NTUnicodeString
+	BasePriority                 int32
+	UniqueProcessID              uintptr
+	InheritedFromUniqueProcessID uintptr
+	HandleCount                  uint32
+	SessionID                    uint32
+	UniqueProcessKey             *uint32
+	PeakVirtualSize              uintptr
+	VirtualSize                  uintptr
+	PageFaultCount               uint32
+	PeakWorkingSetSize           uintptr
+	WorkingSetSize               uintptr
+	QuotaPeakPagedPoolUsage      uintptr
+	QuotaPagedPoolUsage          uintptr
+	QuotaPeakNonPagedPoolUsage   uintptr
+	QuotaNonPagedPoolUsage       uintptr
+	PagefileUsage                uintptr
+	PeakPagefileUsage            uintptr
+	PrivatePageCount             uintptr
+	ReadOperationCount           int64
+	WriteOperationCount          int64
+	OtherOperationCount          int64
+	ReadTransferCount            int64
+	WriteTransferCount           int64
+	OtherTransferCount           int64
+}
+
+// SystemInformationClasses for NtQuerySystemInformation and NtSetSystemInformation
+const (
+	SystemBasicInformation = iota
+	SystemProcessorInformation
+	SystemPerformanceInformation
+	SystemTimeOfDayInformation
+	SystemPathInformation
+	SystemProcessInformation
+	SystemCallCountInformation
+	SystemDeviceInformation
+	SystemProcessorPerformanceInformation
+	SystemFlagsInformation
+	SystemCallTimeInformation
+	SystemModuleInformation
+	SystemLocksInformation
+	SystemStackTraceInformation
+	SystemPagedPoolInformation
+	SystemNonPagedPoolInformation
+	SystemHandleInformation
+	SystemObjectInformation
+	SystemPageFileInformation
+	SystemVdmInstemulInformation
+	SystemVdmBopInformation
+	SystemFileCacheInformation
+	SystemPoolTagInformation
+	SystemInterruptInformation
+	SystemDpcBehaviorInformation
+	SystemFullMemoryInformation
+	SystemLoadGdiDriverInformation
+	SystemUnloadGdiDriverInformation
+	SystemTimeAdjustmentInformation
+	SystemSummaryMemoryInformation
+	SystemMirrorMemoryInformation
+	SystemPerformanceTraceInformation
+	systemObsolete0
+	SystemExceptionInformation
+	SystemCrashDumpStateInformation
+	SystemKernelDebuggerInformation
+	SystemContextSwitchInformation
+	SystemRegistryQuotaInformation
+	SystemExtendServiceTableInformation
+	SystemPrioritySeperation
+	SystemVerifierAddDriverInformation
+	SystemVerifierRemoveDriverInformation
+	SystemProcessorIdleInformation
+	SystemLegacyDriverInformation
+	SystemCurrentTimeZoneInformation
+	SystemLookasideInformation
+	SystemTimeSlipNotification
+	SystemSessionCreate
+	SystemSessionDetach
+	SystemSessionInformation
+	SystemRangeStartInformation
+	SystemVerifierInformation
+	SystemVerifierThunkExtend
+	SystemSessionProcessInformation
+	SystemLoadGdiDriverInSystemSpace
+	SystemNumaProcessorMap
+	SystemPrefetcherInformation
+	SystemExtendedProcessInformation
+	SystemRecommendedSharedDataAlignment
+	SystemComPlusPackage
+	SystemNumaAvailableMemory
+	SystemProcessorPowerInformation
+	SystemEmulationBasicInformation
+	SystemEmulationProcessorInformation
+	SystemExtendedHandleInformation
+	SystemLostDelayedWriteInformation
+	SystemBigPoolInformation
+	SystemSessionPoolTagInformation
+	SystemSessionMappedViewInformation
+	SystemHotpatchInformation
+	SystemObjectSecurityMode
+	SystemWatchdogTimerHandler
+	SystemWatchdogTimerInformation
+	SystemLogicalProcessorInformation
+	SystemWow64SharedInformationObsolete
+	SystemRegisterFirmwareTableInformationHandler
+	SystemFirmwareTableInformation
+	SystemModuleInformationEx
+	SystemVerifierTriageInformation
+	SystemSuperfetchInformation
+	SystemMemoryListInformation
+	SystemFileCacheInformationEx
+	SystemThreadPriorityClientIdInformation
+	SystemProcessorIdleCycleTimeInformation
+	SystemVerifierCancellationInformation
+	SystemProcessorPowerInformationEx
+	SystemRefTraceInformation
+	SystemSpecialPoolInformation
+	SystemProcessIdInformation
+	SystemErrorPortInformation
+	SystemBootEnvironmentInformation
+	SystemHypervisorInformation
+	SystemVerifierInformationEx
+	SystemTimeZoneInformation
+	SystemImageFileExecutionOptionsInformation
+	SystemCoverageInformation
+	SystemPrefetchPatchInformation
+	SystemVerifierFaultsInformation
+	SystemSystemPartitionInformation
+	SystemSystemDiskInformation
+	SystemProcessorPerformanceDistribution
+	SystemNumaProximityNodeInformation
+	SystemDynamicTimeZoneInformation
+	SystemCodeIntegrityInformation
+	SystemProcessorMicrocodeUpdateInformation
+	SystemProcessorBrandString
+	SystemVirtualAddressInformation
+	SystemLogicalProcessorAndGroupInformation
+	SystemProcessorCycleTimeInformation
+	SystemStoreInformation
+	SystemRegistryAppendString
+	SystemAitSamplingValue
+	SystemVhdBootInformation
+	SystemCpuQuotaInformation
+	SystemNativeBasicInformation
+	systemSpare1
+	SystemLowPriorityIoInformation
+	SystemTpmBootEntropyInformation
+	SystemVerifierCountersInformation
+	SystemPagedPoolInformationEx
+	SystemSystemPtesInformationEx
+	SystemNodeDistanceInformation
+	SystemAcpiAuditInformation
+	SystemBasicPerformanceInformation
+	SystemQueryPerformanceCounterInformation
+	SystemSessionBigPoolInformation
+	SystemBootGraphicsInformation
+	SystemScrubPhysicalMemoryInformation
+	SystemBadPageInformation
+	SystemProcessorProfileControlArea
+	SystemCombinePhysicalMemoryInformation
+	SystemEntropyInterruptTimingCallback
+	SystemConsoleInformation
+	SystemPlatformBinaryInformation
+	SystemThrottleNotificationInformation
+	SystemHypervisorProcessorCountInformation
+	SystemDeviceDataInformation
+	SystemDeviceDataEnumerationInformation
+	SystemMemoryTopologyInformation
+	SystemMemoryChannelInformation
+	SystemBootLogoInformation
+	SystemProcessorPerformanceInformationEx
+	systemSpare0
+	SystemSecureBootPolicyInformation
+	SystemPageFileInformationEx
+	SystemSecureBootInformation
+	SystemEntropyInterruptTimingRawInformation
+	SystemPortableWorkspaceEfiLauncherInformation
+	SystemFullProcessInformation
+	SystemKernelDebuggerInformationEx
+	SystemBootMetadataInformation
+	SystemSoftRebootInformation
+	SystemElamCertificateInformation
+	SystemOfflineDumpConfigInformation
+	SystemProcessorFeaturesInformation
+	SystemRegistryReconciliationInformation
+	SystemEdidInformation
+	SystemManufacturingInformation
+	SystemEnergyEstimationConfigInformation
+	SystemHypervisorDetailInformation
+	SystemProcessorCycleStatsInformation
+	SystemVmGenerationCountInformation
+	SystemTrustedPlatformModuleInformation
+	SystemKernelDebuggerFlags
+	SystemCodeIntegrityPolicyInformation
+	SystemIsolatedUserModeInformation
+	SystemHardwareSecurityTestInterfaceResultsInformation
+	SystemSingleModuleInformation
+	SystemAllowedCpuSetsInformation
+	SystemDmaProtectionInformation
+	SystemInterruptCpuSetsInformation
+	SystemSecureBootPolicyFullInformation
+	SystemCodeIntegrityPolicyFullInformation
+	SystemAffinitizedInterruptProcessorInformation
+	SystemRootSiloInformation
+)
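+
+// Sketch: size-probe and then snapshot the process list (assuming this
+// package's NtQuerySystemInformation wrapper; the required size can change
+// between calls, so real code would retry in a loop):
+//
+//	var needed uint32
+//	_ = NtQuerySystemInformation(SystemProcessInformation, nil, 0, &needed)
+//	buf := make([]byte, needed)
+//	err := NtQuerySystemInformation(SystemProcessInformation,
+//		unsafe.Pointer(&buf[0]), uint32(len(buf)), &needed)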
+
+type RTL_PROCESS_MODULE_INFORMATION struct {
+	Section          Handle
+	MappedBase       uintptr
+	ImageBase        uintptr
+	ImageSize        uint32
+	Flags            uint32
+	LoadOrderIndex   uint16
+	InitOrderIndex   uint16
+	LoadCount        uint16
+	OffsetToFileName uint16
+	FullPathName     [256]byte
+}
+
+type RTL_PROCESS_MODULES struct {
+	NumberOfModules uint32
+	Modules         [1]RTL_PROCESS_MODULE_INFORMATION
+}
+
+// Constants for LocalAlloc flags.
+const (
+	LMEM_FIXED          = 0x0
+	LMEM_MOVEABLE       = 0x2
+	LMEM_NOCOMPACT      = 0x10
+	LMEM_NODISCARD      = 0x20
+	LMEM_ZEROINIT       = 0x40
+	LMEM_MODIFY         = 0x80
+	LMEM_DISCARDABLE    = 0xf00
+	LMEM_VALID_FLAGS    = 0xf72
+	LMEM_INVALID_HANDLE = 0x8000
+	LHND                = LMEM_MOVEABLE | LMEM_ZEROINIT
+	LPTR                = LMEM_FIXED | LMEM_ZEROINIT
+	NONZEROLHND         = LMEM_MOVEABLE
+	NONZEROLPTR         = LMEM_FIXED
+)
+
+// Constants for the CreateNamedPipe-family of functions.
+const (
+	PIPE_ACCESS_INBOUND  = 0x1
+	PIPE_ACCESS_OUTBOUND = 0x2
+	PIPE_ACCESS_DUPLEX   = 0x3
+
+	PIPE_CLIENT_END = 0x0
+	PIPE_SERVER_END = 0x1
+
+	PIPE_WAIT                  = 0x0
+	PIPE_NOWAIT                = 0x1
+	PIPE_READMODE_BYTE         = 0x0
+	PIPE_READMODE_MESSAGE      = 0x2
+	PIPE_TYPE_BYTE             = 0x0
+	PIPE_TYPE_MESSAGE          = 0x4
+	PIPE_ACCEPT_REMOTE_CLIENTS = 0x0
+	PIPE_REJECT_REMOTE_CLIENTS = 0x8
+
+	PIPE_UNLIMITED_INSTANCES = 255
+)
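+
+// Sketch: create a duplex, message-mode pipe server endpoint (assuming
+// this package's CreateNamedPipe wrapper; the pipe name is an example):
+//
+//	name, _ := UTF16PtrFromString(`\\.\pipe\demo`)
+//	h, err := CreateNamedPipe(name, PIPE_ACCESS_DUPLEX,
+//		PIPE_TYPE_MESSAGE|PIPE_READMODE_MESSAGE|PIPE_WAIT,
+//		PIPE_UNLIMITED_INSTANCES, 4096, 4096, 0, nil)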
+
+// Constants for security attributes when opening named pipes.
+const (
+	SECURITY_ANONYMOUS      = SecurityAnonymous << 16
+	SECURITY_IDENTIFICATION = SecurityIdentification << 16
+	SECURITY_IMPERSONATION  = SecurityImpersonation << 16
+	SECURITY_DELEGATION     = SecurityDelegation << 16
+
+	SECURITY_CONTEXT_TRACKING = 0x40000
+	SECURITY_EFFECTIVE_ONLY   = 0x80000
+
+	SECURITY_SQOS_PRESENT     = 0x100000
+	SECURITY_VALID_SQOS_FLAGS = 0x1f0000
+)
+
+// ResourceID represents a 16-bit resource identifier, traditionally created with the MAKEINTRESOURCE macro.
+type ResourceID uint16
+
+// ResourceIDOrString must be either a ResourceID, to specify a resource or resource type by ID,
+// or a string, to specify a resource or resource type by name.
+type ResourceIDOrString interface{}
+
+// Predefined resource names and types.
+var (
+	// Predefined names.
+	CREATEPROCESS_MANIFEST_RESOURCE_ID                 ResourceID = 1
+	ISOLATIONAWARE_MANIFEST_RESOURCE_ID                ResourceID = 2
+	ISOLATIONAWARE_NOSTATICIMPORT_MANIFEST_RESOURCE_ID ResourceID = 3
+	ISOLATIONPOLICY_MANIFEST_RESOURCE_ID               ResourceID = 4
+	ISOLATIONPOLICY_BROWSER_MANIFEST_RESOURCE_ID       ResourceID = 5
+	MINIMUM_RESERVED_MANIFEST_RESOURCE_ID              ResourceID = 1  // inclusive
+	MAXIMUM_RESERVED_MANIFEST_RESOURCE_ID              ResourceID = 16 // inclusive
+
+	// Predefined types.
+	RT_CURSOR       ResourceID = 1
+	RT_BITMAP       ResourceID = 2
+	RT_ICON         ResourceID = 3
+	RT_MENU         ResourceID = 4
+	RT_DIALOG       ResourceID = 5
+	RT_STRING       ResourceID = 6
+	RT_FONTDIR      ResourceID = 7
+	RT_FONT         ResourceID = 8
+	RT_ACCELERATOR  ResourceID = 9
+	RT_RCDATA       ResourceID = 10
+	RT_MESSAGETABLE ResourceID = 11
+	RT_GROUP_CURSOR ResourceID = 12
+	RT_GROUP_ICON   ResourceID = 14
+	RT_VERSION      ResourceID = 16
+	RT_DLGINCLUDE   ResourceID = 17
+	RT_PLUGPLAY     ResourceID = 19
+	RT_VXD          ResourceID = 20
+	RT_ANICURSOR    ResourceID = 21
+	RT_ANIICON      ResourceID = 22
+	RT_HTML         ResourceID = 23
+	RT_MANIFEST     ResourceID = 24
+)
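+
+// Sketch: locate and read a module's embedded manifest (assuming this
+// package's FindResource and LoadResourceData helpers, which accept the
+// ResourceIDOrString values above):
+//
+//	resInfo, err := FindResource(module, CREATEPROCESS_MANIFEST_RESOURCE_ID, RT_MANIFEST)
+//	data, err := LoadResourceData(module, resInfo)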
+
+type VS_FIXEDFILEINFO struct {
+	Signature        uint32
+	StrucVersion     uint32
+	FileVersionMS    uint32
+	FileVersionLS    uint32
+	ProductVersionMS uint32
+	ProductVersionLS uint32
+	FileFlagsMask    uint32
+	FileFlags        uint32
+	FileOS           uint32
+	FileType         uint32
+	FileSubtype      uint32
+	FileDateMS       uint32
+	FileDateLS       uint32
+}
+
+type COAUTHIDENTITY struct {
+	User           *uint16
+	UserLength     uint32
+	Domain         *uint16
+	DomainLength   uint32
+	Password       *uint16
+	PasswordLength uint32
+	Flags          uint32
+}
+
+type COAUTHINFO struct {
+	AuthnSvc           uint32
+	AuthzSvc           uint32
+	ServerPrincName    *uint16
+	AuthnLevel         uint32
+	ImpersonationLevel uint32
+	AuthIdentityData   *COAUTHIDENTITY
+	Capabilities       uint32
+}
+
+type COSERVERINFO struct {
+	Reserved1 uint32
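+	// Aame preserves the field name used upstream in golang.org/x/sys
+	// (a typo of the SDK's pwszName member, kept for compatibility).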
+	Aame      *uint16
+	AuthInfo  *COAUTHINFO
+	Reserved2 uint32
+}
+
+type BIND_OPTS3 struct {
+	CbStruct          uint32
+	Flags             uint32
+	Mode              uint32
+	TickCountDeadline uint32
+	TrackFlags        uint32
+	ClassContext      uint32
+	Locale            uint32
+	ServerInfo        *COSERVERINFO
+	Hwnd              HWND
+}
+
+const (
+	CLSCTX_INPROC_SERVER          = 0x1
+	CLSCTX_INPROC_HANDLER         = 0x2
+	CLSCTX_LOCAL_SERVER           = 0x4
+	CLSCTX_INPROC_SERVER16        = 0x8
+	CLSCTX_REMOTE_SERVER          = 0x10
+	CLSCTX_INPROC_HANDLER16       = 0x20
+	CLSCTX_RESERVED1              = 0x40
+	CLSCTX_RESERVED2              = 0x80
+	CLSCTX_RESERVED3              = 0x100
+	CLSCTX_RESERVED4              = 0x200
+	CLSCTX_NO_CODE_DOWNLOAD       = 0x400
+	CLSCTX_RESERVED5              = 0x800
+	CLSCTX_NO_CUSTOM_MARSHAL      = 0x1000
+	CLSCTX_ENABLE_CODE_DOWNLOAD   = 0x2000
+	CLSCTX_NO_FAILURE_LOG         = 0x4000
+	CLSCTX_DISABLE_AAA            = 0x8000
+	CLSCTX_ENABLE_AAA             = 0x10000
+	CLSCTX_FROM_DEFAULT_CONTEXT   = 0x20000
+	CLSCTX_ACTIVATE_32_BIT_SERVER = 0x40000
+	CLSCTX_ACTIVATE_64_BIT_SERVER = 0x80000
+	CLSCTX_ENABLE_CLOAKING        = 0x100000
+	CLSCTX_APPCONTAINER           = 0x400000
+	CLSCTX_ACTIVATE_AAA_AS_IU     = 0x800000
+	CLSCTX_PS_DLL                 = 0x80000000
+
+	COINIT_MULTITHREADED     = 0x0
+	COINIT_APARTMENTTHREADED = 0x2
+	COINIT_DISABLE_OLE1DDE   = 0x4
+	COINIT_SPEED_OVER_MEMORY = 0x8
+)
+
+// Flag for QueryFullProcessImageName.
+const PROCESS_NAME_NATIVE = 1
+
+type ModuleInfo struct {
+	BaseOfDll   uintptr
+	SizeOfImage uint32
+	EntryPoint  uintptr
+}
+
+const ALL_PROCESSOR_GROUPS = 0xFFFF
+
+type Rect struct {
+	Left   int32
+	Top    int32
+	Right  int32
+	Bottom int32
+}
+
+type GUIThreadInfo struct {
+	Size        uint32
+	Flags       uint32
+	Active      HWND
+	Focus       HWND
+	Capture     HWND
+	MenuOwner   HWND
+	MoveSize    HWND
+	CaretHandle HWND
+	CaretRect   Rect
+}
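+
+// Sketch: query the GUI state of the foreground thread (assuming this
+// package's GetGUIThreadInfo wrapper; Size must be set before the call):
+//
+//	var gti GUIThreadInfo
+//	gti.Size = uint32(unsafe.Sizeof(gti))
+//	err := GetGUIThreadInfo(0, &gti)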
+
+const (
+	DWMWA_NCRENDERING_ENABLED            = 1
+	DWMWA_NCRENDERING_POLICY             = 2
+	DWMWA_TRANSITIONS_FORCEDISABLED      = 3
+	DWMWA_ALLOW_NCPAINT                  = 4
+	DWMWA_CAPTION_BUTTON_BOUNDS          = 5
+	DWMWA_NONCLIENT_RTL_LAYOUT           = 6
+	DWMWA_FORCE_ICONIC_REPRESENTATION    = 7
+	DWMWA_FLIP3D_POLICY                  = 8
+	DWMWA_EXTENDED_FRAME_BOUNDS          = 9
+	DWMWA_HAS_ICONIC_BITMAP              = 10
+	DWMWA_DISALLOW_PEEK                  = 11
+	DWMWA_EXCLUDED_FROM_PEEK             = 12
+	DWMWA_CLOAK                          = 13
+	DWMWA_CLOAKED                        = 14
+	DWMWA_FREEZE_REPRESENTATION          = 15
+	DWMWA_PASSIVE_UPDATE_MODE            = 16
+	DWMWA_USE_HOSTBACKDROPBRUSH          = 17
+	DWMWA_USE_IMMERSIVE_DARK_MODE        = 20
+	DWMWA_WINDOW_CORNER_PREFERENCE       = 33
+	DWMWA_BORDER_COLOR                   = 34
+	DWMWA_CAPTION_COLOR                  = 35
+	DWMWA_TEXT_COLOR                     = 36
+	DWMWA_VISIBLE_FRAME_BORDER_THICKNESS = 37
+)
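+
+// Sketch: opt a window into immersive dark mode (assuming this package's
+// DwmSetWindowAttribute wrapper; the attribute takes a BOOL-sized value):
+//
+//	enabled := int32(1)
+//	err := DwmSetWindowAttribute(hwnd, DWMWA_USE_IMMERSIVE_DARK_MODE,
+//		unsafe.Pointer(&enabled), uint32(unsafe.Sizeof(enabled)))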
+
+type WSAQUERYSET struct {
+	Size                uint32
+	ServiceInstanceName *uint16
+	ServiceClassId      *GUID
+	Version             *WSAVersion
+	Comment             *uint16
+	NameSpace           uint32
+	NSProviderId        *GUID
+	Context             *uint16
+	NumberOfProtocols   uint32
+	AfpProtocols        *AFProtocols
+	QueryString         *uint16
+	NumberOfCsAddrs     uint32
+	SaBuffer            *CSAddrInfo
+	OutputFlags         uint32
+	Blob                *BLOB
+}
+
+type WSAVersion struct {
+	Version                 uint32
+	EnumerationOfComparison int32
+}
+
+type AFProtocols struct {
+	AddressFamily int32
+	Protocol      int32
+}
+
+type CSAddrInfo struct {
+	LocalAddr  SocketAddress
+	RemoteAddr SocketAddress
+	SocketType int32
+	Protocol   int32
+}
+
+type BLOB struct {
+	Size     uint32
+	BlobData *byte
+}
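+
+// Sketch: begin a service lookup with WSAQUERYSET (assuming this package
+// exports WSALookupServiceBegin and the NS_ALL/LUP_CONTAINERS constants):
+//
+//	qs := &WSAQUERYSET{Size: uint32(unsafe.Sizeof(WSAQUERYSET{})), NameSpace: NS_ALL}
+//	var h Handle
+//	err := WSALookupServiceBegin(qs, LUP_CONTAINERS, &h)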
+
+type ComStat struct {
+	Flags    uint32
+	CBInQue  uint32
+	CBOutQue uint32
+}
+
+type DCB struct {
+	DCBlength  uint32
+	BaudRate   uint32
+	Flags      uint32
+	wReserved  uint16
+	XonLim     uint16
+	XoffLim    uint16
+	ByteSize   uint8
+	Parity     uint8
+	StopBits   uint8
+	XonChar    byte
+	XoffChar   byte
+	ErrorChar  byte
+	EofChar    byte
+	EvtChar    byte
+	wReserved1 uint16
+}
+
+// Keyboard Layout Flags.
+// See https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-loadkeyboardlayoutw
+const (
+	KLF_ACTIVATE      = 0x00000001
+	KLF_SUBSTITUTE_OK = 0x00000002
+	KLF_REORDER       = 0x00000008
+	KLF_REPLACELANG   = 0x00000010
+	KLF_NOTELLSHELL   = 0x00000080
+	KLF_SETFORPROCESS = 0x00000100
+)
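+
+// Sketch: activate the US-English layout for the current process (assuming
+// this package wraps LoadKeyboardLayoutW; "00000409" is a KLID string):
+//
+//	klid, _ := UTF16PtrFromString("00000409")
+//	hkl, err := LoadKeyboardLayout(klid, KLF_ACTIVATE|KLF_SETFORPROCESS)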
+
+// Virtual Key codes
+// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
+const (
+	VK_LBUTTON             = 0x01
+	VK_RBUTTON             = 0x02
+	VK_CANCEL              = 0x03
+	VK_MBUTTON             = 0x04
+	VK_XBUTTON1            = 0x05
+	VK_XBUTTON2            = 0x06
+	VK_BACK                = 0x08
+	VK_TAB                 = 0x09
+	VK_CLEAR               = 0x0C
+	VK_RETURN              = 0x0D
+	VK_SHIFT               = 0x10
+	VK_CONTROL             = 0x11
+	VK_MENU                = 0x12
+	VK_PAUSE               = 0x13
+	VK_CAPITAL             = 0x14
+	VK_KANA                = 0x15
+	VK_HANGEUL             = 0x15
+	VK_HANGUL              = 0x15
+	VK_IME_ON              = 0x16
+	VK_JUNJA               = 0x17
+	VK_FINAL               = 0x18
+	VK_HANJA               = 0x19
+	VK_KANJI               = 0x19
+	VK_IME_OFF             = 0x1A
+	VK_ESCAPE              = 0x1B
+	VK_CONVERT             = 0x1C
+	VK_NONCONVERT          = 0x1D
+	VK_ACCEPT              = 0x1E
+	VK_MODECHANGE          = 0x1F
+	VK_SPACE               = 0x20
+	VK_PRIOR               = 0x21
+	VK_NEXT                = 0x22
+	VK_END                 = 0x23
+	VK_HOME                = 0x24
+	VK_LEFT                = 0x25
+	VK_UP                  = 0x26
+	VK_RIGHT               = 0x27
+	VK_DOWN                = 0x28
+	VK_SELECT              = 0x29
+	VK_PRINT               = 0x2A
+	VK_EXECUTE             = 0x2B
+	VK_SNAPSHOT            = 0x2C
+	VK_INSERT              = 0x2D
+	VK_DELETE              = 0x2E
+	VK_HELP                = 0x2F
+	VK_LWIN                = 0x5B
+	VK_RWIN                = 0x5C
+	VK_APPS                = 0x5D
+	VK_SLEEP               = 0x5F
+	VK_NUMPAD0             = 0x60
+	VK_NUMPAD1             = 0x61
+	VK_NUMPAD2             = 0x62
+	VK_NUMPAD3             = 0x63
+	VK_NUMPAD4             = 0x64
+	VK_NUMPAD5             = 0x65
+	VK_NUMPAD6             = 0x66
+	VK_NUMPAD7             = 0x67
+	VK_NUMPAD8             = 0x68
+	VK_NUMPAD9             = 0x69
+	VK_MULTIPLY            = 0x6A
+	VK_ADD                 = 0x6B
+	VK_SEPARATOR           = 0x6C
+	VK_SUBTRACT            = 0x6D
+	VK_DECIMAL             = 0x6E
+	VK_DIVIDE              = 0x6F
+	VK_F1                  = 0x70
+	VK_F2                  = 0x71
+	VK_F3                  = 0x72
+	VK_F4                  = 0x73
+	VK_F5                  = 0x74
+	VK_F6                  = 0x75
+	VK_F7                  = 0x76
+	VK_F8                  = 0x77
+	VK_F9                  = 0x78
+	VK_F10                 = 0x79
+	VK_F11                 = 0x7A
+	VK_F12                 = 0x7B
+	VK_F13                 = 0x7C
+	VK_F14                 = 0x7D
+	VK_F15                 = 0x7E
+	VK_F16                 = 0x7F
+	VK_F17                 = 0x80
+	VK_F18                 = 0x81
+	VK_F19                 = 0x82
+	VK_F20                 = 0x83
+	VK_F21                 = 0x84
+	VK_F22                 = 0x85
+	VK_F23                 = 0x86
+	VK_F24                 = 0x87
+	VK_NUMLOCK             = 0x90
+	VK_SCROLL              = 0x91
+	VK_OEM_NEC_EQUAL       = 0x92
+	VK_OEM_FJ_JISHO        = 0x92
+	VK_OEM_FJ_MASSHOU      = 0x93
+	VK_OEM_FJ_TOUROKU      = 0x94
+	VK_OEM_FJ_LOYA         = 0x95
+	VK_OEM_FJ_ROYA         = 0x96
+	VK_LSHIFT              = 0xA0
+	VK_RSHIFT              = 0xA1
+	VK_LCONTROL            = 0xA2
+	VK_RCONTROL            = 0xA3
+	VK_LMENU               = 0xA4
+	VK_RMENU               = 0xA5
+	VK_BROWSER_BACK        = 0xA6
+	VK_BROWSER_FORWARD     = 0xA7
+	VK_BROWSER_REFRESH     = 0xA8
+	VK_BROWSER_STOP        = 0xA9
+	VK_BROWSER_SEARCH      = 0xAA
+	VK_BROWSER_FAVORITES   = 0xAB
+	VK_BROWSER_HOME        = 0xAC
+	VK_VOLUME_MUTE         = 0xAD
+	VK_VOLUME_DOWN         = 0xAE
+	VK_VOLUME_UP           = 0xAF
+	VK_MEDIA_NEXT_TRACK    = 0xB0
+	VK_MEDIA_PREV_TRACK    = 0xB1
+	VK_MEDIA_STOP          = 0xB2
+	VK_MEDIA_PLAY_PAUSE    = 0xB3
+	VK_LAUNCH_MAIL         = 0xB4
+	VK_LAUNCH_MEDIA_SELECT = 0xB5
+	VK_LAUNCH_APP1         = 0xB6
+	VK_LAUNCH_APP2         = 0xB7
+	VK_OEM_1               = 0xBA
+	VK_OEM_PLUS            = 0xBB
+	VK_OEM_COMMA           = 0xBC
+	VK_OEM_MINUS           = 0xBD
+	VK_OEM_PERIOD          = 0xBE
+	VK_OEM_2               = 0xBF
+	VK_OEM_3               = 0xC0
+	VK_OEM_4               = 0xDB
+	VK_OEM_5               = 0xDC
+	VK_OEM_6               = 0xDD
+	VK_OEM_7               = 0xDE
+	VK_OEM_8               = 0xDF
+	VK_OEM_AX              = 0xE1
+	VK_OEM_102             = 0xE2
+	VK_ICO_HELP            = 0xE3
+	VK_ICO_00              = 0xE4
+	VK_PROCESSKEY          = 0xE5
+	VK_ICO_CLEAR           = 0xE6
+	VK_OEM_RESET           = 0xE9
+	VK_OEM_JUMP            = 0xEA
+	VK_OEM_PA1             = 0xEB
+	VK_OEM_PA2             = 0xEC
+	VK_OEM_PA3             = 0xED
+	VK_OEM_WSCTRL          = 0xEE
+	VK_OEM_CUSEL           = 0xEF
+	VK_OEM_ATTN            = 0xF0
+	VK_OEM_FINISH          = 0xF1
+	VK_OEM_COPY            = 0xF2
+	VK_OEM_AUTO            = 0xF3
+	VK_OEM_ENLW            = 0xF4
+	VK_OEM_BACKTAB         = 0xF5
+	VK_ATTN                = 0xF6
+	VK_CRSEL               = 0xF7
+	VK_EXSEL               = 0xF8
+	VK_EREOF               = 0xF9
+	VK_PLAY                = 0xFA
+	VK_ZOOM                = 0xFB
+	VK_NONAME              = 0xFC
+	VK_PA1                 = 0xFD
+	VK_OEM_CLEAR           = 0xFE
+)
+
+// Mouse button constants.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+	FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001
+	RIGHTMOST_BUTTON_PRESSED     = 0x0002
+	FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004
+	FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008
+	FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010
+)
+
+// Control key state constants.
+// https://docs.microsoft.com/en-us/windows/console/key-event-record-str
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+	CAPSLOCK_ON        = 0x0080
+	ENHANCED_KEY       = 0x0100
+	LEFT_ALT_PRESSED   = 0x0002
+	LEFT_CTRL_PRESSED  = 0x0008
+	NUMLOCK_ON         = 0x0020
+	RIGHT_ALT_PRESSED  = 0x0001
+	RIGHT_CTRL_PRESSED = 0x0004
+	SCROLLLOCK_ON      = 0x0040
+	SHIFT_PRESSED      = 0x0010
+)
+
+// Mouse event record event flags.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+	MOUSE_MOVED    = 0x0001
+	DOUBLE_CLICK   = 0x0002
+	MOUSE_WHEELED  = 0x0004
+	MOUSE_HWHEELED = 0x0008
+)
+
+// Input Record Event Types
+// https://learn.microsoft.com/en-us/windows/console/input-record-str
+const (
+	FOCUS_EVENT              = 0x0010
+	KEY_EVENT                = 0x0001
+	MENU_EVENT               = 0x0008
+	MOUSE_EVENT              = 0x0002
+	WINDOW_BUFFER_SIZE_EVENT = 0x0004
+)
diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go
new file mode 100644
index 0000000..8bce3e2
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_386.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Port    uint16
+	Proto   *byte
+}
+
+type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
+	PerProcessUserTimeLimit int64
+	PerJobUserTimeLimit     int64
+	LimitFlags              uint32
+	MinimumWorkingSetSize   uintptr
+	MaximumWorkingSetSize   uintptr
+	ActiveProcessLimit      uint32
+	Affinity                uintptr
+	PriorityClass           uint32
+	SchedulingClass         uint32
+	_                       uint32 // pad to 8 byte boundary
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
new file mode 100644
index 0000000..fdddc0c
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
@@ -0,0 +1,34 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Proto   *byte
+	Port    uint16
+}
+
+type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
+	PerProcessUserTimeLimit int64
+	PerJobUserTimeLimit     int64
+	LimitFlags              uint32
+	MinimumWorkingSetSize   uintptr
+	MaximumWorkingSetSize   uintptr
+	ActiveProcessLimit      uint32
+	Affinity                uintptr
+	PriorityClass           uint32
+	SchedulingClass         uint32
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm.go b/vendor/golang.org/x/sys/windows/types_windows_arm.go
new file mode 100644
index 0000000..321872c
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_arm.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Port    uint16
+	Proto   *byte
+}
+
+type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
+	PerProcessUserTimeLimit int64
+	PerJobUserTimeLimit     int64
+	LimitFlags              uint32
+	MinimumWorkingSetSize   uintptr
+	MaximumWorkingSetSize   uintptr
+	ActiveProcessLimit      uint32
+	Affinity                uintptr
+	PriorityClass           uint32
+	SchedulingClass         uint32
+	_                       uint32 // pad to 8 byte boundary
+}
diff --git a/vendor/golang.org/x/sys/windows/types_windows_arm64.go b/vendor/golang.org/x/sys/windows/types_windows_arm64.go
new file mode 100644
index 0000000..fdddc0c
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/types_windows_arm64.go
@@ -0,0 +1,34 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package windows
+
+type WSAData struct {
+	Version      uint16
+	HighVersion  uint16
+	MaxSockets   uint16
+	MaxUdpDg     uint16
+	VendorInfo   *byte
+	Description  [WSADESCRIPTION_LEN + 1]byte
+	SystemStatus [WSASYS_STATUS_LEN + 1]byte
+}
+
+type Servent struct {
+	Name    *byte
+	Aliases **byte
+	Proto   *byte
+	Port    uint16
+}
+
+type JOBOBJECT_BASIC_LIMIT_INFORMATION struct {
+	PerProcessUserTimeLimit int64
+	PerJobUserTimeLimit     int64
+	LimitFlags              uint32
+	MinimumWorkingSetSize   uintptr
+	MaximumWorkingSetSize   uintptr
+	ActiveProcessLimit      uint32
+	Affinity                uintptr
+	PriorityClass           uint32
+	SchedulingClass         uint32
+}
diff --git a/vendor/golang.org/x/sys/windows/zerrors_windows.go b/vendor/golang.org/x/sys/windows/zerrors_windows.go
new file mode 100644
index 0000000..0cf658f
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zerrors_windows.go
@@ -0,0 +1,9468 @@
+// Code generated by 'mkerrors.bash'; DO NOT EDIT.
+
+package windows
+
+import "syscall"
+
+const (
+	FACILITY_NULL                                                                           = 0
+	FACILITY_RPC                                                                            = 1
+	FACILITY_DISPATCH                                                                       = 2
+	FACILITY_STORAGE                                                                        = 3
+	FACILITY_ITF                                                                            = 4
+	FACILITY_WIN32                                                                          = 7
+	FACILITY_WINDOWS                                                                        = 8
+	FACILITY_SSPI                                                                           = 9
+	FACILITY_SECURITY                                                                       = 9
+	FACILITY_CONTROL                                                                        = 10
+	FACILITY_CERT                                                                           = 11
+	FACILITY_INTERNET                                                                       = 12
+	FACILITY_MEDIASERVER                                                                    = 13
+	FACILITY_MSMQ                                                                           = 14
+	FACILITY_SETUPAPI                                                                       = 15
+	FACILITY_SCARD                                                                          = 16
+	FACILITY_COMPLUS                                                                        = 17
+	FACILITY_AAF                                                                            = 18
+	FACILITY_URT                                                                            = 19
+	FACILITY_ACS                                                                            = 20
+	FACILITY_DPLAY                                                                          = 21
+	FACILITY_UMI                                                                            = 22
+	FACILITY_SXS                                                                            = 23
+	FACILITY_WINDOWS_CE                                                                     = 24
+	FACILITY_HTTP                                                                           = 25
+	FACILITY_USERMODE_COMMONLOG                                                             = 26
+	FACILITY_WER                                                                            = 27
+	FACILITY_USERMODE_FILTER_MANAGER                                                        = 31
+	FACILITY_BACKGROUNDCOPY                                                                 = 32
+	FACILITY_CONFIGURATION                                                                  = 33
+	FACILITY_WIA                                                                            = 33
+	FACILITY_STATE_MANAGEMENT                                                               = 34
+	FACILITY_METADIRECTORY                                                                  = 35
+	FACILITY_WINDOWSUPDATE                                                                  = 36
+	FACILITY_DIRECTORYSERVICE                                                               = 37
+	FACILITY_GRAPHICS                                                                       = 38
+	FACILITY_SHELL                                                                          = 39
+	FACILITY_NAP                                                                            = 39
+	FACILITY_TPM_SERVICES                                                                   = 40
+	FACILITY_TPM_SOFTWARE                                                                   = 41
+	FACILITY_UI                                                                             = 42
+	FACILITY_XAML                                                                           = 43
+	FACILITY_ACTION_QUEUE                                                                   = 44
+	FACILITY_PLA                                                                            = 48
+	FACILITY_WINDOWS_SETUP                                                                  = 48
+	FACILITY_FVE                                                                            = 49
+	FACILITY_FWP                                                                            = 50
+	FACILITY_WINRM                                                                          = 51
+	FACILITY_NDIS                                                                           = 52
+	FACILITY_USERMODE_HYPERVISOR                                                            = 53
+	FACILITY_CMI                                                                            = 54
+	FACILITY_USERMODE_VIRTUALIZATION                                                        = 55
+	FACILITY_USERMODE_VOLMGR                                                                = 56
+	FACILITY_BCD                                                                            = 57
+	FACILITY_USERMODE_VHD                                                                   = 58
+	FACILITY_USERMODE_HNS                                                                   = 59
+	FACILITY_SDIAG                                                                          = 60
+	FACILITY_WEBSERVICES                                                                    = 61
+	FACILITY_WINPE                                                                          = 61
+	FACILITY_WPN                                                                            = 62
+	FACILITY_WINDOWS_STORE                                                                  = 63
+	FACILITY_INPUT                                                                          = 64
+	FACILITY_EAP                                                                            = 66
+	FACILITY_WINDOWS_DEFENDER                                                               = 80
+	FACILITY_OPC                                                                            = 81
+	FACILITY_XPS                                                                            = 82
+	FACILITY_MBN                                                                            = 84
+	FACILITY_POWERSHELL                                                                     = 84
+	FACILITY_RAS                                                                            = 83
+	FACILITY_P2P_INT                                                                        = 98
+	FACILITY_P2P                                                                            = 99
+	FACILITY_DAF                                                                            = 100
+	FACILITY_BLUETOOTH_ATT                                                                  = 101
+	FACILITY_AUDIO                                                                          = 102
+	FACILITY_STATEREPOSITORY                                                                = 103
+	FACILITY_VISUALCPP                                                                      = 109
+	FACILITY_SCRIPT                                                                         = 112
+	FACILITY_PARSE                                                                          = 113
+	FACILITY_BLB                                                                            = 120
+	FACILITY_BLB_CLI                                                                        = 121
+	FACILITY_WSBAPP                                                                         = 122
+	FACILITY_BLBUI                                                                          = 128
+	FACILITY_USN                                                                            = 129
+	FACILITY_USERMODE_VOLSNAP                                                               = 130
+	FACILITY_TIERING                                                                        = 131
+	FACILITY_WSB_ONLINE                                                                     = 133
+	FACILITY_ONLINE_ID                                                                      = 134
+	FACILITY_DEVICE_UPDATE_AGENT                                                            = 135
+	FACILITY_DRVSERVICING                                                                   = 136
+	FACILITY_DLS                                                                            = 153
+	FACILITY_DELIVERY_OPTIMIZATION                                                          = 208
+	FACILITY_USERMODE_SPACES                                                                = 231
+	FACILITY_USER_MODE_SECURITY_CORE                                                        = 232
+	FACILITY_USERMODE_LICENSING                                                             = 234
+	FACILITY_SOS                                                                            = 160
+	FACILITY_DEBUGGERS                                                                      = 176
+	FACILITY_SPP                                                                            = 256
+	FACILITY_RESTORE                                                                        = 256
+	FACILITY_DMSERVER                                                                       = 256
+	FACILITY_DEPLOYMENT_SERVICES_SERVER                                                     = 257
+	FACILITY_DEPLOYMENT_SERVICES_IMAGING                                                    = 258
+	FACILITY_DEPLOYMENT_SERVICES_MANAGEMENT                                                 = 259
+	FACILITY_DEPLOYMENT_SERVICES_UTIL                                                       = 260
+	FACILITY_DEPLOYMENT_SERVICES_BINLSVC                                                    = 261
+	FACILITY_DEPLOYMENT_SERVICES_PXE                                                        = 263
+	FACILITY_DEPLOYMENT_SERVICES_TFTP                                                       = 264
+	FACILITY_DEPLOYMENT_SERVICES_TRANSPORT_MANAGEMENT                                       = 272
+	FACILITY_DEPLOYMENT_SERVICES_DRIVER_PROVISIONING                                        = 278
+	FACILITY_DEPLOYMENT_SERVICES_MULTICAST_SERVER                                           = 289
+	FACILITY_DEPLOYMENT_SERVICES_MULTICAST_CLIENT                                           = 290
+	FACILITY_DEPLOYMENT_SERVICES_CONTENT_PROVIDER                                           = 293
+	FACILITY_LINGUISTIC_SERVICES                                                            = 305
+	FACILITY_AUDIOSTREAMING                                                                 = 1094
+	FACILITY_ACCELERATOR                                                                    = 1536
+	FACILITY_WMAAECMA                                                                       = 1996
+	FACILITY_DIRECTMUSIC                                                                    = 2168
+	FACILITY_DIRECT3D10                                                                     = 2169
+	FACILITY_DXGI                                                                           = 2170
+	FACILITY_DXGI_DDI                                                                       = 2171
+	FACILITY_DIRECT3D11                                                                     = 2172
+	FACILITY_DIRECT3D11_DEBUG                                                               = 2173
+	FACILITY_DIRECT3D12                                                                     = 2174
+	FACILITY_DIRECT3D12_DEBUG                                                               = 2175
+	FACILITY_LEAP                                                                           = 2184
+	FACILITY_AUDCLNT                                                                        = 2185
+	FACILITY_WINCODEC_DWRITE_DWM                                                            = 2200
+	FACILITY_WINML                                                                          = 2192
+	FACILITY_DIRECT2D                                                                       = 2201
+	FACILITY_DEFRAG                                                                         = 2304
+	FACILITY_USERMODE_SDBUS                                                                 = 2305
+	FACILITY_JSCRIPT                                                                        = 2306
+	FACILITY_PIDGENX                                                                        = 2561
+	FACILITY_EAS                                                                            = 85
+	FACILITY_WEB                                                                            = 885
+	FACILITY_WEB_SOCKET                                                                     = 886
+	FACILITY_MOBILE                                                                         = 1793
+	FACILITY_SQLITE                                                                         = 1967
+	FACILITY_UTC                                                                            = 1989
+	FACILITY_WEP                                                                            = 2049
+	FACILITY_SYNCENGINE                                                                     = 2050
+	FACILITY_XBOX                                                                           = 2339
+	FACILITY_GAME                                                                           = 2340
+	FACILITY_PIX                                                                            = 2748
+	ERROR_SUCCESS                                                             syscall.Errno = 0
+	NO_ERROR                                                                                = 0
+	SEC_E_OK                                                                  Handle        = 0x00000000
+	ERROR_INVALID_FUNCTION                                                    syscall.Errno = 1
+	ERROR_FILE_NOT_FOUND                                                      syscall.Errno = 2
+	ERROR_PATH_NOT_FOUND                                                      syscall.Errno = 3
+	ERROR_TOO_MANY_OPEN_FILES                                                 syscall.Errno = 4
+	ERROR_ACCESS_DENIED                                                       syscall.Errno = 5
+	ERROR_INVALID_HANDLE                                                      syscall.Errno = 6
+	ERROR_ARENA_TRASHED                                                       syscall.Errno = 7
+	ERROR_NOT_ENOUGH_MEMORY                                                   syscall.Errno = 8
+	ERROR_INVALID_BLOCK                                                       syscall.Errno = 9
+	ERROR_BAD_ENVIRONMENT                                                     syscall.Errno = 10
+	ERROR_BAD_FORMAT                                                          syscall.Errno = 11
+	ERROR_INVALID_ACCESS                                                      syscall.Errno = 12
+	ERROR_INVALID_DATA                                                        syscall.Errno = 13
+	ERROR_OUTOFMEMORY                                                         syscall.Errno = 14
+	ERROR_INVALID_DRIVE                                                       syscall.Errno = 15
+	ERROR_CURRENT_DIRECTORY                                                   syscall.Errno = 16
+	ERROR_NOT_SAME_DEVICE                                                     syscall.Errno = 17
+	ERROR_NO_MORE_FILES                                                       syscall.Errno = 18
+	ERROR_WRITE_PROTECT                                                       syscall.Errno = 19
+	ERROR_BAD_UNIT                                                            syscall.Errno = 20
+	ERROR_NOT_READY                                                           syscall.Errno = 21
+	ERROR_BAD_COMMAND                                                         syscall.Errno = 22
+	ERROR_CRC                                                                 syscall.Errno = 23
+	ERROR_BAD_LENGTH                                                          syscall.Errno = 24
+	ERROR_SEEK                                                                syscall.Errno = 25
+	ERROR_NOT_DOS_DISK                                                        syscall.Errno = 26
+	ERROR_SECTOR_NOT_FOUND                                                    syscall.Errno = 27
+	ERROR_OUT_OF_PAPER                                                        syscall.Errno = 28
+	ERROR_WRITE_FAULT                                                         syscall.Errno = 29
+	ERROR_READ_FAULT                                                          syscall.Errno = 30
+	ERROR_GEN_FAILURE                                                         syscall.Errno = 31
+	ERROR_SHARING_VIOLATION                                                   syscall.Errno = 32
+	ERROR_LOCK_VIOLATION                                                      syscall.Errno = 33
+	ERROR_WRONG_DISK                                                          syscall.Errno = 34
+	ERROR_SHARING_BUFFER_EXCEEDED                                             syscall.Errno = 36
+	ERROR_HANDLE_EOF                                                          syscall.Errno = 38
+	ERROR_HANDLE_DISK_FULL                                                    syscall.Errno = 39
+	ERROR_NOT_SUPPORTED                                                       syscall.Errno = 50
+	ERROR_REM_NOT_LIST                                                        syscall.Errno = 51
+	ERROR_DUP_NAME                                                            syscall.Errno = 52
+	ERROR_BAD_NETPATH                                                         syscall.Errno = 53
+	ERROR_NETWORK_BUSY                                                        syscall.Errno = 54
+	ERROR_DEV_NOT_EXIST                                                       syscall.Errno = 55
+	ERROR_TOO_MANY_CMDS                                                       syscall.Errno = 56
+	ERROR_ADAP_HDW_ERR                                                        syscall.Errno = 57
+	ERROR_BAD_NET_RESP                                                        syscall.Errno = 58
+	ERROR_UNEXP_NET_ERR                                                       syscall.Errno = 59
+	ERROR_BAD_REM_ADAP                                                        syscall.Errno = 60
+	ERROR_PRINTQ_FULL                                                         syscall.Errno = 61
+	ERROR_NO_SPOOL_SPACE                                                      syscall.Errno = 62
+	ERROR_PRINT_CANCELLED                                                     syscall.Errno = 63
+	ERROR_NETNAME_DELETED                                                     syscall.Errno = 64
+	ERROR_NETWORK_ACCESS_DENIED                                               syscall.Errno = 65
+	ERROR_BAD_DEV_TYPE                                                        syscall.Errno = 66
+	ERROR_BAD_NET_NAME                                                        syscall.Errno = 67
+	ERROR_TOO_MANY_NAMES                                                      syscall.Errno = 68
+	ERROR_TOO_MANY_SESS                                                       syscall.Errno = 69
+	ERROR_SHARING_PAUSED                                                      syscall.Errno = 70
+	ERROR_REQ_NOT_ACCEP                                                       syscall.Errno = 71
+	ERROR_REDIR_PAUSED                                                        syscall.Errno = 72
+	ERROR_FILE_EXISTS                                                         syscall.Errno = 80
+	ERROR_CANNOT_MAKE                                                         syscall.Errno = 82
+	ERROR_FAIL_I24                                                            syscall.Errno = 83
+	ERROR_OUT_OF_STRUCTURES                                                   syscall.Errno = 84
+	ERROR_ALREADY_ASSIGNED                                                    syscall.Errno = 85
+	ERROR_INVALID_PASSWORD                                                    syscall.Errno = 86
+	ERROR_INVALID_PARAMETER                                                   syscall.Errno = 87
+	ERROR_NET_WRITE_FAULT                                                     syscall.Errno = 88
+	ERROR_NO_PROC_SLOTS                                                       syscall.Errno = 89
+	ERROR_TOO_MANY_SEMAPHORES                                                 syscall.Errno = 100
+	ERROR_EXCL_SEM_ALREADY_OWNED                                              syscall.Errno = 101
+	ERROR_SEM_IS_SET                                                          syscall.Errno = 102
+	ERROR_TOO_MANY_SEM_REQUESTS                                               syscall.Errno = 103
+	ERROR_INVALID_AT_INTERRUPT_TIME                                           syscall.Errno = 104
+	ERROR_SEM_OWNER_DIED                                                      syscall.Errno = 105
+	ERROR_SEM_USER_LIMIT                                                      syscall.Errno = 106
+	ERROR_DISK_CHANGE                                                         syscall.Errno = 107
+	ERROR_DRIVE_LOCKED                                                        syscall.Errno = 108
+	ERROR_BROKEN_PIPE                                                         syscall.Errno = 109
+	ERROR_OPEN_FAILED                                                         syscall.Errno = 110
+	ERROR_BUFFER_OVERFLOW                                                     syscall.Errno = 111
+	ERROR_DISK_FULL                                                           syscall.Errno = 112
+	ERROR_NO_MORE_SEARCH_HANDLES                                              syscall.Errno = 113
+	ERROR_INVALID_TARGET_HANDLE                                               syscall.Errno = 114
+	ERROR_INVALID_CATEGORY                                                    syscall.Errno = 117
+	ERROR_INVALID_VERIFY_SWITCH                                               syscall.Errno = 118
+	ERROR_BAD_DRIVER_LEVEL                                                    syscall.Errno = 119
+	ERROR_CALL_NOT_IMPLEMENTED                                                syscall.Errno = 120
+	ERROR_SEM_TIMEOUT                                                         syscall.Errno = 121
+	ERROR_INSUFFICIENT_BUFFER                                                 syscall.Errno = 122
+	ERROR_INVALID_NAME                                                        syscall.Errno = 123
+	ERROR_INVALID_LEVEL                                                       syscall.Errno = 124
+	ERROR_NO_VOLUME_LABEL                                                     syscall.Errno = 125
+	ERROR_MOD_NOT_FOUND                                                       syscall.Errno = 126
+	ERROR_PROC_NOT_FOUND                                                      syscall.Errno = 127
+	ERROR_WAIT_NO_CHILDREN                                                    syscall.Errno = 128
+	ERROR_CHILD_NOT_COMPLETE                                                  syscall.Errno = 129
+	ERROR_DIRECT_ACCESS_HANDLE                                                syscall.Errno = 130
+	ERROR_NEGATIVE_SEEK                                                       syscall.Errno = 131
+	ERROR_SEEK_ON_DEVICE                                                      syscall.Errno = 132
+	ERROR_IS_JOIN_TARGET                                                      syscall.Errno = 133
+	ERROR_IS_JOINED                                                           syscall.Errno = 134
+	ERROR_IS_SUBSTED                                                          syscall.Errno = 135
+	ERROR_NOT_JOINED                                                          syscall.Errno = 136
+	ERROR_NOT_SUBSTED                                                         syscall.Errno = 137
+	ERROR_JOIN_TO_JOIN                                                        syscall.Errno = 138
+	ERROR_SUBST_TO_SUBST                                                      syscall.Errno = 139
+	ERROR_JOIN_TO_SUBST                                                       syscall.Errno = 140
+	ERROR_SUBST_TO_JOIN                                                       syscall.Errno = 141
+	ERROR_BUSY_DRIVE                                                          syscall.Errno = 142
+	ERROR_SAME_DRIVE                                                          syscall.Errno = 143
+	ERROR_DIR_NOT_ROOT                                                        syscall.Errno = 144
+	ERROR_DIR_NOT_EMPTY                                                       syscall.Errno = 145
+	ERROR_IS_SUBST_PATH                                                       syscall.Errno = 146
+	ERROR_IS_JOIN_PATH                                                        syscall.Errno = 147
+	ERROR_PATH_BUSY                                                           syscall.Errno = 148
+	ERROR_IS_SUBST_TARGET                                                     syscall.Errno = 149
+	ERROR_SYSTEM_TRACE                                                        syscall.Errno = 150
+	ERROR_INVALID_EVENT_COUNT                                                 syscall.Errno = 151
+	ERROR_TOO_MANY_MUXWAITERS                                                 syscall.Errno = 152
+	ERROR_INVALID_LIST_FORMAT                                                 syscall.Errno = 153
+	ERROR_LABEL_TOO_LONG                                                      syscall.Errno = 154
+	ERROR_TOO_MANY_TCBS                                                       syscall.Errno = 155
+	ERROR_SIGNAL_REFUSED                                                      syscall.Errno = 156
+	ERROR_DISCARDED                                                           syscall.Errno = 157
+	ERROR_NOT_LOCKED                                                          syscall.Errno = 158
+	ERROR_BAD_THREADID_ADDR                                                   syscall.Errno = 159
+	ERROR_BAD_ARGUMENTS                                                       syscall.Errno = 160
+	ERROR_BAD_PATHNAME                                                        syscall.Errno = 161
+	ERROR_SIGNAL_PENDING                                                      syscall.Errno = 162
+	ERROR_MAX_THRDS_REACHED                                                   syscall.Errno = 164
+	ERROR_LOCK_FAILED                                                         syscall.Errno = 167
+	ERROR_BUSY                                                                syscall.Errno = 170
+	ERROR_DEVICE_SUPPORT_IN_PROGRESS                                          syscall.Errno = 171
+	ERROR_CANCEL_VIOLATION                                                    syscall.Errno = 173
+	ERROR_ATOMIC_LOCKS_NOT_SUPPORTED                                          syscall.Errno = 174
+	ERROR_INVALID_SEGMENT_NUMBER                                              syscall.Errno = 180
+	ERROR_INVALID_ORDINAL                                                     syscall.Errno = 182
+	ERROR_ALREADY_EXISTS                                                      syscall.Errno = 183
+	ERROR_INVALID_FLAG_NUMBER                                                 syscall.Errno = 186
+	ERROR_SEM_NOT_FOUND                                                       syscall.Errno = 187
+	ERROR_INVALID_STARTING_CODESEG                                            syscall.Errno = 188
+	ERROR_INVALID_STACKSEG                                                    syscall.Errno = 189
+	ERROR_INVALID_MODULETYPE                                                  syscall.Errno = 190
+	ERROR_INVALID_EXE_SIGNATURE                                               syscall.Errno = 191
+	ERROR_EXE_MARKED_INVALID                                                  syscall.Errno = 192
+	ERROR_BAD_EXE_FORMAT                                                      syscall.Errno = 193
+	ERROR_ITERATED_DATA_EXCEEDS_64k                                           syscall.Errno = 194
+	ERROR_INVALID_MINALLOCSIZE                                                syscall.Errno = 195
+	ERROR_DYNLINK_FROM_INVALID_RING                                           syscall.Errno = 196
+	ERROR_IOPL_NOT_ENABLED                                                    syscall.Errno = 197
+	ERROR_INVALID_SEGDPL                                                      syscall.Errno = 198
+	ERROR_AUTODATASEG_EXCEEDS_64k                                             syscall.Errno = 199
+	ERROR_RING2SEG_MUST_BE_MOVABLE                                            syscall.Errno = 200
+	ERROR_RELOC_CHAIN_XEEDS_SEGLIM                                            syscall.Errno = 201
+	ERROR_INFLOOP_IN_RELOC_CHAIN                                              syscall.Errno = 202
+	ERROR_ENVVAR_NOT_FOUND                                                    syscall.Errno = 203
+	ERROR_NO_SIGNAL_SENT                                                      syscall.Errno = 205
+	ERROR_FILENAME_EXCED_RANGE                                                syscall.Errno = 206
+	ERROR_RING2_STACK_IN_USE                                                  syscall.Errno = 207
+	ERROR_META_EXPANSION_TOO_LONG                                             syscall.Errno = 208
+	ERROR_INVALID_SIGNAL_NUMBER                                               syscall.Errno = 209
+	ERROR_THREAD_1_INACTIVE                                                   syscall.Errno = 210
+	ERROR_LOCKED                                                              syscall.Errno = 212
+	ERROR_TOO_MANY_MODULES                                                    syscall.Errno = 214
+	ERROR_NESTING_NOT_ALLOWED                                                 syscall.Errno = 215
+	ERROR_EXE_MACHINE_TYPE_MISMATCH                                           syscall.Errno = 216
+	ERROR_EXE_CANNOT_MODIFY_SIGNED_BINARY                                     syscall.Errno = 217
+	ERROR_EXE_CANNOT_MODIFY_STRONG_SIGNED_BINARY                              syscall.Errno = 218
+	ERROR_FILE_CHECKED_OUT                                                    syscall.Errno = 220
+	ERROR_CHECKOUT_REQUIRED                                                   syscall.Errno = 221
+	ERROR_BAD_FILE_TYPE                                                       syscall.Errno = 222
+	ERROR_FILE_TOO_LARGE                                                      syscall.Errno = 223
+	ERROR_FORMS_AUTH_REQUIRED                                                 syscall.Errno = 224
+	ERROR_VIRUS_INFECTED                                                      syscall.Errno = 225
+	ERROR_VIRUS_DELETED                                                       syscall.Errno = 226
+	ERROR_PIPE_LOCAL                                                          syscall.Errno = 229
+	ERROR_BAD_PIPE                                                            syscall.Errno = 230
+	ERROR_PIPE_BUSY                                                           syscall.Errno = 231
+	ERROR_NO_DATA                                                             syscall.Errno = 232
+	ERROR_PIPE_NOT_CONNECTED                                                  syscall.Errno = 233
+	ERROR_MORE_DATA                                                           syscall.Errno = 234
+	ERROR_NO_WORK_DONE                                                        syscall.Errno = 235
+	ERROR_VC_DISCONNECTED                                                     syscall.Errno = 240
+	ERROR_INVALID_EA_NAME                                                     syscall.Errno = 254
+	ERROR_EA_LIST_INCONSISTENT                                                syscall.Errno = 255
+	WAIT_TIMEOUT                                                              syscall.Errno = 258
+	ERROR_NO_MORE_ITEMS                                                       syscall.Errno = 259
+	ERROR_CANNOT_COPY                                                         syscall.Errno = 266
+	ERROR_DIRECTORY                                                           syscall.Errno = 267
+	ERROR_EAS_DIDNT_FIT                                                       syscall.Errno = 275
+	ERROR_EA_FILE_CORRUPT                                                     syscall.Errno = 276
+	ERROR_EA_TABLE_FULL                                                       syscall.Errno = 277
+	ERROR_INVALID_EA_HANDLE                                                   syscall.Errno = 278
+	ERROR_EAS_NOT_SUPPORTED                                                   syscall.Errno = 282
+	ERROR_NOT_OWNER                                                           syscall.Errno = 288
+	ERROR_TOO_MANY_POSTS                                                      syscall.Errno = 298
+	ERROR_PARTIAL_COPY                                                        syscall.Errno = 299
+	ERROR_OPLOCK_NOT_GRANTED                                                  syscall.Errno = 300
+	ERROR_INVALID_OPLOCK_PROTOCOL                                             syscall.Errno = 301
+	ERROR_DISK_TOO_FRAGMENTED                                                 syscall.Errno = 302
+	ERROR_DELETE_PENDING                                                      syscall.Errno = 303
+	ERROR_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING                syscall.Errno = 304
+	ERROR_SHORT_NAMES_NOT_ENABLED_ON_VOLUME                                   syscall.Errno = 305
+	ERROR_SECURITY_STREAM_IS_INCONSISTENT                                     syscall.Errno = 306
+	ERROR_INVALID_LOCK_RANGE                                                  syscall.Errno = 307
+	ERROR_IMAGE_SUBSYSTEM_NOT_PRESENT                                         syscall.Errno = 308
+	ERROR_NOTIFICATION_GUID_ALREADY_DEFINED                                   syscall.Errno = 309
+	ERROR_INVALID_EXCEPTION_HANDLER                                           syscall.Errno = 310
+	ERROR_DUPLICATE_PRIVILEGES                                                syscall.Errno = 311
+	ERROR_NO_RANGES_PROCESSED                                                 syscall.Errno = 312
+	ERROR_NOT_ALLOWED_ON_SYSTEM_FILE                                          syscall.Errno = 313
+	ERROR_DISK_RESOURCES_EXHAUSTED                                            syscall.Errno = 314
+	ERROR_INVALID_TOKEN                                                       syscall.Errno = 315
+	ERROR_DEVICE_FEATURE_NOT_SUPPORTED                                        syscall.Errno = 316
+	ERROR_MR_MID_NOT_FOUND                                                    syscall.Errno = 317
+	ERROR_SCOPE_NOT_FOUND                                                     syscall.Errno = 318
+	ERROR_UNDEFINED_SCOPE                                                     syscall.Errno = 319
+	ERROR_INVALID_CAP                                                         syscall.Errno = 320
+	ERROR_DEVICE_UNREACHABLE                                                  syscall.Errno = 321
+	ERROR_DEVICE_NO_RESOURCES                                                 syscall.Errno = 322
+	ERROR_DATA_CHECKSUM_ERROR                                                 syscall.Errno = 323
+	ERROR_INTERMIXED_KERNEL_EA_OPERATION                                      syscall.Errno = 324
+	ERROR_FILE_LEVEL_TRIM_NOT_SUPPORTED                                       syscall.Errno = 326
+	ERROR_OFFSET_ALIGNMENT_VIOLATION                                          syscall.Errno = 327
+	ERROR_INVALID_FIELD_IN_PARAMETER_LIST                                     syscall.Errno = 328
+	ERROR_OPERATION_IN_PROGRESS                                               syscall.Errno = 329
+	ERROR_BAD_DEVICE_PATH                                                     syscall.Errno = 330
+	ERROR_TOO_MANY_DESCRIPTORS                                                syscall.Errno = 331
+	ERROR_SCRUB_DATA_DISABLED                                                 syscall.Errno = 332
+	ERROR_NOT_REDUNDANT_STORAGE                                               syscall.Errno = 333
+	ERROR_RESIDENT_FILE_NOT_SUPPORTED                                         syscall.Errno = 334
+	ERROR_COMPRESSED_FILE_NOT_SUPPORTED                                       syscall.Errno = 335
+	ERROR_DIRECTORY_NOT_SUPPORTED                                             syscall.Errno = 336
+	ERROR_NOT_READ_FROM_COPY                                                  syscall.Errno = 337
+	ERROR_FT_WRITE_FAILURE                                                    syscall.Errno = 338
+	ERROR_FT_DI_SCAN_REQUIRED                                                 syscall.Errno = 339
+	ERROR_INVALID_KERNEL_INFO_VERSION                                         syscall.Errno = 340
+	ERROR_INVALID_PEP_INFO_VERSION                                            syscall.Errno = 341
+	ERROR_OBJECT_NOT_EXTERNALLY_BACKED                                        syscall.Errno = 342
+	ERROR_EXTERNAL_BACKING_PROVIDER_UNKNOWN                                   syscall.Errno = 343
+	ERROR_COMPRESSION_NOT_BENEFICIAL                                          syscall.Errno = 344
+	ERROR_STORAGE_TOPOLOGY_ID_MISMATCH                                        syscall.Errno = 345
+	ERROR_BLOCKED_BY_PARENTAL_CONTROLS                                        syscall.Errno = 346
+	ERROR_BLOCK_TOO_MANY_REFERENCES                                           syscall.Errno = 347
+	ERROR_MARKED_TO_DISALLOW_WRITES                                           syscall.Errno = 348
+	ERROR_ENCLAVE_FAILURE                                                     syscall.Errno = 349
+	ERROR_FAIL_NOACTION_REBOOT                                                syscall.Errno = 350
+	ERROR_FAIL_SHUTDOWN                                                       syscall.Errno = 351
+	ERROR_FAIL_RESTART                                                        syscall.Errno = 352
+	ERROR_MAX_SESSIONS_REACHED                                                syscall.Errno = 353
+	ERROR_NETWORK_ACCESS_DENIED_EDP                                           syscall.Errno = 354
+	ERROR_DEVICE_HINT_NAME_BUFFER_TOO_SMALL                                   syscall.Errno = 355
+	ERROR_EDP_POLICY_DENIES_OPERATION                                         syscall.Errno = 356
+	ERROR_EDP_DPL_POLICY_CANT_BE_SATISFIED                                    syscall.Errno = 357
+	ERROR_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT                               syscall.Errno = 358
+	ERROR_DEVICE_IN_MAINTENANCE                                               syscall.Errno = 359
+	ERROR_NOT_SUPPORTED_ON_DAX                                                syscall.Errno = 360
+	ERROR_DAX_MAPPING_EXISTS                                                  syscall.Errno = 361
+	ERROR_CLOUD_FILE_PROVIDER_NOT_RUNNING                                     syscall.Errno = 362
+	ERROR_CLOUD_FILE_METADATA_CORRUPT                                         syscall.Errno = 363
+	ERROR_CLOUD_FILE_METADATA_TOO_LARGE                                       syscall.Errno = 364
+	ERROR_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE                                  syscall.Errno = 365
+	ERROR_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH                          syscall.Errno = 366
+	ERROR_CHILD_PROCESS_BLOCKED                                               syscall.Errno = 367
+	ERROR_STORAGE_LOST_DATA_PERSISTENCE                                       syscall.Errno = 368
+	ERROR_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE                              syscall.Errno = 369
+	ERROR_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT                         syscall.Errno = 370
+	ERROR_FILE_SYSTEM_VIRTUALIZATION_BUSY                                     syscall.Errno = 371
+	ERROR_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN                         syscall.Errno = 372
+	ERROR_GDI_HANDLE_LEAK                                                     syscall.Errno = 373
+	ERROR_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS                                  syscall.Errno = 374
+	ERROR_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED                           syscall.Errno = 375
+	ERROR_NOT_A_CLOUD_FILE                                                    syscall.Errno = 376
+	ERROR_CLOUD_FILE_NOT_IN_SYNC                                              syscall.Errno = 377
+	ERROR_CLOUD_FILE_ALREADY_CONNECTED                                        syscall.Errno = 378
+	ERROR_CLOUD_FILE_NOT_SUPPORTED                                            syscall.Errno = 379
+	ERROR_CLOUD_FILE_INVALID_REQUEST                                          syscall.Errno = 380
+	ERROR_CLOUD_FILE_READ_ONLY_VOLUME                                         syscall.Errno = 381
+	ERROR_CLOUD_FILE_CONNECTED_PROVIDER_ONLY                                  syscall.Errno = 382
+	ERROR_CLOUD_FILE_VALIDATION_FAILED                                        syscall.Errno = 383
+	ERROR_SMB1_NOT_AVAILABLE                                                  syscall.Errno = 384
+	ERROR_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION                        syscall.Errno = 385
+	ERROR_CLOUD_FILE_AUTHENTICATION_FAILED                                    syscall.Errno = 386
+	ERROR_CLOUD_FILE_INSUFFICIENT_RESOURCES                                   syscall.Errno = 387
+	ERROR_CLOUD_FILE_NETWORK_UNAVAILABLE                                      syscall.Errno = 388
+	ERROR_CLOUD_FILE_UNSUCCESSFUL                                             syscall.Errno = 389
+	ERROR_CLOUD_FILE_NOT_UNDER_SYNC_ROOT                                      syscall.Errno = 390
+	ERROR_CLOUD_FILE_IN_USE                                                   syscall.Errno = 391
+	ERROR_CLOUD_FILE_PINNED                                                   syscall.Errno = 392
+	ERROR_CLOUD_FILE_REQUEST_ABORTED                                          syscall.Errno = 393
+	ERROR_CLOUD_FILE_PROPERTY_CORRUPT                                         syscall.Errno = 394
+	ERROR_CLOUD_FILE_ACCESS_DENIED                                            syscall.Errno = 395
+	ERROR_CLOUD_FILE_INCOMPATIBLE_HARDLINKS                                   syscall.Errno = 396
+	ERROR_CLOUD_FILE_PROPERTY_LOCK_CONFLICT                                   syscall.Errno = 397
+	ERROR_CLOUD_FILE_REQUEST_CANCELED                                         syscall.Errno = 398
+	ERROR_EXTERNAL_SYSKEY_NOT_SUPPORTED                                       syscall.Errno = 399
+	ERROR_THREAD_MODE_ALREADY_BACKGROUND                                      syscall.Errno = 400
+	ERROR_THREAD_MODE_NOT_BACKGROUND                                          syscall.Errno = 401
+	ERROR_PROCESS_MODE_ALREADY_BACKGROUND                                     syscall.Errno = 402
+	ERROR_PROCESS_MODE_NOT_BACKGROUND                                         syscall.Errno = 403
+	ERROR_CLOUD_FILE_PROVIDER_TERMINATED                                      syscall.Errno = 404
+	ERROR_NOT_A_CLOUD_SYNC_ROOT                                               syscall.Errno = 405
+	ERROR_FILE_PROTECTED_UNDER_DPL                                            syscall.Errno = 406
+	ERROR_VOLUME_NOT_CLUSTER_ALIGNED                                          syscall.Errno = 407
+	ERROR_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND                              syscall.Errno = 408
+	ERROR_APPX_FILE_NOT_ENCRYPTED                                             syscall.Errno = 409
+	ERROR_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED                                  syscall.Errno = 410
+	ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET                        syscall.Errno = 411
+	ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE                         syscall.Errno = 412
+	ERROR_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER                         syscall.Errno = 413
+	ERROR_LINUX_SUBSYSTEM_NOT_PRESENT                                         syscall.Errno = 414
+	ERROR_FT_READ_FAILURE                                                     syscall.Errno = 415
+	ERROR_STORAGE_RESERVE_ID_INVALID                                          syscall.Errno = 416
+	ERROR_STORAGE_RESERVE_DOES_NOT_EXIST                                      syscall.Errno = 417
+	ERROR_STORAGE_RESERVE_ALREADY_EXISTS                                      syscall.Errno = 418
+	ERROR_STORAGE_RESERVE_NOT_EMPTY                                           syscall.Errno = 419
+	ERROR_NOT_A_DAX_VOLUME                                                    syscall.Errno = 420
+	ERROR_NOT_DAX_MAPPABLE                                                    syscall.Errno = 421
+	ERROR_TIME_SENSITIVE_THREAD                                               syscall.Errno = 422
+	ERROR_DPL_NOT_SUPPORTED_FOR_USER                                          syscall.Errno = 423
+	ERROR_CASE_DIFFERING_NAMES_IN_DIR                                         syscall.Errno = 424
+	ERROR_FILE_NOT_SUPPORTED                                                  syscall.Errno = 425
+	ERROR_CLOUD_FILE_REQUEST_TIMEOUT                                          syscall.Errno = 426
+	ERROR_NO_TASK_QUEUE                                                       syscall.Errno = 427
+	ERROR_SRC_SRV_DLL_LOAD_FAILED                                             syscall.Errno = 428
+	ERROR_NOT_SUPPORTED_WITH_BTT                                              syscall.Errno = 429
+	ERROR_ENCRYPTION_DISABLED                                                 syscall.Errno = 430
+	ERROR_ENCRYPTING_METADATA_DISALLOWED                                      syscall.Errno = 431
+	ERROR_CANT_CLEAR_ENCRYPTION_FLAG                                          syscall.Errno = 432
+	ERROR_NO_SUCH_DEVICE                                                      syscall.Errno = 433
+	ERROR_CAPAUTHZ_NOT_DEVUNLOCKED                                            syscall.Errno = 450
+	ERROR_CAPAUTHZ_CHANGE_TYPE                                                syscall.Errno = 451
+	ERROR_CAPAUTHZ_NOT_PROVISIONED                                            syscall.Errno = 452
+	ERROR_CAPAUTHZ_NOT_AUTHORIZED                                             syscall.Errno = 453
+	ERROR_CAPAUTHZ_NO_POLICY                                                  syscall.Errno = 454
+	ERROR_CAPAUTHZ_DB_CORRUPTED                                               syscall.Errno = 455
+	ERROR_CAPAUTHZ_SCCD_INVALID_CATALOG                                       syscall.Errno = 456
+	ERROR_CAPAUTHZ_SCCD_NO_AUTH_ENTITY                                        syscall.Errno = 457
+	ERROR_CAPAUTHZ_SCCD_PARSE_ERROR                                           syscall.Errno = 458
+	ERROR_CAPAUTHZ_SCCD_DEV_MODE_REQUIRED                                     syscall.Errno = 459
+	ERROR_CAPAUTHZ_SCCD_NO_CAPABILITY_MATCH                                   syscall.Errno = 460
+	ERROR_PNP_QUERY_REMOVE_DEVICE_TIMEOUT                                     syscall.Errno = 480
+	ERROR_PNP_QUERY_REMOVE_RELATED_DEVICE_TIMEOUT                             syscall.Errno = 481
+	ERROR_PNP_QUERY_REMOVE_UNRELATED_DEVICE_TIMEOUT                           syscall.Errno = 482
+	ERROR_DEVICE_HARDWARE_ERROR                                               syscall.Errno = 483
+	ERROR_INVALID_ADDRESS                                                     syscall.Errno = 487
+	ERROR_VRF_CFG_ENABLED                                                     syscall.Errno = 1183
+	ERROR_PARTITION_TERMINATING                                               syscall.Errno = 1184
+	ERROR_USER_PROFILE_LOAD                                                   syscall.Errno = 500
+	ERROR_ARITHMETIC_OVERFLOW                                                 syscall.Errno = 534
+	ERROR_PIPE_CONNECTED                                                      syscall.Errno = 535
+	ERROR_PIPE_LISTENING                                                      syscall.Errno = 536
+	ERROR_VERIFIER_STOP                                                       syscall.Errno = 537
+	ERROR_ABIOS_ERROR                                                         syscall.Errno = 538
+	ERROR_WX86_WARNING                                                        syscall.Errno = 539
+	ERROR_WX86_ERROR                                                          syscall.Errno = 540
+	ERROR_TIMER_NOT_CANCELED                                                  syscall.Errno = 541
+	ERROR_UNWIND                                                              syscall.Errno = 542
+	ERROR_BAD_STACK                                                           syscall.Errno = 543
+	ERROR_INVALID_UNWIND_TARGET                                               syscall.Errno = 544
+	ERROR_INVALID_PORT_ATTRIBUTES                                             syscall.Errno = 545
+	ERROR_PORT_MESSAGE_TOO_LONG                                               syscall.Errno = 546
+	ERROR_INVALID_QUOTA_LOWER                                                 syscall.Errno = 547
+	ERROR_DEVICE_ALREADY_ATTACHED                                             syscall.Errno = 548
+	ERROR_INSTRUCTION_MISALIGNMENT                                            syscall.Errno = 549
+	ERROR_PROFILING_NOT_STARTED                                               syscall.Errno = 550
+	ERROR_PROFILING_NOT_STOPPED                                               syscall.Errno = 551
+	ERROR_COULD_NOT_INTERPRET                                                 syscall.Errno = 552
+	ERROR_PROFILING_AT_LIMIT                                                  syscall.Errno = 553
+	ERROR_CANT_WAIT                                                           syscall.Errno = 554
+	ERROR_CANT_TERMINATE_SELF                                                 syscall.Errno = 555
+	ERROR_UNEXPECTED_MM_CREATE_ERR                                            syscall.Errno = 556
+	ERROR_UNEXPECTED_MM_MAP_ERROR                                             syscall.Errno = 557
+	ERROR_UNEXPECTED_MM_EXTEND_ERR                                            syscall.Errno = 558
+	ERROR_BAD_FUNCTION_TABLE                                                  syscall.Errno = 559
+	ERROR_NO_GUID_TRANSLATION                                                 syscall.Errno = 560
+	ERROR_INVALID_LDT_SIZE                                                    syscall.Errno = 561
+	ERROR_INVALID_LDT_OFFSET                                                  syscall.Errno = 563
+	ERROR_INVALID_LDT_DESCRIPTOR                                              syscall.Errno = 564
+	ERROR_TOO_MANY_THREADS                                                    syscall.Errno = 565
+	ERROR_THREAD_NOT_IN_PROCESS                                               syscall.Errno = 566
+	ERROR_PAGEFILE_QUOTA_EXCEEDED                                             syscall.Errno = 567
+	ERROR_LOGON_SERVER_CONFLICT                                               syscall.Errno = 568
+	ERROR_SYNCHRONIZATION_REQUIRED                                            syscall.Errno = 569
+	ERROR_NET_OPEN_FAILED                                                     syscall.Errno = 570
+	ERROR_IO_PRIVILEGE_FAILED                                                 syscall.Errno = 571
+	ERROR_CONTROL_C_EXIT                                                      syscall.Errno = 572
+	ERROR_MISSING_SYSTEMFILE                                                  syscall.Errno = 573
+	ERROR_UNHANDLED_EXCEPTION                                                 syscall.Errno = 574
+	ERROR_APP_INIT_FAILURE                                                    syscall.Errno = 575
+	ERROR_PAGEFILE_CREATE_FAILED                                              syscall.Errno = 576
+	ERROR_INVALID_IMAGE_HASH                                                  syscall.Errno = 577
+	ERROR_NO_PAGEFILE                                                         syscall.Errno = 578
+	ERROR_ILLEGAL_FLOAT_CONTEXT                                               syscall.Errno = 579
+	ERROR_NO_EVENT_PAIR                                                       syscall.Errno = 580
+	ERROR_DOMAIN_CTRLR_CONFIG_ERROR                                           syscall.Errno = 581
+	ERROR_ILLEGAL_CHARACTER                                                   syscall.Errno = 582
+	ERROR_UNDEFINED_CHARACTER                                                 syscall.Errno = 583
+	ERROR_FLOPPY_VOLUME                                                       syscall.Errno = 584
+	ERROR_BIOS_FAILED_TO_CONNECT_INTERRUPT                                    syscall.Errno = 585
+	ERROR_BACKUP_CONTROLLER                                                   syscall.Errno = 586
+	ERROR_MUTANT_LIMIT_EXCEEDED                                               syscall.Errno = 587
+	ERROR_FS_DRIVER_REQUIRED                                                  syscall.Errno = 588
+	ERROR_CANNOT_LOAD_REGISTRY_FILE                                           syscall.Errno = 589
+	ERROR_DEBUG_ATTACH_FAILED                                                 syscall.Errno = 590
+	ERROR_SYSTEM_PROCESS_TERMINATED                                           syscall.Errno = 591
+	ERROR_DATA_NOT_ACCEPTED                                                   syscall.Errno = 592
+	ERROR_VDM_HARD_ERROR                                                      syscall.Errno = 593
+	ERROR_DRIVER_CANCEL_TIMEOUT                                               syscall.Errno = 594
+	ERROR_REPLY_MESSAGE_MISMATCH                                              syscall.Errno = 595
+	ERROR_LOST_WRITEBEHIND_DATA                                               syscall.Errno = 596
+	ERROR_CLIENT_SERVER_PARAMETERS_INVALID                                    syscall.Errno = 597
+	ERROR_NOT_TINY_STREAM                                                     syscall.Errno = 598
+	ERROR_STACK_OVERFLOW_READ                                                 syscall.Errno = 599
+	ERROR_CONVERT_TO_LARGE                                                    syscall.Errno = 600
+	ERROR_FOUND_OUT_OF_SCOPE                                                  syscall.Errno = 601
+	ERROR_ALLOCATE_BUCKET                                                     syscall.Errno = 602
+	ERROR_MARSHALL_OVERFLOW                                                   syscall.Errno = 603
+	ERROR_INVALID_VARIANT                                                     syscall.Errno = 604
+	ERROR_BAD_COMPRESSION_BUFFER                                              syscall.Errno = 605
+	ERROR_AUDIT_FAILED                                                        syscall.Errno = 606
+	ERROR_TIMER_RESOLUTION_NOT_SET                                            syscall.Errno = 607
+	ERROR_INSUFFICIENT_LOGON_INFO                                             syscall.Errno = 608
+	ERROR_BAD_DLL_ENTRYPOINT                                                  syscall.Errno = 609
+	ERROR_BAD_SERVICE_ENTRYPOINT                                              syscall.Errno = 610
+	ERROR_IP_ADDRESS_CONFLICT1                                                syscall.Errno = 611
+	ERROR_IP_ADDRESS_CONFLICT2                                                syscall.Errno = 612
+	ERROR_REGISTRY_QUOTA_LIMIT                                                syscall.Errno = 613
+	ERROR_NO_CALLBACK_ACTIVE                                                  syscall.Errno = 614
+	ERROR_PWD_TOO_SHORT                                                       syscall.Errno = 615
+	ERROR_PWD_TOO_RECENT                                                      syscall.Errno = 616
+	ERROR_PWD_HISTORY_CONFLICT                                                syscall.Errno = 617
+	ERROR_UNSUPPORTED_COMPRESSION                                             syscall.Errno = 618
+	ERROR_INVALID_HW_PROFILE                                                  syscall.Errno = 619
+	ERROR_INVALID_PLUGPLAY_DEVICE_PATH                                        syscall.Errno = 620
+	ERROR_QUOTA_LIST_INCONSISTENT                                             syscall.Errno = 621
+	ERROR_EVALUATION_EXPIRATION                                               syscall.Errno = 622
+	ERROR_ILLEGAL_DLL_RELOCATION                                              syscall.Errno = 623
+	ERROR_DLL_INIT_FAILED_LOGOFF                                              syscall.Errno = 624
+	ERROR_VALIDATE_CONTINUE                                                   syscall.Errno = 625
+	ERROR_NO_MORE_MATCHES                                                     syscall.Errno = 626
+	ERROR_RANGE_LIST_CONFLICT                                                 syscall.Errno = 627
+	ERROR_SERVER_SID_MISMATCH                                                 syscall.Errno = 628
+	ERROR_CANT_ENABLE_DENY_ONLY                                               syscall.Errno = 629
+	ERROR_FLOAT_MULTIPLE_FAULTS                                               syscall.Errno = 630
+	ERROR_FLOAT_MULTIPLE_TRAPS                                                syscall.Errno = 631
+	ERROR_NOINTERFACE                                                         syscall.Errno = 632
+	ERROR_DRIVER_FAILED_SLEEP                                                 syscall.Errno = 633
+	ERROR_CORRUPT_SYSTEM_FILE                                                 syscall.Errno = 634
+	ERROR_COMMITMENT_MINIMUM                                                  syscall.Errno = 635
+	ERROR_PNP_RESTART_ENUMERATION                                             syscall.Errno = 636
+	ERROR_SYSTEM_IMAGE_BAD_SIGNATURE                                          syscall.Errno = 637
+	ERROR_PNP_REBOOT_REQUIRED                                                 syscall.Errno = 638
+	ERROR_INSUFFICIENT_POWER                                                  syscall.Errno = 639
+	ERROR_MULTIPLE_FAULT_VIOLATION                                            syscall.Errno = 640
+	ERROR_SYSTEM_SHUTDOWN                                                     syscall.Errno = 641
+	ERROR_PORT_NOT_SET                                                        syscall.Errno = 642
+	ERROR_DS_VERSION_CHECK_FAILURE                                            syscall.Errno = 643
+	ERROR_RANGE_NOT_FOUND                                                     syscall.Errno = 644
+	ERROR_NOT_SAFE_MODE_DRIVER                                                syscall.Errno = 646
+	ERROR_FAILED_DRIVER_ENTRY                                                 syscall.Errno = 647
+	ERROR_DEVICE_ENUMERATION_ERROR                                            syscall.Errno = 648
+	ERROR_MOUNT_POINT_NOT_RESOLVED                                            syscall.Errno = 649
+	ERROR_INVALID_DEVICE_OBJECT_PARAMETER                                     syscall.Errno = 650
+	ERROR_MCA_OCCURED                                                         syscall.Errno = 651
+	ERROR_DRIVER_DATABASE_ERROR                                               syscall.Errno = 652
+	ERROR_SYSTEM_HIVE_TOO_LARGE                                               syscall.Errno = 653
+	ERROR_DRIVER_FAILED_PRIOR_UNLOAD                                          syscall.Errno = 654
+	ERROR_VOLSNAP_PREPARE_HIBERNATE                                           syscall.Errno = 655
+	ERROR_HIBERNATION_FAILURE                                                 syscall.Errno = 656
+	ERROR_PWD_TOO_LONG                                                        syscall.Errno = 657
+	ERROR_FILE_SYSTEM_LIMITATION                                              syscall.Errno = 665
+	ERROR_ASSERTION_FAILURE                                                   syscall.Errno = 668
+	ERROR_ACPI_ERROR                                                          syscall.Errno = 669
+	ERROR_WOW_ASSERTION                                                       syscall.Errno = 670
+	ERROR_PNP_BAD_MPS_TABLE                                                   syscall.Errno = 671
+	ERROR_PNP_TRANSLATION_FAILED                                              syscall.Errno = 672
+	ERROR_PNP_IRQ_TRANSLATION_FAILED                                          syscall.Errno = 673
+	ERROR_PNP_INVALID_ID                                                      syscall.Errno = 674
+	ERROR_WAKE_SYSTEM_DEBUGGER                                                syscall.Errno = 675
+	ERROR_HANDLES_CLOSED                                                      syscall.Errno = 676
+	ERROR_EXTRANEOUS_INFORMATION                                              syscall.Errno = 677
+	ERROR_RXACT_COMMIT_NECESSARY                                              syscall.Errno = 678
+	ERROR_MEDIA_CHECK                                                         syscall.Errno = 679
+	ERROR_GUID_SUBSTITUTION_MADE                                              syscall.Errno = 680
+	ERROR_STOPPED_ON_SYMLINK                                                  syscall.Errno = 681
+	ERROR_LONGJUMP                                                            syscall.Errno = 682
+	ERROR_PLUGPLAY_QUERY_VETOED                                               syscall.Errno = 683
+	ERROR_UNWIND_CONSOLIDATE                                                  syscall.Errno = 684
+	ERROR_REGISTRY_HIVE_RECOVERED                                             syscall.Errno = 685
+	ERROR_DLL_MIGHT_BE_INSECURE                                               syscall.Errno = 686
+	ERROR_DLL_MIGHT_BE_INCOMPATIBLE                                           syscall.Errno = 687
+	ERROR_DBG_EXCEPTION_NOT_HANDLED                                           syscall.Errno = 688
+	ERROR_DBG_REPLY_LATER                                                     syscall.Errno = 689
+	ERROR_DBG_UNABLE_TO_PROVIDE_HANDLE                                        syscall.Errno = 690
+	ERROR_DBG_TERMINATE_THREAD                                                syscall.Errno = 691
+	ERROR_DBG_TERMINATE_PROCESS                                               syscall.Errno = 692
+	ERROR_DBG_CONTROL_C                                                       syscall.Errno = 693
+	ERROR_DBG_PRINTEXCEPTION_C                                                syscall.Errno = 694
+	ERROR_DBG_RIPEXCEPTION                                                    syscall.Errno = 695
+	ERROR_DBG_CONTROL_BREAK                                                   syscall.Errno = 696
+	ERROR_DBG_COMMAND_EXCEPTION                                               syscall.Errno = 697
+	ERROR_OBJECT_NAME_EXISTS                                                  syscall.Errno = 698
+	ERROR_THREAD_WAS_SUSPENDED                                                syscall.Errno = 699
+	ERROR_IMAGE_NOT_AT_BASE                                                   syscall.Errno = 700
+	ERROR_RXACT_STATE_CREATED                                                 syscall.Errno = 701
+	ERROR_SEGMENT_NOTIFICATION                                                syscall.Errno = 702
+	ERROR_BAD_CURRENT_DIRECTORY                                               syscall.Errno = 703
+	ERROR_FT_READ_RECOVERY_FROM_BACKUP                                        syscall.Errno = 704
+	ERROR_FT_WRITE_RECOVERY                                                   syscall.Errno = 705
+	ERROR_IMAGE_MACHINE_TYPE_MISMATCH                                         syscall.Errno = 706
+	ERROR_RECEIVE_PARTIAL                                                     syscall.Errno = 707
+	ERROR_RECEIVE_EXPEDITED                                                   syscall.Errno = 708
+	ERROR_RECEIVE_PARTIAL_EXPEDITED                                           syscall.Errno = 709
+	ERROR_EVENT_DONE                                                          syscall.Errno = 710
+	ERROR_EVENT_PENDING                                                       syscall.Errno = 711
+	ERROR_CHECKING_FILE_SYSTEM                                                syscall.Errno = 712
+	ERROR_FATAL_APP_EXIT                                                      syscall.Errno = 713
+	ERROR_PREDEFINED_HANDLE                                                   syscall.Errno = 714
+	ERROR_WAS_UNLOCKED                                                        syscall.Errno = 715
+	ERROR_SERVICE_NOTIFICATION                                                syscall.Errno = 716
+	ERROR_WAS_LOCKED                                                          syscall.Errno = 717
+	ERROR_LOG_HARD_ERROR                                                      syscall.Errno = 718
+	ERROR_ALREADY_WIN32                                                       syscall.Errno = 719
+	ERROR_IMAGE_MACHINE_TYPE_MISMATCH_EXE                                     syscall.Errno = 720
+	ERROR_NO_YIELD_PERFORMED                                                  syscall.Errno = 721
+	ERROR_TIMER_RESUME_IGNORED                                                syscall.Errno = 722
+	ERROR_ARBITRATION_UNHANDLED                                               syscall.Errno = 723
+	ERROR_CARDBUS_NOT_SUPPORTED                                               syscall.Errno = 724
+	ERROR_MP_PROCESSOR_MISMATCH                                               syscall.Errno = 725
+	ERROR_HIBERNATED                                                          syscall.Errno = 726
+	ERROR_RESUME_HIBERNATION                                                  syscall.Errno = 727
+	ERROR_FIRMWARE_UPDATED                                                    syscall.Errno = 728
+	ERROR_DRIVERS_LEAKING_LOCKED_PAGES                                        syscall.Errno = 729
+	ERROR_WAKE_SYSTEM                                                         syscall.Errno = 730
+	ERROR_WAIT_1                                                              syscall.Errno = 731
+	ERROR_WAIT_2                                                              syscall.Errno = 732
+	ERROR_WAIT_3                                                              syscall.Errno = 733
+	ERROR_WAIT_63                                                             syscall.Errno = 734
+	ERROR_ABANDONED_WAIT_0                                                    syscall.Errno = 735
+	ERROR_ABANDONED_WAIT_63                                                   syscall.Errno = 736
+	ERROR_USER_APC                                                            syscall.Errno = 737
+	ERROR_KERNEL_APC                                                          syscall.Errno = 738
+	ERROR_ALERTED                                                             syscall.Errno = 739
+	ERROR_ELEVATION_REQUIRED                                                  syscall.Errno = 740
+	ERROR_REPARSE                                                             syscall.Errno = 741
+	ERROR_OPLOCK_BREAK_IN_PROGRESS                                            syscall.Errno = 742
+	ERROR_VOLUME_MOUNTED                                                      syscall.Errno = 743
+	ERROR_RXACT_COMMITTED                                                     syscall.Errno = 744
+	ERROR_NOTIFY_CLEANUP                                                      syscall.Errno = 745
+	ERROR_PRIMARY_TRANSPORT_CONNECT_FAILED                                    syscall.Errno = 746
+	ERROR_PAGE_FAULT_TRANSITION                                               syscall.Errno = 747
+	ERROR_PAGE_FAULT_DEMAND_ZERO                                              syscall.Errno = 748
+	ERROR_PAGE_FAULT_COPY_ON_WRITE                                            syscall.Errno = 749
+	ERROR_PAGE_FAULT_GUARD_PAGE                                               syscall.Errno = 750
+	ERROR_PAGE_FAULT_PAGING_FILE                                              syscall.Errno = 751
+	ERROR_CACHE_PAGE_LOCKED                                                   syscall.Errno = 752
+	ERROR_CRASH_DUMP                                                          syscall.Errno = 753
+	ERROR_BUFFER_ALL_ZEROS                                                    syscall.Errno = 754
+	ERROR_REPARSE_OBJECT                                                      syscall.Errno = 755
+	ERROR_RESOURCE_REQUIREMENTS_CHANGED                                       syscall.Errno = 756
+	ERROR_TRANSLATION_COMPLETE                                                syscall.Errno = 757
+	ERROR_NOTHING_TO_TERMINATE                                                syscall.Errno = 758
+	ERROR_PROCESS_NOT_IN_JOB                                                  syscall.Errno = 759
+	ERROR_PROCESS_IN_JOB                                                      syscall.Errno = 760
+	ERROR_VOLSNAP_HIBERNATE_READY                                             syscall.Errno = 761
+	ERROR_FSFILTER_OP_COMPLETED_SUCCESSFULLY                                  syscall.Errno = 762
+	ERROR_INTERRUPT_VECTOR_ALREADY_CONNECTED                                  syscall.Errno = 763
+	ERROR_INTERRUPT_STILL_CONNECTED                                           syscall.Errno = 764
+	ERROR_WAIT_FOR_OPLOCK                                                     syscall.Errno = 765
+	ERROR_DBG_EXCEPTION_HANDLED                                               syscall.Errno = 766
+	ERROR_DBG_CONTINUE                                                        syscall.Errno = 767
+	ERROR_CALLBACK_POP_STACK                                                  syscall.Errno = 768
+	ERROR_COMPRESSION_DISABLED                                                syscall.Errno = 769
+	ERROR_CANTFETCHBACKWARDS                                                  syscall.Errno = 770
+	ERROR_CANTSCROLLBACKWARDS                                                 syscall.Errno = 771
+	ERROR_ROWSNOTRELEASED                                                     syscall.Errno = 772
+	ERROR_BAD_ACCESSOR_FLAGS                                                  syscall.Errno = 773
+	ERROR_ERRORS_ENCOUNTERED                                                  syscall.Errno = 774
+	ERROR_NOT_CAPABLE                                                         syscall.Errno = 775
+	ERROR_REQUEST_OUT_OF_SEQUENCE                                             syscall.Errno = 776
+	ERROR_VERSION_PARSE_ERROR                                                 syscall.Errno = 777
+	ERROR_BADSTARTPOSITION                                                    syscall.Errno = 778
+	ERROR_MEMORY_HARDWARE                                                     syscall.Errno = 779
+	ERROR_DISK_REPAIR_DISABLED                                                syscall.Errno = 780
+	ERROR_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE             syscall.Errno = 781
+	ERROR_SYSTEM_POWERSTATE_TRANSITION                                        syscall.Errno = 782
+	ERROR_SYSTEM_POWERSTATE_COMPLEX_TRANSITION                                syscall.Errno = 783
+	ERROR_MCA_EXCEPTION                                                       syscall.Errno = 784
+	ERROR_ACCESS_AUDIT_BY_POLICY                                              syscall.Errno = 785
+	ERROR_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY                               syscall.Errno = 786
+	ERROR_ABANDON_HIBERFILE                                                   syscall.Errno = 787
+	ERROR_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED                          syscall.Errno = 788
+	ERROR_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR                          syscall.Errno = 789
+	ERROR_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR                              syscall.Errno = 790
+	ERROR_BAD_MCFG_TABLE                                                      syscall.Errno = 791
+	ERROR_DISK_REPAIR_REDIRECTED                                              syscall.Errno = 792
+	ERROR_DISK_REPAIR_UNSUCCESSFUL                                            syscall.Errno = 793
+	ERROR_CORRUPT_LOG_OVERFULL                                                syscall.Errno = 794
+	ERROR_CORRUPT_LOG_CORRUPTED                                               syscall.Errno = 795
+	ERROR_CORRUPT_LOG_UNAVAILABLE                                             syscall.Errno = 796
+	ERROR_CORRUPT_LOG_DELETED_FULL                                            syscall.Errno = 797
+	ERROR_CORRUPT_LOG_CLEARED                                                 syscall.Errno = 798
+	ERROR_ORPHAN_NAME_EXHAUSTED                                               syscall.Errno = 799
+	ERROR_OPLOCK_SWITCHED_TO_NEW_HANDLE                                       syscall.Errno = 800
+	ERROR_CANNOT_GRANT_REQUESTED_OPLOCK                                       syscall.Errno = 801
+	ERROR_CANNOT_BREAK_OPLOCK                                                 syscall.Errno = 802
+	ERROR_OPLOCK_HANDLE_CLOSED                                                syscall.Errno = 803
+	ERROR_NO_ACE_CONDITION                                                    syscall.Errno = 804
+	ERROR_INVALID_ACE_CONDITION                                               syscall.Errno = 805
+	ERROR_FILE_HANDLE_REVOKED                                                 syscall.Errno = 806
+	ERROR_IMAGE_AT_DIFFERENT_BASE                                             syscall.Errno = 807
+	ERROR_ENCRYPTED_IO_NOT_POSSIBLE                                           syscall.Errno = 808
+	ERROR_FILE_METADATA_OPTIMIZATION_IN_PROGRESS                              syscall.Errno = 809
+	ERROR_QUOTA_ACTIVITY                                                      syscall.Errno = 810
+	ERROR_HANDLE_REVOKED                                                      syscall.Errno = 811
+	ERROR_CALLBACK_INVOKE_INLINE                                              syscall.Errno = 812
+	ERROR_CPU_SET_INVALID                                                     syscall.Errno = 813
+	ERROR_ENCLAVE_NOT_TERMINATED                                              syscall.Errno = 814
+	ERROR_ENCLAVE_VIOLATION                                                   syscall.Errno = 815
+	ERROR_EA_ACCESS_DENIED                                                    syscall.Errno = 994
+	ERROR_OPERATION_ABORTED                                                   syscall.Errno = 995
+	ERROR_IO_INCOMPLETE                                                       syscall.Errno = 996
+	ERROR_IO_PENDING                                                          syscall.Errno = 997
+	ERROR_NOACCESS                                                            syscall.Errno = 998
+	ERROR_SWAPERROR                                                           syscall.Errno = 999
+	ERROR_STACK_OVERFLOW                                                      syscall.Errno = 1001
+	ERROR_INVALID_MESSAGE                                                     syscall.Errno = 1002
+	ERROR_CAN_NOT_COMPLETE                                                    syscall.Errno = 1003
+	ERROR_INVALID_FLAGS                                                       syscall.Errno = 1004
+	ERROR_UNRECOGNIZED_VOLUME                                                 syscall.Errno = 1005
+	ERROR_FILE_INVALID                                                        syscall.Errno = 1006
+	ERROR_FULLSCREEN_MODE                                                     syscall.Errno = 1007
+	ERROR_NO_TOKEN                                                            syscall.Errno = 1008
+	ERROR_BADDB                                                               syscall.Errno = 1009
+	ERROR_BADKEY                                                              syscall.Errno = 1010
+	ERROR_CANTOPEN                                                            syscall.Errno = 1011
+	ERROR_CANTREAD                                                            syscall.Errno = 1012
+	ERROR_CANTWRITE                                                           syscall.Errno = 1013
+	ERROR_REGISTRY_RECOVERED                                                  syscall.Errno = 1014
+	ERROR_REGISTRY_CORRUPT                                                    syscall.Errno = 1015
+	ERROR_REGISTRY_IO_FAILED                                                  syscall.Errno = 1016
+	ERROR_NOT_REGISTRY_FILE                                                   syscall.Errno = 1017
+	ERROR_KEY_DELETED                                                         syscall.Errno = 1018
+	ERROR_NO_LOG_SPACE                                                        syscall.Errno = 1019
+	ERROR_KEY_HAS_CHILDREN                                                    syscall.Errno = 1020
+	ERROR_CHILD_MUST_BE_VOLATILE                                              syscall.Errno = 1021
+	ERROR_NOTIFY_ENUM_DIR                                                     syscall.Errno = 1022
+	ERROR_DEPENDENT_SERVICES_RUNNING                                          syscall.Errno = 1051
+	ERROR_INVALID_SERVICE_CONTROL                                             syscall.Errno = 1052
+	ERROR_SERVICE_REQUEST_TIMEOUT                                             syscall.Errno = 1053
+	ERROR_SERVICE_NO_THREAD                                                   syscall.Errno = 1054
+	ERROR_SERVICE_DATABASE_LOCKED                                             syscall.Errno = 1055
+	ERROR_SERVICE_ALREADY_RUNNING                                             syscall.Errno = 1056
+	ERROR_INVALID_SERVICE_ACCOUNT                                             syscall.Errno = 1057
+	ERROR_SERVICE_DISABLED                                                    syscall.Errno = 1058
+	ERROR_CIRCULAR_DEPENDENCY                                                 syscall.Errno = 1059
+	ERROR_SERVICE_DOES_NOT_EXIST                                              syscall.Errno = 1060
+	ERROR_SERVICE_CANNOT_ACCEPT_CTRL                                          syscall.Errno = 1061
+	ERROR_SERVICE_NOT_ACTIVE                                                  syscall.Errno = 1062
+	ERROR_FAILED_SERVICE_CONTROLLER_CONNECT                                   syscall.Errno = 1063
+	ERROR_EXCEPTION_IN_SERVICE                                                syscall.Errno = 1064
+	ERROR_DATABASE_DOES_NOT_EXIST                                             syscall.Errno = 1065
+	ERROR_SERVICE_SPECIFIC_ERROR                                              syscall.Errno = 1066
+	ERROR_PROCESS_ABORTED                                                     syscall.Errno = 1067
+	ERROR_SERVICE_DEPENDENCY_FAIL                                             syscall.Errno = 1068
+	ERROR_SERVICE_LOGON_FAILED                                                syscall.Errno = 1069
+	ERROR_SERVICE_START_HANG                                                  syscall.Errno = 1070
+	ERROR_INVALID_SERVICE_LOCK                                                syscall.Errno = 1071
+	ERROR_SERVICE_MARKED_FOR_DELETE                                           syscall.Errno = 1072
+	ERROR_SERVICE_EXISTS                                                      syscall.Errno = 1073
+	ERROR_ALREADY_RUNNING_LKG                                                 syscall.Errno = 1074
+	ERROR_SERVICE_DEPENDENCY_DELETED                                          syscall.Errno = 1075
+	ERROR_BOOT_ALREADY_ACCEPTED                                               syscall.Errno = 1076
+	ERROR_SERVICE_NEVER_STARTED                                               syscall.Errno = 1077
+	ERROR_DUPLICATE_SERVICE_NAME                                              syscall.Errno = 1078
+	ERROR_DIFFERENT_SERVICE_ACCOUNT                                           syscall.Errno = 1079
+	ERROR_CANNOT_DETECT_DRIVER_FAILURE                                        syscall.Errno = 1080
+	ERROR_CANNOT_DETECT_PROCESS_ABORT                                         syscall.Errno = 1081
+	ERROR_NO_RECOVERY_PROGRAM                                                 syscall.Errno = 1082
+	ERROR_SERVICE_NOT_IN_EXE                                                  syscall.Errno = 1083
+	ERROR_NOT_SAFEBOOT_SERVICE                                                syscall.Errno = 1084
+	ERROR_END_OF_MEDIA                                                        syscall.Errno = 1100
+	ERROR_FILEMARK_DETECTED                                                   syscall.Errno = 1101
+	ERROR_BEGINNING_OF_MEDIA                                                  syscall.Errno = 1102
+	ERROR_SETMARK_DETECTED                                                    syscall.Errno = 1103
+	ERROR_NO_DATA_DETECTED                                                    syscall.Errno = 1104
+	ERROR_PARTITION_FAILURE                                                   syscall.Errno = 1105
+	ERROR_INVALID_BLOCK_LENGTH                                                syscall.Errno = 1106
+	ERROR_DEVICE_NOT_PARTITIONED                                              syscall.Errno = 1107
+	ERROR_UNABLE_TO_LOCK_MEDIA                                                syscall.Errno = 1108
+	ERROR_UNABLE_TO_UNLOAD_MEDIA                                              syscall.Errno = 1109
+	ERROR_MEDIA_CHANGED                                                       syscall.Errno = 1110
+	ERROR_BUS_RESET                                                           syscall.Errno = 1111
+	ERROR_NO_MEDIA_IN_DRIVE                                                   syscall.Errno = 1112
+	ERROR_NO_UNICODE_TRANSLATION                                              syscall.Errno = 1113
+	ERROR_DLL_INIT_FAILED                                                     syscall.Errno = 1114
+	ERROR_SHUTDOWN_IN_PROGRESS                                                syscall.Errno = 1115
+	ERROR_NO_SHUTDOWN_IN_PROGRESS                                             syscall.Errno = 1116
+	ERROR_IO_DEVICE                                                           syscall.Errno = 1117
+	ERROR_SERIAL_NO_DEVICE                                                    syscall.Errno = 1118
+	ERROR_IRQ_BUSY                                                            syscall.Errno = 1119
+	ERROR_MORE_WRITES                                                         syscall.Errno = 1120
+	ERROR_COUNTER_TIMEOUT                                                     syscall.Errno = 1121
+	ERROR_FLOPPY_ID_MARK_NOT_FOUND                                            syscall.Errno = 1122
+	ERROR_FLOPPY_WRONG_CYLINDER                                               syscall.Errno = 1123
+	ERROR_FLOPPY_UNKNOWN_ERROR                                                syscall.Errno = 1124
+	ERROR_FLOPPY_BAD_REGISTERS                                                syscall.Errno = 1125
+	ERROR_DISK_RECALIBRATE_FAILED                                             syscall.Errno = 1126
+	ERROR_DISK_OPERATION_FAILED                                               syscall.Errno = 1127
+	ERROR_DISK_RESET_FAILED                                                   syscall.Errno = 1128
+	ERROR_EOM_OVERFLOW                                                        syscall.Errno = 1129
+	ERROR_NOT_ENOUGH_SERVER_MEMORY                                            syscall.Errno = 1130
+	ERROR_POSSIBLE_DEADLOCK                                                   syscall.Errno = 1131
+	ERROR_MAPPED_ALIGNMENT                                                    syscall.Errno = 1132
+	ERROR_SET_POWER_STATE_VETOED                                              syscall.Errno = 1140
+	ERROR_SET_POWER_STATE_FAILED                                              syscall.Errno = 1141
+	ERROR_TOO_MANY_LINKS                                                      syscall.Errno = 1142
+	ERROR_OLD_WIN_VERSION                                                     syscall.Errno = 1150
+	ERROR_APP_WRONG_OS                                                        syscall.Errno = 1151
+	ERROR_SINGLE_INSTANCE_APP                                                 syscall.Errno = 1152
+	ERROR_RMODE_APP                                                           syscall.Errno = 1153
+	ERROR_INVALID_DLL                                                         syscall.Errno = 1154
+	ERROR_NO_ASSOCIATION                                                      syscall.Errno = 1155
+	ERROR_DDE_FAIL                                                            syscall.Errno = 1156
+	ERROR_DLL_NOT_FOUND                                                       syscall.Errno = 1157
+	ERROR_NO_MORE_USER_HANDLES                                                syscall.Errno = 1158
+	ERROR_MESSAGE_SYNC_ONLY                                                   syscall.Errno = 1159
+	ERROR_SOURCE_ELEMENT_EMPTY                                                syscall.Errno = 1160
+	ERROR_DESTINATION_ELEMENT_FULL                                            syscall.Errno = 1161
+	ERROR_ILLEGAL_ELEMENT_ADDRESS                                             syscall.Errno = 1162
+	ERROR_MAGAZINE_NOT_PRESENT                                                syscall.Errno = 1163
+	ERROR_DEVICE_REINITIALIZATION_NEEDED                                      syscall.Errno = 1164
+	ERROR_DEVICE_REQUIRES_CLEANING                                            syscall.Errno = 1165
+	ERROR_DEVICE_DOOR_OPEN                                                    syscall.Errno = 1166
+	ERROR_DEVICE_NOT_CONNECTED                                                syscall.Errno = 1167
+	ERROR_NOT_FOUND                                                           syscall.Errno = 1168
+	ERROR_NO_MATCH                                                            syscall.Errno = 1169
+	ERROR_SET_NOT_FOUND                                                       syscall.Errno = 1170
+	ERROR_POINT_NOT_FOUND                                                     syscall.Errno = 1171
+	ERROR_NO_TRACKING_SERVICE                                                 syscall.Errno = 1172
+	ERROR_NO_VOLUME_ID                                                        syscall.Errno = 1173
+	ERROR_UNABLE_TO_REMOVE_REPLACED                                           syscall.Errno = 1175
+	ERROR_UNABLE_TO_MOVE_REPLACEMENT                                          syscall.Errno = 1176
+	ERROR_UNABLE_TO_MOVE_REPLACEMENT_2                                        syscall.Errno = 1177
+	ERROR_JOURNAL_DELETE_IN_PROGRESS                                          syscall.Errno = 1178
+	ERROR_JOURNAL_NOT_ACTIVE                                                  syscall.Errno = 1179
+	ERROR_POTENTIAL_FILE_FOUND                                                syscall.Errno = 1180
+	ERROR_JOURNAL_ENTRY_DELETED                                               syscall.Errno = 1181
+	ERROR_SHUTDOWN_IS_SCHEDULED                                               syscall.Errno = 1190
+	ERROR_SHUTDOWN_USERS_LOGGED_ON                                            syscall.Errno = 1191
+	ERROR_BAD_DEVICE                                                          syscall.Errno = 1200
+	ERROR_CONNECTION_UNAVAIL                                                  syscall.Errno = 1201
+	ERROR_DEVICE_ALREADY_REMEMBERED                                           syscall.Errno = 1202
+	ERROR_NO_NET_OR_BAD_PATH                                                  syscall.Errno = 1203
+	ERROR_BAD_PROVIDER                                                        syscall.Errno = 1204
+	ERROR_CANNOT_OPEN_PROFILE                                                 syscall.Errno = 1205
+	ERROR_BAD_PROFILE                                                         syscall.Errno = 1206
+	ERROR_NOT_CONTAINER                                                       syscall.Errno = 1207
+	ERROR_EXTENDED_ERROR                                                      syscall.Errno = 1208
+	ERROR_INVALID_GROUPNAME                                                   syscall.Errno = 1209
+	ERROR_INVALID_COMPUTERNAME                                                syscall.Errno = 1210
+	ERROR_INVALID_EVENTNAME                                                   syscall.Errno = 1211
+	ERROR_INVALID_DOMAINNAME                                                  syscall.Errno = 1212
+	ERROR_INVALID_SERVICENAME                                                 syscall.Errno = 1213
+	ERROR_INVALID_NETNAME                                                     syscall.Errno = 1214
+	ERROR_INVALID_SHARENAME                                                   syscall.Errno = 1215
+	ERROR_INVALID_PASSWORDNAME                                                syscall.Errno = 1216
+	ERROR_INVALID_MESSAGENAME                                                 syscall.Errno = 1217
+	ERROR_INVALID_MESSAGEDEST                                                 syscall.Errno = 1218
+	ERROR_SESSION_CREDENTIAL_CONFLICT                                         syscall.Errno = 1219
+	ERROR_REMOTE_SESSION_LIMIT_EXCEEDED                                       syscall.Errno = 1220
+	ERROR_DUP_DOMAINNAME                                                      syscall.Errno = 1221
+	ERROR_NO_NETWORK                                                          syscall.Errno = 1222
+	ERROR_CANCELLED                                                           syscall.Errno = 1223
+	ERROR_USER_MAPPED_FILE                                                    syscall.Errno = 1224
+	ERROR_CONNECTION_REFUSED                                                  syscall.Errno = 1225
+	ERROR_GRACEFUL_DISCONNECT                                                 syscall.Errno = 1226
+	ERROR_ADDRESS_ALREADY_ASSOCIATED                                          syscall.Errno = 1227
+	ERROR_ADDRESS_NOT_ASSOCIATED                                              syscall.Errno = 1228
+	ERROR_CONNECTION_INVALID                                                  syscall.Errno = 1229
+	ERROR_CONNECTION_ACTIVE                                                   syscall.Errno = 1230
+	ERROR_NETWORK_UNREACHABLE                                                 syscall.Errno = 1231
+	ERROR_HOST_UNREACHABLE                                                    syscall.Errno = 1232
+	ERROR_PROTOCOL_UNREACHABLE                                                syscall.Errno = 1233
+	ERROR_PORT_UNREACHABLE                                                    syscall.Errno = 1234
+	ERROR_REQUEST_ABORTED                                                     syscall.Errno = 1235
+	ERROR_CONNECTION_ABORTED                                                  syscall.Errno = 1236
+	ERROR_RETRY                                                               syscall.Errno = 1237
+	ERROR_CONNECTION_COUNT_LIMIT                                              syscall.Errno = 1238
+	ERROR_LOGIN_TIME_RESTRICTION                                              syscall.Errno = 1239
+	ERROR_LOGIN_WKSTA_RESTRICTION                                             syscall.Errno = 1240
+	ERROR_INCORRECT_ADDRESS                                                   syscall.Errno = 1241
+	ERROR_ALREADY_REGISTERED                                                  syscall.Errno = 1242
+	ERROR_SERVICE_NOT_FOUND                                                   syscall.Errno = 1243
+	ERROR_NOT_AUTHENTICATED                                                   syscall.Errno = 1244
+	ERROR_NOT_LOGGED_ON                                                       syscall.Errno = 1245
+	ERROR_CONTINUE                                                            syscall.Errno = 1246
+	ERROR_ALREADY_INITIALIZED                                                 syscall.Errno = 1247
+	ERROR_NO_MORE_DEVICES                                                     syscall.Errno = 1248
+	ERROR_NO_SUCH_SITE                                                        syscall.Errno = 1249
+	ERROR_DOMAIN_CONTROLLER_EXISTS                                            syscall.Errno = 1250
+	ERROR_ONLY_IF_CONNECTED                                                   syscall.Errno = 1251
+	ERROR_OVERRIDE_NOCHANGES                                                  syscall.Errno = 1252
+	ERROR_BAD_USER_PROFILE                                                    syscall.Errno = 1253
+	ERROR_NOT_SUPPORTED_ON_SBS                                                syscall.Errno = 1254
+	ERROR_SERVER_SHUTDOWN_IN_PROGRESS                                         syscall.Errno = 1255
+	ERROR_HOST_DOWN                                                           syscall.Errno = 1256
+	ERROR_NON_ACCOUNT_SID                                                     syscall.Errno = 1257
+	ERROR_NON_DOMAIN_SID                                                      syscall.Errno = 1258
+	ERROR_APPHELP_BLOCK                                                       syscall.Errno = 1259
+	ERROR_ACCESS_DISABLED_BY_POLICY                                           syscall.Errno = 1260
+	ERROR_REG_NAT_CONSUMPTION                                                 syscall.Errno = 1261
+	ERROR_CSCSHARE_OFFLINE                                                    syscall.Errno = 1262
+	ERROR_PKINIT_FAILURE                                                      syscall.Errno = 1263
+	ERROR_SMARTCARD_SUBSYSTEM_FAILURE                                         syscall.Errno = 1264
+	ERROR_DOWNGRADE_DETECTED                                                  syscall.Errno = 1265
+	ERROR_MACHINE_LOCKED                                                      syscall.Errno = 1271
+	ERROR_SMB_GUEST_LOGON_BLOCKED                                             syscall.Errno = 1272
+	ERROR_CALLBACK_SUPPLIED_INVALID_DATA                                      syscall.Errno = 1273
+	ERROR_SYNC_FOREGROUND_REFRESH_REQUIRED                                    syscall.Errno = 1274
+	ERROR_DRIVER_BLOCKED                                                      syscall.Errno = 1275
+	ERROR_INVALID_IMPORT_OF_NON_DLL                                           syscall.Errno = 1276
+	ERROR_ACCESS_DISABLED_WEBBLADE                                            syscall.Errno = 1277
+	ERROR_ACCESS_DISABLED_WEBBLADE_TAMPER                                     syscall.Errno = 1278
+	ERROR_RECOVERY_FAILURE                                                    syscall.Errno = 1279
+	ERROR_ALREADY_FIBER                                                       syscall.Errno = 1280
+	ERROR_ALREADY_THREAD                                                      syscall.Errno = 1281
+	ERROR_STACK_BUFFER_OVERRUN                                                syscall.Errno = 1282
+	ERROR_PARAMETER_QUOTA_EXCEEDED                                            syscall.Errno = 1283
+	ERROR_DEBUGGER_INACTIVE                                                   syscall.Errno = 1284
+	ERROR_DELAY_LOAD_FAILED                                                   syscall.Errno = 1285
+	ERROR_VDM_DISALLOWED                                                      syscall.Errno = 1286
+	ERROR_UNIDENTIFIED_ERROR                                                  syscall.Errno = 1287
+	ERROR_INVALID_CRUNTIME_PARAMETER                                          syscall.Errno = 1288
+	ERROR_BEYOND_VDL                                                          syscall.Errno = 1289
+	ERROR_INCOMPATIBLE_SERVICE_SID_TYPE                                       syscall.Errno = 1290
+	ERROR_DRIVER_PROCESS_TERMINATED                                           syscall.Errno = 1291
+	ERROR_IMPLEMENTATION_LIMIT                                                syscall.Errno = 1292
+	ERROR_PROCESS_IS_PROTECTED                                                syscall.Errno = 1293
+	ERROR_SERVICE_NOTIFY_CLIENT_LAGGING                                       syscall.Errno = 1294
+	ERROR_DISK_QUOTA_EXCEEDED                                                 syscall.Errno = 1295
+	ERROR_CONTENT_BLOCKED                                                     syscall.Errno = 1296
+	ERROR_INCOMPATIBLE_SERVICE_PRIVILEGE                                      syscall.Errno = 1297
+	ERROR_APP_HANG                                                            syscall.Errno = 1298
+	ERROR_INVALID_LABEL                                                       syscall.Errno = 1299
+	ERROR_NOT_ALL_ASSIGNED                                                    syscall.Errno = 1300
+	ERROR_SOME_NOT_MAPPED                                                     syscall.Errno = 1301
+	ERROR_NO_QUOTAS_FOR_ACCOUNT                                               syscall.Errno = 1302
+	ERROR_LOCAL_USER_SESSION_KEY                                              syscall.Errno = 1303
+	ERROR_NULL_LM_PASSWORD                                                    syscall.Errno = 1304
+	ERROR_UNKNOWN_REVISION                                                    syscall.Errno = 1305
+	ERROR_REVISION_MISMATCH                                                   syscall.Errno = 1306
+	ERROR_INVALID_OWNER                                                       syscall.Errno = 1307
+	ERROR_INVALID_PRIMARY_GROUP                                               syscall.Errno = 1308
+	ERROR_NO_IMPERSONATION_TOKEN                                              syscall.Errno = 1309
+	ERROR_CANT_DISABLE_MANDATORY                                              syscall.Errno = 1310
+	ERROR_NO_LOGON_SERVERS                                                    syscall.Errno = 1311
+	ERROR_NO_SUCH_LOGON_SESSION                                               syscall.Errno = 1312
+	ERROR_NO_SUCH_PRIVILEGE                                                   syscall.Errno = 1313
+	ERROR_PRIVILEGE_NOT_HELD                                                  syscall.Errno = 1314
+	ERROR_INVALID_ACCOUNT_NAME                                                syscall.Errno = 1315
+	ERROR_USER_EXISTS                                                         syscall.Errno = 1316
+	ERROR_NO_SUCH_USER                                                        syscall.Errno = 1317
+	ERROR_GROUP_EXISTS                                                        syscall.Errno = 1318
+	ERROR_NO_SUCH_GROUP                                                       syscall.Errno = 1319
+	ERROR_MEMBER_IN_GROUP                                                     syscall.Errno = 1320
+	ERROR_MEMBER_NOT_IN_GROUP                                                 syscall.Errno = 1321
+	ERROR_LAST_ADMIN                                                          syscall.Errno = 1322
+	ERROR_WRONG_PASSWORD                                                      syscall.Errno = 1323
+	ERROR_ILL_FORMED_PASSWORD                                                 syscall.Errno = 1324
+	ERROR_PASSWORD_RESTRICTION                                                syscall.Errno = 1325
+	ERROR_LOGON_FAILURE                                                       syscall.Errno = 1326
+	ERROR_ACCOUNT_RESTRICTION                                                 syscall.Errno = 1327
+	ERROR_INVALID_LOGON_HOURS                                                 syscall.Errno = 1328
+	ERROR_INVALID_WORKSTATION                                                 syscall.Errno = 1329
+	ERROR_PASSWORD_EXPIRED                                                    syscall.Errno = 1330
+	ERROR_ACCOUNT_DISABLED                                                    syscall.Errno = 1331
+	ERROR_NONE_MAPPED                                                         syscall.Errno = 1332
+	ERROR_TOO_MANY_LUIDS_REQUESTED                                            syscall.Errno = 1333
+	ERROR_LUIDS_EXHAUSTED                                                     syscall.Errno = 1334
+	ERROR_INVALID_SUB_AUTHORITY                                               syscall.Errno = 1335
+	ERROR_INVALID_ACL                                                         syscall.Errno = 1336
+	ERROR_INVALID_SID                                                         syscall.Errno = 1337
+	ERROR_INVALID_SECURITY_DESCR                                              syscall.Errno = 1338
+	ERROR_BAD_INHERITANCE_ACL                                                 syscall.Errno = 1340
+	ERROR_SERVER_DISABLED                                                     syscall.Errno = 1341
+	ERROR_SERVER_NOT_DISABLED                                                 syscall.Errno = 1342
+	ERROR_INVALID_ID_AUTHORITY                                                syscall.Errno = 1343
+	ERROR_ALLOTTED_SPACE_EXCEEDED                                             syscall.Errno = 1344
+	ERROR_INVALID_GROUP_ATTRIBUTES                                            syscall.Errno = 1345
+	ERROR_BAD_IMPERSONATION_LEVEL                                             syscall.Errno = 1346
+	ERROR_CANT_OPEN_ANONYMOUS                                                 syscall.Errno = 1347
+	ERROR_BAD_VALIDATION_CLASS                                                syscall.Errno = 1348
+	ERROR_BAD_TOKEN_TYPE                                                      syscall.Errno = 1349
+	ERROR_NO_SECURITY_ON_OBJECT                                               syscall.Errno = 1350
+	ERROR_CANT_ACCESS_DOMAIN_INFO                                             syscall.Errno = 1351
+	ERROR_INVALID_SERVER_STATE                                                syscall.Errno = 1352
+	ERROR_INVALID_DOMAIN_STATE                                                syscall.Errno = 1353
+	ERROR_INVALID_DOMAIN_ROLE                                                 syscall.Errno = 1354
+	ERROR_NO_SUCH_DOMAIN                                                      syscall.Errno = 1355
+	ERROR_DOMAIN_EXISTS                                                       syscall.Errno = 1356
+	ERROR_DOMAIN_LIMIT_EXCEEDED                                               syscall.Errno = 1357
+	ERROR_INTERNAL_DB_CORRUPTION                                              syscall.Errno = 1358
+	ERROR_INTERNAL_ERROR                                                      syscall.Errno = 1359
+	ERROR_GENERIC_NOT_MAPPED                                                  syscall.Errno = 1360
+	ERROR_BAD_DESCRIPTOR_FORMAT                                               syscall.Errno = 1361
+	ERROR_NOT_LOGON_PROCESS                                                   syscall.Errno = 1362
+	ERROR_LOGON_SESSION_EXISTS                                                syscall.Errno = 1363
+	ERROR_NO_SUCH_PACKAGE                                                     syscall.Errno = 1364
+	ERROR_BAD_LOGON_SESSION_STATE                                             syscall.Errno = 1365
+	ERROR_LOGON_SESSION_COLLISION                                             syscall.Errno = 1366
+	ERROR_INVALID_LOGON_TYPE                                                  syscall.Errno = 1367
+	ERROR_CANNOT_IMPERSONATE                                                  syscall.Errno = 1368
+	ERROR_RXACT_INVALID_STATE                                                 syscall.Errno = 1369
+	ERROR_RXACT_COMMIT_FAILURE                                                syscall.Errno = 1370
+	ERROR_SPECIAL_ACCOUNT                                                     syscall.Errno = 1371
+	ERROR_SPECIAL_GROUP                                                       syscall.Errno = 1372
+	ERROR_SPECIAL_USER                                                        syscall.Errno = 1373
+	ERROR_MEMBERS_PRIMARY_GROUP                                               syscall.Errno = 1374
+	ERROR_TOKEN_ALREADY_IN_USE                                                syscall.Errno = 1375
+	ERROR_NO_SUCH_ALIAS                                                       syscall.Errno = 1376
+	ERROR_MEMBER_NOT_IN_ALIAS                                                 syscall.Errno = 1377
+	ERROR_MEMBER_IN_ALIAS                                                     syscall.Errno = 1378
+	ERROR_ALIAS_EXISTS                                                        syscall.Errno = 1379
+	ERROR_LOGON_NOT_GRANTED                                                   syscall.Errno = 1380
+	ERROR_TOO_MANY_SECRETS                                                    syscall.Errno = 1381
+	ERROR_SECRET_TOO_LONG                                                     syscall.Errno = 1382
+	ERROR_INTERNAL_DB_ERROR                                                   syscall.Errno = 1383
+	ERROR_TOO_MANY_CONTEXT_IDS                                                syscall.Errno = 1384
+	ERROR_LOGON_TYPE_NOT_GRANTED                                              syscall.Errno = 1385
+	ERROR_NT_CROSS_ENCRYPTION_REQUIRED                                        syscall.Errno = 1386
+	ERROR_NO_SUCH_MEMBER                                                      syscall.Errno = 1387
+	ERROR_INVALID_MEMBER                                                      syscall.Errno = 1388
+	ERROR_TOO_MANY_SIDS                                                       syscall.Errno = 1389
+	ERROR_LM_CROSS_ENCRYPTION_REQUIRED                                        syscall.Errno = 1390
+	ERROR_NO_INHERITANCE                                                      syscall.Errno = 1391
+	ERROR_FILE_CORRUPT                                                        syscall.Errno = 1392
+	ERROR_DISK_CORRUPT                                                        syscall.Errno = 1393
+	ERROR_NO_USER_SESSION_KEY                                                 syscall.Errno = 1394
+	ERROR_LICENSE_QUOTA_EXCEEDED                                              syscall.Errno = 1395
+	ERROR_WRONG_TARGET_NAME                                                   syscall.Errno = 1396
+	ERROR_MUTUAL_AUTH_FAILED                                                  syscall.Errno = 1397
+	ERROR_TIME_SKEW                                                           syscall.Errno = 1398
+	ERROR_CURRENT_DOMAIN_NOT_ALLOWED                                          syscall.Errno = 1399
+	ERROR_INVALID_WINDOW_HANDLE                                               syscall.Errno = 1400
+	ERROR_INVALID_MENU_HANDLE                                                 syscall.Errno = 1401
+	ERROR_INVALID_CURSOR_HANDLE                                               syscall.Errno = 1402
+	ERROR_INVALID_ACCEL_HANDLE                                                syscall.Errno = 1403
+	ERROR_INVALID_HOOK_HANDLE                                                 syscall.Errno = 1404
+	ERROR_INVALID_DWP_HANDLE                                                  syscall.Errno = 1405
+	ERROR_TLW_WITH_WSCHILD                                                    syscall.Errno = 1406
+	ERROR_CANNOT_FIND_WND_CLASS                                               syscall.Errno = 1407
+	ERROR_WINDOW_OF_OTHER_THREAD                                              syscall.Errno = 1408
+	ERROR_HOTKEY_ALREADY_REGISTERED                                           syscall.Errno = 1409
+	ERROR_CLASS_ALREADY_EXISTS                                                syscall.Errno = 1410
+	ERROR_CLASS_DOES_NOT_EXIST                                                syscall.Errno = 1411
+	ERROR_CLASS_HAS_WINDOWS                                                   syscall.Errno = 1412
+	ERROR_INVALID_INDEX                                                       syscall.Errno = 1413
+	ERROR_INVALID_ICON_HANDLE                                                 syscall.Errno = 1414
+	ERROR_PRIVATE_DIALOG_INDEX                                                syscall.Errno = 1415
+	ERROR_LISTBOX_ID_NOT_FOUND                                                syscall.Errno = 1416
+	ERROR_NO_WILDCARD_CHARACTERS                                              syscall.Errno = 1417
+	ERROR_CLIPBOARD_NOT_OPEN                                                  syscall.Errno = 1418
+	ERROR_HOTKEY_NOT_REGISTERED                                               syscall.Errno = 1419
+	ERROR_WINDOW_NOT_DIALOG                                                   syscall.Errno = 1420
+	ERROR_CONTROL_ID_NOT_FOUND                                                syscall.Errno = 1421
+	ERROR_INVALID_COMBOBOX_MESSAGE                                            syscall.Errno = 1422
+	ERROR_WINDOW_NOT_COMBOBOX                                                 syscall.Errno = 1423
+	ERROR_INVALID_EDIT_HEIGHT                                                 syscall.Errno = 1424
+	ERROR_DC_NOT_FOUND                                                        syscall.Errno = 1425
+	ERROR_INVALID_HOOK_FILTER                                                 syscall.Errno = 1426
+	ERROR_INVALID_FILTER_PROC                                                 syscall.Errno = 1427
+	ERROR_HOOK_NEEDS_HMOD                                                     syscall.Errno = 1428
+	ERROR_GLOBAL_ONLY_HOOK                                                    syscall.Errno = 1429
+	ERROR_JOURNAL_HOOK_SET                                                    syscall.Errno = 1430
+	ERROR_HOOK_NOT_INSTALLED                                                  syscall.Errno = 1431
+	ERROR_INVALID_LB_MESSAGE                                                  syscall.Errno = 1432
+	ERROR_SETCOUNT_ON_BAD_LB                                                  syscall.Errno = 1433
+	ERROR_LB_WITHOUT_TABSTOPS                                                 syscall.Errno = 1434
+	ERROR_DESTROY_OBJECT_OF_OTHER_THREAD                                      syscall.Errno = 1435
+	ERROR_CHILD_WINDOW_MENU                                                   syscall.Errno = 1436
+	ERROR_NO_SYSTEM_MENU                                                      syscall.Errno = 1437
+	ERROR_INVALID_MSGBOX_STYLE                                                syscall.Errno = 1438
+	ERROR_INVALID_SPI_VALUE                                                   syscall.Errno = 1439
+	ERROR_SCREEN_ALREADY_LOCKED                                               syscall.Errno = 1440
+	ERROR_HWNDS_HAVE_DIFF_PARENT                                              syscall.Errno = 1441
+	ERROR_NOT_CHILD_WINDOW                                                    syscall.Errno = 1442
+	ERROR_INVALID_GW_COMMAND                                                  syscall.Errno = 1443
+	ERROR_INVALID_THREAD_ID                                                   syscall.Errno = 1444
+	ERROR_NON_MDICHILD_WINDOW                                                 syscall.Errno = 1445
+	ERROR_POPUP_ALREADY_ACTIVE                                                syscall.Errno = 1446
+	ERROR_NO_SCROLLBARS                                                       syscall.Errno = 1447
+	ERROR_INVALID_SCROLLBAR_RANGE                                             syscall.Errno = 1448
+	ERROR_INVALID_SHOWWIN_COMMAND                                             syscall.Errno = 1449
+	ERROR_NO_SYSTEM_RESOURCES                                                 syscall.Errno = 1450
+	ERROR_NONPAGED_SYSTEM_RESOURCES                                           syscall.Errno = 1451
+	ERROR_PAGED_SYSTEM_RESOURCES                                              syscall.Errno = 1452
+	ERROR_WORKING_SET_QUOTA                                                   syscall.Errno = 1453
+	ERROR_PAGEFILE_QUOTA                                                      syscall.Errno = 1454
+	ERROR_COMMITMENT_LIMIT                                                    syscall.Errno = 1455
+	ERROR_MENU_ITEM_NOT_FOUND                                                 syscall.Errno = 1456
+	ERROR_INVALID_KEYBOARD_HANDLE                                             syscall.Errno = 1457
+	ERROR_HOOK_TYPE_NOT_ALLOWED                                               syscall.Errno = 1458
+	ERROR_REQUIRES_INTERACTIVE_WINDOWSTATION                                  syscall.Errno = 1459
+	ERROR_TIMEOUT                                                             syscall.Errno = 1460
+	ERROR_INVALID_MONITOR_HANDLE                                              syscall.Errno = 1461
+	ERROR_INCORRECT_SIZE                                                      syscall.Errno = 1462
+	ERROR_SYMLINK_CLASS_DISABLED                                              syscall.Errno = 1463
+	ERROR_SYMLINK_NOT_SUPPORTED                                               syscall.Errno = 1464
+	ERROR_XML_PARSE_ERROR                                                     syscall.Errno = 1465
+	ERROR_XMLDSIG_ERROR                                                       syscall.Errno = 1466
+	ERROR_RESTART_APPLICATION                                                 syscall.Errno = 1467
+	ERROR_WRONG_COMPARTMENT                                                   syscall.Errno = 1468
+	ERROR_AUTHIP_FAILURE                                                      syscall.Errno = 1469
+	ERROR_NO_NVRAM_RESOURCES                                                  syscall.Errno = 1470
+	ERROR_NOT_GUI_PROCESS                                                     syscall.Errno = 1471
+	ERROR_EVENTLOG_FILE_CORRUPT                                               syscall.Errno = 1500
+	ERROR_EVENTLOG_CANT_START                                                 syscall.Errno = 1501
+	ERROR_LOG_FILE_FULL                                                       syscall.Errno = 1502
+	ERROR_EVENTLOG_FILE_CHANGED                                               syscall.Errno = 1503
+	ERROR_CONTAINER_ASSIGNED                                                  syscall.Errno = 1504
+	ERROR_JOB_NO_CONTAINER                                                    syscall.Errno = 1505
+	ERROR_INVALID_TASK_NAME                                                   syscall.Errno = 1550
+	ERROR_INVALID_TASK_INDEX                                                  syscall.Errno = 1551
+	ERROR_THREAD_ALREADY_IN_TASK                                              syscall.Errno = 1552
+	ERROR_INSTALL_SERVICE_FAILURE                                             syscall.Errno = 1601
+	ERROR_INSTALL_USEREXIT                                                    syscall.Errno = 1602
+	ERROR_INSTALL_FAILURE                                                     syscall.Errno = 1603
+	ERROR_INSTALL_SUSPEND                                                     syscall.Errno = 1604
+	ERROR_UNKNOWN_PRODUCT                                                     syscall.Errno = 1605
+	ERROR_UNKNOWN_FEATURE                                                     syscall.Errno = 1606
+	ERROR_UNKNOWN_COMPONENT                                                   syscall.Errno = 1607
+	ERROR_UNKNOWN_PROPERTY                                                    syscall.Errno = 1608
+	ERROR_INVALID_HANDLE_STATE                                                syscall.Errno = 1609
+	ERROR_BAD_CONFIGURATION                                                   syscall.Errno = 1610
+	ERROR_INDEX_ABSENT                                                        syscall.Errno = 1611
+	ERROR_INSTALL_SOURCE_ABSENT                                               syscall.Errno = 1612
+	ERROR_INSTALL_PACKAGE_VERSION                                             syscall.Errno = 1613
+	ERROR_PRODUCT_UNINSTALLED                                                 syscall.Errno = 1614
+	ERROR_BAD_QUERY_SYNTAX                                                    syscall.Errno = 1615
+	ERROR_INVALID_FIELD                                                       syscall.Errno = 1616
+	ERROR_DEVICE_REMOVED                                                      syscall.Errno = 1617
+	ERROR_INSTALL_ALREADY_RUNNING                                             syscall.Errno = 1618
+	ERROR_INSTALL_PACKAGE_OPEN_FAILED                                         syscall.Errno = 1619
+	ERROR_INSTALL_PACKAGE_INVALID                                             syscall.Errno = 1620
+	ERROR_INSTALL_UI_FAILURE                                                  syscall.Errno = 1621
+	ERROR_INSTALL_LOG_FAILURE                                                 syscall.Errno = 1622
+	ERROR_INSTALL_LANGUAGE_UNSUPPORTED                                        syscall.Errno = 1623
+	ERROR_INSTALL_TRANSFORM_FAILURE                                           syscall.Errno = 1624
+	ERROR_INSTALL_PACKAGE_REJECTED                                            syscall.Errno = 1625
+	ERROR_FUNCTION_NOT_CALLED                                                 syscall.Errno = 1626
+	ERROR_FUNCTION_FAILED                                                     syscall.Errno = 1627
+	ERROR_INVALID_TABLE                                                       syscall.Errno = 1628
+	ERROR_DATATYPE_MISMATCH                                                   syscall.Errno = 1629
+	ERROR_UNSUPPORTED_TYPE                                                    syscall.Errno = 1630
+	ERROR_CREATE_FAILED                                                       syscall.Errno = 1631
+	ERROR_INSTALL_TEMP_UNWRITABLE                                             syscall.Errno = 1632
+	ERROR_INSTALL_PLATFORM_UNSUPPORTED                                        syscall.Errno = 1633
+	ERROR_INSTALL_NOTUSED                                                     syscall.Errno = 1634
+	ERROR_PATCH_PACKAGE_OPEN_FAILED                                           syscall.Errno = 1635
+	ERROR_PATCH_PACKAGE_INVALID                                               syscall.Errno = 1636
+	ERROR_PATCH_PACKAGE_UNSUPPORTED                                           syscall.Errno = 1637
+	ERROR_PRODUCT_VERSION                                                     syscall.Errno = 1638
+	ERROR_INVALID_COMMAND_LINE                                                syscall.Errno = 1639
+	ERROR_INSTALL_REMOTE_DISALLOWED                                           syscall.Errno = 1640
+	ERROR_SUCCESS_REBOOT_INITIATED                                            syscall.Errno = 1641
+	ERROR_PATCH_TARGET_NOT_FOUND                                              syscall.Errno = 1642
+	ERROR_PATCH_PACKAGE_REJECTED                                              syscall.Errno = 1643
+	ERROR_INSTALL_TRANSFORM_REJECTED                                          syscall.Errno = 1644
+	ERROR_INSTALL_REMOTE_PROHIBITED                                           syscall.Errno = 1645
+	ERROR_PATCH_REMOVAL_UNSUPPORTED                                           syscall.Errno = 1646
+	ERROR_UNKNOWN_PATCH                                                       syscall.Errno = 1647
+	ERROR_PATCH_NO_SEQUENCE                                                   syscall.Errno = 1648
+	ERROR_PATCH_REMOVAL_DISALLOWED                                            syscall.Errno = 1649
+	ERROR_INVALID_PATCH_XML                                                   syscall.Errno = 1650
+	ERROR_PATCH_MANAGED_ADVERTISED_PRODUCT                                    syscall.Errno = 1651
+	ERROR_INSTALL_SERVICE_SAFEBOOT                                            syscall.Errno = 1652
+	ERROR_FAIL_FAST_EXCEPTION                                                 syscall.Errno = 1653
+	ERROR_INSTALL_REJECTED                                                    syscall.Errno = 1654
+	ERROR_DYNAMIC_CODE_BLOCKED                                                syscall.Errno = 1655
+	ERROR_NOT_SAME_OBJECT                                                     syscall.Errno = 1656
+	ERROR_STRICT_CFG_VIOLATION                                                syscall.Errno = 1657
+	ERROR_SET_CONTEXT_DENIED                                                  syscall.Errno = 1660
+	ERROR_CROSS_PARTITION_VIOLATION                                           syscall.Errno = 1661
+	RPC_S_INVALID_STRING_BINDING                                              syscall.Errno = 1700
+	RPC_S_WRONG_KIND_OF_BINDING                                               syscall.Errno = 1701
+	RPC_S_INVALID_BINDING                                                     syscall.Errno = 1702
+	RPC_S_PROTSEQ_NOT_SUPPORTED                                               syscall.Errno = 1703
+	RPC_S_INVALID_RPC_PROTSEQ                                                 syscall.Errno = 1704
+	RPC_S_INVALID_STRING_UUID                                                 syscall.Errno = 1705
+	RPC_S_INVALID_ENDPOINT_FORMAT                                             syscall.Errno = 1706
+	RPC_S_INVALID_NET_ADDR                                                    syscall.Errno = 1707
+	RPC_S_NO_ENDPOINT_FOUND                                                   syscall.Errno = 1708
+	RPC_S_INVALID_TIMEOUT                                                     syscall.Errno = 1709
+	RPC_S_OBJECT_NOT_FOUND                                                    syscall.Errno = 1710
+	RPC_S_ALREADY_REGISTERED                                                  syscall.Errno = 1711
+	RPC_S_TYPE_ALREADY_REGISTERED                                             syscall.Errno = 1712
+	RPC_S_ALREADY_LISTENING                                                   syscall.Errno = 1713
+	RPC_S_NO_PROTSEQS_REGISTERED                                              syscall.Errno = 1714
+	RPC_S_NOT_LISTENING                                                       syscall.Errno = 1715
+	RPC_S_UNKNOWN_MGR_TYPE                                                    syscall.Errno = 1716
+	RPC_S_UNKNOWN_IF                                                          syscall.Errno = 1717
+	RPC_S_NO_BINDINGS                                                         syscall.Errno = 1718
+	RPC_S_NO_PROTSEQS                                                         syscall.Errno = 1719
+	RPC_S_CANT_CREATE_ENDPOINT                                                syscall.Errno = 1720
+	RPC_S_OUT_OF_RESOURCES                                                    syscall.Errno = 1721
+	RPC_S_SERVER_UNAVAILABLE                                                  syscall.Errno = 1722
+	RPC_S_SERVER_TOO_BUSY                                                     syscall.Errno = 1723
+	RPC_S_INVALID_NETWORK_OPTIONS                                             syscall.Errno = 1724
+	RPC_S_NO_CALL_ACTIVE                                                      syscall.Errno = 1725
+	RPC_S_CALL_FAILED                                                         syscall.Errno = 1726
+	RPC_S_CALL_FAILED_DNE                                                     syscall.Errno = 1727
+	RPC_S_PROTOCOL_ERROR                                                      syscall.Errno = 1728
+	RPC_S_PROXY_ACCESS_DENIED                                                 syscall.Errno = 1729
+	RPC_S_UNSUPPORTED_TRANS_SYN                                               syscall.Errno = 1730
+	RPC_S_UNSUPPORTED_TYPE                                                    syscall.Errno = 1732
+	RPC_S_INVALID_TAG                                                         syscall.Errno = 1733
+	RPC_S_INVALID_BOUND                                                       syscall.Errno = 1734
+	RPC_S_NO_ENTRY_NAME                                                       syscall.Errno = 1735
+	RPC_S_INVALID_NAME_SYNTAX                                                 syscall.Errno = 1736
+	RPC_S_UNSUPPORTED_NAME_SYNTAX                                             syscall.Errno = 1737
+	RPC_S_UUID_NO_ADDRESS                                                     syscall.Errno = 1739
+	RPC_S_DUPLICATE_ENDPOINT                                                  syscall.Errno = 1740
+	RPC_S_UNKNOWN_AUTHN_TYPE                                                  syscall.Errno = 1741
+	RPC_S_MAX_CALLS_TOO_SMALL                                                 syscall.Errno = 1742
+	RPC_S_STRING_TOO_LONG                                                     syscall.Errno = 1743
+	RPC_S_PROTSEQ_NOT_FOUND                                                   syscall.Errno = 1744
+	RPC_S_PROCNUM_OUT_OF_RANGE                                                syscall.Errno = 1745
+	RPC_S_BINDING_HAS_NO_AUTH                                                 syscall.Errno = 1746
+	RPC_S_UNKNOWN_AUTHN_SERVICE                                               syscall.Errno = 1747
+	RPC_S_UNKNOWN_AUTHN_LEVEL                                                 syscall.Errno = 1748
+	RPC_S_INVALID_AUTH_IDENTITY                                               syscall.Errno = 1749
+	RPC_S_UNKNOWN_AUTHZ_SERVICE                                               syscall.Errno = 1750
+	EPT_S_INVALID_ENTRY                                                       syscall.Errno = 1751
+	EPT_S_CANT_PERFORM_OP                                                     syscall.Errno = 1752
+	EPT_S_NOT_REGISTERED                                                      syscall.Errno = 1753
+	RPC_S_NOTHING_TO_EXPORT                                                   syscall.Errno = 1754
+	RPC_S_INCOMPLETE_NAME                                                     syscall.Errno = 1755
+	RPC_S_INVALID_VERS_OPTION                                                 syscall.Errno = 1756
+	RPC_S_NO_MORE_MEMBERS                                                     syscall.Errno = 1757
+	RPC_S_NOT_ALL_OBJS_UNEXPORTED                                             syscall.Errno = 1758
+	RPC_S_INTERFACE_NOT_FOUND                                                 syscall.Errno = 1759
+	RPC_S_ENTRY_ALREADY_EXISTS                                                syscall.Errno = 1760
+	RPC_S_ENTRY_NOT_FOUND                                                     syscall.Errno = 1761
+	RPC_S_NAME_SERVICE_UNAVAILABLE                                            syscall.Errno = 1762
+	RPC_S_INVALID_NAF_ID                                                      syscall.Errno = 1763
+	RPC_S_CANNOT_SUPPORT                                                      syscall.Errno = 1764
+	RPC_S_NO_CONTEXT_AVAILABLE                                                syscall.Errno = 1765
+	RPC_S_INTERNAL_ERROR                                                      syscall.Errno = 1766
+	RPC_S_ZERO_DIVIDE                                                         syscall.Errno = 1767
+	RPC_S_ADDRESS_ERROR                                                       syscall.Errno = 1768
+	RPC_S_FP_DIV_ZERO                                                         syscall.Errno = 1769
+	RPC_S_FP_UNDERFLOW                                                        syscall.Errno = 1770
+	RPC_S_FP_OVERFLOW                                                         syscall.Errno = 1771
+	RPC_X_NO_MORE_ENTRIES                                                     syscall.Errno = 1772
+	RPC_X_SS_CHAR_TRANS_OPEN_FAIL                                             syscall.Errno = 1773
+	RPC_X_SS_CHAR_TRANS_SHORT_FILE                                            syscall.Errno = 1774
+	RPC_X_SS_IN_NULL_CONTEXT                                                  syscall.Errno = 1775
+	RPC_X_SS_CONTEXT_DAMAGED                                                  syscall.Errno = 1777
+	RPC_X_SS_HANDLES_MISMATCH                                                 syscall.Errno = 1778
+	RPC_X_SS_CANNOT_GET_CALL_HANDLE                                           syscall.Errno = 1779
+	RPC_X_NULL_REF_POINTER                                                    syscall.Errno = 1780
+	RPC_X_ENUM_VALUE_OUT_OF_RANGE                                             syscall.Errno = 1781
+	RPC_X_BYTE_COUNT_TOO_SMALL                                                syscall.Errno = 1782
+	RPC_X_BAD_STUB_DATA                                                       syscall.Errno = 1783
+	ERROR_INVALID_USER_BUFFER                                                 syscall.Errno = 1784
+	ERROR_UNRECOGNIZED_MEDIA                                                  syscall.Errno = 1785
+	ERROR_NO_TRUST_LSA_SECRET                                                 syscall.Errno = 1786
+	ERROR_NO_TRUST_SAM_ACCOUNT                                                syscall.Errno = 1787
+	ERROR_TRUSTED_DOMAIN_FAILURE                                              syscall.Errno = 1788
+	ERROR_TRUSTED_RELATIONSHIP_FAILURE                                        syscall.Errno = 1789
+	ERROR_TRUST_FAILURE                                                       syscall.Errno = 1790
+	RPC_S_CALL_IN_PROGRESS                                                    syscall.Errno = 1791
+	ERROR_NETLOGON_NOT_STARTED                                                syscall.Errno = 1792
+	ERROR_ACCOUNT_EXPIRED                                                     syscall.Errno = 1793
+	ERROR_REDIRECTOR_HAS_OPEN_HANDLES                                         syscall.Errno = 1794
+	ERROR_PRINTER_DRIVER_ALREADY_INSTALLED                                    syscall.Errno = 1795
+	ERROR_UNKNOWN_PORT                                                        syscall.Errno = 1796
+	ERROR_UNKNOWN_PRINTER_DRIVER                                              syscall.Errno = 1797
+	ERROR_UNKNOWN_PRINTPROCESSOR                                              syscall.Errno = 1798
+	ERROR_INVALID_SEPARATOR_FILE                                              syscall.Errno = 1799
+	ERROR_INVALID_PRIORITY                                                    syscall.Errno = 1800
+	ERROR_INVALID_PRINTER_NAME                                                syscall.Errno = 1801
+	ERROR_PRINTER_ALREADY_EXISTS                                              syscall.Errno = 1802
+	ERROR_INVALID_PRINTER_COMMAND                                             syscall.Errno = 1803
+	ERROR_INVALID_DATATYPE                                                    syscall.Errno = 1804
+	ERROR_INVALID_ENVIRONMENT                                                 syscall.Errno = 1805
+	RPC_S_NO_MORE_BINDINGS                                                    syscall.Errno = 1806
+	ERROR_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT                                   syscall.Errno = 1807
+	ERROR_NOLOGON_WORKSTATION_TRUST_ACCOUNT                                   syscall.Errno = 1808
+	ERROR_NOLOGON_SERVER_TRUST_ACCOUNT                                        syscall.Errno = 1809
+	ERROR_DOMAIN_TRUST_INCONSISTENT                                           syscall.Errno = 1810
+	ERROR_SERVER_HAS_OPEN_HANDLES                                             syscall.Errno = 1811
+	ERROR_RESOURCE_DATA_NOT_FOUND                                             syscall.Errno = 1812
+	ERROR_RESOURCE_TYPE_NOT_FOUND                                             syscall.Errno = 1813
+	ERROR_RESOURCE_NAME_NOT_FOUND                                             syscall.Errno = 1814
+	ERROR_RESOURCE_LANG_NOT_FOUND                                             syscall.Errno = 1815
+	ERROR_NOT_ENOUGH_QUOTA                                                    syscall.Errno = 1816
+	RPC_S_NO_INTERFACES                                                       syscall.Errno = 1817
+	RPC_S_CALL_CANCELLED                                                      syscall.Errno = 1818
+	RPC_S_BINDING_INCOMPLETE                                                  syscall.Errno = 1819
+	RPC_S_COMM_FAILURE                                                        syscall.Errno = 1820
+	RPC_S_UNSUPPORTED_AUTHN_LEVEL                                             syscall.Errno = 1821
+	RPC_S_NO_PRINC_NAME                                                       syscall.Errno = 1822
+	RPC_S_NOT_RPC_ERROR                                                       syscall.Errno = 1823
+	RPC_S_UUID_LOCAL_ONLY                                                     syscall.Errno = 1824
+	RPC_S_SEC_PKG_ERROR                                                       syscall.Errno = 1825
+	RPC_S_NOT_CANCELLED                                                       syscall.Errno = 1826
+	RPC_X_INVALID_ES_ACTION                                                   syscall.Errno = 1827
+	RPC_X_WRONG_ES_VERSION                                                    syscall.Errno = 1828
+	RPC_X_WRONG_STUB_VERSION                                                  syscall.Errno = 1829
+	RPC_X_INVALID_PIPE_OBJECT                                                 syscall.Errno = 1830
+	RPC_X_WRONG_PIPE_ORDER                                                    syscall.Errno = 1831
+	RPC_X_WRONG_PIPE_VERSION                                                  syscall.Errno = 1832
+	RPC_S_COOKIE_AUTH_FAILED                                                  syscall.Errno = 1833
+	RPC_S_DO_NOT_DISTURB                                                      syscall.Errno = 1834
+	RPC_S_SYSTEM_HANDLE_COUNT_EXCEEDED                                        syscall.Errno = 1835
+	RPC_S_SYSTEM_HANDLE_TYPE_MISMATCH                                         syscall.Errno = 1836
+	RPC_S_GROUP_MEMBER_NOT_FOUND                                              syscall.Errno = 1898
+	EPT_S_CANT_CREATE                                                         syscall.Errno = 1899
+	RPC_S_INVALID_OBJECT                                                      syscall.Errno = 1900
+	ERROR_INVALID_TIME                                                        syscall.Errno = 1901
+	ERROR_INVALID_FORM_NAME                                                   syscall.Errno = 1902
+	ERROR_INVALID_FORM_SIZE                                                   syscall.Errno = 1903
+	ERROR_ALREADY_WAITING                                                     syscall.Errno = 1904
+	ERROR_PRINTER_DELETED                                                     syscall.Errno = 1905
+	ERROR_INVALID_PRINTER_STATE                                               syscall.Errno = 1906
+	ERROR_PASSWORD_MUST_CHANGE                                                syscall.Errno = 1907
+	ERROR_DOMAIN_CONTROLLER_NOT_FOUND                                         syscall.Errno = 1908
+	ERROR_ACCOUNT_LOCKED_OUT                                                  syscall.Errno = 1909
+	OR_INVALID_OXID                                                           syscall.Errno = 1910
+	OR_INVALID_OID                                                            syscall.Errno = 1911
+	OR_INVALID_SET                                                            syscall.Errno = 1912
+	RPC_S_SEND_INCOMPLETE                                                     syscall.Errno = 1913
+	RPC_S_INVALID_ASYNC_HANDLE                                                syscall.Errno = 1914
+	RPC_S_INVALID_ASYNC_CALL                                                  syscall.Errno = 1915
+	RPC_X_PIPE_CLOSED                                                         syscall.Errno = 1916
+	RPC_X_PIPE_DISCIPLINE_ERROR                                               syscall.Errno = 1917
+	RPC_X_PIPE_EMPTY                                                          syscall.Errno = 1918
+	ERROR_NO_SITENAME                                                         syscall.Errno = 1919
+	ERROR_CANT_ACCESS_FILE                                                    syscall.Errno = 1920
+	ERROR_CANT_RESOLVE_FILENAME                                               syscall.Errno = 1921
+	RPC_S_ENTRY_TYPE_MISMATCH                                                 syscall.Errno = 1922
+	RPC_S_NOT_ALL_OBJS_EXPORTED                                               syscall.Errno = 1923
+	RPC_S_INTERFACE_NOT_EXPORTED                                              syscall.Errno = 1924
+	RPC_S_PROFILE_NOT_ADDED                                                   syscall.Errno = 1925
+	RPC_S_PRF_ELT_NOT_ADDED                                                   syscall.Errno = 1926
+	RPC_S_PRF_ELT_NOT_REMOVED                                                 syscall.Errno = 1927
+	RPC_S_GRP_ELT_NOT_ADDED                                                   syscall.Errno = 1928
+	RPC_S_GRP_ELT_NOT_REMOVED                                                 syscall.Errno = 1929
+	ERROR_KM_DRIVER_BLOCKED                                                   syscall.Errno = 1930
+	ERROR_CONTEXT_EXPIRED                                                     syscall.Errno = 1931
+	ERROR_PER_USER_TRUST_QUOTA_EXCEEDED                                       syscall.Errno = 1932
+	ERROR_ALL_USER_TRUST_QUOTA_EXCEEDED                                       syscall.Errno = 1933
+	ERROR_USER_DELETE_TRUST_QUOTA_EXCEEDED                                    syscall.Errno = 1934
+	ERROR_AUTHENTICATION_FIREWALL_FAILED                                      syscall.Errno = 1935
+	ERROR_REMOTE_PRINT_CONNECTIONS_BLOCKED                                    syscall.Errno = 1936
+	ERROR_NTLM_BLOCKED                                                        syscall.Errno = 1937
+	ERROR_PASSWORD_CHANGE_REQUIRED                                            syscall.Errno = 1938
+	ERROR_LOST_MODE_LOGON_RESTRICTION                                         syscall.Errno = 1939
+	ERROR_INVALID_PIXEL_FORMAT                                                syscall.Errno = 2000
+	ERROR_BAD_DRIVER                                                          syscall.Errno = 2001
+	ERROR_INVALID_WINDOW_STYLE                                                syscall.Errno = 2002
+	ERROR_METAFILE_NOT_SUPPORTED                                              syscall.Errno = 2003
+	ERROR_TRANSFORM_NOT_SUPPORTED                                             syscall.Errno = 2004
+	ERROR_CLIPPING_NOT_SUPPORTED                                              syscall.Errno = 2005
+	ERROR_INVALID_CMM                                                         syscall.Errno = 2010
+	ERROR_INVALID_PROFILE                                                     syscall.Errno = 2011
+	ERROR_TAG_NOT_FOUND                                                       syscall.Errno = 2012
+	ERROR_TAG_NOT_PRESENT                                                     syscall.Errno = 2013
+	ERROR_DUPLICATE_TAG                                                       syscall.Errno = 2014
+	ERROR_PROFILE_NOT_ASSOCIATED_WITH_DEVICE                                  syscall.Errno = 2015
+	ERROR_PROFILE_NOT_FOUND                                                   syscall.Errno = 2016
+	ERROR_INVALID_COLORSPACE                                                  syscall.Errno = 2017
+	ERROR_ICM_NOT_ENABLED                                                     syscall.Errno = 2018
+	ERROR_DELETING_ICM_XFORM                                                  syscall.Errno = 2019
+	ERROR_INVALID_TRANSFORM                                                   syscall.Errno = 2020
+	ERROR_COLORSPACE_MISMATCH                                                 syscall.Errno = 2021
+	ERROR_INVALID_COLORINDEX                                                  syscall.Errno = 2022
+	ERROR_PROFILE_DOES_NOT_MATCH_DEVICE                                       syscall.Errno = 2023
+	ERROR_CONNECTED_OTHER_PASSWORD                                            syscall.Errno = 2108
+	ERROR_CONNECTED_OTHER_PASSWORD_DEFAULT                                    syscall.Errno = 2109
+	ERROR_BAD_USERNAME                                                        syscall.Errno = 2202
+	ERROR_NOT_CONNECTED                                                       syscall.Errno = 2250
+	ERROR_OPEN_FILES                                                          syscall.Errno = 2401
+	ERROR_ACTIVE_CONNECTIONS                                                  syscall.Errno = 2402
+	ERROR_DEVICE_IN_USE                                                       syscall.Errno = 2404
+	ERROR_UNKNOWN_PRINT_MONITOR                                               syscall.Errno = 3000
+	ERROR_PRINTER_DRIVER_IN_USE                                               syscall.Errno = 3001
+	ERROR_SPOOL_FILE_NOT_FOUND                                                syscall.Errno = 3002
+	ERROR_SPL_NO_STARTDOC                                                     syscall.Errno = 3003
+	ERROR_SPL_NO_ADDJOB                                                       syscall.Errno = 3004
+	ERROR_PRINT_PROCESSOR_ALREADY_INSTALLED                                   syscall.Errno = 3005
+	ERROR_PRINT_MONITOR_ALREADY_INSTALLED                                     syscall.Errno = 3006
+	ERROR_INVALID_PRINT_MONITOR                                               syscall.Errno = 3007
+	ERROR_PRINT_MONITOR_IN_USE                                                syscall.Errno = 3008
+	ERROR_PRINTER_HAS_JOBS_QUEUED                                             syscall.Errno = 3009
+	ERROR_SUCCESS_REBOOT_REQUIRED                                             syscall.Errno = 3010
+	ERROR_SUCCESS_RESTART_REQUIRED                                            syscall.Errno = 3011
+	ERROR_PRINTER_NOT_FOUND                                                   syscall.Errno = 3012
+	ERROR_PRINTER_DRIVER_WARNED                                               syscall.Errno = 3013
+	ERROR_PRINTER_DRIVER_BLOCKED                                              syscall.Errno = 3014
+	ERROR_PRINTER_DRIVER_PACKAGE_IN_USE                                       syscall.Errno = 3015
+	ERROR_CORE_DRIVER_PACKAGE_NOT_FOUND                                       syscall.Errno = 3016
+	ERROR_FAIL_REBOOT_REQUIRED                                                syscall.Errno = 3017
+	ERROR_FAIL_REBOOT_INITIATED                                               syscall.Errno = 3018
+	ERROR_PRINTER_DRIVER_DOWNLOAD_NEEDED                                      syscall.Errno = 3019
+	ERROR_PRINT_JOB_RESTART_REQUIRED                                          syscall.Errno = 3020
+	ERROR_INVALID_PRINTER_DRIVER_MANIFEST                                     syscall.Errno = 3021
+	ERROR_PRINTER_NOT_SHAREABLE                                               syscall.Errno = 3022
+	ERROR_REQUEST_PAUSED                                                      syscall.Errno = 3050
+	ERROR_APPEXEC_CONDITION_NOT_SATISFIED                                     syscall.Errno = 3060
+	ERROR_APPEXEC_HANDLE_INVALIDATED                                          syscall.Errno = 3061
+	ERROR_APPEXEC_INVALID_HOST_GENERATION                                     syscall.Errno = 3062
+	ERROR_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION                             syscall.Errno = 3063
+	ERROR_APPEXEC_INVALID_HOST_STATE                                          syscall.Errno = 3064
+	ERROR_APPEXEC_NO_DONOR                                                    syscall.Errno = 3065
+	ERROR_APPEXEC_HOST_ID_MISMATCH                                            syscall.Errno = 3066
+	ERROR_APPEXEC_UNKNOWN_USER                                                syscall.Errno = 3067
+	ERROR_IO_REISSUE_AS_CACHED                                                syscall.Errno = 3950
+	ERROR_WINS_INTERNAL                                                       syscall.Errno = 4000
+	ERROR_CAN_NOT_DEL_LOCAL_WINS                                              syscall.Errno = 4001
+	ERROR_STATIC_INIT                                                         syscall.Errno = 4002
+	ERROR_INC_BACKUP                                                          syscall.Errno = 4003
+	ERROR_FULL_BACKUP                                                         syscall.Errno = 4004
+	ERROR_REC_NON_EXISTENT                                                    syscall.Errno = 4005
+	ERROR_RPL_NOT_ALLOWED                                                     syscall.Errno = 4006
+	PEERDIST_ERROR_CONTENTINFO_VERSION_UNSUPPORTED                            syscall.Errno = 4050
+	PEERDIST_ERROR_CANNOT_PARSE_CONTENTINFO                                   syscall.Errno = 4051
+	PEERDIST_ERROR_MISSING_DATA                                               syscall.Errno = 4052
+	PEERDIST_ERROR_NO_MORE                                                    syscall.Errno = 4053
+	PEERDIST_ERROR_NOT_INITIALIZED                                            syscall.Errno = 4054
+	PEERDIST_ERROR_ALREADY_INITIALIZED                                        syscall.Errno = 4055
+	PEERDIST_ERROR_SHUTDOWN_IN_PROGRESS                                       syscall.Errno = 4056
+	PEERDIST_ERROR_INVALIDATED                                                syscall.Errno = 4057
+	PEERDIST_ERROR_ALREADY_EXISTS                                             syscall.Errno = 4058
+	PEERDIST_ERROR_OPERATION_NOTFOUND                                         syscall.Errno = 4059
+	PEERDIST_ERROR_ALREADY_COMPLETED                                          syscall.Errno = 4060
+	PEERDIST_ERROR_OUT_OF_BOUNDS                                              syscall.Errno = 4061
+	PEERDIST_ERROR_VERSION_UNSUPPORTED                                        syscall.Errno = 4062
+	PEERDIST_ERROR_INVALID_CONFIGURATION                                      syscall.Errno = 4063
+	PEERDIST_ERROR_NOT_LICENSED                                               syscall.Errno = 4064
+	PEERDIST_ERROR_SERVICE_UNAVAILABLE                                        syscall.Errno = 4065
+	PEERDIST_ERROR_TRUST_FAILURE                                              syscall.Errno = 4066
+	ERROR_DHCP_ADDRESS_CONFLICT                                               syscall.Errno = 4100
+	ERROR_WMI_GUID_NOT_FOUND                                                  syscall.Errno = 4200
+	ERROR_WMI_INSTANCE_NOT_FOUND                                              syscall.Errno = 4201
+	ERROR_WMI_ITEMID_NOT_FOUND                                                syscall.Errno = 4202
+	ERROR_WMI_TRY_AGAIN                                                       syscall.Errno = 4203
+	ERROR_WMI_DP_NOT_FOUND                                                    syscall.Errno = 4204
+	ERROR_WMI_UNRESOLVED_INSTANCE_REF                                         syscall.Errno = 4205
+	ERROR_WMI_ALREADY_ENABLED                                                 syscall.Errno = 4206
+	ERROR_WMI_GUID_DISCONNECTED                                               syscall.Errno = 4207
+	ERROR_WMI_SERVER_UNAVAILABLE                                              syscall.Errno = 4208
+	ERROR_WMI_DP_FAILED                                                       syscall.Errno = 4209
+	ERROR_WMI_INVALID_MOF                                                     syscall.Errno = 4210
+	ERROR_WMI_INVALID_REGINFO                                                 syscall.Errno = 4211
+	ERROR_WMI_ALREADY_DISABLED                                                syscall.Errno = 4212
+	ERROR_WMI_READ_ONLY                                                       syscall.Errno = 4213
+	ERROR_WMI_SET_FAILURE                                                     syscall.Errno = 4214
+	ERROR_NOT_APPCONTAINER                                                    syscall.Errno = 4250
+	ERROR_APPCONTAINER_REQUIRED                                               syscall.Errno = 4251
+	ERROR_NOT_SUPPORTED_IN_APPCONTAINER                                       syscall.Errno = 4252
+	ERROR_INVALID_PACKAGE_SID_LENGTH                                          syscall.Errno = 4253
+	ERROR_INVALID_MEDIA                                                       syscall.Errno = 4300
+	ERROR_INVALID_LIBRARY                                                     syscall.Errno = 4301
+	ERROR_INVALID_MEDIA_POOL                                                  syscall.Errno = 4302
+	ERROR_DRIVE_MEDIA_MISMATCH                                                syscall.Errno = 4303
+	ERROR_MEDIA_OFFLINE                                                       syscall.Errno = 4304
+	ERROR_LIBRARY_OFFLINE                                                     syscall.Errno = 4305
+	ERROR_EMPTY                                                               syscall.Errno = 4306
+	ERROR_NOT_EMPTY                                                           syscall.Errno = 4307
+	ERROR_MEDIA_UNAVAILABLE                                                   syscall.Errno = 4308
+	ERROR_RESOURCE_DISABLED                                                   syscall.Errno = 4309
+	ERROR_INVALID_CLEANER                                                     syscall.Errno = 4310
+	ERROR_UNABLE_TO_CLEAN                                                     syscall.Errno = 4311
+	ERROR_OBJECT_NOT_FOUND                                                    syscall.Errno = 4312
+	ERROR_DATABASE_FAILURE                                                    syscall.Errno = 4313
+	ERROR_DATABASE_FULL                                                       syscall.Errno = 4314
+	ERROR_MEDIA_INCOMPATIBLE                                                  syscall.Errno = 4315
+	ERROR_RESOURCE_NOT_PRESENT                                                syscall.Errno = 4316
+	ERROR_INVALID_OPERATION                                                   syscall.Errno = 4317
+	ERROR_MEDIA_NOT_AVAILABLE                                                 syscall.Errno = 4318
+	ERROR_DEVICE_NOT_AVAILABLE                                                syscall.Errno = 4319
+	ERROR_REQUEST_REFUSED                                                     syscall.Errno = 4320
+	ERROR_INVALID_DRIVE_OBJECT                                                syscall.Errno = 4321
+	ERROR_LIBRARY_FULL                                                        syscall.Errno = 4322
+	ERROR_MEDIUM_NOT_ACCESSIBLE                                               syscall.Errno = 4323
+	ERROR_UNABLE_TO_LOAD_MEDIUM                                               syscall.Errno = 4324
+	ERROR_UNABLE_TO_INVENTORY_DRIVE                                           syscall.Errno = 4325
+	ERROR_UNABLE_TO_INVENTORY_SLOT                                            syscall.Errno = 4326
+	ERROR_UNABLE_TO_INVENTORY_TRANSPORT                                       syscall.Errno = 4327
+	ERROR_TRANSPORT_FULL                                                      syscall.Errno = 4328
+	ERROR_CONTROLLING_IEPORT                                                  syscall.Errno = 4329
+	ERROR_UNABLE_TO_EJECT_MOUNTED_MEDIA                                       syscall.Errno = 4330
+	ERROR_CLEANER_SLOT_SET                                                    syscall.Errno = 4331
+	ERROR_CLEANER_SLOT_NOT_SET                                                syscall.Errno = 4332
+	ERROR_CLEANER_CARTRIDGE_SPENT                                             syscall.Errno = 4333
+	ERROR_UNEXPECTED_OMID                                                     syscall.Errno = 4334
+	ERROR_CANT_DELETE_LAST_ITEM                                               syscall.Errno = 4335
+	ERROR_MESSAGE_EXCEEDS_MAX_SIZE                                            syscall.Errno = 4336
+	ERROR_VOLUME_CONTAINS_SYS_FILES                                           syscall.Errno = 4337
+	ERROR_INDIGENOUS_TYPE                                                     syscall.Errno = 4338
+	ERROR_NO_SUPPORTING_DRIVES                                                syscall.Errno = 4339
+	ERROR_CLEANER_CARTRIDGE_INSTALLED                                         syscall.Errno = 4340
+	ERROR_IEPORT_FULL                                                         syscall.Errno = 4341
+	ERROR_FILE_OFFLINE                                                        syscall.Errno = 4350
+	ERROR_REMOTE_STORAGE_NOT_ACTIVE                                           syscall.Errno = 4351
+	ERROR_REMOTE_STORAGE_MEDIA_ERROR                                          syscall.Errno = 4352
+	ERROR_NOT_A_REPARSE_POINT                                                 syscall.Errno = 4390
+	ERROR_REPARSE_ATTRIBUTE_CONFLICT                                          syscall.Errno = 4391
+	ERROR_INVALID_REPARSE_DATA                                                syscall.Errno = 4392
+	ERROR_REPARSE_TAG_INVALID                                                 syscall.Errno = 4393
+	ERROR_REPARSE_TAG_MISMATCH                                                syscall.Errno = 4394
+	ERROR_REPARSE_POINT_ENCOUNTERED                                           syscall.Errno = 4395
+	ERROR_APP_DATA_NOT_FOUND                                                  syscall.Errno = 4400
+	ERROR_APP_DATA_EXPIRED                                                    syscall.Errno = 4401
+	ERROR_APP_DATA_CORRUPT                                                    syscall.Errno = 4402
+	ERROR_APP_DATA_LIMIT_EXCEEDED                                             syscall.Errno = 4403
+	ERROR_APP_DATA_REBOOT_REQUIRED                                            syscall.Errno = 4404
+	ERROR_SECUREBOOT_ROLLBACK_DETECTED                                        syscall.Errno = 4420
+	ERROR_SECUREBOOT_POLICY_VIOLATION                                         syscall.Errno = 4421
+	ERROR_SECUREBOOT_INVALID_POLICY                                           syscall.Errno = 4422
+	ERROR_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND                               syscall.Errno = 4423
+	ERROR_SECUREBOOT_POLICY_NOT_SIGNED                                        syscall.Errno = 4424
+	ERROR_SECUREBOOT_NOT_ENABLED                                              syscall.Errno = 4425
+	ERROR_SECUREBOOT_FILE_REPLACED                                            syscall.Errno = 4426
+	ERROR_SECUREBOOT_POLICY_NOT_AUTHORIZED                                    syscall.Errno = 4427
+	ERROR_SECUREBOOT_POLICY_UNKNOWN                                           syscall.Errno = 4428
+	ERROR_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION                       syscall.Errno = 4429
+	ERROR_SECUREBOOT_PLATFORM_ID_MISMATCH                                     syscall.Errno = 4430
+	ERROR_SECUREBOOT_POLICY_ROLLBACK_DETECTED                                 syscall.Errno = 4431
+	ERROR_SECUREBOOT_POLICY_UPGRADE_MISMATCH                                  syscall.Errno = 4432
+	ERROR_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING                             syscall.Errno = 4433
+	ERROR_SECUREBOOT_NOT_BASE_POLICY                                          syscall.Errno = 4434
+	ERROR_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY                                  syscall.Errno = 4435
+	ERROR_OFFLOAD_READ_FLT_NOT_SUPPORTED                                      syscall.Errno = 4440
+	ERROR_OFFLOAD_WRITE_FLT_NOT_SUPPORTED                                     syscall.Errno = 4441
+	ERROR_OFFLOAD_READ_FILE_NOT_SUPPORTED                                     syscall.Errno = 4442
+	ERROR_OFFLOAD_WRITE_FILE_NOT_SUPPORTED                                    syscall.Errno = 4443
+	ERROR_ALREADY_HAS_STREAM_ID                                               syscall.Errno = 4444
+	ERROR_SMR_GARBAGE_COLLECTION_REQUIRED                                     syscall.Errno = 4445
+	ERROR_WOF_WIM_HEADER_CORRUPT                                              syscall.Errno = 4446
+	ERROR_WOF_WIM_RESOURCE_TABLE_CORRUPT                                      syscall.Errno = 4447
+	ERROR_WOF_FILE_RESOURCE_TABLE_CORRUPT                                     syscall.Errno = 4448
+	ERROR_VOLUME_NOT_SIS_ENABLED                                              syscall.Errno = 4500
+	ERROR_SYSTEM_INTEGRITY_ROLLBACK_DETECTED                                  syscall.Errno = 4550
+	ERROR_SYSTEM_INTEGRITY_POLICY_VIOLATION                                   syscall.Errno = 4551
+	ERROR_SYSTEM_INTEGRITY_INVALID_POLICY                                     syscall.Errno = 4552
+	ERROR_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED                                  syscall.Errno = 4553
+	ERROR_SYSTEM_INTEGRITY_TOO_MANY_POLICIES                                  syscall.Errno = 4554
+	ERROR_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED                 syscall.Errno = 4555
+	ERROR_VSM_NOT_INITIALIZED                                                 syscall.Errno = 4560
+	ERROR_VSM_DMA_PROTECTION_NOT_IN_USE                                       syscall.Errno = 4561
+	ERROR_PLATFORM_MANIFEST_NOT_AUTHORIZED                                    syscall.Errno = 4570
+	ERROR_PLATFORM_MANIFEST_INVALID                                           syscall.Errno = 4571
+	ERROR_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED                               syscall.Errno = 4572
+	ERROR_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED                            syscall.Errno = 4573
+	ERROR_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND                               syscall.Errno = 4574
+	ERROR_PLATFORM_MANIFEST_NOT_ACTIVE                                        syscall.Errno = 4575
+	ERROR_PLATFORM_MANIFEST_NOT_SIGNED                                        syscall.Errno = 4576
+	ERROR_DEPENDENT_RESOURCE_EXISTS                                           syscall.Errno = 5001
+	ERROR_DEPENDENCY_NOT_FOUND                                                syscall.Errno = 5002
+	ERROR_DEPENDENCY_ALREADY_EXISTS                                           syscall.Errno = 5003
+	ERROR_RESOURCE_NOT_ONLINE                                                 syscall.Errno = 5004
+	ERROR_HOST_NODE_NOT_AVAILABLE                                             syscall.Errno = 5005
+	ERROR_RESOURCE_NOT_AVAILABLE                                              syscall.Errno = 5006
+	ERROR_RESOURCE_NOT_FOUND                                                  syscall.Errno = 5007
+	ERROR_SHUTDOWN_CLUSTER                                                    syscall.Errno = 5008
+	ERROR_CANT_EVICT_ACTIVE_NODE                                              syscall.Errno = 5009
+	ERROR_OBJECT_ALREADY_EXISTS                                               syscall.Errno = 5010
+	ERROR_OBJECT_IN_LIST                                                      syscall.Errno = 5011
+	ERROR_GROUP_NOT_AVAILABLE                                                 syscall.Errno = 5012
+	ERROR_GROUP_NOT_FOUND                                                     syscall.Errno = 5013
+	ERROR_GROUP_NOT_ONLINE                                                    syscall.Errno = 5014
+	ERROR_HOST_NODE_NOT_RESOURCE_OWNER                                        syscall.Errno = 5015
+	ERROR_HOST_NODE_NOT_GROUP_OWNER                                           syscall.Errno = 5016
+	ERROR_RESMON_CREATE_FAILED                                                syscall.Errno = 5017
+	ERROR_RESMON_ONLINE_FAILED                                                syscall.Errno = 5018
+	ERROR_RESOURCE_ONLINE                                                     syscall.Errno = 5019
+	ERROR_QUORUM_RESOURCE                                                     syscall.Errno = 5020
+	ERROR_NOT_QUORUM_CAPABLE                                                  syscall.Errno = 5021
+	ERROR_CLUSTER_SHUTTING_DOWN                                               syscall.Errno = 5022
+	ERROR_INVALID_STATE                                                       syscall.Errno = 5023
+	ERROR_RESOURCE_PROPERTIES_STORED                                          syscall.Errno = 5024
+	ERROR_NOT_QUORUM_CLASS                                                    syscall.Errno = 5025
+	ERROR_CORE_RESOURCE                                                       syscall.Errno = 5026
+	ERROR_QUORUM_RESOURCE_ONLINE_FAILED                                       syscall.Errno = 5027
+	ERROR_QUORUMLOG_OPEN_FAILED                                               syscall.Errno = 5028
+	ERROR_CLUSTERLOG_CORRUPT                                                  syscall.Errno = 5029
+	ERROR_CLUSTERLOG_RECORD_EXCEEDS_MAXSIZE                                   syscall.Errno = 5030
+	ERROR_CLUSTERLOG_EXCEEDS_MAXSIZE                                          syscall.Errno = 5031
+	ERROR_CLUSTERLOG_CHKPOINT_NOT_FOUND                                       syscall.Errno = 5032
+	ERROR_CLUSTERLOG_NOT_ENOUGH_SPACE                                         syscall.Errno = 5033
+	ERROR_QUORUM_OWNER_ALIVE                                                  syscall.Errno = 5034
+	ERROR_NETWORK_NOT_AVAILABLE                                               syscall.Errno = 5035
+	ERROR_NODE_NOT_AVAILABLE                                                  syscall.Errno = 5036
+	ERROR_ALL_NODES_NOT_AVAILABLE                                             syscall.Errno = 5037
+	ERROR_RESOURCE_FAILED                                                     syscall.Errno = 5038
+	ERROR_CLUSTER_INVALID_NODE                                                syscall.Errno = 5039
+	ERROR_CLUSTER_NODE_EXISTS                                                 syscall.Errno = 5040
+	ERROR_CLUSTER_JOIN_IN_PROGRESS                                            syscall.Errno = 5041
+	ERROR_CLUSTER_NODE_NOT_FOUND                                              syscall.Errno = 5042
+	ERROR_CLUSTER_LOCAL_NODE_NOT_FOUND                                        syscall.Errno = 5043
+	ERROR_CLUSTER_NETWORK_EXISTS                                              syscall.Errno = 5044
+	ERROR_CLUSTER_NETWORK_NOT_FOUND                                           syscall.Errno = 5045
+	ERROR_CLUSTER_NETINTERFACE_EXISTS                                         syscall.Errno = 5046
+	ERROR_CLUSTER_NETINTERFACE_NOT_FOUND                                      syscall.Errno = 5047
+	ERROR_CLUSTER_INVALID_REQUEST                                             syscall.Errno = 5048
+	ERROR_CLUSTER_INVALID_NETWORK_PROVIDER                                    syscall.Errno = 5049
+	ERROR_CLUSTER_NODE_DOWN                                                   syscall.Errno = 5050
+	ERROR_CLUSTER_NODE_UNREACHABLE                                            syscall.Errno = 5051
+	ERROR_CLUSTER_NODE_NOT_MEMBER                                             syscall.Errno = 5052
+	ERROR_CLUSTER_JOIN_NOT_IN_PROGRESS                                        syscall.Errno = 5053
+	ERROR_CLUSTER_INVALID_NETWORK                                             syscall.Errno = 5054
+	ERROR_CLUSTER_NODE_UP                                                     syscall.Errno = 5056
+	ERROR_CLUSTER_IPADDR_IN_USE                                               syscall.Errno = 5057
+	ERROR_CLUSTER_NODE_NOT_PAUSED                                             syscall.Errno = 5058
+	ERROR_CLUSTER_NO_SECURITY_CONTEXT                                         syscall.Errno = 5059
+	ERROR_CLUSTER_NETWORK_NOT_INTERNAL                                        syscall.Errno = 5060
+	ERROR_CLUSTER_NODE_ALREADY_UP                                             syscall.Errno = 5061
+	ERROR_CLUSTER_NODE_ALREADY_DOWN                                           syscall.Errno = 5062
+	ERROR_CLUSTER_NETWORK_ALREADY_ONLINE                                      syscall.Errno = 5063
+	ERROR_CLUSTER_NETWORK_ALREADY_OFFLINE                                     syscall.Errno = 5064
+	ERROR_CLUSTER_NODE_ALREADY_MEMBER                                         syscall.Errno = 5065
+	ERROR_CLUSTER_LAST_INTERNAL_NETWORK                                       syscall.Errno = 5066
+	ERROR_CLUSTER_NETWORK_HAS_DEPENDENTS                                      syscall.Errno = 5067
+	ERROR_INVALID_OPERATION_ON_QUORUM                                         syscall.Errno = 5068
+	ERROR_DEPENDENCY_NOT_ALLOWED                                              syscall.Errno = 5069
+	ERROR_CLUSTER_NODE_PAUSED                                                 syscall.Errno = 5070
+	ERROR_NODE_CANT_HOST_RESOURCE                                             syscall.Errno = 5071
+	ERROR_CLUSTER_NODE_NOT_READY                                              syscall.Errno = 5072
+	ERROR_CLUSTER_NODE_SHUTTING_DOWN                                          syscall.Errno = 5073
+	ERROR_CLUSTER_JOIN_ABORTED                                                syscall.Errno = 5074
+	ERROR_CLUSTER_INCOMPATIBLE_VERSIONS                                       syscall.Errno = 5075
+	ERROR_CLUSTER_MAXNUM_OF_RESOURCES_EXCEEDED                                syscall.Errno = 5076
+	ERROR_CLUSTER_SYSTEM_CONFIG_CHANGED                                       syscall.Errno = 5077
+	ERROR_CLUSTER_RESOURCE_TYPE_NOT_FOUND                                     syscall.Errno = 5078
+	ERROR_CLUSTER_RESTYPE_NOT_SUPPORTED                                       syscall.Errno = 5079
+	ERROR_CLUSTER_RESNAME_NOT_FOUND                                           syscall.Errno = 5080
+	ERROR_CLUSTER_NO_RPC_PACKAGES_REGISTERED                                  syscall.Errno = 5081
+	ERROR_CLUSTER_OWNER_NOT_IN_PREFLIST                                       syscall.Errno = 5082
+	ERROR_CLUSTER_DATABASE_SEQMISMATCH                                        syscall.Errno = 5083
+	ERROR_RESMON_INVALID_STATE                                                syscall.Errno = 5084
+	ERROR_CLUSTER_GUM_NOT_LOCKER                                              syscall.Errno = 5085
+	ERROR_QUORUM_DISK_NOT_FOUND                                               syscall.Errno = 5086
+	ERROR_DATABASE_BACKUP_CORRUPT                                             syscall.Errno = 5087
+	ERROR_CLUSTER_NODE_ALREADY_HAS_DFS_ROOT                                   syscall.Errno = 5088
+	ERROR_RESOURCE_PROPERTY_UNCHANGEABLE                                      syscall.Errno = 5089
+	ERROR_NO_ADMIN_ACCESS_POINT                                               syscall.Errno = 5090
+	ERROR_CLUSTER_MEMBERSHIP_INVALID_STATE                                    syscall.Errno = 5890
+	ERROR_CLUSTER_QUORUMLOG_NOT_FOUND                                         syscall.Errno = 5891
+	ERROR_CLUSTER_MEMBERSHIP_HALT                                             syscall.Errno = 5892
+	ERROR_CLUSTER_INSTANCE_ID_MISMATCH                                        syscall.Errno = 5893
+	ERROR_CLUSTER_NETWORK_NOT_FOUND_FOR_IP                                    syscall.Errno = 5894
+	ERROR_CLUSTER_PROPERTY_DATA_TYPE_MISMATCH                                 syscall.Errno = 5895
+	ERROR_CLUSTER_EVICT_WITHOUT_CLEANUP                                       syscall.Errno = 5896
+	ERROR_CLUSTER_PARAMETER_MISMATCH                                          syscall.Errno = 5897
+	ERROR_NODE_CANNOT_BE_CLUSTERED                                            syscall.Errno = 5898
+	ERROR_CLUSTER_WRONG_OS_VERSION                                            syscall.Errno = 5899
+	ERROR_CLUSTER_CANT_CREATE_DUP_CLUSTER_NAME                                syscall.Errno = 5900
+	ERROR_CLUSCFG_ALREADY_COMMITTED                                           syscall.Errno = 5901
+	ERROR_CLUSCFG_ROLLBACK_FAILED                                             syscall.Errno = 5902
+	ERROR_CLUSCFG_SYSTEM_DISK_DRIVE_LETTER_CONFLICT                           syscall.Errno = 5903
+	ERROR_CLUSTER_OLD_VERSION                                                 syscall.Errno = 5904
+	ERROR_CLUSTER_MISMATCHED_COMPUTER_ACCT_NAME                               syscall.Errno = 5905
+	ERROR_CLUSTER_NO_NET_ADAPTERS                                             syscall.Errno = 5906
+	ERROR_CLUSTER_POISONED                                                    syscall.Errno = 5907
+	ERROR_CLUSTER_GROUP_MOVING                                                syscall.Errno = 5908
+	ERROR_CLUSTER_RESOURCE_TYPE_BUSY                                          syscall.Errno = 5909
+	ERROR_RESOURCE_CALL_TIMED_OUT                                             syscall.Errno = 5910
+	ERROR_INVALID_CLUSTER_IPV6_ADDRESS                                        syscall.Errno = 5911
+	ERROR_CLUSTER_INTERNAL_INVALID_FUNCTION                                   syscall.Errno = 5912
+	ERROR_CLUSTER_PARAMETER_OUT_OF_BOUNDS                                     syscall.Errno = 5913
+	ERROR_CLUSTER_PARTIAL_SEND                                                syscall.Errno = 5914
+	ERROR_CLUSTER_REGISTRY_INVALID_FUNCTION                                   syscall.Errno = 5915
+	ERROR_CLUSTER_INVALID_STRING_TERMINATION                                  syscall.Errno = 5916
+	ERROR_CLUSTER_INVALID_STRING_FORMAT                                       syscall.Errno = 5917
+	ERROR_CLUSTER_DATABASE_TRANSACTION_IN_PROGRESS                            syscall.Errno = 5918
+	ERROR_CLUSTER_DATABASE_TRANSACTION_NOT_IN_PROGRESS                        syscall.Errno = 5919
+	ERROR_CLUSTER_NULL_DATA                                                   syscall.Errno = 5920
+	ERROR_CLUSTER_PARTIAL_READ                                                syscall.Errno = 5921
+	ERROR_CLUSTER_PARTIAL_WRITE                                               syscall.Errno = 5922
+	ERROR_CLUSTER_CANT_DESERIALIZE_DATA                                       syscall.Errno = 5923
+	ERROR_DEPENDENT_RESOURCE_PROPERTY_CONFLICT                                syscall.Errno = 5924
+	ERROR_CLUSTER_NO_QUORUM                                                   syscall.Errno = 5925
+	ERROR_CLUSTER_INVALID_IPV6_NETWORK                                        syscall.Errno = 5926
+	ERROR_CLUSTER_INVALID_IPV6_TUNNEL_NETWORK                                 syscall.Errno = 5927
+	ERROR_QUORUM_NOT_ALLOWED_IN_THIS_GROUP                                    syscall.Errno = 5928
+	ERROR_DEPENDENCY_TREE_TOO_COMPLEX                                         syscall.Errno = 5929
+	ERROR_EXCEPTION_IN_RESOURCE_CALL                                          syscall.Errno = 5930
+	ERROR_CLUSTER_RHS_FAILED_INITIALIZATION                                   syscall.Errno = 5931
+	ERROR_CLUSTER_NOT_INSTALLED                                               syscall.Errno = 5932
+	ERROR_CLUSTER_RESOURCES_MUST_BE_ONLINE_ON_THE_SAME_NODE                   syscall.Errno = 5933
+	ERROR_CLUSTER_MAX_NODES_IN_CLUSTER                                        syscall.Errno = 5934
+	ERROR_CLUSTER_TOO_MANY_NODES                                              syscall.Errno = 5935
+	ERROR_CLUSTER_OBJECT_ALREADY_USED                                         syscall.Errno = 5936
+	ERROR_NONCORE_GROUPS_FOUND                                                syscall.Errno = 5937
+	ERROR_FILE_SHARE_RESOURCE_CONFLICT                                        syscall.Errno = 5938
+	ERROR_CLUSTER_EVICT_INVALID_REQUEST                                       syscall.Errno = 5939
+	ERROR_CLUSTER_SINGLETON_RESOURCE                                          syscall.Errno = 5940
+	ERROR_CLUSTER_GROUP_SINGLETON_RESOURCE                                    syscall.Errno = 5941
+	ERROR_CLUSTER_RESOURCE_PROVIDER_FAILED                                    syscall.Errno = 5942
+	ERROR_CLUSTER_RESOURCE_CONFIGURATION_ERROR                                syscall.Errno = 5943
+	ERROR_CLUSTER_GROUP_BUSY                                                  syscall.Errno = 5944
+	ERROR_CLUSTER_NOT_SHARED_VOLUME                                           syscall.Errno = 5945
+	ERROR_CLUSTER_INVALID_SECURITY_DESCRIPTOR                                 syscall.Errno = 5946
+	ERROR_CLUSTER_SHARED_VOLUMES_IN_USE                                       syscall.Errno = 5947
+	ERROR_CLUSTER_USE_SHARED_VOLUMES_API                                      syscall.Errno = 5948
+	ERROR_CLUSTER_BACKUP_IN_PROGRESS                                          syscall.Errno = 5949
+	ERROR_NON_CSV_PATH                                                        syscall.Errno = 5950
+	ERROR_CSV_VOLUME_NOT_LOCAL                                                syscall.Errno = 5951
+	ERROR_CLUSTER_WATCHDOG_TERMINATING                                        syscall.Errno = 5952
+	ERROR_CLUSTER_RESOURCE_VETOED_MOVE_INCOMPATIBLE_NODES                     syscall.Errno = 5953
+	ERROR_CLUSTER_INVALID_NODE_WEIGHT                                         syscall.Errno = 5954
+	ERROR_CLUSTER_RESOURCE_VETOED_CALL                                        syscall.Errno = 5955
+	ERROR_RESMON_SYSTEM_RESOURCES_LACKING                                     syscall.Errno = 5956
+	ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_DESTINATION    syscall.Errno = 5957
+	ERROR_CLUSTER_RESOURCE_VETOED_MOVE_NOT_ENOUGH_RESOURCES_ON_SOURCE         syscall.Errno = 5958
+	ERROR_CLUSTER_GROUP_QUEUED                                                syscall.Errno = 5959
+	ERROR_CLUSTER_RESOURCE_LOCKED_STATUS                                      syscall.Errno = 5960
+	ERROR_CLUSTER_SHARED_VOLUME_FAILOVER_NOT_ALLOWED                          syscall.Errno = 5961
+	ERROR_CLUSTER_NODE_DRAIN_IN_PROGRESS                                      syscall.Errno = 5962
+	ERROR_CLUSTER_DISK_NOT_CONNECTED                                          syscall.Errno = 5963
+	ERROR_DISK_NOT_CSV_CAPABLE                                                syscall.Errno = 5964
+	ERROR_RESOURCE_NOT_IN_AVAILABLE_STORAGE                                   syscall.Errno = 5965
+	ERROR_CLUSTER_SHARED_VOLUME_REDIRECTED                                    syscall.Errno = 5966
+	ERROR_CLUSTER_SHARED_VOLUME_NOT_REDIRECTED                                syscall.Errno = 5967
+	ERROR_CLUSTER_CANNOT_RETURN_PROPERTIES                                    syscall.Errno = 5968
+	ERROR_CLUSTER_RESOURCE_CONTAINS_UNSUPPORTED_DIFF_AREA_FOR_SHARED_VOLUMES  syscall.Errno = 5969
+	ERROR_CLUSTER_RESOURCE_IS_IN_MAINTENANCE_MODE                             syscall.Errno = 5970
+	ERROR_CLUSTER_AFFINITY_CONFLICT                                           syscall.Errno = 5971
+	ERROR_CLUSTER_RESOURCE_IS_REPLICA_VIRTUAL_MACHINE                         syscall.Errno = 5972
+	ERROR_CLUSTER_UPGRADE_INCOMPATIBLE_VERSIONS                               syscall.Errno = 5973
+	ERROR_CLUSTER_UPGRADE_FIX_QUORUM_NOT_SUPPORTED                            syscall.Errno = 5974
+	ERROR_CLUSTER_UPGRADE_RESTART_REQUIRED                                    syscall.Errno = 5975
+	ERROR_CLUSTER_UPGRADE_IN_PROGRESS                                         syscall.Errno = 5976
+	ERROR_CLUSTER_UPGRADE_INCOMPLETE                                          syscall.Errno = 5977
+	ERROR_CLUSTER_NODE_IN_GRACE_PERIOD                                        syscall.Errno = 5978
+	ERROR_CLUSTER_CSV_IO_PAUSE_TIMEOUT                                        syscall.Errno = 5979
+	ERROR_NODE_NOT_ACTIVE_CLUSTER_MEMBER                                      syscall.Errno = 5980
+	ERROR_CLUSTER_RESOURCE_NOT_MONITORED                                      syscall.Errno = 5981
+	ERROR_CLUSTER_RESOURCE_DOES_NOT_SUPPORT_UNMONITORED                       syscall.Errno = 5982
+	ERROR_CLUSTER_RESOURCE_IS_REPLICATED                                      syscall.Errno = 5983
+	ERROR_CLUSTER_NODE_ISOLATED                                               syscall.Errno = 5984
+	ERROR_CLUSTER_NODE_QUARANTINED                                            syscall.Errno = 5985
+	ERROR_CLUSTER_DATABASE_UPDATE_CONDITION_FAILED                            syscall.Errno = 5986
+	ERROR_CLUSTER_SPACE_DEGRADED                                              syscall.Errno = 5987
+	ERROR_CLUSTER_TOKEN_DELEGATION_NOT_SUPPORTED                              syscall.Errno = 5988
+	ERROR_CLUSTER_CSV_INVALID_HANDLE                                          syscall.Errno = 5989
+	ERROR_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR                           syscall.Errno = 5990
+	ERROR_GROUPSET_NOT_AVAILABLE                                              syscall.Errno = 5991
+	ERROR_GROUPSET_NOT_FOUND                                                  syscall.Errno = 5992
+	ERROR_GROUPSET_CANT_PROVIDE                                               syscall.Errno = 5993
+	ERROR_CLUSTER_FAULT_DOMAIN_PARENT_NOT_FOUND                               syscall.Errno = 5994
+	ERROR_CLUSTER_FAULT_DOMAIN_INVALID_HIERARCHY                              syscall.Errno = 5995
+	ERROR_CLUSTER_FAULT_DOMAIN_FAILED_S2D_VALIDATION                          syscall.Errno = 5996
+	ERROR_CLUSTER_FAULT_DOMAIN_S2D_CONNECTIVITY_LOSS                          syscall.Errno = 5997
+	ERROR_CLUSTER_INVALID_INFRASTRUCTURE_FILESERVER_NAME                      syscall.Errno = 5998
+	ERROR_CLUSTERSET_MANAGEMENT_CLUSTER_UNREACHABLE                           syscall.Errno = 5999
+	ERROR_ENCRYPTION_FAILED                                                   syscall.Errno = 6000
+	ERROR_DECRYPTION_FAILED                                                   syscall.Errno = 6001
+	ERROR_FILE_ENCRYPTED                                                      syscall.Errno = 6002
+	ERROR_NO_RECOVERY_POLICY                                                  syscall.Errno = 6003
+	ERROR_NO_EFS                                                              syscall.Errno = 6004
+	ERROR_WRONG_EFS                                                           syscall.Errno = 6005
+	ERROR_NO_USER_KEYS                                                        syscall.Errno = 6006
+	ERROR_FILE_NOT_ENCRYPTED                                                  syscall.Errno = 6007
+	ERROR_NOT_EXPORT_FORMAT                                                   syscall.Errno = 6008
+	ERROR_FILE_READ_ONLY                                                      syscall.Errno = 6009
+	ERROR_DIR_EFS_DISALLOWED                                                  syscall.Errno = 6010
+	ERROR_EFS_SERVER_NOT_TRUSTED                                              syscall.Errno = 6011
+	ERROR_BAD_RECOVERY_POLICY                                                 syscall.Errno = 6012
+	ERROR_EFS_ALG_BLOB_TOO_BIG                                                syscall.Errno = 6013
+	ERROR_VOLUME_NOT_SUPPORT_EFS                                              syscall.Errno = 6014
+	ERROR_EFS_DISABLED                                                        syscall.Errno = 6015
+	ERROR_EFS_VERSION_NOT_SUPPORT                                             syscall.Errno = 6016
+	ERROR_CS_ENCRYPTION_INVALID_SERVER_RESPONSE                               syscall.Errno = 6017
+	ERROR_CS_ENCRYPTION_UNSUPPORTED_SERVER                                    syscall.Errno = 6018
+	ERROR_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE                               syscall.Errno = 6019
+	ERROR_CS_ENCRYPTION_NEW_ENCRYPTED_FILE                                    syscall.Errno = 6020
+	ERROR_CS_ENCRYPTION_FILE_NOT_CSE                                          syscall.Errno = 6021
+	ERROR_ENCRYPTION_POLICY_DENIES_OPERATION                                  syscall.Errno = 6022
+	ERROR_WIP_ENCRYPTION_FAILED                                               syscall.Errno = 6023
+	ERROR_NO_BROWSER_SERVERS_FOUND                                            syscall.Errno = 6118
+	SCHED_E_SERVICE_NOT_LOCALSYSTEM                                           syscall.Errno = 6200
+	ERROR_LOG_SECTOR_INVALID                                                  syscall.Errno = 6600
+	ERROR_LOG_SECTOR_PARITY_INVALID                                           syscall.Errno = 6601
+	ERROR_LOG_SECTOR_REMAPPED                                                 syscall.Errno = 6602
+	ERROR_LOG_BLOCK_INCOMPLETE                                                syscall.Errno = 6603
+	ERROR_LOG_INVALID_RANGE                                                   syscall.Errno = 6604
+	ERROR_LOG_BLOCKS_EXHAUSTED                                                syscall.Errno = 6605
+	ERROR_LOG_READ_CONTEXT_INVALID                                            syscall.Errno = 6606
+	ERROR_LOG_RESTART_INVALID                                                 syscall.Errno = 6607
+	ERROR_LOG_BLOCK_VERSION                                                   syscall.Errno = 6608
+	ERROR_LOG_BLOCK_INVALID                                                   syscall.Errno = 6609
+	ERROR_LOG_READ_MODE_INVALID                                               syscall.Errno = 6610
+	ERROR_LOG_NO_RESTART                                                      syscall.Errno = 6611
+	ERROR_LOG_METADATA_CORRUPT                                                syscall.Errno = 6612
+	ERROR_LOG_METADATA_INVALID                                                syscall.Errno = 6613
+	ERROR_LOG_METADATA_INCONSISTENT                                           syscall.Errno = 6614
+	ERROR_LOG_RESERVATION_INVALID                                             syscall.Errno = 6615
+	ERROR_LOG_CANT_DELETE                                                     syscall.Errno = 6616
+	ERROR_LOG_CONTAINER_LIMIT_EXCEEDED                                        syscall.Errno = 6617
+	ERROR_LOG_START_OF_LOG                                                    syscall.Errno = 6618
+	ERROR_LOG_POLICY_ALREADY_INSTALLED                                        syscall.Errno = 6619
+	ERROR_LOG_POLICY_NOT_INSTALLED                                            syscall.Errno = 6620
+	ERROR_LOG_POLICY_INVALID                                                  syscall.Errno = 6621
+	ERROR_LOG_POLICY_CONFLICT                                                 syscall.Errno = 6622
+	ERROR_LOG_PINNED_ARCHIVE_TAIL                                             syscall.Errno = 6623
+	ERROR_LOG_RECORD_NONEXISTENT                                              syscall.Errno = 6624
+	ERROR_LOG_RECORDS_RESERVED_INVALID                                        syscall.Errno = 6625
+	ERROR_LOG_SPACE_RESERVED_INVALID                                          syscall.Errno = 6626
+	ERROR_LOG_TAIL_INVALID                                                    syscall.Errno = 6627
+	ERROR_LOG_FULL                                                            syscall.Errno = 6628
+	ERROR_COULD_NOT_RESIZE_LOG                                                syscall.Errno = 6629
+	ERROR_LOG_MULTIPLEXED                                                     syscall.Errno = 6630
+	ERROR_LOG_DEDICATED                                                       syscall.Errno = 6631
+	ERROR_LOG_ARCHIVE_NOT_IN_PROGRESS                                         syscall.Errno = 6632
+	ERROR_LOG_ARCHIVE_IN_PROGRESS                                             syscall.Errno = 6633
+	ERROR_LOG_EPHEMERAL                                                       syscall.Errno = 6634
+	ERROR_LOG_NOT_ENOUGH_CONTAINERS                                           syscall.Errno = 6635
+	ERROR_LOG_CLIENT_ALREADY_REGISTERED                                       syscall.Errno = 6636
+	ERROR_LOG_CLIENT_NOT_REGISTERED                                           syscall.Errno = 6637
+	ERROR_LOG_FULL_HANDLER_IN_PROGRESS                                        syscall.Errno = 6638
+	ERROR_LOG_CONTAINER_READ_FAILED                                           syscall.Errno = 6639
+	ERROR_LOG_CONTAINER_WRITE_FAILED                                          syscall.Errno = 6640
+	ERROR_LOG_CONTAINER_OPEN_FAILED                                           syscall.Errno = 6641
+	ERROR_LOG_CONTAINER_STATE_INVALID                                         syscall.Errno = 6642
+	ERROR_LOG_STATE_INVALID                                                   syscall.Errno = 6643
+	ERROR_LOG_PINNED                                                          syscall.Errno = 6644
+	ERROR_LOG_METADATA_FLUSH_FAILED                                           syscall.Errno = 6645
+	ERROR_LOG_INCONSISTENT_SECURITY                                           syscall.Errno = 6646
+	ERROR_LOG_APPENDED_FLUSH_FAILED                                           syscall.Errno = 6647
+	ERROR_LOG_PINNED_RESERVATION                                              syscall.Errno = 6648
+	ERROR_INVALID_TRANSACTION                                                 syscall.Errno = 6700
+	ERROR_TRANSACTION_NOT_ACTIVE                                              syscall.Errno = 6701
+	ERROR_TRANSACTION_REQUEST_NOT_VALID                                       syscall.Errno = 6702
+	ERROR_TRANSACTION_NOT_REQUESTED                                           syscall.Errno = 6703
+	ERROR_TRANSACTION_ALREADY_ABORTED                                         syscall.Errno = 6704
+	ERROR_TRANSACTION_ALREADY_COMMITTED                                       syscall.Errno = 6705
+	ERROR_TM_INITIALIZATION_FAILED                                            syscall.Errno = 6706
+	ERROR_RESOURCEMANAGER_READ_ONLY                                           syscall.Errno = 6707
+	ERROR_TRANSACTION_NOT_JOINED                                              syscall.Errno = 6708
+	ERROR_TRANSACTION_SUPERIOR_EXISTS                                         syscall.Errno = 6709
+	ERROR_CRM_PROTOCOL_ALREADY_EXISTS                                         syscall.Errno = 6710
+	ERROR_TRANSACTION_PROPAGATION_FAILED                                      syscall.Errno = 6711
+	ERROR_CRM_PROTOCOL_NOT_FOUND                                              syscall.Errno = 6712
+	ERROR_TRANSACTION_INVALID_MARSHALL_BUFFER                                 syscall.Errno = 6713
+	ERROR_CURRENT_TRANSACTION_NOT_VALID                                       syscall.Errno = 6714
+	ERROR_TRANSACTION_NOT_FOUND                                               syscall.Errno = 6715
+	ERROR_RESOURCEMANAGER_NOT_FOUND                                           syscall.Errno = 6716
+	ERROR_ENLISTMENT_NOT_FOUND                                                syscall.Errno = 6717
+	ERROR_TRANSACTIONMANAGER_NOT_FOUND                                        syscall.Errno = 6718
+	ERROR_TRANSACTIONMANAGER_NOT_ONLINE                                       syscall.Errno = 6719
+	ERROR_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION                          syscall.Errno = 6720
+	ERROR_TRANSACTION_NOT_ROOT                                                syscall.Errno = 6721
+	ERROR_TRANSACTION_OBJECT_EXPIRED                                          syscall.Errno = 6722
+	ERROR_TRANSACTION_RESPONSE_NOT_ENLISTED                                   syscall.Errno = 6723
+	ERROR_TRANSACTION_RECORD_TOO_LONG                                         syscall.Errno = 6724
+	ERROR_IMPLICIT_TRANSACTION_NOT_SUPPORTED                                  syscall.Errno = 6725
+	ERROR_TRANSACTION_INTEGRITY_VIOLATED                                      syscall.Errno = 6726
+	ERROR_TRANSACTIONMANAGER_IDENTITY_MISMATCH                                syscall.Errno = 6727
+	ERROR_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT                                    syscall.Errno = 6728
+	ERROR_TRANSACTION_MUST_WRITETHROUGH                                       syscall.Errno = 6729
+	ERROR_TRANSACTION_NO_SUPERIOR                                             syscall.Errno = 6730
+	ERROR_HEURISTIC_DAMAGE_POSSIBLE                                           syscall.Errno = 6731
+	ERROR_TRANSACTIONAL_CONFLICT                                              syscall.Errno = 6800
+	ERROR_RM_NOT_ACTIVE                                                       syscall.Errno = 6801
+	ERROR_RM_METADATA_CORRUPT                                                 syscall.Errno = 6802
+	ERROR_DIRECTORY_NOT_RM                                                    syscall.Errno = 6803
+	ERROR_TRANSACTIONS_UNSUPPORTED_REMOTE                                     syscall.Errno = 6805
+	ERROR_LOG_RESIZE_INVALID_SIZE                                             syscall.Errno = 6806
+	ERROR_OBJECT_NO_LONGER_EXISTS                                             syscall.Errno = 6807
+	ERROR_STREAM_MINIVERSION_NOT_FOUND                                        syscall.Errno = 6808
+	ERROR_STREAM_MINIVERSION_NOT_VALID                                        syscall.Errno = 6809
+	ERROR_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION                 syscall.Errno = 6810
+	ERROR_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT                            syscall.Errno = 6811
+	ERROR_CANT_CREATE_MORE_STREAM_MINIVERSIONS                                syscall.Errno = 6812
+	ERROR_REMOTE_FILE_VERSION_MISMATCH                                        syscall.Errno = 6814
+	ERROR_HANDLE_NO_LONGER_VALID                                              syscall.Errno = 6815
+	ERROR_NO_TXF_METADATA                                                     syscall.Errno = 6816
+	ERROR_LOG_CORRUPTION_DETECTED                                             syscall.Errno = 6817
+	ERROR_CANT_RECOVER_WITH_HANDLE_OPEN                                       syscall.Errno = 6818
+	ERROR_RM_DISCONNECTED                                                     syscall.Errno = 6819
+	ERROR_ENLISTMENT_NOT_SUPERIOR                                             syscall.Errno = 6820
+	ERROR_RECOVERY_NOT_NEEDED                                                 syscall.Errno = 6821
+	ERROR_RM_ALREADY_STARTED                                                  syscall.Errno = 6822
+	ERROR_FILE_IDENTITY_NOT_PERSISTENT                                        syscall.Errno = 6823
+	ERROR_CANT_BREAK_TRANSACTIONAL_DEPENDENCY                                 syscall.Errno = 6824
+	ERROR_CANT_CROSS_RM_BOUNDARY                                              syscall.Errno = 6825
+	ERROR_TXF_DIR_NOT_EMPTY                                                   syscall.Errno = 6826
+	ERROR_INDOUBT_TRANSACTIONS_EXIST                                          syscall.Errno = 6827
+	ERROR_TM_VOLATILE                                                         syscall.Errno = 6828
+	ERROR_ROLLBACK_TIMER_EXPIRED                                              syscall.Errno = 6829
+	ERROR_TXF_ATTRIBUTE_CORRUPT                                               syscall.Errno = 6830
+	ERROR_EFS_NOT_ALLOWED_IN_TRANSACTION                                      syscall.Errno = 6831
+	ERROR_TRANSACTIONAL_OPEN_NOT_ALLOWED                                      syscall.Errno = 6832
+	ERROR_LOG_GROWTH_FAILED                                                   syscall.Errno = 6833
+	ERROR_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE                               syscall.Errno = 6834
+	ERROR_TXF_METADATA_ALREADY_PRESENT                                        syscall.Errno = 6835
+	ERROR_TRANSACTION_SCOPE_CALLBACKS_NOT_SET                                 syscall.Errno = 6836
+	ERROR_TRANSACTION_REQUIRED_PROMOTION                                      syscall.Errno = 6837
+	ERROR_CANNOT_EXECUTE_FILE_IN_TRANSACTION                                  syscall.Errno = 6838
+	ERROR_TRANSACTIONS_NOT_FROZEN                                             syscall.Errno = 6839
+	ERROR_TRANSACTION_FREEZE_IN_PROGRESS                                      syscall.Errno = 6840
+	ERROR_NOT_SNAPSHOT_VOLUME                                                 syscall.Errno = 6841
+	ERROR_NO_SAVEPOINT_WITH_OPEN_FILES                                        syscall.Errno = 6842
+	ERROR_DATA_LOST_REPAIR                                                    syscall.Errno = 6843
+	ERROR_SPARSE_NOT_ALLOWED_IN_TRANSACTION                                   syscall.Errno = 6844
+	ERROR_TM_IDENTITY_MISMATCH                                                syscall.Errno = 6845
+	ERROR_FLOATED_SECTION                                                     syscall.Errno = 6846
+	ERROR_CANNOT_ACCEPT_TRANSACTED_WORK                                       syscall.Errno = 6847
+	ERROR_CANNOT_ABORT_TRANSACTIONS                                           syscall.Errno = 6848
+	ERROR_BAD_CLUSTERS                                                        syscall.Errno = 6849
+	ERROR_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION                              syscall.Errno = 6850
+	ERROR_VOLUME_DIRTY                                                        syscall.Errno = 6851
+	ERROR_NO_LINK_TRACKING_IN_TRANSACTION                                     syscall.Errno = 6852
+	ERROR_OPERATION_NOT_SUPPORTED_IN_TRANSACTION                              syscall.Errno = 6853
+	ERROR_EXPIRED_HANDLE                                                      syscall.Errno = 6854
+	ERROR_TRANSACTION_NOT_ENLISTED                                            syscall.Errno = 6855
+	ERROR_CTX_WINSTATION_NAME_INVALID                                         syscall.Errno = 7001
+	ERROR_CTX_INVALID_PD                                                      syscall.Errno = 7002
+	ERROR_CTX_PD_NOT_FOUND                                                    syscall.Errno = 7003
+	ERROR_CTX_WD_NOT_FOUND                                                    syscall.Errno = 7004
+	ERROR_CTX_CANNOT_MAKE_EVENTLOG_ENTRY                                      syscall.Errno = 7005
+	ERROR_CTX_SERVICE_NAME_COLLISION                                          syscall.Errno = 7006
+	ERROR_CTX_CLOSE_PENDING                                                   syscall.Errno = 7007
+	ERROR_CTX_NO_OUTBUF                                                       syscall.Errno = 7008
+	ERROR_CTX_MODEM_INF_NOT_FOUND                                             syscall.Errno = 7009
+	ERROR_CTX_INVALID_MODEMNAME                                               syscall.Errno = 7010
+	ERROR_CTX_MODEM_RESPONSE_ERROR                                            syscall.Errno = 7011
+	ERROR_CTX_MODEM_RESPONSE_TIMEOUT                                          syscall.Errno = 7012
+	ERROR_CTX_MODEM_RESPONSE_NO_CARRIER                                       syscall.Errno = 7013
+	ERROR_CTX_MODEM_RESPONSE_NO_DIALTONE                                      syscall.Errno = 7014
+	ERROR_CTX_MODEM_RESPONSE_BUSY                                             syscall.Errno = 7015
+	ERROR_CTX_MODEM_RESPONSE_VOICE                                            syscall.Errno = 7016
+	ERROR_CTX_TD_ERROR                                                        syscall.Errno = 7017
+	ERROR_CTX_WINSTATION_NOT_FOUND                                            syscall.Errno = 7022
+	ERROR_CTX_WINSTATION_ALREADY_EXISTS                                       syscall.Errno = 7023
+	ERROR_CTX_WINSTATION_BUSY                                                 syscall.Errno = 7024
+	ERROR_CTX_BAD_VIDEO_MODE                                                  syscall.Errno = 7025
+	ERROR_CTX_GRAPHICS_INVALID                                                syscall.Errno = 7035
+	ERROR_CTX_LOGON_DISABLED                                                  syscall.Errno = 7037
+	ERROR_CTX_NOT_CONSOLE                                                     syscall.Errno = 7038
+	ERROR_CTX_CLIENT_QUERY_TIMEOUT                                            syscall.Errno = 7040
+	ERROR_CTX_CONSOLE_DISCONNECT                                              syscall.Errno = 7041
+	ERROR_CTX_CONSOLE_CONNECT                                                 syscall.Errno = 7042
+	ERROR_CTX_SHADOW_DENIED                                                   syscall.Errno = 7044
+	ERROR_CTX_WINSTATION_ACCESS_DENIED                                        syscall.Errno = 7045
+	ERROR_CTX_INVALID_WD                                                      syscall.Errno = 7049
+	ERROR_CTX_SHADOW_INVALID                                                  syscall.Errno = 7050
+	ERROR_CTX_SHADOW_DISABLED                                                 syscall.Errno = 7051
+	ERROR_CTX_CLIENT_LICENSE_IN_USE                                           syscall.Errno = 7052
+	ERROR_CTX_CLIENT_LICENSE_NOT_SET                                          syscall.Errno = 7053
+	ERROR_CTX_LICENSE_NOT_AVAILABLE                                           syscall.Errno = 7054
+	ERROR_CTX_LICENSE_CLIENT_INVALID                                          syscall.Errno = 7055
+	ERROR_CTX_LICENSE_EXPIRED                                                 syscall.Errno = 7056
+	ERROR_CTX_SHADOW_NOT_RUNNING                                              syscall.Errno = 7057
+	ERROR_CTX_SHADOW_ENDED_BY_MODE_CHANGE                                     syscall.Errno = 7058
+	ERROR_ACTIVATION_COUNT_EXCEEDED                                           syscall.Errno = 7059
+	ERROR_CTX_WINSTATIONS_DISABLED                                            syscall.Errno = 7060
+	ERROR_CTX_ENCRYPTION_LEVEL_REQUIRED                                       syscall.Errno = 7061
+	ERROR_CTX_SESSION_IN_USE                                                  syscall.Errno = 7062
+	ERROR_CTX_NO_FORCE_LOGOFF                                                 syscall.Errno = 7063
+	ERROR_CTX_ACCOUNT_RESTRICTION                                             syscall.Errno = 7064
+	ERROR_RDP_PROTOCOL_ERROR                                                  syscall.Errno = 7065
+	ERROR_CTX_CDM_CONNECT                                                     syscall.Errno = 7066
+	ERROR_CTX_CDM_DISCONNECT                                                  syscall.Errno = 7067
+	ERROR_CTX_SECURITY_LAYER_ERROR                                            syscall.Errno = 7068
+	ERROR_TS_INCOMPATIBLE_SESSIONS                                            syscall.Errno = 7069
+	ERROR_TS_VIDEO_SUBSYSTEM_ERROR                                            syscall.Errno = 7070
+	FRS_ERR_INVALID_API_SEQUENCE                                              syscall.Errno = 8001
+	FRS_ERR_STARTING_SERVICE                                                  syscall.Errno = 8002
+	FRS_ERR_STOPPING_SERVICE                                                  syscall.Errno = 8003
+	FRS_ERR_INTERNAL_API                                                      syscall.Errno = 8004
+	FRS_ERR_INTERNAL                                                          syscall.Errno = 8005
+	FRS_ERR_SERVICE_COMM                                                      syscall.Errno = 8006
+	FRS_ERR_INSUFFICIENT_PRIV                                                 syscall.Errno = 8007
+	FRS_ERR_AUTHENTICATION                                                    syscall.Errno = 8008
+	FRS_ERR_PARENT_INSUFFICIENT_PRIV                                          syscall.Errno = 8009
+	FRS_ERR_PARENT_AUTHENTICATION                                             syscall.Errno = 8010
+	FRS_ERR_CHILD_TO_PARENT_COMM                                              syscall.Errno = 8011
+	FRS_ERR_PARENT_TO_CHILD_COMM                                              syscall.Errno = 8012
+	FRS_ERR_SYSVOL_POPULATE                                                   syscall.Errno = 8013
+	FRS_ERR_SYSVOL_POPULATE_TIMEOUT                                           syscall.Errno = 8014
+	FRS_ERR_SYSVOL_IS_BUSY                                                    syscall.Errno = 8015
+	FRS_ERR_SYSVOL_DEMOTE                                                     syscall.Errno = 8016
+	FRS_ERR_INVALID_SERVICE_PARAMETER                                         syscall.Errno = 8017
+	DS_S_SUCCESS                                                                            = ERROR_SUCCESS
+	ERROR_DS_NOT_INSTALLED                                                    syscall.Errno = 8200
+	ERROR_DS_MEMBERSHIP_EVALUATED_LOCALLY                                     syscall.Errno = 8201
+	ERROR_DS_NO_ATTRIBUTE_OR_VALUE                                            syscall.Errno = 8202
+	ERROR_DS_INVALID_ATTRIBUTE_SYNTAX                                         syscall.Errno = 8203
+	ERROR_DS_ATTRIBUTE_TYPE_UNDEFINED                                         syscall.Errno = 8204
+	ERROR_DS_ATTRIBUTE_OR_VALUE_EXISTS                                        syscall.Errno = 8205
+	ERROR_DS_BUSY                                                             syscall.Errno = 8206
+	ERROR_DS_UNAVAILABLE                                                      syscall.Errno = 8207
+	ERROR_DS_NO_RIDS_ALLOCATED                                                syscall.Errno = 8208
+	ERROR_DS_NO_MORE_RIDS                                                     syscall.Errno = 8209
+	ERROR_DS_INCORRECT_ROLE_OWNER                                             syscall.Errno = 8210
+	ERROR_DS_RIDMGR_INIT_ERROR                                                syscall.Errno = 8211
+	ERROR_DS_OBJ_CLASS_VIOLATION                                              syscall.Errno = 8212
+	ERROR_DS_CANT_ON_NON_LEAF                                                 syscall.Errno = 8213
+	ERROR_DS_CANT_ON_RDN                                                      syscall.Errno = 8214
+	ERROR_DS_CANT_MOD_OBJ_CLASS                                               syscall.Errno = 8215
+	ERROR_DS_CROSS_DOM_MOVE_ERROR                                             syscall.Errno = 8216
+	ERROR_DS_GC_NOT_AVAILABLE                                                 syscall.Errno = 8217
+	ERROR_SHARED_POLICY                                                       syscall.Errno = 8218
+	ERROR_POLICY_OBJECT_NOT_FOUND                                             syscall.Errno = 8219
+	ERROR_POLICY_ONLY_IN_DS                                                   syscall.Errno = 8220
+	ERROR_PROMOTION_ACTIVE                                                    syscall.Errno = 8221
+	ERROR_NO_PROMOTION_ACTIVE                                                 syscall.Errno = 8222
+	ERROR_DS_OPERATIONS_ERROR                                                 syscall.Errno = 8224
+	ERROR_DS_PROTOCOL_ERROR                                                   syscall.Errno = 8225
+	ERROR_DS_TIMELIMIT_EXCEEDED                                               syscall.Errno = 8226
+	ERROR_DS_SIZELIMIT_EXCEEDED                                               syscall.Errno = 8227
+	ERROR_DS_ADMIN_LIMIT_EXCEEDED                                             syscall.Errno = 8228
+	ERROR_DS_COMPARE_FALSE                                                    syscall.Errno = 8229
+	ERROR_DS_COMPARE_TRUE                                                     syscall.Errno = 8230
+	ERROR_DS_AUTH_METHOD_NOT_SUPPORTED                                        syscall.Errno = 8231
+	ERROR_DS_STRONG_AUTH_REQUIRED                                             syscall.Errno = 8232
+	ERROR_DS_INAPPROPRIATE_AUTH                                               syscall.Errno = 8233
+	ERROR_DS_AUTH_UNKNOWN                                                     syscall.Errno = 8234
+	ERROR_DS_REFERRAL                                                         syscall.Errno = 8235
+	ERROR_DS_UNAVAILABLE_CRIT_EXTENSION                                       syscall.Errno = 8236
+	ERROR_DS_CONFIDENTIALITY_REQUIRED                                         syscall.Errno = 8237
+	ERROR_DS_INAPPROPRIATE_MATCHING                                           syscall.Errno = 8238
+	ERROR_DS_CONSTRAINT_VIOLATION                                             syscall.Errno = 8239
+	ERROR_DS_NO_SUCH_OBJECT                                                   syscall.Errno = 8240
+	ERROR_DS_ALIAS_PROBLEM                                                    syscall.Errno = 8241
+	ERROR_DS_INVALID_DN_SYNTAX                                                syscall.Errno = 8242
+	ERROR_DS_IS_LEAF                                                          syscall.Errno = 8243
+	ERROR_DS_ALIAS_DEREF_PROBLEM                                              syscall.Errno = 8244
+	ERROR_DS_UNWILLING_TO_PERFORM                                             syscall.Errno = 8245
+	ERROR_DS_LOOP_DETECT                                                      syscall.Errno = 8246
+	ERROR_DS_NAMING_VIOLATION                                                 syscall.Errno = 8247
+	ERROR_DS_OBJECT_RESULTS_TOO_LARGE                                         syscall.Errno = 8248
+	ERROR_DS_AFFECTS_MULTIPLE_DSAS                                            syscall.Errno = 8249
+	ERROR_DS_SERVER_DOWN                                                      syscall.Errno = 8250
+	ERROR_DS_LOCAL_ERROR                                                      syscall.Errno = 8251
+	ERROR_DS_ENCODING_ERROR                                                   syscall.Errno = 8252
+	ERROR_DS_DECODING_ERROR                                                   syscall.Errno = 8253
+	ERROR_DS_FILTER_UNKNOWN                                                   syscall.Errno = 8254
+	ERROR_DS_PARAM_ERROR                                                      syscall.Errno = 8255
+	ERROR_DS_NOT_SUPPORTED                                                    syscall.Errno = 8256
+	ERROR_DS_NO_RESULTS_RETURNED                                              syscall.Errno = 8257
+	ERROR_DS_CONTROL_NOT_FOUND                                                syscall.Errno = 8258
+	ERROR_DS_CLIENT_LOOP                                                      syscall.Errno = 8259
+	ERROR_DS_REFERRAL_LIMIT_EXCEEDED                                          syscall.Errno = 8260
+	ERROR_DS_SORT_CONTROL_MISSING                                             syscall.Errno = 8261
+	ERROR_DS_OFFSET_RANGE_ERROR                                               syscall.Errno = 8262
+	ERROR_DS_RIDMGR_DISABLED                                                  syscall.Errno = 8263
+	ERROR_DS_ROOT_MUST_BE_NC                                                  syscall.Errno = 8301
+	ERROR_DS_ADD_REPLICA_INHIBITED                                            syscall.Errno = 8302
+	ERROR_DS_ATT_NOT_DEF_IN_SCHEMA                                            syscall.Errno = 8303
+	ERROR_DS_MAX_OBJ_SIZE_EXCEEDED                                            syscall.Errno = 8304
+	ERROR_DS_OBJ_STRING_NAME_EXISTS                                           syscall.Errno = 8305
+	ERROR_DS_NO_RDN_DEFINED_IN_SCHEMA                                         syscall.Errno = 8306
+	ERROR_DS_RDN_DOESNT_MATCH_SCHEMA                                          syscall.Errno = 8307
+	ERROR_DS_NO_REQUESTED_ATTS_FOUND                                          syscall.Errno = 8308
+	ERROR_DS_USER_BUFFER_TO_SMALL                                             syscall.Errno = 8309
+	ERROR_DS_ATT_IS_NOT_ON_OBJ                                                syscall.Errno = 8310
+	ERROR_DS_ILLEGAL_MOD_OPERATION                                            syscall.Errno = 8311
+	ERROR_DS_OBJ_TOO_LARGE                                                    syscall.Errno = 8312
+	ERROR_DS_BAD_INSTANCE_TYPE                                                syscall.Errno = 8313
+	ERROR_DS_MASTERDSA_REQUIRED                                               syscall.Errno = 8314
+	ERROR_DS_OBJECT_CLASS_REQUIRED                                            syscall.Errno = 8315
+	ERROR_DS_MISSING_REQUIRED_ATT                                             syscall.Errno = 8316
+	ERROR_DS_ATT_NOT_DEF_FOR_CLASS                                            syscall.Errno = 8317
+	ERROR_DS_ATT_ALREADY_EXISTS                                               syscall.Errno = 8318
+	ERROR_DS_CANT_ADD_ATT_VALUES                                              syscall.Errno = 8320
+	ERROR_DS_SINGLE_VALUE_CONSTRAINT                                          syscall.Errno = 8321
+	ERROR_DS_RANGE_CONSTRAINT                                                 syscall.Errno = 8322
+	ERROR_DS_ATT_VAL_ALREADY_EXISTS                                           syscall.Errno = 8323
+	ERROR_DS_CANT_REM_MISSING_ATT                                             syscall.Errno = 8324
+	ERROR_DS_CANT_REM_MISSING_ATT_VAL                                         syscall.Errno = 8325
+	ERROR_DS_ROOT_CANT_BE_SUBREF                                              syscall.Errno = 8326
+	ERROR_DS_NO_CHAINING                                                      syscall.Errno = 8327
+	ERROR_DS_NO_CHAINED_EVAL                                                  syscall.Errno = 8328
+	ERROR_DS_NO_PARENT_OBJECT                                                 syscall.Errno = 8329
+	ERROR_DS_PARENT_IS_AN_ALIAS                                               syscall.Errno = 8330
+	ERROR_DS_CANT_MIX_MASTER_AND_REPS                                         syscall.Errno = 8331
+	ERROR_DS_CHILDREN_EXIST                                                   syscall.Errno = 8332
+	ERROR_DS_OBJ_NOT_FOUND                                                    syscall.Errno = 8333
+	ERROR_DS_ALIASED_OBJ_MISSING                                              syscall.Errno = 8334
+	ERROR_DS_BAD_NAME_SYNTAX                                                  syscall.Errno = 8335
+	ERROR_DS_ALIAS_POINTS_TO_ALIAS                                            syscall.Errno = 8336
+	ERROR_DS_CANT_DEREF_ALIAS                                                 syscall.Errno = 8337
+	ERROR_DS_OUT_OF_SCOPE                                                     syscall.Errno = 8338
+	ERROR_DS_OBJECT_BEING_REMOVED                                             syscall.Errno = 8339
+	ERROR_DS_CANT_DELETE_DSA_OBJ                                              syscall.Errno = 8340
+	ERROR_DS_GENERIC_ERROR                                                    syscall.Errno = 8341
+	ERROR_DS_DSA_MUST_BE_INT_MASTER                                           syscall.Errno = 8342
+	ERROR_DS_CLASS_NOT_DSA                                                    syscall.Errno = 8343
+	ERROR_DS_INSUFF_ACCESS_RIGHTS                                             syscall.Errno = 8344
+	ERROR_DS_ILLEGAL_SUPERIOR                                                 syscall.Errno = 8345
+	ERROR_DS_ATTRIBUTE_OWNED_BY_SAM                                           syscall.Errno = 8346
+	ERROR_DS_NAME_TOO_MANY_PARTS                                              syscall.Errno = 8347
+	ERROR_DS_NAME_TOO_LONG                                                    syscall.Errno = 8348
+	ERROR_DS_NAME_VALUE_TOO_LONG                                              syscall.Errno = 8349
+	ERROR_DS_NAME_UNPARSEABLE                                                 syscall.Errno = 8350
+	ERROR_DS_NAME_TYPE_UNKNOWN                                                syscall.Errno = 8351
+	ERROR_DS_NOT_AN_OBJECT                                                    syscall.Errno = 8352
+	ERROR_DS_SEC_DESC_TOO_SHORT                                               syscall.Errno = 8353
+	ERROR_DS_SEC_DESC_INVALID                                                 syscall.Errno = 8354
+	ERROR_DS_NO_DELETED_NAME                                                  syscall.Errno = 8355
+	ERROR_DS_SUBREF_MUST_HAVE_PARENT                                          syscall.Errno = 8356
+	ERROR_DS_NCNAME_MUST_BE_NC                                                syscall.Errno = 8357
+	ERROR_DS_CANT_ADD_SYSTEM_ONLY                                             syscall.Errno = 8358
+	ERROR_DS_CLASS_MUST_BE_CONCRETE                                           syscall.Errno = 8359
+	ERROR_DS_INVALID_DMD                                                      syscall.Errno = 8360
+	ERROR_DS_OBJ_GUID_EXISTS                                                  syscall.Errno = 8361
+	ERROR_DS_NOT_ON_BACKLINK                                                  syscall.Errno = 8362
+	ERROR_DS_NO_CROSSREF_FOR_NC                                               syscall.Errno = 8363
+	ERROR_DS_SHUTTING_DOWN                                                    syscall.Errno = 8364
+	ERROR_DS_UNKNOWN_OPERATION                                                syscall.Errno = 8365
+	ERROR_DS_INVALID_ROLE_OWNER                                               syscall.Errno = 8366
+	ERROR_DS_COULDNT_CONTACT_FSMO                                             syscall.Errno = 8367
+	ERROR_DS_CROSS_NC_DN_RENAME                                               syscall.Errno = 8368
+	ERROR_DS_CANT_MOD_SYSTEM_ONLY                                             syscall.Errno = 8369
+	ERROR_DS_REPLICATOR_ONLY                                                  syscall.Errno = 8370
+	ERROR_DS_OBJ_CLASS_NOT_DEFINED                                            syscall.Errno = 8371
+	ERROR_DS_OBJ_CLASS_NOT_SUBCLASS                                           syscall.Errno = 8372
+	ERROR_DS_NAME_REFERENCE_INVALID                                           syscall.Errno = 8373
+	ERROR_DS_CROSS_REF_EXISTS                                                 syscall.Errno = 8374
+	ERROR_DS_CANT_DEL_MASTER_CROSSREF                                         syscall.Errno = 8375
+	ERROR_DS_SUBTREE_NOTIFY_NOT_NC_HEAD                                       syscall.Errno = 8376
+	ERROR_DS_NOTIFY_FILTER_TOO_COMPLEX                                        syscall.Errno = 8377
+	ERROR_DS_DUP_RDN                                                          syscall.Errno = 8378
+	ERROR_DS_DUP_OID                                                          syscall.Errno = 8379
+	ERROR_DS_DUP_MAPI_ID                                                      syscall.Errno = 8380
+	ERROR_DS_DUP_SCHEMA_ID_GUID                                               syscall.Errno = 8381
+	ERROR_DS_DUP_LDAP_DISPLAY_NAME                                            syscall.Errno = 8382
+	ERROR_DS_SEMANTIC_ATT_TEST                                                syscall.Errno = 8383
+	ERROR_DS_SYNTAX_MISMATCH                                                  syscall.Errno = 8384
+	ERROR_DS_EXISTS_IN_MUST_HAVE                                              syscall.Errno = 8385
+	ERROR_DS_EXISTS_IN_MAY_HAVE                                               syscall.Errno = 8386
+	ERROR_DS_NONEXISTENT_MAY_HAVE                                             syscall.Errno = 8387
+	ERROR_DS_NONEXISTENT_MUST_HAVE                                            syscall.Errno = 8388
+	ERROR_DS_AUX_CLS_TEST_FAIL                                                syscall.Errno = 8389
+	ERROR_DS_NONEXISTENT_POSS_SUP                                             syscall.Errno = 8390
+	ERROR_DS_SUB_CLS_TEST_FAIL                                                syscall.Errno = 8391
+	ERROR_DS_BAD_RDN_ATT_ID_SYNTAX                                            syscall.Errno = 8392
+	ERROR_DS_EXISTS_IN_AUX_CLS                                                syscall.Errno = 8393
+	ERROR_DS_EXISTS_IN_SUB_CLS                                                syscall.Errno = 8394
+	ERROR_DS_EXISTS_IN_POSS_SUP                                               syscall.Errno = 8395
+	ERROR_DS_RECALCSCHEMA_FAILED                                              syscall.Errno = 8396
+	ERROR_DS_TREE_DELETE_NOT_FINISHED                                         syscall.Errno = 8397
+	ERROR_DS_CANT_DELETE                                                      syscall.Errno = 8398
+	ERROR_DS_ATT_SCHEMA_REQ_ID                                                syscall.Errno = 8399
+	ERROR_DS_BAD_ATT_SCHEMA_SYNTAX                                            syscall.Errno = 8400
+	ERROR_DS_CANT_CACHE_ATT                                                   syscall.Errno = 8401
+	ERROR_DS_CANT_CACHE_CLASS                                                 syscall.Errno = 8402
+	ERROR_DS_CANT_REMOVE_ATT_CACHE                                            syscall.Errno = 8403
+	ERROR_DS_CANT_REMOVE_CLASS_CACHE                                          syscall.Errno = 8404
+	ERROR_DS_CANT_RETRIEVE_DN                                                 syscall.Errno = 8405
+	ERROR_DS_MISSING_SUPREF                                                   syscall.Errno = 8406
+	ERROR_DS_CANT_RETRIEVE_INSTANCE                                           syscall.Errno = 8407
+	ERROR_DS_CODE_INCONSISTENCY                                               syscall.Errno = 8408
+	ERROR_DS_DATABASE_ERROR                                                   syscall.Errno = 8409
+	ERROR_DS_GOVERNSID_MISSING                                                syscall.Errno = 8410
+	ERROR_DS_MISSING_EXPECTED_ATT                                             syscall.Errno = 8411
+	ERROR_DS_NCNAME_MISSING_CR_REF                                            syscall.Errno = 8412
+	ERROR_DS_SECURITY_CHECKING_ERROR                                          syscall.Errno = 8413
+	ERROR_DS_SCHEMA_NOT_LOADED                                                syscall.Errno = 8414
+	ERROR_DS_SCHEMA_ALLOC_FAILED                                              syscall.Errno = 8415
+	ERROR_DS_ATT_SCHEMA_REQ_SYNTAX                                            syscall.Errno = 8416
+	ERROR_DS_GCVERIFY_ERROR                                                   syscall.Errno = 8417
+	ERROR_DS_DRA_SCHEMA_MISMATCH                                              syscall.Errno = 8418
+	ERROR_DS_CANT_FIND_DSA_OBJ                                                syscall.Errno = 8419
+	ERROR_DS_CANT_FIND_EXPECTED_NC                                            syscall.Errno = 8420
+	ERROR_DS_CANT_FIND_NC_IN_CACHE                                            syscall.Errno = 8421
+	ERROR_DS_CANT_RETRIEVE_CHILD                                              syscall.Errno = 8422
+	ERROR_DS_SECURITY_ILLEGAL_MODIFY                                          syscall.Errno = 8423
+	ERROR_DS_CANT_REPLACE_HIDDEN_REC                                          syscall.Errno = 8424
+	ERROR_DS_BAD_HIERARCHY_FILE                                               syscall.Errno = 8425
+	ERROR_DS_BUILD_HIERARCHY_TABLE_FAILED                                     syscall.Errno = 8426
+	ERROR_DS_CONFIG_PARAM_MISSING                                             syscall.Errno = 8427
+	ERROR_DS_COUNTING_AB_INDICES_FAILED                                       syscall.Errno = 8428
+	ERROR_DS_HIERARCHY_TABLE_MALLOC_FAILED                                    syscall.Errno = 8429
+	ERROR_DS_INTERNAL_FAILURE                                                 syscall.Errno = 8430
+	ERROR_DS_UNKNOWN_ERROR                                                    syscall.Errno = 8431
+	ERROR_DS_ROOT_REQUIRES_CLASS_TOP                                          syscall.Errno = 8432
+	ERROR_DS_REFUSING_FSMO_ROLES                                              syscall.Errno = 8433
+	ERROR_DS_MISSING_FSMO_SETTINGS                                            syscall.Errno = 8434
+	ERROR_DS_UNABLE_TO_SURRENDER_ROLES                                        syscall.Errno = 8435
+	ERROR_DS_DRA_GENERIC                                                      syscall.Errno = 8436
+	ERROR_DS_DRA_INVALID_PARAMETER                                            syscall.Errno = 8437
+	ERROR_DS_DRA_BUSY                                                         syscall.Errno = 8438
+	ERROR_DS_DRA_BAD_DN                                                       syscall.Errno = 8439
+	ERROR_DS_DRA_BAD_NC                                                       syscall.Errno = 8440
+	ERROR_DS_DRA_DN_EXISTS                                                    syscall.Errno = 8441
+	ERROR_DS_DRA_INTERNAL_ERROR                                               syscall.Errno = 8442
+	ERROR_DS_DRA_INCONSISTENT_DIT                                             syscall.Errno = 8443
+	ERROR_DS_DRA_CONNECTION_FAILED                                            syscall.Errno = 8444
+	ERROR_DS_DRA_BAD_INSTANCE_TYPE                                            syscall.Errno = 8445
+	ERROR_DS_DRA_OUT_OF_MEM                                                   syscall.Errno = 8446
+	ERROR_DS_DRA_MAIL_PROBLEM                                                 syscall.Errno = 8447
+	ERROR_DS_DRA_REF_ALREADY_EXISTS                                           syscall.Errno = 8448
+	ERROR_DS_DRA_REF_NOT_FOUND                                                syscall.Errno = 8449
+	ERROR_DS_DRA_OBJ_IS_REP_SOURCE                                            syscall.Errno = 8450
+	ERROR_DS_DRA_DB_ERROR                                                     syscall.Errno = 8451
+	ERROR_DS_DRA_NO_REPLICA                                                   syscall.Errno = 8452
+	ERROR_DS_DRA_ACCESS_DENIED                                                syscall.Errno = 8453
+	ERROR_DS_DRA_NOT_SUPPORTED                                                syscall.Errno = 8454
+	ERROR_DS_DRA_RPC_CANCELLED                                                syscall.Errno = 8455
+	ERROR_DS_DRA_SOURCE_DISABLED                                              syscall.Errno = 8456
+	ERROR_DS_DRA_SINK_DISABLED                                                syscall.Errno = 8457
+	ERROR_DS_DRA_NAME_COLLISION                                               syscall.Errno = 8458
+	ERROR_DS_DRA_SOURCE_REINSTALLED                                           syscall.Errno = 8459
+	ERROR_DS_DRA_MISSING_PARENT                                               syscall.Errno = 8460
+	ERROR_DS_DRA_PREEMPTED                                                    syscall.Errno = 8461
+	ERROR_DS_DRA_ABANDON_SYNC                                                 syscall.Errno = 8462
+	ERROR_DS_DRA_SHUTDOWN                                                     syscall.Errno = 8463
+	ERROR_DS_DRA_INCOMPATIBLE_PARTIAL_SET                                     syscall.Errno = 8464
+	ERROR_DS_DRA_SOURCE_IS_PARTIAL_REPLICA                                    syscall.Errno = 8465
+	ERROR_DS_DRA_EXTN_CONNECTION_FAILED                                       syscall.Errno = 8466
+	ERROR_DS_INSTALL_SCHEMA_MISMATCH                                          syscall.Errno = 8467
+	ERROR_DS_DUP_LINK_ID                                                      syscall.Errno = 8468
+	ERROR_DS_NAME_ERROR_RESOLVING                                             syscall.Errno = 8469
+	ERROR_DS_NAME_ERROR_NOT_FOUND                                             syscall.Errno = 8470
+	ERROR_DS_NAME_ERROR_NOT_UNIQUE                                            syscall.Errno = 8471
+	ERROR_DS_NAME_ERROR_NO_MAPPING                                            syscall.Errno = 8472
+	ERROR_DS_NAME_ERROR_DOMAIN_ONLY                                           syscall.Errno = 8473
+	ERROR_DS_NAME_ERROR_NO_SYNTACTICAL_MAPPING                                syscall.Errno = 8474
+	ERROR_DS_CONSTRUCTED_ATT_MOD                                              syscall.Errno = 8475
+	ERROR_DS_WRONG_OM_OBJ_CLASS                                               syscall.Errno = 8476
+	ERROR_DS_DRA_REPL_PENDING                                                 syscall.Errno = 8477
+	ERROR_DS_DS_REQUIRED                                                      syscall.Errno = 8478
+	ERROR_DS_INVALID_LDAP_DISPLAY_NAME                                        syscall.Errno = 8479
+	ERROR_DS_NON_BASE_SEARCH                                                  syscall.Errno = 8480
+	ERROR_DS_CANT_RETRIEVE_ATTS                                               syscall.Errno = 8481
+	ERROR_DS_BACKLINK_WITHOUT_LINK                                            syscall.Errno = 8482
+	ERROR_DS_EPOCH_MISMATCH                                                   syscall.Errno = 8483
+	ERROR_DS_SRC_NAME_MISMATCH                                                syscall.Errno = 8484
+	ERROR_DS_SRC_AND_DST_NC_IDENTICAL                                         syscall.Errno = 8485
+	ERROR_DS_DST_NC_MISMATCH                                                  syscall.Errno = 8486
+	ERROR_DS_NOT_AUTHORITIVE_FOR_DST_NC                                       syscall.Errno = 8487
+	ERROR_DS_SRC_GUID_MISMATCH                                                syscall.Errno = 8488
+	ERROR_DS_CANT_MOVE_DELETED_OBJECT                                         syscall.Errno = 8489
+	ERROR_DS_PDC_OPERATION_IN_PROGRESS                                        syscall.Errno = 8490
+	ERROR_DS_CROSS_DOMAIN_CLEANUP_REQD                                        syscall.Errno = 8491
+	ERROR_DS_ILLEGAL_XDOM_MOVE_OPERATION                                      syscall.Errno = 8492
+	ERROR_DS_CANT_WITH_ACCT_GROUP_MEMBERSHPS                                  syscall.Errno = 8493
+	ERROR_DS_NC_MUST_HAVE_NC_PARENT                                           syscall.Errno = 8494
+	ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE                                        syscall.Errno = 8495
+	ERROR_DS_DST_DOMAIN_NOT_NATIVE                                            syscall.Errno = 8496
+	ERROR_DS_MISSING_INFRASTRUCTURE_CONTAINER                                 syscall.Errno = 8497
+	ERROR_DS_CANT_MOVE_ACCOUNT_GROUP                                          syscall.Errno = 8498
+	ERROR_DS_CANT_MOVE_RESOURCE_GROUP                                         syscall.Errno = 8499
+	ERROR_DS_INVALID_SEARCH_FLAG                                              syscall.Errno = 8500
+	ERROR_DS_NO_TREE_DELETE_ABOVE_NC                                          syscall.Errno = 8501
+	ERROR_DS_COULDNT_LOCK_TREE_FOR_DELETE                                     syscall.Errno = 8502
+	ERROR_DS_COULDNT_IDENTIFY_OBJECTS_FOR_TREE_DELETE                         syscall.Errno = 8503
+	ERROR_DS_SAM_INIT_FAILURE                                                 syscall.Errno = 8504
+	ERROR_DS_SENSITIVE_GROUP_VIOLATION                                        syscall.Errno = 8505
+	ERROR_DS_CANT_MOD_PRIMARYGROUPID                                          syscall.Errno = 8506
+	ERROR_DS_ILLEGAL_BASE_SCHEMA_MOD                                          syscall.Errno = 8507
+	ERROR_DS_NONSAFE_SCHEMA_CHANGE                                            syscall.Errno = 8508
+	ERROR_DS_SCHEMA_UPDATE_DISALLOWED                                         syscall.Errno = 8509
+	ERROR_DS_CANT_CREATE_UNDER_SCHEMA                                         syscall.Errno = 8510
+	ERROR_DS_INSTALL_NO_SRC_SCH_VERSION                                       syscall.Errno = 8511
+	ERROR_DS_INSTALL_NO_SCH_VERSION_IN_INIFILE                                syscall.Errno = 8512
+	ERROR_DS_INVALID_GROUP_TYPE                                               syscall.Errno = 8513
+	ERROR_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN                               syscall.Errno = 8514
+	ERROR_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN                                syscall.Errno = 8515
+	ERROR_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER                                    syscall.Errno = 8516
+	ERROR_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER                                syscall.Errno = 8517
+	ERROR_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER                                 syscall.Errno = 8518
+	ERROR_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER                              syscall.Errno = 8519
+	ERROR_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER                         syscall.Errno = 8520
+	ERROR_DS_HAVE_PRIMARY_MEMBERS                                             syscall.Errno = 8521
+	ERROR_DS_STRING_SD_CONVERSION_FAILED                                      syscall.Errno = 8522
+	ERROR_DS_NAMING_MASTER_GC                                                 syscall.Errno = 8523
+	ERROR_DS_DNS_LOOKUP_FAILURE                                               syscall.Errno = 8524
+	ERROR_DS_COULDNT_UPDATE_SPNS                                              syscall.Errno = 8525
+	ERROR_DS_CANT_RETRIEVE_SD                                                 syscall.Errno = 8526
+	ERROR_DS_KEY_NOT_UNIQUE                                                   syscall.Errno = 8527
+	ERROR_DS_WRONG_LINKED_ATT_SYNTAX                                          syscall.Errno = 8528
+	ERROR_DS_SAM_NEED_BOOTKEY_PASSWORD                                        syscall.Errno = 8529
+	ERROR_DS_SAM_NEED_BOOTKEY_FLOPPY                                          syscall.Errno = 8530
+	ERROR_DS_CANT_START                                                       syscall.Errno = 8531
+	ERROR_DS_INIT_FAILURE                                                     syscall.Errno = 8532
+	ERROR_DS_NO_PKT_PRIVACY_ON_CONNECTION                                     syscall.Errno = 8533
+	ERROR_DS_SOURCE_DOMAIN_IN_FOREST                                          syscall.Errno = 8534
+	ERROR_DS_DESTINATION_DOMAIN_NOT_IN_FOREST                                 syscall.Errno = 8535
+	ERROR_DS_DESTINATION_AUDITING_NOT_ENABLED                                 syscall.Errno = 8536
+	ERROR_DS_CANT_FIND_DC_FOR_SRC_DOMAIN                                      syscall.Errno = 8537
+	ERROR_DS_SRC_OBJ_NOT_GROUP_OR_USER                                        syscall.Errno = 8538
+	ERROR_DS_SRC_SID_EXISTS_IN_FOREST                                         syscall.Errno = 8539
+	ERROR_DS_SRC_AND_DST_OBJECT_CLASS_MISMATCH                                syscall.Errno = 8540
+	ERROR_SAM_INIT_FAILURE                                                    syscall.Errno = 8541
+	ERROR_DS_DRA_SCHEMA_INFO_SHIP                                             syscall.Errno = 8542
+	ERROR_DS_DRA_SCHEMA_CONFLICT                                              syscall.Errno = 8543
+	ERROR_DS_DRA_EARLIER_SCHEMA_CONFLICT                                      syscall.Errno = 8544
+	ERROR_DS_DRA_OBJ_NC_MISMATCH                                              syscall.Errno = 8545
+	ERROR_DS_NC_STILL_HAS_DSAS                                                syscall.Errno = 8546
+	ERROR_DS_GC_REQUIRED                                                      syscall.Errno = 8547
+	ERROR_DS_LOCAL_MEMBER_OF_LOCAL_ONLY                                       syscall.Errno = 8548
+	ERROR_DS_NO_FPO_IN_UNIVERSAL_GROUPS                                       syscall.Errno = 8549
+	ERROR_DS_CANT_ADD_TO_GC                                                   syscall.Errno = 8550
+	ERROR_DS_NO_CHECKPOINT_WITH_PDC                                           syscall.Errno = 8551
+	ERROR_DS_SOURCE_AUDITING_NOT_ENABLED                                      syscall.Errno = 8552
+	ERROR_DS_CANT_CREATE_IN_NONDOMAIN_NC                                      syscall.Errno = 8553
+	ERROR_DS_INVALID_NAME_FOR_SPN                                             syscall.Errno = 8554
+	ERROR_DS_FILTER_USES_CONTRUCTED_ATTRS                                     syscall.Errno = 8555
+	ERROR_DS_UNICODEPWD_NOT_IN_QUOTES                                         syscall.Errno = 8556
+	ERROR_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED                                   syscall.Errno = 8557
+	ERROR_DS_MUST_BE_RUN_ON_DST_DC                                            syscall.Errno = 8558
+	ERROR_DS_SRC_DC_MUST_BE_SP4_OR_GREATER                                    syscall.Errno = 8559
+	ERROR_DS_CANT_TREE_DELETE_CRITICAL_OBJ                                    syscall.Errno = 8560
+	ERROR_DS_INIT_FAILURE_CONSOLE                                             syscall.Errno = 8561
+	ERROR_DS_SAM_INIT_FAILURE_CONSOLE                                         syscall.Errno = 8562
+	ERROR_DS_FOREST_VERSION_TOO_HIGH                                          syscall.Errno = 8563
+	ERROR_DS_DOMAIN_VERSION_TOO_HIGH                                          syscall.Errno = 8564
+	ERROR_DS_FOREST_VERSION_TOO_LOW                                           syscall.Errno = 8565
+	ERROR_DS_DOMAIN_VERSION_TOO_LOW                                           syscall.Errno = 8566
+	ERROR_DS_INCOMPATIBLE_VERSION                                             syscall.Errno = 8567
+	ERROR_DS_LOW_DSA_VERSION                                                  syscall.Errno = 8568
+	ERROR_DS_NO_BEHAVIOR_VERSION_IN_MIXEDDOMAIN                               syscall.Errno = 8569
+	ERROR_DS_NOT_SUPPORTED_SORT_ORDER                                         syscall.Errno = 8570
+	ERROR_DS_NAME_NOT_UNIQUE                                                  syscall.Errno = 8571
+	ERROR_DS_MACHINE_ACCOUNT_CREATED_PRENT4                                   syscall.Errno = 8572
+	ERROR_DS_OUT_OF_VERSION_STORE                                             syscall.Errno = 8573
+	ERROR_DS_INCOMPATIBLE_CONTROLS_USED                                       syscall.Errno = 8574
+	ERROR_DS_NO_REF_DOMAIN                                                    syscall.Errno = 8575
+	ERROR_DS_RESERVED_LINK_ID                                                 syscall.Errno = 8576
+	ERROR_DS_LINK_ID_NOT_AVAILABLE                                            syscall.Errno = 8577
+	ERROR_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER                                    syscall.Errno = 8578
+	ERROR_DS_MODIFYDN_DISALLOWED_BY_INSTANCE_TYPE                             syscall.Errno = 8579
+	ERROR_DS_NO_OBJECT_MOVE_IN_SCHEMA_NC                                      syscall.Errno = 8580
+	ERROR_DS_MODIFYDN_DISALLOWED_BY_FLAG                                      syscall.Errno = 8581
+	ERROR_DS_MODIFYDN_WRONG_GRANDPARENT                                       syscall.Errno = 8582
+	ERROR_DS_NAME_ERROR_TRUST_REFERRAL                                        syscall.Errno = 8583
+	ERROR_NOT_SUPPORTED_ON_STANDARD_SERVER                                    syscall.Errno = 8584
+	ERROR_DS_CANT_ACCESS_REMOTE_PART_OF_AD                                    syscall.Errno = 8585
+	ERROR_DS_CR_IMPOSSIBLE_TO_VALIDATE_V2                                     syscall.Errno = 8586
+	ERROR_DS_THREAD_LIMIT_EXCEEDED                                            syscall.Errno = 8587
+	ERROR_DS_NOT_CLOSEST                                                      syscall.Errno = 8588
+	ERROR_DS_CANT_DERIVE_SPN_WITHOUT_SERVER_REF                               syscall.Errno = 8589
+	ERROR_DS_SINGLE_USER_MODE_FAILED                                          syscall.Errno = 8590
+	ERROR_DS_NTDSCRIPT_SYNTAX_ERROR                                           syscall.Errno = 8591
+	ERROR_DS_NTDSCRIPT_PROCESS_ERROR                                          syscall.Errno = 8592
+	ERROR_DS_DIFFERENT_REPL_EPOCHS                                            syscall.Errno = 8593
+	ERROR_DS_DRS_EXTENSIONS_CHANGED                                           syscall.Errno = 8594
+	ERROR_DS_REPLICA_SET_CHANGE_NOT_ALLOWED_ON_DISABLED_CR                    syscall.Errno = 8595
+	ERROR_DS_NO_MSDS_INTID                                                    syscall.Errno = 8596
+	ERROR_DS_DUP_MSDS_INTID                                                   syscall.Errno = 8597
+	ERROR_DS_EXISTS_IN_RDNATTID                                               syscall.Errno = 8598
+	ERROR_DS_AUTHORIZATION_FAILED                                             syscall.Errno = 8599
+	ERROR_DS_INVALID_SCRIPT                                                   syscall.Errno = 8600
+	ERROR_DS_REMOTE_CROSSREF_OP_FAILED                                        syscall.Errno = 8601
+	ERROR_DS_CROSS_REF_BUSY                                                   syscall.Errno = 8602
+	ERROR_DS_CANT_DERIVE_SPN_FOR_DELETED_DOMAIN                               syscall.Errno = 8603
+	ERROR_DS_CANT_DEMOTE_WITH_WRITEABLE_NC                                    syscall.Errno = 8604
+	ERROR_DS_DUPLICATE_ID_FOUND                                               syscall.Errno = 8605
+	ERROR_DS_INSUFFICIENT_ATTR_TO_CREATE_OBJECT                               syscall.Errno = 8606
+	ERROR_DS_GROUP_CONVERSION_ERROR                                           syscall.Errno = 8607
+	ERROR_DS_CANT_MOVE_APP_BASIC_GROUP                                        syscall.Errno = 8608
+	ERROR_DS_CANT_MOVE_APP_QUERY_GROUP                                        syscall.Errno = 8609
+	ERROR_DS_ROLE_NOT_VERIFIED                                                syscall.Errno = 8610
+	ERROR_DS_WKO_CONTAINER_CANNOT_BE_SPECIAL                                  syscall.Errno = 8611
+	ERROR_DS_DOMAIN_RENAME_IN_PROGRESS                                        syscall.Errno = 8612
+	ERROR_DS_EXISTING_AD_CHILD_NC                                             syscall.Errno = 8613
+	ERROR_DS_REPL_LIFETIME_EXCEEDED                                           syscall.Errno = 8614
+	ERROR_DS_DISALLOWED_IN_SYSTEM_CONTAINER                                   syscall.Errno = 8615
+	ERROR_DS_LDAP_SEND_QUEUE_FULL                                             syscall.Errno = 8616
+	ERROR_DS_DRA_OUT_SCHEDULE_WINDOW                                          syscall.Errno = 8617
+	ERROR_DS_POLICY_NOT_KNOWN                                                 syscall.Errno = 8618
+	ERROR_NO_SITE_SETTINGS_OBJECT                                             syscall.Errno = 8619
+	ERROR_NO_SECRETS                                                          syscall.Errno = 8620
+	ERROR_NO_WRITABLE_DC_FOUND                                                syscall.Errno = 8621
+	ERROR_DS_NO_SERVER_OBJECT                                                 syscall.Errno = 8622
+	ERROR_DS_NO_NTDSA_OBJECT                                                  syscall.Errno = 8623
+	ERROR_DS_NON_ASQ_SEARCH                                                   syscall.Errno = 8624
+	ERROR_DS_AUDIT_FAILURE                                                    syscall.Errno = 8625
+	ERROR_DS_INVALID_SEARCH_FLAG_SUBTREE                                      syscall.Errno = 8626
+	ERROR_DS_INVALID_SEARCH_FLAG_TUPLE                                        syscall.Errno = 8627
+	ERROR_DS_HIERARCHY_TABLE_TOO_DEEP                                         syscall.Errno = 8628
+	ERROR_DS_DRA_CORRUPT_UTD_VECTOR                                           syscall.Errno = 8629
+	ERROR_DS_DRA_SECRETS_DENIED                                               syscall.Errno = 8630
+	ERROR_DS_RESERVED_MAPI_ID                                                 syscall.Errno = 8631
+	ERROR_DS_MAPI_ID_NOT_AVAILABLE                                            syscall.Errno = 8632
+	ERROR_DS_DRA_MISSING_KRBTGT_SECRET                                        syscall.Errno = 8633
+	ERROR_DS_DOMAIN_NAME_EXISTS_IN_FOREST                                     syscall.Errno = 8634
+	ERROR_DS_FLAT_NAME_EXISTS_IN_FOREST                                       syscall.Errno = 8635
+	ERROR_INVALID_USER_PRINCIPAL_NAME                                         syscall.Errno = 8636
+	ERROR_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS                               syscall.Errno = 8637
+	ERROR_DS_OID_NOT_FOUND                                                    syscall.Errno = 8638
+	ERROR_DS_DRA_RECYCLED_TARGET                                              syscall.Errno = 8639
+	ERROR_DS_DISALLOWED_NC_REDIRECT                                           syscall.Errno = 8640
+	ERROR_DS_HIGH_ADLDS_FFL                                                   syscall.Errno = 8641
+	ERROR_DS_HIGH_DSA_VERSION                                                 syscall.Errno = 8642
+	ERROR_DS_LOW_ADLDS_FFL                                                    syscall.Errno = 8643
+	ERROR_DOMAIN_SID_SAME_AS_LOCAL_WORKSTATION                                syscall.Errno = 8644
+	ERROR_DS_UNDELETE_SAM_VALIDATION_FAILED                                   syscall.Errno = 8645
+	ERROR_INCORRECT_ACCOUNT_TYPE                                              syscall.Errno = 8646
+	ERROR_DS_SPN_VALUE_NOT_UNIQUE_IN_FOREST                                   syscall.Errno = 8647
+	ERROR_DS_UPN_VALUE_NOT_UNIQUE_IN_FOREST                                   syscall.Errno = 8648
+	ERROR_DS_MISSING_FOREST_TRUST                                             syscall.Errno = 8649
+	ERROR_DS_VALUE_KEY_NOT_UNIQUE                                             syscall.Errno = 8650
+	DNS_ERROR_RESPONSE_CODES_BASE                                             syscall.Errno = 9000
+	DNS_ERROR_RCODE_NO_ERROR                                                                = ERROR_SUCCESS
+	DNS_ERROR_MASK                                                            syscall.Errno = 0x00002328
+	DNS_ERROR_RCODE_FORMAT_ERROR                                              syscall.Errno = 9001
+	DNS_ERROR_RCODE_SERVER_FAILURE                                            syscall.Errno = 9002
+	DNS_ERROR_RCODE_NAME_ERROR                                                syscall.Errno = 9003
+	DNS_ERROR_RCODE_NOT_IMPLEMENTED                                           syscall.Errno = 9004
+	DNS_ERROR_RCODE_REFUSED                                                   syscall.Errno = 9005
+	DNS_ERROR_RCODE_YXDOMAIN                                                  syscall.Errno = 9006
+	DNS_ERROR_RCODE_YXRRSET                                                   syscall.Errno = 9007
+	DNS_ERROR_RCODE_NXRRSET                                                   syscall.Errno = 9008
+	DNS_ERROR_RCODE_NOTAUTH                                                   syscall.Errno = 9009
+	DNS_ERROR_RCODE_NOTZONE                                                   syscall.Errno = 9010
+	DNS_ERROR_RCODE_BADSIG                                                    syscall.Errno = 9016
+	DNS_ERROR_RCODE_BADKEY                                                    syscall.Errno = 9017
+	DNS_ERROR_RCODE_BADTIME                                                   syscall.Errno = 9018
+	DNS_ERROR_RCODE_LAST                                                                    = DNS_ERROR_RCODE_BADTIME
+	DNS_ERROR_DNSSEC_BASE                                                     syscall.Errno = 9100
+	DNS_ERROR_KEYMASTER_REQUIRED                                              syscall.Errno = 9101
+	DNS_ERROR_NOT_ALLOWED_ON_SIGNED_ZONE                                      syscall.Errno = 9102
+	DNS_ERROR_NSEC3_INCOMPATIBLE_WITH_RSA_SHA1                                syscall.Errno = 9103
+	DNS_ERROR_NOT_ENOUGH_SIGNING_KEY_DESCRIPTORS                              syscall.Errno = 9104
+	DNS_ERROR_UNSUPPORTED_ALGORITHM                                           syscall.Errno = 9105
+	DNS_ERROR_INVALID_KEY_SIZE                                                syscall.Errno = 9106
+	DNS_ERROR_SIGNING_KEY_NOT_ACCESSIBLE                                      syscall.Errno = 9107
+	DNS_ERROR_KSP_DOES_NOT_SUPPORT_PROTECTION                                 syscall.Errno = 9108
+	DNS_ERROR_UNEXPECTED_DATA_PROTECTION_ERROR                                syscall.Errno = 9109
+	DNS_ERROR_UNEXPECTED_CNG_ERROR                                            syscall.Errno = 9110
+	DNS_ERROR_UNKNOWN_SIGNING_PARAMETER_VERSION                               syscall.Errno = 9111
+	DNS_ERROR_KSP_NOT_ACCESSIBLE                                              syscall.Errno = 9112
+	DNS_ERROR_TOO_MANY_SKDS                                                   syscall.Errno = 9113
+	DNS_ERROR_INVALID_ROLLOVER_PERIOD                                         syscall.Errno = 9114
+	DNS_ERROR_INVALID_INITIAL_ROLLOVER_OFFSET                                 syscall.Errno = 9115
+	DNS_ERROR_ROLLOVER_IN_PROGRESS                                            syscall.Errno = 9116
+	DNS_ERROR_STANDBY_KEY_NOT_PRESENT                                         syscall.Errno = 9117
+	DNS_ERROR_NOT_ALLOWED_ON_ZSK                                              syscall.Errno = 9118
+	DNS_ERROR_NOT_ALLOWED_ON_ACTIVE_SKD                                       syscall.Errno = 9119
+	DNS_ERROR_ROLLOVER_ALREADY_QUEUED                                         syscall.Errno = 9120
+	DNS_ERROR_NOT_ALLOWED_ON_UNSIGNED_ZONE                                    syscall.Errno = 9121
+	DNS_ERROR_BAD_KEYMASTER                                                   syscall.Errno = 9122
+	DNS_ERROR_INVALID_SIGNATURE_VALIDITY_PERIOD                               syscall.Errno = 9123
+	DNS_ERROR_INVALID_NSEC3_ITERATION_COUNT                                   syscall.Errno = 9124
+	DNS_ERROR_DNSSEC_IS_DISABLED                                              syscall.Errno = 9125
+	DNS_ERROR_INVALID_XML                                                     syscall.Errno = 9126
+	DNS_ERROR_NO_VALID_TRUST_ANCHORS                                          syscall.Errno = 9127
+	DNS_ERROR_ROLLOVER_NOT_POKEABLE                                           syscall.Errno = 9128
+	DNS_ERROR_NSEC3_NAME_COLLISION                                            syscall.Errno = 9129
+	DNS_ERROR_NSEC_INCOMPATIBLE_WITH_NSEC3_RSA_SHA1                           syscall.Errno = 9130
+	DNS_ERROR_PACKET_FMT_BASE                                                 syscall.Errno = 9500
+	DNS_INFO_NO_RECORDS                                                       syscall.Errno = 9501
+	DNS_ERROR_BAD_PACKET                                                      syscall.Errno = 9502
+	DNS_ERROR_NO_PACKET                                                       syscall.Errno = 9503
+	DNS_ERROR_RCODE                                                           syscall.Errno = 9504
+	DNS_ERROR_UNSECURE_PACKET                                                 syscall.Errno = 9505
+	DNS_STATUS_PACKET_UNSECURE                                                              = DNS_ERROR_UNSECURE_PACKET
+	DNS_REQUEST_PENDING                                                       syscall.Errno = 9506
+	DNS_ERROR_NO_MEMORY                                                                     = ERROR_OUTOFMEMORY
+	DNS_ERROR_INVALID_NAME                                                                  = ERROR_INVALID_NAME
+	DNS_ERROR_INVALID_DATA                                                                  = ERROR_INVALID_DATA
+	DNS_ERROR_GENERAL_API_BASE                                                syscall.Errno = 9550
+	DNS_ERROR_INVALID_TYPE                                                    syscall.Errno = 9551
+	DNS_ERROR_INVALID_IP_ADDRESS                                              syscall.Errno = 9552
+	DNS_ERROR_INVALID_PROPERTY                                                syscall.Errno = 9553
+	DNS_ERROR_TRY_AGAIN_LATER                                                 syscall.Errno = 9554
+	DNS_ERROR_NOT_UNIQUE                                                      syscall.Errno = 9555
+	DNS_ERROR_NON_RFC_NAME                                                    syscall.Errno = 9556
+	DNS_STATUS_FQDN                                                           syscall.Errno = 9557
+	DNS_STATUS_DOTTED_NAME                                                    syscall.Errno = 9558
+	DNS_STATUS_SINGLE_PART_NAME                                               syscall.Errno = 9559
+	DNS_ERROR_INVALID_NAME_CHAR                                               syscall.Errno = 9560
+	DNS_ERROR_NUMERIC_NAME                                                    syscall.Errno = 9561
+	DNS_ERROR_NOT_ALLOWED_ON_ROOT_SERVER                                      syscall.Errno = 9562
+	DNS_ERROR_NOT_ALLOWED_UNDER_DELEGATION                                    syscall.Errno = 9563
+	DNS_ERROR_CANNOT_FIND_ROOT_HINTS                                          syscall.Errno = 9564
+	DNS_ERROR_INCONSISTENT_ROOT_HINTS                                         syscall.Errno = 9565
+	DNS_ERROR_DWORD_VALUE_TOO_SMALL                                           syscall.Errno = 9566
+	DNS_ERROR_DWORD_VALUE_TOO_LARGE                                           syscall.Errno = 9567
+	DNS_ERROR_BACKGROUND_LOADING                                              syscall.Errno = 9568
+	DNS_ERROR_NOT_ALLOWED_ON_RODC                                             syscall.Errno = 9569
+	DNS_ERROR_NOT_ALLOWED_UNDER_DNAME                                         syscall.Errno = 9570
+	DNS_ERROR_DELEGATION_REQUIRED                                             syscall.Errno = 9571
+	DNS_ERROR_INVALID_POLICY_TABLE                                            syscall.Errno = 9572
+	DNS_ERROR_ADDRESS_REQUIRED                                                syscall.Errno = 9573
+	DNS_ERROR_ZONE_BASE                                                       syscall.Errno = 9600
+	DNS_ERROR_ZONE_DOES_NOT_EXIST                                             syscall.Errno = 9601
+	DNS_ERROR_NO_ZONE_INFO                                                    syscall.Errno = 9602
+	DNS_ERROR_INVALID_ZONE_OPERATION                                          syscall.Errno = 9603
+	DNS_ERROR_ZONE_CONFIGURATION_ERROR                                        syscall.Errno = 9604
+	DNS_ERROR_ZONE_HAS_NO_SOA_RECORD                                          syscall.Errno = 9605
+	DNS_ERROR_ZONE_HAS_NO_NS_RECORDS                                          syscall.Errno = 9606
+	DNS_ERROR_ZONE_LOCKED                                                     syscall.Errno = 9607
+	DNS_ERROR_ZONE_CREATION_FAILED                                            syscall.Errno = 9608
+	DNS_ERROR_ZONE_ALREADY_EXISTS                                             syscall.Errno = 9609
+	DNS_ERROR_AUTOZONE_ALREADY_EXISTS                                         syscall.Errno = 9610
+	DNS_ERROR_INVALID_ZONE_TYPE                                               syscall.Errno = 9611
+	DNS_ERROR_SECONDARY_REQUIRES_MASTER_IP                                    syscall.Errno = 9612
+	DNS_ERROR_ZONE_NOT_SECONDARY                                              syscall.Errno = 9613
+	DNS_ERROR_NEED_SECONDARY_ADDRESSES                                        syscall.Errno = 9614
+	DNS_ERROR_WINS_INIT_FAILED                                                syscall.Errno = 9615
+	DNS_ERROR_NEED_WINS_SERVERS                                               syscall.Errno = 9616
+	DNS_ERROR_NBSTAT_INIT_FAILED                                              syscall.Errno = 9617
+	DNS_ERROR_SOA_DELETE_INVALID                                              syscall.Errno = 9618
+	DNS_ERROR_FORWARDER_ALREADY_EXISTS                                        syscall.Errno = 9619
+	DNS_ERROR_ZONE_REQUIRES_MASTER_IP                                         syscall.Errno = 9620
+	DNS_ERROR_ZONE_IS_SHUTDOWN                                                syscall.Errno = 9621
+	DNS_ERROR_ZONE_LOCKED_FOR_SIGNING                                         syscall.Errno = 9622
+	DNS_ERROR_DATAFILE_BASE                                                   syscall.Errno = 9650
+	DNS_ERROR_PRIMARY_REQUIRES_DATAFILE                                       syscall.Errno = 9651
+	DNS_ERROR_INVALID_DATAFILE_NAME                                           syscall.Errno = 9652
+	DNS_ERROR_DATAFILE_OPEN_FAILURE                                           syscall.Errno = 9653
+	DNS_ERROR_FILE_WRITEBACK_FAILED                                           syscall.Errno = 9654
+	DNS_ERROR_DATAFILE_PARSING                                                syscall.Errno = 9655
+	DNS_ERROR_DATABASE_BASE                                                   syscall.Errno = 9700
+	DNS_ERROR_RECORD_DOES_NOT_EXIST                                           syscall.Errno = 9701
+	DNS_ERROR_RECORD_FORMAT                                                   syscall.Errno = 9702
+	DNS_ERROR_NODE_CREATION_FAILED                                            syscall.Errno = 9703
+	DNS_ERROR_UNKNOWN_RECORD_TYPE                                             syscall.Errno = 9704
+	DNS_ERROR_RECORD_TIMED_OUT                                                syscall.Errno = 9705
+	DNS_ERROR_NAME_NOT_IN_ZONE                                                syscall.Errno = 9706
+	DNS_ERROR_CNAME_LOOP                                                      syscall.Errno = 9707
+	DNS_ERROR_NODE_IS_CNAME                                                   syscall.Errno = 9708
+	DNS_ERROR_CNAME_COLLISION                                                 syscall.Errno = 9709
+	DNS_ERROR_RECORD_ONLY_AT_ZONE_ROOT                                        syscall.Errno = 9710
+	DNS_ERROR_RECORD_ALREADY_EXISTS                                           syscall.Errno = 9711
+	DNS_ERROR_SECONDARY_DATA                                                  syscall.Errno = 9712
+	DNS_ERROR_NO_CREATE_CACHE_DATA                                            syscall.Errno = 9713
+	DNS_ERROR_NAME_DOES_NOT_EXIST                                             syscall.Errno = 9714
+	DNS_WARNING_PTR_CREATE_FAILED                                             syscall.Errno = 9715
+	DNS_WARNING_DOMAIN_UNDELETED                                              syscall.Errno = 9716
+	DNS_ERROR_DS_UNAVAILABLE                                                  syscall.Errno = 9717
+	DNS_ERROR_DS_ZONE_ALREADY_EXISTS                                          syscall.Errno = 9718
+	DNS_ERROR_NO_BOOTFILE_IF_DS_ZONE                                          syscall.Errno = 9719
+	DNS_ERROR_NODE_IS_DNAME                                                   syscall.Errno = 9720
+	DNS_ERROR_DNAME_COLLISION                                                 syscall.Errno = 9721
+	DNS_ERROR_ALIAS_LOOP                                                      syscall.Errno = 9722
+	DNS_ERROR_OPERATION_BASE                                                  syscall.Errno = 9750
+	DNS_INFO_AXFR_COMPLETE                                                    syscall.Errno = 9751
+	DNS_ERROR_AXFR                                                            syscall.Errno = 9752
+	DNS_INFO_ADDED_LOCAL_WINS                                                 syscall.Errno = 9753
+	DNS_ERROR_SECURE_BASE                                                     syscall.Errno = 9800
+	DNS_STATUS_CONTINUE_NEEDED                                                syscall.Errno = 9801
+	DNS_ERROR_SETUP_BASE                                                      syscall.Errno = 9850
+	DNS_ERROR_NO_TCPIP                                                        syscall.Errno = 9851
+	DNS_ERROR_NO_DNS_SERVERS                                                  syscall.Errno = 9852
+	DNS_ERROR_DP_BASE                                                         syscall.Errno = 9900
+	DNS_ERROR_DP_DOES_NOT_EXIST                                               syscall.Errno = 9901
+	DNS_ERROR_DP_ALREADY_EXISTS                                               syscall.Errno = 9902
+	DNS_ERROR_DP_NOT_ENLISTED                                                 syscall.Errno = 9903
+	DNS_ERROR_DP_ALREADY_ENLISTED                                             syscall.Errno = 9904
+	DNS_ERROR_DP_NOT_AVAILABLE                                                syscall.Errno = 9905
+	DNS_ERROR_DP_FSMO_ERROR                                                   syscall.Errno = 9906
+	DNS_ERROR_RRL_NOT_ENABLED                                                 syscall.Errno = 9911
+	DNS_ERROR_RRL_INVALID_WINDOW_SIZE                                         syscall.Errno = 9912
+	DNS_ERROR_RRL_INVALID_IPV4_PREFIX                                         syscall.Errno = 9913
+	DNS_ERROR_RRL_INVALID_IPV6_PREFIX                                         syscall.Errno = 9914
+	DNS_ERROR_RRL_INVALID_TC_RATE                                             syscall.Errno = 9915
+	DNS_ERROR_RRL_INVALID_LEAK_RATE                                           syscall.Errno = 9916
+	DNS_ERROR_RRL_LEAK_RATE_LESSTHAN_TC_RATE                                  syscall.Errno = 9917
+	DNS_ERROR_VIRTUALIZATION_INSTANCE_ALREADY_EXISTS                          syscall.Errno = 9921
+	DNS_ERROR_VIRTUALIZATION_INSTANCE_DOES_NOT_EXIST                          syscall.Errno = 9922
+	DNS_ERROR_VIRTUALIZATION_TREE_LOCKED                                      syscall.Errno = 9923
+	DNS_ERROR_INVAILD_VIRTUALIZATION_INSTANCE_NAME                            syscall.Errno = 9924
+	DNS_ERROR_DEFAULT_VIRTUALIZATION_INSTANCE                                 syscall.Errno = 9925
+	DNS_ERROR_ZONESCOPE_ALREADY_EXISTS                                        syscall.Errno = 9951
+	DNS_ERROR_ZONESCOPE_DOES_NOT_EXIST                                        syscall.Errno = 9952
+	DNS_ERROR_DEFAULT_ZONESCOPE                                               syscall.Errno = 9953
+	DNS_ERROR_INVALID_ZONESCOPE_NAME                                          syscall.Errno = 9954
+	DNS_ERROR_NOT_ALLOWED_WITH_ZONESCOPES                                     syscall.Errno = 9955
+	DNS_ERROR_LOAD_ZONESCOPE_FAILED                                           syscall.Errno = 9956
+	DNS_ERROR_ZONESCOPE_FILE_WRITEBACK_FAILED                                 syscall.Errno = 9957
+	DNS_ERROR_INVALID_SCOPE_NAME                                              syscall.Errno = 9958
+	DNS_ERROR_SCOPE_DOES_NOT_EXIST                                            syscall.Errno = 9959
+	DNS_ERROR_DEFAULT_SCOPE                                                   syscall.Errno = 9960
+	DNS_ERROR_INVALID_SCOPE_OPERATION                                         syscall.Errno = 9961
+	DNS_ERROR_SCOPE_LOCKED                                                    syscall.Errno = 9962
+	DNS_ERROR_SCOPE_ALREADY_EXISTS                                            syscall.Errno = 9963
+	DNS_ERROR_POLICY_ALREADY_EXISTS                                           syscall.Errno = 9971
+	DNS_ERROR_POLICY_DOES_NOT_EXIST                                           syscall.Errno = 9972
+	DNS_ERROR_POLICY_INVALID_CRITERIA                                         syscall.Errno = 9973
+	DNS_ERROR_POLICY_INVALID_SETTINGS                                         syscall.Errno = 9974
+	DNS_ERROR_CLIENT_SUBNET_IS_ACCESSED                                       syscall.Errno = 9975
+	DNS_ERROR_CLIENT_SUBNET_DOES_NOT_EXIST                                    syscall.Errno = 9976
+	DNS_ERROR_CLIENT_SUBNET_ALREADY_EXISTS                                    syscall.Errno = 9977
+	DNS_ERROR_SUBNET_DOES_NOT_EXIST                                           syscall.Errno = 9978
+	DNS_ERROR_SUBNET_ALREADY_EXISTS                                           syscall.Errno = 9979
+	DNS_ERROR_POLICY_LOCKED                                                   syscall.Errno = 9980
+	DNS_ERROR_POLICY_INVALID_WEIGHT                                           syscall.Errno = 9981
+	DNS_ERROR_POLICY_INVALID_NAME                                             syscall.Errno = 9982
+	DNS_ERROR_POLICY_MISSING_CRITERIA                                         syscall.Errno = 9983
+	DNS_ERROR_INVALID_CLIENT_SUBNET_NAME                                      syscall.Errno = 9984
+	DNS_ERROR_POLICY_PROCESSING_ORDER_INVALID                                 syscall.Errno = 9985
+	DNS_ERROR_POLICY_SCOPE_MISSING                                            syscall.Errno = 9986
+	DNS_ERROR_POLICY_SCOPE_NOT_ALLOWED                                        syscall.Errno = 9987
+	DNS_ERROR_SERVERSCOPE_IS_REFERENCED                                       syscall.Errno = 9988
+	DNS_ERROR_ZONESCOPE_IS_REFERENCED                                         syscall.Errno = 9989
+	DNS_ERROR_POLICY_INVALID_CRITERIA_CLIENT_SUBNET                           syscall.Errno = 9990
+	DNS_ERROR_POLICY_INVALID_CRITERIA_TRANSPORT_PROTOCOL                      syscall.Errno = 9991
+	DNS_ERROR_POLICY_INVALID_CRITERIA_NETWORK_PROTOCOL                        syscall.Errno = 9992
+	DNS_ERROR_POLICY_INVALID_CRITERIA_INTERFACE                               syscall.Errno = 9993
+	DNS_ERROR_POLICY_INVALID_CRITERIA_FQDN                                    syscall.Errno = 9994
+	DNS_ERROR_POLICY_INVALID_CRITERIA_QUERY_TYPE                              syscall.Errno = 9995
+	DNS_ERROR_POLICY_INVALID_CRITERIA_TIME_OF_DAY                             syscall.Errno = 9996
+	WSABASEERR                                                                syscall.Errno = 10000
+	WSAEINTR                                                                  syscall.Errno = 10004
+	WSAEBADF                                                                  syscall.Errno = 10009
+	WSAEACCES                                                                 syscall.Errno = 10013
+	WSAEFAULT                                                                 syscall.Errno = 10014
+	WSAEINVAL                                                                 syscall.Errno = 10022
+	WSAEMFILE                                                                 syscall.Errno = 10024
+	WSAEWOULDBLOCK                                                            syscall.Errno = 10035
+	WSAEINPROGRESS                                                            syscall.Errno = 10036
+	WSAEALREADY                                                               syscall.Errno = 10037
+	WSAENOTSOCK                                                               syscall.Errno = 10038
+	WSAEDESTADDRREQ                                                           syscall.Errno = 10039
+	WSAEMSGSIZE                                                               syscall.Errno = 10040
+	WSAEPROTOTYPE                                                             syscall.Errno = 10041
+	WSAENOPROTOOPT                                                            syscall.Errno = 10042
+	WSAEPROTONOSUPPORT                                                        syscall.Errno = 10043
+	WSAESOCKTNOSUPPORT                                                        syscall.Errno = 10044
+	WSAEOPNOTSUPP                                                             syscall.Errno = 10045
+	WSAEPFNOSUPPORT                                                           syscall.Errno = 10046
+	WSAEAFNOSUPPORT                                                           syscall.Errno = 10047
+	WSAEADDRINUSE                                                             syscall.Errno = 10048
+	WSAEADDRNOTAVAIL                                                          syscall.Errno = 10049
+	WSAENETDOWN                                                               syscall.Errno = 10050
+	WSAENETUNREACH                                                            syscall.Errno = 10051
+	WSAENETRESET                                                              syscall.Errno = 10052
+	WSAECONNABORTED                                                           syscall.Errno = 10053
+	WSAECONNRESET                                                             syscall.Errno = 10054
+	WSAENOBUFS                                                                syscall.Errno = 10055
+	WSAEISCONN                                                                syscall.Errno = 10056
+	WSAENOTCONN                                                               syscall.Errno = 10057
+	WSAESHUTDOWN                                                              syscall.Errno = 10058
+	WSAETOOMANYREFS                                                           syscall.Errno = 10059
+	WSAETIMEDOUT                                                              syscall.Errno = 10060
+	WSAECONNREFUSED                                                           syscall.Errno = 10061
+	WSAELOOP                                                                  syscall.Errno = 10062
+	WSAENAMETOOLONG                                                           syscall.Errno = 10063
+	WSAEHOSTDOWN                                                              syscall.Errno = 10064
+	WSAEHOSTUNREACH                                                           syscall.Errno = 10065
+	WSAENOTEMPTY                                                              syscall.Errno = 10066
+	WSAEPROCLIM                                                               syscall.Errno = 10067
+	WSAEUSERS                                                                 syscall.Errno = 10068
+	WSAEDQUOT                                                                 syscall.Errno = 10069
+	WSAESTALE                                                                 syscall.Errno = 10070
+	WSAEREMOTE                                                                syscall.Errno = 10071
+	WSASYSNOTREADY                                                            syscall.Errno = 10091
+	WSAVERNOTSUPPORTED                                                        syscall.Errno = 10092
+	WSANOTINITIALISED                                                         syscall.Errno = 10093
+	WSAEDISCON                                                                syscall.Errno = 10101
+	WSAENOMORE                                                                syscall.Errno = 10102
+	WSAECANCELLED                                                             syscall.Errno = 10103
+	WSAEINVALIDPROCTABLE                                                      syscall.Errno = 10104
+	WSAEINVALIDPROVIDER                                                       syscall.Errno = 10105
+	WSAEPROVIDERFAILEDINIT                                                    syscall.Errno = 10106
+	WSASYSCALLFAILURE                                                         syscall.Errno = 10107
+	WSASERVICE_NOT_FOUND                                                      syscall.Errno = 10108
+	WSATYPE_NOT_FOUND                                                         syscall.Errno = 10109
+	WSA_E_NO_MORE                                                             syscall.Errno = 10110
+	WSA_E_CANCELLED                                                           syscall.Errno = 10111
+	WSAEREFUSED                                                               syscall.Errno = 10112
+	WSAHOST_NOT_FOUND                                                         syscall.Errno = 11001
+	WSATRY_AGAIN                                                              syscall.Errno = 11002
+	WSANO_RECOVERY                                                            syscall.Errno = 11003
+	WSANO_DATA                                                                syscall.Errno = 11004
+	WSA_QOS_RECEIVERS                                                         syscall.Errno = 11005
+	WSA_QOS_SENDERS                                                           syscall.Errno = 11006
+	WSA_QOS_NO_SENDERS                                                        syscall.Errno = 11007
+	WSA_QOS_NO_RECEIVERS                                                      syscall.Errno = 11008
+	WSA_QOS_REQUEST_CONFIRMED                                                 syscall.Errno = 11009
+	WSA_QOS_ADMISSION_FAILURE                                                 syscall.Errno = 11010
+	WSA_QOS_POLICY_FAILURE                                                    syscall.Errno = 11011
+	WSA_QOS_BAD_STYLE                                                         syscall.Errno = 11012
+	WSA_QOS_BAD_OBJECT                                                        syscall.Errno = 11013
+	WSA_QOS_TRAFFIC_CTRL_ERROR                                                syscall.Errno = 11014
+	WSA_QOS_GENERIC_ERROR                                                     syscall.Errno = 11015
+	WSA_QOS_ESERVICETYPE                                                      syscall.Errno = 11016
+	WSA_QOS_EFLOWSPEC                                                         syscall.Errno = 11017
+	WSA_QOS_EPROVSPECBUF                                                      syscall.Errno = 11018
+	WSA_QOS_EFILTERSTYLE                                                      syscall.Errno = 11019
+	WSA_QOS_EFILTERTYPE                                                       syscall.Errno = 11020
+	WSA_QOS_EFILTERCOUNT                                                      syscall.Errno = 11021
+	WSA_QOS_EOBJLENGTH                                                        syscall.Errno = 11022
+	WSA_QOS_EFLOWCOUNT                                                        syscall.Errno = 11023
+	WSA_QOS_EUNKOWNPSOBJ                                                      syscall.Errno = 11024
+	WSA_QOS_EPOLICYOBJ                                                        syscall.Errno = 11025
+	WSA_QOS_EFLOWDESC                                                         syscall.Errno = 11026
+	WSA_QOS_EPSFLOWSPEC                                                       syscall.Errno = 11027
+	WSA_QOS_EPSFILTERSPEC                                                     syscall.Errno = 11028
+	WSA_QOS_ESDMODEOBJ                                                        syscall.Errno = 11029
+	WSA_QOS_ESHAPERATEOBJ                                                     syscall.Errno = 11030
+	WSA_QOS_RESERVED_PETYPE                                                   syscall.Errno = 11031
+	WSA_SECURE_HOST_NOT_FOUND                                                 syscall.Errno = 11032
+	WSA_IPSEC_NAME_POLICY_ERROR                                               syscall.Errno = 11033
+	ERROR_IPSEC_QM_POLICY_EXISTS                                              syscall.Errno = 13000
+	ERROR_IPSEC_QM_POLICY_NOT_FOUND                                           syscall.Errno = 13001
+	ERROR_IPSEC_QM_POLICY_IN_USE                                              syscall.Errno = 13002
+	ERROR_IPSEC_MM_POLICY_EXISTS                                              syscall.Errno = 13003
+	ERROR_IPSEC_MM_POLICY_NOT_FOUND                                           syscall.Errno = 13004
+	ERROR_IPSEC_MM_POLICY_IN_USE                                              syscall.Errno = 13005
+	ERROR_IPSEC_MM_FILTER_EXISTS                                              syscall.Errno = 13006
+	ERROR_IPSEC_MM_FILTER_NOT_FOUND                                           syscall.Errno = 13007
+	ERROR_IPSEC_TRANSPORT_FILTER_EXISTS                                       syscall.Errno = 13008
+	ERROR_IPSEC_TRANSPORT_FILTER_NOT_FOUND                                    syscall.Errno = 13009
+	ERROR_IPSEC_MM_AUTH_EXISTS                                                syscall.Errno = 13010
+	ERROR_IPSEC_MM_AUTH_NOT_FOUND                                             syscall.Errno = 13011
+	ERROR_IPSEC_MM_AUTH_IN_USE                                                syscall.Errno = 13012
+	ERROR_IPSEC_DEFAULT_MM_POLICY_NOT_FOUND                                   syscall.Errno = 13013
+	ERROR_IPSEC_DEFAULT_MM_AUTH_NOT_FOUND                                     syscall.Errno = 13014
+	ERROR_IPSEC_DEFAULT_QM_POLICY_NOT_FOUND                                   syscall.Errno = 13015
+	ERROR_IPSEC_TUNNEL_FILTER_EXISTS                                          syscall.Errno = 13016
+	ERROR_IPSEC_TUNNEL_FILTER_NOT_FOUND                                       syscall.Errno = 13017
+	ERROR_IPSEC_MM_FILTER_PENDING_DELETION                                    syscall.Errno = 13018
+	ERROR_IPSEC_TRANSPORT_FILTER_PENDING_DELETION                             syscall.Errno = 13019
+	ERROR_IPSEC_TUNNEL_FILTER_PENDING_DELETION                                syscall.Errno = 13020
+	ERROR_IPSEC_MM_POLICY_PENDING_DELETION                                    syscall.Errno = 13021
+	ERROR_IPSEC_MM_AUTH_PENDING_DELETION                                      syscall.Errno = 13022
+	ERROR_IPSEC_QM_POLICY_PENDING_DELETION                                    syscall.Errno = 13023
+	WARNING_IPSEC_MM_POLICY_PRUNED                                            syscall.Errno = 13024
+	WARNING_IPSEC_QM_POLICY_PRUNED                                            syscall.Errno = 13025
+	ERROR_IPSEC_IKE_NEG_STATUS_BEGIN                                          syscall.Errno = 13800
+	ERROR_IPSEC_IKE_AUTH_FAIL                                                 syscall.Errno = 13801
+	ERROR_IPSEC_IKE_ATTRIB_FAIL                                               syscall.Errno = 13802
+	ERROR_IPSEC_IKE_NEGOTIATION_PENDING                                       syscall.Errno = 13803
+	ERROR_IPSEC_IKE_GENERAL_PROCESSING_ERROR                                  syscall.Errno = 13804
+	ERROR_IPSEC_IKE_TIMED_OUT                                                 syscall.Errno = 13805
+	ERROR_IPSEC_IKE_NO_CERT                                                   syscall.Errno = 13806
+	ERROR_IPSEC_IKE_SA_DELETED                                                syscall.Errno = 13807
+	ERROR_IPSEC_IKE_SA_REAPED                                                 syscall.Errno = 13808
+	ERROR_IPSEC_IKE_MM_ACQUIRE_DROP                                           syscall.Errno = 13809
+	ERROR_IPSEC_IKE_QM_ACQUIRE_DROP                                           syscall.Errno = 13810
+	ERROR_IPSEC_IKE_QUEUE_DROP_MM                                             syscall.Errno = 13811
+	ERROR_IPSEC_IKE_QUEUE_DROP_NO_MM                                          syscall.Errno = 13812
+	ERROR_IPSEC_IKE_DROP_NO_RESPONSE                                          syscall.Errno = 13813
+	ERROR_IPSEC_IKE_MM_DELAY_DROP                                             syscall.Errno = 13814
+	ERROR_IPSEC_IKE_QM_DELAY_DROP                                             syscall.Errno = 13815
+	ERROR_IPSEC_IKE_ERROR                                                     syscall.Errno = 13816
+	ERROR_IPSEC_IKE_CRL_FAILED                                                syscall.Errno = 13817
+	ERROR_IPSEC_IKE_INVALID_KEY_USAGE                                         syscall.Errno = 13818
+	ERROR_IPSEC_IKE_INVALID_CERT_TYPE                                         syscall.Errno = 13819
+	ERROR_IPSEC_IKE_NO_PRIVATE_KEY                                            syscall.Errno = 13820
+	ERROR_IPSEC_IKE_SIMULTANEOUS_REKEY                                        syscall.Errno = 13821
+	ERROR_IPSEC_IKE_DH_FAIL                                                   syscall.Errno = 13822
+	ERROR_IPSEC_IKE_CRITICAL_PAYLOAD_NOT_RECOGNIZED                           syscall.Errno = 13823
+	ERROR_IPSEC_IKE_INVALID_HEADER                                            syscall.Errno = 13824
+	ERROR_IPSEC_IKE_NO_POLICY                                                 syscall.Errno = 13825
+	ERROR_IPSEC_IKE_INVALID_SIGNATURE                                         syscall.Errno = 13826
+	ERROR_IPSEC_IKE_KERBEROS_ERROR                                            syscall.Errno = 13827
+	ERROR_IPSEC_IKE_NO_PUBLIC_KEY                                             syscall.Errno = 13828
+	ERROR_IPSEC_IKE_PROCESS_ERR                                               syscall.Errno = 13829
+	ERROR_IPSEC_IKE_PROCESS_ERR_SA                                            syscall.Errno = 13830
+	ERROR_IPSEC_IKE_PROCESS_ERR_PROP                                          syscall.Errno = 13831
+	ERROR_IPSEC_IKE_PROCESS_ERR_TRANS                                         syscall.Errno = 13832
+	ERROR_IPSEC_IKE_PROCESS_ERR_KE                                            syscall.Errno = 13833
+	ERROR_IPSEC_IKE_PROCESS_ERR_ID                                            syscall.Errno = 13834
+	ERROR_IPSEC_IKE_PROCESS_ERR_CERT                                          syscall.Errno = 13835
+	ERROR_IPSEC_IKE_PROCESS_ERR_CERT_REQ                                      syscall.Errno = 13836
+	ERROR_IPSEC_IKE_PROCESS_ERR_HASH                                          syscall.Errno = 13837
+	ERROR_IPSEC_IKE_PROCESS_ERR_SIG                                           syscall.Errno = 13838
+	ERROR_IPSEC_IKE_PROCESS_ERR_NONCE                                         syscall.Errno = 13839
+	ERROR_IPSEC_IKE_PROCESS_ERR_NOTIFY                                        syscall.Errno = 13840
+	ERROR_IPSEC_IKE_PROCESS_ERR_DELETE                                        syscall.Errno = 13841
+	ERROR_IPSEC_IKE_PROCESS_ERR_VENDOR                                        syscall.Errno = 13842
+	ERROR_IPSEC_IKE_INVALID_PAYLOAD                                           syscall.Errno = 13843
+	ERROR_IPSEC_IKE_LOAD_SOFT_SA                                              syscall.Errno = 13844
+	ERROR_IPSEC_IKE_SOFT_SA_TORN_DOWN                                         syscall.Errno = 13845
+	ERROR_IPSEC_IKE_INVALID_COOKIE                                            syscall.Errno = 13846
+	ERROR_IPSEC_IKE_NO_PEER_CERT                                              syscall.Errno = 13847
+	ERROR_IPSEC_IKE_PEER_CRL_FAILED                                           syscall.Errno = 13848
+	ERROR_IPSEC_IKE_POLICY_CHANGE                                             syscall.Errno = 13849
+	ERROR_IPSEC_IKE_NO_MM_POLICY                                              syscall.Errno = 13850
+	ERROR_IPSEC_IKE_NOTCBPRIV                                                 syscall.Errno = 13851
+	ERROR_IPSEC_IKE_SECLOADFAIL                                               syscall.Errno = 13852
+	ERROR_IPSEC_IKE_FAILSSPINIT                                               syscall.Errno = 13853
+	ERROR_IPSEC_IKE_FAILQUERYSSP                                              syscall.Errno = 13854
+	ERROR_IPSEC_IKE_SRVACQFAIL                                                syscall.Errno = 13855
+	ERROR_IPSEC_IKE_SRVQUERYCRED                                              syscall.Errno = 13856
+	ERROR_IPSEC_IKE_GETSPIFAIL                                                syscall.Errno = 13857
+	ERROR_IPSEC_IKE_INVALID_FILTER                                            syscall.Errno = 13858
+	ERROR_IPSEC_IKE_OUT_OF_MEMORY                                             syscall.Errno = 13859
+	ERROR_IPSEC_IKE_ADD_UPDATE_KEY_FAILED                                     syscall.Errno = 13860
+	ERROR_IPSEC_IKE_INVALID_POLICY                                            syscall.Errno = 13861
+	ERROR_IPSEC_IKE_UNKNOWN_DOI                                               syscall.Errno = 13862
+	ERROR_IPSEC_IKE_INVALID_SITUATION                                         syscall.Errno = 13863
+	ERROR_IPSEC_IKE_DH_FAILURE                                                syscall.Errno = 13864
+	ERROR_IPSEC_IKE_INVALID_GROUP                                             syscall.Errno = 13865
+	ERROR_IPSEC_IKE_ENCRYPT                                                   syscall.Errno = 13866
+	ERROR_IPSEC_IKE_DECRYPT                                                   syscall.Errno = 13867
+	ERROR_IPSEC_IKE_POLICY_MATCH                                              syscall.Errno = 13868
+	ERROR_IPSEC_IKE_UNSUPPORTED_ID                                            syscall.Errno = 13869
+	ERROR_IPSEC_IKE_INVALID_HASH                                              syscall.Errno = 13870
+	ERROR_IPSEC_IKE_INVALID_HASH_ALG                                          syscall.Errno = 13871
+	ERROR_IPSEC_IKE_INVALID_HASH_SIZE                                         syscall.Errno = 13872
+	ERROR_IPSEC_IKE_INVALID_ENCRYPT_ALG                                       syscall.Errno = 13873
+	ERROR_IPSEC_IKE_INVALID_AUTH_ALG                                          syscall.Errno = 13874
+	ERROR_IPSEC_IKE_INVALID_SIG                                               syscall.Errno = 13875
+	ERROR_IPSEC_IKE_LOAD_FAILED                                               syscall.Errno = 13876
+	ERROR_IPSEC_IKE_RPC_DELETE                                                syscall.Errno = 13877
+	ERROR_IPSEC_IKE_BENIGN_REINIT                                             syscall.Errno = 13878
+	ERROR_IPSEC_IKE_INVALID_RESPONDER_LIFETIME_NOTIFY                         syscall.Errno = 13879
+	ERROR_IPSEC_IKE_INVALID_MAJOR_VERSION                                     syscall.Errno = 13880
+	ERROR_IPSEC_IKE_INVALID_CERT_KEYLEN                                       syscall.Errno = 13881
+	ERROR_IPSEC_IKE_MM_LIMIT                                                  syscall.Errno = 13882
+	ERROR_IPSEC_IKE_NEGOTIATION_DISABLED                                      syscall.Errno = 13883
+	ERROR_IPSEC_IKE_QM_LIMIT                                                  syscall.Errno = 13884
+	ERROR_IPSEC_IKE_MM_EXPIRED                                                syscall.Errno = 13885
+	ERROR_IPSEC_IKE_PEER_MM_ASSUMED_INVALID                                   syscall.Errno = 13886
+	ERROR_IPSEC_IKE_CERT_CHAIN_POLICY_MISMATCH                                syscall.Errno = 13887
+	ERROR_IPSEC_IKE_UNEXPECTED_MESSAGE_ID                                     syscall.Errno = 13888
+	ERROR_IPSEC_IKE_INVALID_AUTH_PAYLOAD                                      syscall.Errno = 13889
+	ERROR_IPSEC_IKE_DOS_COOKIE_SENT                                           syscall.Errno = 13890
+	ERROR_IPSEC_IKE_SHUTTING_DOWN                                             syscall.Errno = 13891
+	ERROR_IPSEC_IKE_CGA_AUTH_FAILED                                           syscall.Errno = 13892
+	ERROR_IPSEC_IKE_PROCESS_ERR_NATOA                                         syscall.Errno = 13893
+	ERROR_IPSEC_IKE_INVALID_MM_FOR_QM                                         syscall.Errno = 13894
+	ERROR_IPSEC_IKE_QM_EXPIRED                                                syscall.Errno = 13895
+	ERROR_IPSEC_IKE_TOO_MANY_FILTERS                                          syscall.Errno = 13896
+	ERROR_IPSEC_IKE_NEG_STATUS_END                                            syscall.Errno = 13897
+	ERROR_IPSEC_IKE_KILL_DUMMY_NAP_TUNNEL                                     syscall.Errno = 13898
+	ERROR_IPSEC_IKE_INNER_IP_ASSIGNMENT_FAILURE                               syscall.Errno = 13899
+	ERROR_IPSEC_IKE_REQUIRE_CP_PAYLOAD_MISSING                                syscall.Errno = 13900
+	ERROR_IPSEC_KEY_MODULE_IMPERSONATION_NEGOTIATION_PENDING                  syscall.Errno = 13901
+	ERROR_IPSEC_IKE_COEXISTENCE_SUPPRESS                                      syscall.Errno = 13902
+	ERROR_IPSEC_IKE_RATELIMIT_DROP                                            syscall.Errno = 13903
+	ERROR_IPSEC_IKE_PEER_DOESNT_SUPPORT_MOBIKE                                syscall.Errno = 13904
+	ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE                                     syscall.Errno = 13905
+	ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_FAILURE                         syscall.Errno = 13906
+	ERROR_IPSEC_IKE_AUTHORIZATION_FAILURE_WITH_OPTIONAL_RETRY                 syscall.Errno = 13907
+	ERROR_IPSEC_IKE_STRONG_CRED_AUTHORIZATION_AND_CERTMAP_FAILURE             syscall.Errno = 13908
+	ERROR_IPSEC_IKE_NEG_STATUS_EXTENDED_END                                   syscall.Errno = 13909
+	ERROR_IPSEC_BAD_SPI                                                       syscall.Errno = 13910
+	ERROR_IPSEC_SA_LIFETIME_EXPIRED                                           syscall.Errno = 13911
+	ERROR_IPSEC_WRONG_SA                                                      syscall.Errno = 13912
+	ERROR_IPSEC_REPLAY_CHECK_FAILED                                           syscall.Errno = 13913
+	ERROR_IPSEC_INVALID_PACKET                                                syscall.Errno = 13914
+	ERROR_IPSEC_INTEGRITY_CHECK_FAILED                                        syscall.Errno = 13915
+	ERROR_IPSEC_CLEAR_TEXT_DROP                                               syscall.Errno = 13916
+	ERROR_IPSEC_AUTH_FIREWALL_DROP                                            syscall.Errno = 13917
+	ERROR_IPSEC_THROTTLE_DROP                                                 syscall.Errno = 13918
+	ERROR_IPSEC_DOSP_BLOCK                                                    syscall.Errno = 13925
+	ERROR_IPSEC_DOSP_RECEIVED_MULTICAST                                       syscall.Errno = 13926
+	ERROR_IPSEC_DOSP_INVALID_PACKET                                           syscall.Errno = 13927
+	ERROR_IPSEC_DOSP_STATE_LOOKUP_FAILED                                      syscall.Errno = 13928
+	ERROR_IPSEC_DOSP_MAX_ENTRIES                                              syscall.Errno = 13929
+	ERROR_IPSEC_DOSP_KEYMOD_NOT_ALLOWED                                       syscall.Errno = 13930
+	ERROR_IPSEC_DOSP_NOT_INSTALLED                                            syscall.Errno = 13931
+	ERROR_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES                              syscall.Errno = 13932
+	ERROR_SXS_SECTION_NOT_FOUND                                               syscall.Errno = 14000
+	ERROR_SXS_CANT_GEN_ACTCTX                                                 syscall.Errno = 14001
+	ERROR_SXS_INVALID_ACTCTXDATA_FORMAT                                       syscall.Errno = 14002
+	ERROR_SXS_ASSEMBLY_NOT_FOUND                                              syscall.Errno = 14003
+	ERROR_SXS_MANIFEST_FORMAT_ERROR                                           syscall.Errno = 14004
+	ERROR_SXS_MANIFEST_PARSE_ERROR                                            syscall.Errno = 14005
+	ERROR_SXS_ACTIVATION_CONTEXT_DISABLED                                     syscall.Errno = 14006
+	ERROR_SXS_KEY_NOT_FOUND                                                   syscall.Errno = 14007
+	ERROR_SXS_VERSION_CONFLICT                                                syscall.Errno = 14008
+	ERROR_SXS_WRONG_SECTION_TYPE                                              syscall.Errno = 14009
+	ERROR_SXS_THREAD_QUERIES_DISABLED                                         syscall.Errno = 14010
+	ERROR_SXS_PROCESS_DEFAULT_ALREADY_SET                                     syscall.Errno = 14011
+	ERROR_SXS_UNKNOWN_ENCODING_GROUP                                          syscall.Errno = 14012
+	ERROR_SXS_UNKNOWN_ENCODING                                                syscall.Errno = 14013
+	ERROR_SXS_INVALID_XML_NAMESPACE_URI                                       syscall.Errno = 14014
+	ERROR_SXS_ROOT_MANIFEST_DEPENDENCY_NOT_INSTALLED                          syscall.Errno = 14015
+	ERROR_SXS_LEAF_MANIFEST_DEPENDENCY_NOT_INSTALLED                          syscall.Errno = 14016
+	ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE                             syscall.Errno = 14017
+	ERROR_SXS_MANIFEST_MISSING_REQUIRED_DEFAULT_NAMESPACE                     syscall.Errno = 14018
+	ERROR_SXS_MANIFEST_INVALID_REQUIRED_DEFAULT_NAMESPACE                     syscall.Errno = 14019
+	ERROR_SXS_PRIVATE_MANIFEST_CROSS_PATH_WITH_REPARSE_POINT                  syscall.Errno = 14020
+	ERROR_SXS_DUPLICATE_DLL_NAME                                              syscall.Errno = 14021
+	ERROR_SXS_DUPLICATE_WINDOWCLASS_NAME                                      syscall.Errno = 14022
+	ERROR_SXS_DUPLICATE_CLSID                                                 syscall.Errno = 14023
+	ERROR_SXS_DUPLICATE_IID                                                   syscall.Errno = 14024
+	ERROR_SXS_DUPLICATE_TLBID                                                 syscall.Errno = 14025
+	ERROR_SXS_DUPLICATE_PROGID                                                syscall.Errno = 14026
+	ERROR_SXS_DUPLICATE_ASSEMBLY_NAME                                         syscall.Errno = 14027
+	ERROR_SXS_FILE_HASH_MISMATCH                                              syscall.Errno = 14028
+	ERROR_SXS_POLICY_PARSE_ERROR                                              syscall.Errno = 14029
+	ERROR_SXS_XML_E_MISSINGQUOTE                                              syscall.Errno = 14030
+	ERROR_SXS_XML_E_COMMENTSYNTAX                                             syscall.Errno = 14031
+	ERROR_SXS_XML_E_BADSTARTNAMECHAR                                          syscall.Errno = 14032
+	ERROR_SXS_XML_E_BADNAMECHAR                                               syscall.Errno = 14033
+	ERROR_SXS_XML_E_BADCHARINSTRING                                           syscall.Errno = 14034
+	ERROR_SXS_XML_E_XMLDECLSYNTAX                                             syscall.Errno = 14035
+	ERROR_SXS_XML_E_BADCHARDATA                                               syscall.Errno = 14036
+	ERROR_SXS_XML_E_MISSINGWHITESPACE                                         syscall.Errno = 14037
+	ERROR_SXS_XML_E_EXPECTINGTAGEND                                           syscall.Errno = 14038
+	ERROR_SXS_XML_E_MISSINGSEMICOLON                                          syscall.Errno = 14039
+	ERROR_SXS_XML_E_UNBALANCEDPAREN                                           syscall.Errno = 14040
+	ERROR_SXS_XML_E_INTERNALERROR                                             syscall.Errno = 14041
+	ERROR_SXS_XML_E_UNEXPECTED_WHITESPACE                                     syscall.Errno = 14042
+	ERROR_SXS_XML_E_INCOMPLETE_ENCODING                                       syscall.Errno = 14043
+	ERROR_SXS_XML_E_MISSING_PAREN                                             syscall.Errno = 14044
+	ERROR_SXS_XML_E_EXPECTINGCLOSEQUOTE                                       syscall.Errno = 14045
+	ERROR_SXS_XML_E_MULTIPLE_COLONS                                           syscall.Errno = 14046
+	ERROR_SXS_XML_E_INVALID_DECIMAL                                           syscall.Errno = 14047
+	ERROR_SXS_XML_E_INVALID_HEXIDECIMAL                                       syscall.Errno = 14048
+	ERROR_SXS_XML_E_INVALID_UNICODE                                           syscall.Errno = 14049
+	ERROR_SXS_XML_E_WHITESPACEORQUESTIONMARK                                  syscall.Errno = 14050
+	ERROR_SXS_XML_E_UNEXPECTEDENDTAG                                          syscall.Errno = 14051
+	ERROR_SXS_XML_E_UNCLOSEDTAG                                               syscall.Errno = 14052
+	ERROR_SXS_XML_E_DUPLICATEATTRIBUTE                                        syscall.Errno = 14053
+	ERROR_SXS_XML_E_MULTIPLEROOTS                                             syscall.Errno = 14054
+	ERROR_SXS_XML_E_INVALIDATROOTLEVEL                                        syscall.Errno = 14055
+	ERROR_SXS_XML_E_BADXMLDECL                                                syscall.Errno = 14056
+	ERROR_SXS_XML_E_MISSINGROOT                                               syscall.Errno = 14057
+	ERROR_SXS_XML_E_UNEXPECTEDEOF                                             syscall.Errno = 14058
+	ERROR_SXS_XML_E_BADPEREFINSUBSET                                          syscall.Errno = 14059
+	ERROR_SXS_XML_E_UNCLOSEDSTARTTAG                                          syscall.Errno = 14060
+	ERROR_SXS_XML_E_UNCLOSEDENDTAG                                            syscall.Errno = 14061
+	ERROR_SXS_XML_E_UNCLOSEDSTRING                                            syscall.Errno = 14062
+	ERROR_SXS_XML_E_UNCLOSEDCOMMENT                                           syscall.Errno = 14063
+	ERROR_SXS_XML_E_UNCLOSEDDECL                                              syscall.Errno = 14064
+	ERROR_SXS_XML_E_UNCLOSEDCDATA                                             syscall.Errno = 14065
+	ERROR_SXS_XML_E_RESERVEDNAMESPACE                                         syscall.Errno = 14066
+	ERROR_SXS_XML_E_INVALIDENCODING                                           syscall.Errno = 14067
+	ERROR_SXS_XML_E_INVALIDSWITCH                                             syscall.Errno = 14068
+	ERROR_SXS_XML_E_BADXMLCASE                                                syscall.Errno = 14069
+	ERROR_SXS_XML_E_INVALID_STANDALONE                                        syscall.Errno = 14070
+	ERROR_SXS_XML_E_UNEXPECTED_STANDALONE                                     syscall.Errno = 14071
+	ERROR_SXS_XML_E_INVALID_VERSION                                           syscall.Errno = 14072
+	ERROR_SXS_XML_E_MISSINGEQUALS                                             syscall.Errno = 14073
+	ERROR_SXS_PROTECTION_RECOVERY_FAILED                                      syscall.Errno = 14074
+	ERROR_SXS_PROTECTION_PUBLIC_KEY_TOO_SHORT                                 syscall.Errno = 14075
+	ERROR_SXS_PROTECTION_CATALOG_NOT_VALID                                    syscall.Errno = 14076
+	ERROR_SXS_UNTRANSLATABLE_HRESULT                                          syscall.Errno = 14077
+	ERROR_SXS_PROTECTION_CATALOG_FILE_MISSING                                 syscall.Errno = 14078
+	ERROR_SXS_MISSING_ASSEMBLY_IDENTITY_ATTRIBUTE                             syscall.Errno = 14079
+	ERROR_SXS_INVALID_ASSEMBLY_IDENTITY_ATTRIBUTE_NAME                        syscall.Errno = 14080
+	ERROR_SXS_ASSEMBLY_MISSING                                                syscall.Errno = 14081
+	ERROR_SXS_CORRUPT_ACTIVATION_STACK                                        syscall.Errno = 14082
+	ERROR_SXS_CORRUPTION                                                      syscall.Errno = 14083
+	ERROR_SXS_EARLY_DEACTIVATION                                              syscall.Errno = 14084
+	ERROR_SXS_INVALID_DEACTIVATION                                            syscall.Errno = 14085
+	ERROR_SXS_MULTIPLE_DEACTIVATION                                           syscall.Errno = 14086
+	ERROR_SXS_PROCESS_TERMINATION_REQUESTED                                   syscall.Errno = 14087
+	ERROR_SXS_RELEASE_ACTIVATION_CONTEXT                                      syscall.Errno = 14088
+	ERROR_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY                         syscall.Errno = 14089
+	ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE                                syscall.Errno = 14090
+	ERROR_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME                                 syscall.Errno = 14091
+	ERROR_SXS_IDENTITY_DUPLICATE_ATTRIBUTE                                    syscall.Errno = 14092
+	ERROR_SXS_IDENTITY_PARSE_ERROR                                            syscall.Errno = 14093
+	ERROR_MALFORMED_SUBSTITUTION_STRING                                       syscall.Errno = 14094
+	ERROR_SXS_INCORRECT_PUBLIC_KEY_TOKEN                                      syscall.Errno = 14095
+	ERROR_UNMAPPED_SUBSTITUTION_STRING                                        syscall.Errno = 14096
+	ERROR_SXS_ASSEMBLY_NOT_LOCKED                                             syscall.Errno = 14097
+	ERROR_SXS_COMPONENT_STORE_CORRUPT                                         syscall.Errno = 14098
+	ERROR_ADVANCED_INSTALLER_FAILED                                           syscall.Errno = 14099
+	ERROR_XML_ENCODING_MISMATCH                                               syscall.Errno = 14100
+	ERROR_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT                   syscall.Errno = 14101
+	ERROR_SXS_IDENTITIES_DIFFERENT                                            syscall.Errno = 14102
+	ERROR_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT                                    syscall.Errno = 14103
+	ERROR_SXS_FILE_NOT_PART_OF_ASSEMBLY                                       syscall.Errno = 14104
+	ERROR_SXS_MANIFEST_TOO_BIG                                                syscall.Errno = 14105
+	ERROR_SXS_SETTING_NOT_REGISTERED                                          syscall.Errno = 14106
+	ERROR_SXS_TRANSACTION_CLOSURE_INCOMPLETE                                  syscall.Errno = 14107
+	ERROR_SMI_PRIMITIVE_INSTALLER_FAILED                                      syscall.Errno = 14108
+	ERROR_GENERIC_COMMAND_FAILED                                              syscall.Errno = 14109
+	ERROR_SXS_FILE_HASH_MISSING                                               syscall.Errno = 14110
+	ERROR_SXS_DUPLICATE_ACTIVATABLE_CLASS                                     syscall.Errno = 14111
+	ERROR_EVT_INVALID_CHANNEL_PATH                                            syscall.Errno = 15000
+	ERROR_EVT_INVALID_QUERY                                                   syscall.Errno = 15001
+	ERROR_EVT_PUBLISHER_METADATA_NOT_FOUND                                    syscall.Errno = 15002
+	ERROR_EVT_EVENT_TEMPLATE_NOT_FOUND                                        syscall.Errno = 15003
+	ERROR_EVT_INVALID_PUBLISHER_NAME                                          syscall.Errno = 15004
+	ERROR_EVT_INVALID_EVENT_DATA                                              syscall.Errno = 15005
+	ERROR_EVT_CHANNEL_NOT_FOUND                                               syscall.Errno = 15007
+	ERROR_EVT_MALFORMED_XML_TEXT                                              syscall.Errno = 15008
+	ERROR_EVT_SUBSCRIPTION_TO_DIRECT_CHANNEL                                  syscall.Errno = 15009
+	ERROR_EVT_CONFIGURATION_ERROR                                             syscall.Errno = 15010
+	ERROR_EVT_QUERY_RESULT_STALE                                              syscall.Errno = 15011
+	ERROR_EVT_QUERY_RESULT_INVALID_POSITION                                   syscall.Errno = 15012
+	ERROR_EVT_NON_VALIDATING_MSXML                                            syscall.Errno = 15013
+	ERROR_EVT_FILTER_ALREADYSCOPED                                            syscall.Errno = 15014
+	ERROR_EVT_FILTER_NOTELTSET                                                syscall.Errno = 15015
+	ERROR_EVT_FILTER_INVARG                                                   syscall.Errno = 15016
+	ERROR_EVT_FILTER_INVTEST                                                  syscall.Errno = 15017
+	ERROR_EVT_FILTER_INVTYPE                                                  syscall.Errno = 15018
+	ERROR_EVT_FILTER_PARSEERR                                                 syscall.Errno = 15019
+	ERROR_EVT_FILTER_UNSUPPORTEDOP                                            syscall.Errno = 15020
+	ERROR_EVT_FILTER_UNEXPECTEDTOKEN                                          syscall.Errno = 15021
+	ERROR_EVT_INVALID_OPERATION_OVER_ENABLED_DIRECT_CHANNEL                   syscall.Errno = 15022
+	ERROR_EVT_INVALID_CHANNEL_PROPERTY_VALUE                                  syscall.Errno = 15023
+	ERROR_EVT_INVALID_PUBLISHER_PROPERTY_VALUE                                syscall.Errno = 15024
+	ERROR_EVT_CHANNEL_CANNOT_ACTIVATE                                         syscall.Errno = 15025
+	ERROR_EVT_FILTER_TOO_COMPLEX                                              syscall.Errno = 15026
+	ERROR_EVT_MESSAGE_NOT_FOUND                                               syscall.Errno = 15027
+	ERROR_EVT_MESSAGE_ID_NOT_FOUND                                            syscall.Errno = 15028
+	ERROR_EVT_UNRESOLVED_VALUE_INSERT                                         syscall.Errno = 15029
+	ERROR_EVT_UNRESOLVED_PARAMETER_INSERT                                     syscall.Errno = 15030
+	ERROR_EVT_MAX_INSERTS_REACHED                                             syscall.Errno = 15031
+	ERROR_EVT_EVENT_DEFINITION_NOT_FOUND                                      syscall.Errno = 15032
+	ERROR_EVT_MESSAGE_LOCALE_NOT_FOUND                                        syscall.Errno = 15033
+	ERROR_EVT_VERSION_TOO_OLD                                                 syscall.Errno = 15034
+	ERROR_EVT_VERSION_TOO_NEW                                                 syscall.Errno = 15035
+	ERROR_EVT_CANNOT_OPEN_CHANNEL_OF_QUERY                                    syscall.Errno = 15036
+	ERROR_EVT_PUBLISHER_DISABLED                                              syscall.Errno = 15037
+	ERROR_EVT_FILTER_OUT_OF_RANGE                                             syscall.Errno = 15038
+	ERROR_EC_SUBSCRIPTION_CANNOT_ACTIVATE                                     syscall.Errno = 15080
+	ERROR_EC_LOG_DISABLED                                                     syscall.Errno = 15081
+	ERROR_EC_CIRCULAR_FORWARDING                                              syscall.Errno = 15082
+	ERROR_EC_CREDSTORE_FULL                                                   syscall.Errno = 15083
+	ERROR_EC_CRED_NOT_FOUND                                                   syscall.Errno = 15084
+	ERROR_EC_NO_ACTIVE_CHANNEL                                                syscall.Errno = 15085
+	ERROR_MUI_FILE_NOT_FOUND                                                  syscall.Errno = 15100
+	ERROR_MUI_INVALID_FILE                                                    syscall.Errno = 15101
+	ERROR_MUI_INVALID_RC_CONFIG                                               syscall.Errno = 15102
+	ERROR_MUI_INVALID_LOCALE_NAME                                             syscall.Errno = 15103
+	ERROR_MUI_INVALID_ULTIMATEFALLBACK_NAME                                   syscall.Errno = 15104
+	ERROR_MUI_FILE_NOT_LOADED                                                 syscall.Errno = 15105
+	ERROR_RESOURCE_ENUM_USER_STOP                                             syscall.Errno = 15106
+	ERROR_MUI_INTLSETTINGS_UILANG_NOT_INSTALLED                               syscall.Errno = 15107
+	ERROR_MUI_INTLSETTINGS_INVALID_LOCALE_NAME                                syscall.Errno = 15108
+	ERROR_MRM_RUNTIME_NO_DEFAULT_OR_NEUTRAL_RESOURCE                          syscall.Errno = 15110
+	ERROR_MRM_INVALID_PRICONFIG                                               syscall.Errno = 15111
+	ERROR_MRM_INVALID_FILE_TYPE                                               syscall.Errno = 15112
+	ERROR_MRM_UNKNOWN_QUALIFIER                                               syscall.Errno = 15113
+	ERROR_MRM_INVALID_QUALIFIER_VALUE                                         syscall.Errno = 15114
+	ERROR_MRM_NO_CANDIDATE                                                    syscall.Errno = 15115
+	ERROR_MRM_NO_MATCH_OR_DEFAULT_CANDIDATE                                   syscall.Errno = 15116
+	ERROR_MRM_RESOURCE_TYPE_MISMATCH                                          syscall.Errno = 15117
+	ERROR_MRM_DUPLICATE_MAP_NAME                                              syscall.Errno = 15118
+	ERROR_MRM_DUPLICATE_ENTRY                                                 syscall.Errno = 15119
+	ERROR_MRM_INVALID_RESOURCE_IDENTIFIER                                     syscall.Errno = 15120
+	ERROR_MRM_FILEPATH_TOO_LONG                                               syscall.Errno = 15121
+	ERROR_MRM_UNSUPPORTED_DIRECTORY_TYPE                                      syscall.Errno = 15122
+	ERROR_MRM_INVALID_PRI_FILE                                                syscall.Errno = 15126
+	ERROR_MRM_NAMED_RESOURCE_NOT_FOUND                                        syscall.Errno = 15127
+	ERROR_MRM_MAP_NOT_FOUND                                                   syscall.Errno = 15135
+	ERROR_MRM_UNSUPPORTED_PROFILE_TYPE                                        syscall.Errno = 15136
+	ERROR_MRM_INVALID_QUALIFIER_OPERATOR                                      syscall.Errno = 15137
+	ERROR_MRM_INDETERMINATE_QUALIFIER_VALUE                                   syscall.Errno = 15138
+	ERROR_MRM_AUTOMERGE_ENABLED                                               syscall.Errno = 15139
+	ERROR_MRM_TOO_MANY_RESOURCES                                              syscall.Errno = 15140
+	ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_MERGE                                 syscall.Errno = 15141
+	ERROR_MRM_UNSUPPORTED_FILE_TYPE_FOR_LOAD_UNLOAD_PRI_FILE                  syscall.Errno = 15142
+	ERROR_MRM_NO_CURRENT_VIEW_ON_THREAD                                       syscall.Errno = 15143
+	ERROR_DIFFERENT_PROFILE_RESOURCE_MANAGER_EXIST                            syscall.Errno = 15144
+	ERROR_OPERATION_NOT_ALLOWED_FROM_SYSTEM_COMPONENT                         syscall.Errno = 15145
+	ERROR_MRM_DIRECT_REF_TO_NON_DEFAULT_RESOURCE                              syscall.Errno = 15146
+	ERROR_MRM_GENERATION_COUNT_MISMATCH                                       syscall.Errno = 15147
+	ERROR_PRI_MERGE_VERSION_MISMATCH                                          syscall.Errno = 15148
+	ERROR_PRI_MERGE_MISSING_SCHEMA                                            syscall.Errno = 15149
+	ERROR_PRI_MERGE_LOAD_FILE_FAILED                                          syscall.Errno = 15150
+	ERROR_PRI_MERGE_ADD_FILE_FAILED                                           syscall.Errno = 15151
+	ERROR_PRI_MERGE_WRITE_FILE_FAILED                                         syscall.Errno = 15152
+	ERROR_PRI_MERGE_MULTIPLE_PACKAGE_FAMILIES_NOT_ALLOWED                     syscall.Errno = 15153
+	ERROR_PRI_MERGE_MULTIPLE_MAIN_PACKAGES_NOT_ALLOWED                        syscall.Errno = 15154
+	ERROR_PRI_MERGE_BUNDLE_PACKAGES_NOT_ALLOWED                               syscall.Errno = 15155
+	ERROR_PRI_MERGE_MAIN_PACKAGE_REQUIRED                                     syscall.Errno = 15156
+	ERROR_PRI_MERGE_RESOURCE_PACKAGE_REQUIRED                                 syscall.Errno = 15157
+	ERROR_PRI_MERGE_INVALID_FILE_NAME                                         syscall.Errno = 15158
+	ERROR_MRM_PACKAGE_NOT_FOUND                                               syscall.Errno = 15159
+	ERROR_MRM_MISSING_DEFAULT_LANGUAGE                                        syscall.Errno = 15160
+	ERROR_MCA_INVALID_CAPABILITIES_STRING                                     syscall.Errno = 15200
+	ERROR_MCA_INVALID_VCP_VERSION                                             syscall.Errno = 15201
+	ERROR_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION                             syscall.Errno = 15202
+	ERROR_MCA_MCCS_VERSION_MISMATCH                                           syscall.Errno = 15203
+	ERROR_MCA_UNSUPPORTED_MCCS_VERSION                                        syscall.Errno = 15204
+	ERROR_MCA_INTERNAL_ERROR                                                  syscall.Errno = 15205
+	ERROR_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED                                syscall.Errno = 15206
+	ERROR_MCA_UNSUPPORTED_COLOR_TEMPERATURE                                   syscall.Errno = 15207
+	ERROR_AMBIGUOUS_SYSTEM_DEVICE                                             syscall.Errno = 15250
+	ERROR_SYSTEM_DEVICE_NOT_FOUND                                             syscall.Errno = 15299
+	ERROR_HASH_NOT_SUPPORTED                                                  syscall.Errno = 15300
+	ERROR_HASH_NOT_PRESENT                                                    syscall.Errno = 15301
+	ERROR_SECONDARY_IC_PROVIDER_NOT_REGISTERED                                syscall.Errno = 15321
+	ERROR_GPIO_CLIENT_INFORMATION_INVALID                                     syscall.Errno = 15322
+	ERROR_GPIO_VERSION_NOT_SUPPORTED                                          syscall.Errno = 15323
+	ERROR_GPIO_INVALID_REGISTRATION_PACKET                                    syscall.Errno = 15324
+	ERROR_GPIO_OPERATION_DENIED                                               syscall.Errno = 15325
+	ERROR_GPIO_INCOMPATIBLE_CONNECT_MODE                                      syscall.Errno = 15326
+	ERROR_GPIO_INTERRUPT_ALREADY_UNMASKED                                     syscall.Errno = 15327
+	ERROR_CANNOT_SWITCH_RUNLEVEL                                              syscall.Errno = 15400
+	ERROR_INVALID_RUNLEVEL_SETTING                                            syscall.Errno = 15401
+	ERROR_RUNLEVEL_SWITCH_TIMEOUT                                             syscall.Errno = 15402
+	ERROR_RUNLEVEL_SWITCH_AGENT_TIMEOUT                                       syscall.Errno = 15403
+	ERROR_RUNLEVEL_SWITCH_IN_PROGRESS                                         syscall.Errno = 15404
+	ERROR_SERVICES_FAILED_AUTOSTART                                           syscall.Errno = 15405
+	ERROR_COM_TASK_STOP_PENDING                                               syscall.Errno = 15501
+	ERROR_INSTALL_OPEN_PACKAGE_FAILED                                         syscall.Errno = 15600
+	ERROR_INSTALL_PACKAGE_NOT_FOUND                                           syscall.Errno = 15601
+	ERROR_INSTALL_INVALID_PACKAGE                                             syscall.Errno = 15602
+	ERROR_INSTALL_RESOLVE_DEPENDENCY_FAILED                                   syscall.Errno = 15603
+	ERROR_INSTALL_OUT_OF_DISK_SPACE                                           syscall.Errno = 15604
+	ERROR_INSTALL_NETWORK_FAILURE                                             syscall.Errno = 15605
+	ERROR_INSTALL_REGISTRATION_FAILURE                                        syscall.Errno = 15606
+	ERROR_INSTALL_DEREGISTRATION_FAILURE                                      syscall.Errno = 15607
+	ERROR_INSTALL_CANCEL                                                      syscall.Errno = 15608
+	ERROR_INSTALL_FAILED                                                      syscall.Errno = 15609
+	ERROR_REMOVE_FAILED                                                       syscall.Errno = 15610
+	ERROR_PACKAGE_ALREADY_EXISTS                                              syscall.Errno = 15611
+	ERROR_NEEDS_REMEDIATION                                                   syscall.Errno = 15612
+	ERROR_INSTALL_PREREQUISITE_FAILED                                         syscall.Errno = 15613
+	ERROR_PACKAGE_REPOSITORY_CORRUPTED                                        syscall.Errno = 15614
+	ERROR_INSTALL_POLICY_FAILURE                                              syscall.Errno = 15615
+	ERROR_PACKAGE_UPDATING                                                    syscall.Errno = 15616
+	ERROR_DEPLOYMENT_BLOCKED_BY_POLICY                                        syscall.Errno = 15617
+	ERROR_PACKAGES_IN_USE                                                     syscall.Errno = 15618
+	ERROR_RECOVERY_FILE_CORRUPT                                               syscall.Errno = 15619
+	ERROR_INVALID_STAGED_SIGNATURE                                            syscall.Errno = 15620
+	ERROR_DELETING_EXISTING_APPLICATIONDATA_STORE_FAILED                      syscall.Errno = 15621
+	ERROR_INSTALL_PACKAGE_DOWNGRADE                                           syscall.Errno = 15622
+	ERROR_SYSTEM_NEEDS_REMEDIATION                                            syscall.Errno = 15623
+	ERROR_APPX_INTEGRITY_FAILURE_CLR_NGEN                                     syscall.Errno = 15624
+	ERROR_RESILIENCY_FILE_CORRUPT                                             syscall.Errno = 15625
+	ERROR_INSTALL_FIREWALL_SERVICE_NOT_RUNNING                                syscall.Errno = 15626
+	ERROR_PACKAGE_MOVE_FAILED                                                 syscall.Errno = 15627
+	ERROR_INSTALL_VOLUME_NOT_EMPTY                                            syscall.Errno = 15628
+	ERROR_INSTALL_VOLUME_OFFLINE                                              syscall.Errno = 15629
+	ERROR_INSTALL_VOLUME_CORRUPT                                              syscall.Errno = 15630
+	ERROR_NEEDS_REGISTRATION                                                  syscall.Errno = 15631
+	ERROR_INSTALL_WRONG_PROCESSOR_ARCHITECTURE                                syscall.Errno = 15632
+	ERROR_DEV_SIDELOAD_LIMIT_EXCEEDED                                         syscall.Errno = 15633
+	ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE                      syscall.Errno = 15634
+	ERROR_PACKAGE_NOT_SUPPORTED_ON_FILESYSTEM                                 syscall.Errno = 15635
+	ERROR_PACKAGE_MOVE_BLOCKED_BY_STREAMING                                   syscall.Errno = 15636
+	ERROR_INSTALL_OPTIONAL_PACKAGE_APPLICATIONID_NOT_UNIQUE                   syscall.Errno = 15637
+	ERROR_PACKAGE_STAGING_ONHOLD                                              syscall.Errno = 15638
+	ERROR_INSTALL_INVALID_RELATED_SET_UPDATE                                  syscall.Errno = 15639
+	ERROR_INSTALL_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_FULLTRUST_CAPABILITY syscall.Errno = 15640
+	ERROR_DEPLOYMENT_BLOCKED_BY_USER_LOG_OFF                                  syscall.Errno = 15641
+	ERROR_PROVISION_OPTIONAL_PACKAGE_REQUIRES_MAIN_PACKAGE_PROVISIONED        syscall.Errno = 15642
+	ERROR_PACKAGES_REPUTATION_CHECK_FAILED                                    syscall.Errno = 15643
+	ERROR_PACKAGES_REPUTATION_CHECK_TIMEDOUT                                  syscall.Errno = 15644
+	ERROR_DEPLOYMENT_OPTION_NOT_SUPPORTED                                     syscall.Errno = 15645
+	ERROR_APPINSTALLER_ACTIVATION_BLOCKED                                     syscall.Errno = 15646
+	ERROR_REGISTRATION_FROM_REMOTE_DRIVE_NOT_SUPPORTED                        syscall.Errno = 15647
+	ERROR_APPX_RAW_DATA_WRITE_FAILED                                          syscall.Errno = 15648
+	ERROR_DEPLOYMENT_BLOCKED_BY_VOLUME_POLICY_PACKAGE                         syscall.Errno = 15649
+	ERROR_DEPLOYMENT_BLOCKED_BY_VOLUME_POLICY_MACHINE                         syscall.Errno = 15650
+	ERROR_DEPLOYMENT_BLOCKED_BY_PROFILE_POLICY                                syscall.Errno = 15651
+	ERROR_DEPLOYMENT_FAILED_CONFLICTING_MUTABLE_PACKAGE_DIRECTORY             syscall.Errno = 15652
+	ERROR_SINGLETON_RESOURCE_INSTALLED_IN_ACTIVE_USER                         syscall.Errno = 15653
+	ERROR_DIFFERENT_VERSION_OF_PACKAGED_SERVICE_INSTALLED                     syscall.Errno = 15654
+	ERROR_SERVICE_EXISTS_AS_NON_PACKAGED_SERVICE                              syscall.Errno = 15655
+	ERROR_PACKAGED_SERVICE_REQUIRES_ADMIN_PRIVILEGES                          syscall.Errno = 15656
+	APPMODEL_ERROR_NO_PACKAGE                                                 syscall.Errno = 15700
+	APPMODEL_ERROR_PACKAGE_RUNTIME_CORRUPT                                    syscall.Errno = 15701
+	APPMODEL_ERROR_PACKAGE_IDENTITY_CORRUPT                                   syscall.Errno = 15702
+	APPMODEL_ERROR_NO_APPLICATION                                             syscall.Errno = 15703
+	APPMODEL_ERROR_DYNAMIC_PROPERTY_READ_FAILED                               syscall.Errno = 15704
+	APPMODEL_ERROR_DYNAMIC_PROPERTY_INVALID                                   syscall.Errno = 15705
+	APPMODEL_ERROR_PACKAGE_NOT_AVAILABLE                                      syscall.Errno = 15706
+	APPMODEL_ERROR_NO_MUTABLE_DIRECTORY                                       syscall.Errno = 15707
+	ERROR_STATE_LOAD_STORE_FAILED                                             syscall.Errno = 15800
+	ERROR_STATE_GET_VERSION_FAILED                                            syscall.Errno = 15801
+	ERROR_STATE_SET_VERSION_FAILED                                            syscall.Errno = 15802
+	ERROR_STATE_STRUCTURED_RESET_FAILED                                       syscall.Errno = 15803
+	ERROR_STATE_OPEN_CONTAINER_FAILED                                         syscall.Errno = 15804
+	ERROR_STATE_CREATE_CONTAINER_FAILED                                       syscall.Errno = 15805
+	ERROR_STATE_DELETE_CONTAINER_FAILED                                       syscall.Errno = 15806
+	ERROR_STATE_READ_SETTING_FAILED                                           syscall.Errno = 15807
+	ERROR_STATE_WRITE_SETTING_FAILED                                          syscall.Errno = 15808
+	ERROR_STATE_DELETE_SETTING_FAILED                                         syscall.Errno = 15809
+	ERROR_STATE_QUERY_SETTING_FAILED                                          syscall.Errno = 15810
+	ERROR_STATE_READ_COMPOSITE_SETTING_FAILED                                 syscall.Errno = 15811
+	ERROR_STATE_WRITE_COMPOSITE_SETTING_FAILED                                syscall.Errno = 15812
+	ERROR_STATE_ENUMERATE_CONTAINER_FAILED                                    syscall.Errno = 15813
+	ERROR_STATE_ENUMERATE_SETTINGS_FAILED                                     syscall.Errno = 15814
+	ERROR_STATE_COMPOSITE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED                   syscall.Errno = 15815
+	ERROR_STATE_SETTING_VALUE_SIZE_LIMIT_EXCEEDED                             syscall.Errno = 15816
+	ERROR_STATE_SETTING_NAME_SIZE_LIMIT_EXCEEDED                              syscall.Errno = 15817
+	ERROR_STATE_CONTAINER_NAME_SIZE_LIMIT_EXCEEDED                            syscall.Errno = 15818
+	ERROR_API_UNAVAILABLE                                                     syscall.Errno = 15841
+	STORE_ERROR_UNLICENSED                                                    syscall.Errno = 15861
+	STORE_ERROR_UNLICENSED_USER                                               syscall.Errno = 15862
+	STORE_ERROR_PENDING_COM_TRANSACTION                                       syscall.Errno = 15863
+	STORE_ERROR_LICENSE_REVOKED                                               syscall.Errno = 15864
+	SEVERITY_SUCCESS                                                          syscall.Errno = 0
+	SEVERITY_ERROR                                                            syscall.Errno = 1
+	FACILITY_NT_BIT                                                                         = 0x10000000
+	E_NOT_SET                                                                               = ERROR_NOT_FOUND
+	E_NOT_VALID_STATE                                                                       = ERROR_INVALID_STATE
+	E_NOT_SUFFICIENT_BUFFER                                                                 = ERROR_INSUFFICIENT_BUFFER
+	E_TIME_SENSITIVE_THREAD                                                                 = ERROR_TIME_SENSITIVE_THREAD
+	E_NO_TASK_QUEUE                                                                         = ERROR_NO_TASK_QUEUE
+	NOERROR                                                                   syscall.Errno = 0
+	E_UNEXPECTED                                                              Handle        = 0x8000FFFF
+	E_NOTIMPL                                                                 Handle        = 0x80004001
+	E_OUTOFMEMORY                                                             Handle        = 0x8007000E
+	E_INVALIDARG                                                              Handle        = 0x80070057
+	E_NOINTERFACE                                                             Handle        = 0x80004002
+	E_POINTER                                                                 Handle        = 0x80004003
+	E_HANDLE                                                                  Handle        = 0x80070006
+	E_ABORT                                                                   Handle        = 0x80004004
+	E_FAIL                                                                    Handle        = 0x80004005
+	E_ACCESSDENIED                                                            Handle        = 0x80070005
+	E_PENDING                                                                 Handle        = 0x8000000A
+	E_BOUNDS                                                                  Handle        = 0x8000000B
+	E_CHANGED_STATE                                                           Handle        = 0x8000000C
+	E_ILLEGAL_STATE_CHANGE                                                    Handle        = 0x8000000D
+	E_ILLEGAL_METHOD_CALL                                                     Handle        = 0x8000000E
+	RO_E_METADATA_NAME_NOT_FOUND                                              Handle        = 0x8000000F
+	RO_E_METADATA_NAME_IS_NAMESPACE                                           Handle        = 0x80000010
+	RO_E_METADATA_INVALID_TYPE_FORMAT                                         Handle        = 0x80000011
+	RO_E_INVALID_METADATA_FILE                                                Handle        = 0x80000012
+	RO_E_CLOSED                                                               Handle        = 0x80000013
+	RO_E_EXCLUSIVE_WRITE                                                      Handle        = 0x80000014
+	RO_E_CHANGE_NOTIFICATION_IN_PROGRESS                                      Handle        = 0x80000015
+	RO_E_ERROR_STRING_NOT_FOUND                                               Handle        = 0x80000016
+	E_STRING_NOT_NULL_TERMINATED                                              Handle        = 0x80000017
+	E_ILLEGAL_DELEGATE_ASSIGNMENT                                             Handle        = 0x80000018
+	E_ASYNC_OPERATION_NOT_STARTED                                             Handle        = 0x80000019
+	E_APPLICATION_EXITING                                                     Handle        = 0x8000001A
+	E_APPLICATION_VIEW_EXITING                                                Handle        = 0x8000001B
+	RO_E_MUST_BE_AGILE                                                        Handle        = 0x8000001C
+	RO_E_UNSUPPORTED_FROM_MTA                                                 Handle        = 0x8000001D
+	RO_E_COMMITTED                                                            Handle        = 0x8000001E
+	RO_E_BLOCKED_CROSS_ASTA_CALL                                              Handle        = 0x8000001F
+	RO_E_CANNOT_ACTIVATE_FULL_TRUST_SERVER                                    Handle        = 0x80000020
+	RO_E_CANNOT_ACTIVATE_UNIVERSAL_APPLICATION_SERVER                         Handle        = 0x80000021
+	CO_E_INIT_TLS                                                             Handle        = 0x80004006
+	CO_E_INIT_SHARED_ALLOCATOR                                                Handle        = 0x80004007
+	CO_E_INIT_MEMORY_ALLOCATOR                                                Handle        = 0x80004008
+	CO_E_INIT_CLASS_CACHE                                                     Handle        = 0x80004009
+	CO_E_INIT_RPC_CHANNEL                                                     Handle        = 0x8000400A
+	CO_E_INIT_TLS_SET_CHANNEL_CONTROL                                         Handle        = 0x8000400B
+	CO_E_INIT_TLS_CHANNEL_CONTROL                                             Handle        = 0x8000400C
+	CO_E_INIT_UNACCEPTED_USER_ALLOCATOR                                       Handle        = 0x8000400D
+	CO_E_INIT_SCM_MUTEX_EXISTS                                                Handle        = 0x8000400E
+	CO_E_INIT_SCM_FILE_MAPPING_EXISTS                                         Handle        = 0x8000400F
+	CO_E_INIT_SCM_MAP_VIEW_OF_FILE                                            Handle        = 0x80004010
+	CO_E_INIT_SCM_EXEC_FAILURE                                                Handle        = 0x80004011
+	CO_E_INIT_ONLY_SINGLE_THREADED                                            Handle        = 0x80004012
+	CO_E_CANT_REMOTE                                                          Handle        = 0x80004013
+	CO_E_BAD_SERVER_NAME                                                      Handle        = 0x80004014
+	CO_E_WRONG_SERVER_IDENTITY                                                Handle        = 0x80004015
+	CO_E_OLE1DDE_DISABLED                                                     Handle        = 0x80004016
+	CO_E_RUNAS_SYNTAX                                                         Handle        = 0x80004017
+	CO_E_CREATEPROCESS_FAILURE                                                Handle        = 0x80004018
+	CO_E_RUNAS_CREATEPROCESS_FAILURE                                          Handle        = 0x80004019
+	CO_E_RUNAS_LOGON_FAILURE                                                  Handle        = 0x8000401A
+	CO_E_LAUNCH_PERMSSION_DENIED                                              Handle        = 0x8000401B
+	CO_E_START_SERVICE_FAILURE                                                Handle        = 0x8000401C
+	CO_E_REMOTE_COMMUNICATION_FAILURE                                         Handle        = 0x8000401D
+	CO_E_SERVER_START_TIMEOUT                                                 Handle        = 0x8000401E
+	CO_E_CLSREG_INCONSISTENT                                                  Handle        = 0x8000401F
+	CO_E_IIDREG_INCONSISTENT                                                  Handle        = 0x80004020
+	CO_E_NOT_SUPPORTED                                                        Handle        = 0x80004021
+	CO_E_RELOAD_DLL                                                           Handle        = 0x80004022
+	CO_E_MSI_ERROR                                                            Handle        = 0x80004023
+	CO_E_ATTEMPT_TO_CREATE_OUTSIDE_CLIENT_CONTEXT                             Handle        = 0x80004024
+	CO_E_SERVER_PAUSED                                                        Handle        = 0x80004025
+	CO_E_SERVER_NOT_PAUSED                                                    Handle        = 0x80004026
+	CO_E_CLASS_DISABLED                                                       Handle        = 0x80004027
+	CO_E_CLRNOTAVAILABLE                                                      Handle        = 0x80004028
+	CO_E_ASYNC_WORK_REJECTED                                                  Handle        = 0x80004029
+	CO_E_SERVER_INIT_TIMEOUT                                                  Handle        = 0x8000402A
+	CO_E_NO_SECCTX_IN_ACTIVATE                                                Handle        = 0x8000402B
+	CO_E_TRACKER_CONFIG                                                       Handle        = 0x80004030
+	CO_E_THREADPOOL_CONFIG                                                    Handle        = 0x80004031
+	CO_E_SXS_CONFIG                                                           Handle        = 0x80004032
+	CO_E_MALFORMED_SPN                                                        Handle        = 0x80004033
+	CO_E_UNREVOKED_REGISTRATION_ON_APARTMENT_SHUTDOWN                         Handle        = 0x80004034
+	CO_E_PREMATURE_STUB_RUNDOWN                                               Handle        = 0x80004035
+	S_OK                                                                      Handle        = 0
+	S_FALSE                                                                   Handle        = 1
+	OLE_E_FIRST                                                               Handle        = 0x80040000
+	OLE_E_LAST                                                                Handle        = 0x800400FF
+	OLE_S_FIRST                                                               Handle        = 0x00040000
+	OLE_S_LAST                                                                Handle        = 0x000400FF
+	OLE_E_OLEVERB                                                             Handle        = 0x80040000
+	OLE_E_ADVF                                                                Handle        = 0x80040001
+	OLE_E_ENUM_NOMORE                                                         Handle        = 0x80040002
+	OLE_E_ADVISENOTSUPPORTED                                                  Handle        = 0x80040003
+	OLE_E_NOCONNECTION                                                        Handle        = 0x80040004
+	OLE_E_NOTRUNNING                                                          Handle        = 0x80040005
+	OLE_E_NOCACHE                                                             Handle        = 0x80040006
+	OLE_E_BLANK                                                               Handle        = 0x80040007
+	OLE_E_CLASSDIFF                                                           Handle        = 0x80040008
+	OLE_E_CANT_GETMONIKER                                                     Handle        = 0x80040009
+	OLE_E_CANT_BINDTOSOURCE                                                   Handle        = 0x8004000A
+	OLE_E_STATIC                                                              Handle        = 0x8004000B
+	OLE_E_PROMPTSAVECANCELLED                                                 Handle        = 0x8004000C
+	OLE_E_INVALIDRECT                                                         Handle        = 0x8004000D
+	OLE_E_WRONGCOMPOBJ                                                        Handle        = 0x8004000E
+	OLE_E_INVALIDHWND                                                         Handle        = 0x8004000F
+	OLE_E_NOT_INPLACEACTIVE                                                   Handle        = 0x80040010
+	OLE_E_CANTCONVERT                                                         Handle        = 0x80040011
+	OLE_E_NOSTORAGE                                                           Handle        = 0x80040012
+	DV_E_FORMATETC                                                            Handle        = 0x80040064
+	DV_E_DVTARGETDEVICE                                                       Handle        = 0x80040065
+	DV_E_STGMEDIUM                                                            Handle        = 0x80040066
+	DV_E_STATDATA                                                             Handle        = 0x80040067
+	DV_E_LINDEX                                                               Handle        = 0x80040068
+	DV_E_TYMED                                                                Handle        = 0x80040069
+	DV_E_CLIPFORMAT                                                           Handle        = 0x8004006A
+	DV_E_DVASPECT                                                             Handle        = 0x8004006B
+	DV_E_DVTARGETDEVICE_SIZE                                                  Handle        = 0x8004006C
+	DV_E_NOIVIEWOBJECT                                                        Handle        = 0x8004006D
+	DRAGDROP_E_FIRST                                                          syscall.Errno = 0x80040100
+	DRAGDROP_E_LAST                                                           syscall.Errno = 0x8004010F
+	DRAGDROP_S_FIRST                                                          syscall.Errno = 0x00040100
+	DRAGDROP_S_LAST                                                           syscall.Errno = 0x0004010F
+	DRAGDROP_E_NOTREGISTERED                                                  Handle        = 0x80040100
+	DRAGDROP_E_ALREADYREGISTERED                                              Handle        = 0x80040101
+	DRAGDROP_E_INVALIDHWND                                                    Handle        = 0x80040102
+	DRAGDROP_E_CONCURRENT_DRAG_ATTEMPTED                                      Handle        = 0x80040103
+	CLASSFACTORY_E_FIRST                                                      syscall.Errno = 0x80040110
+	CLASSFACTORY_E_LAST                                                       syscall.Errno = 0x8004011F
+	CLASSFACTORY_S_FIRST                                                      syscall.Errno = 0x00040110
+	CLASSFACTORY_S_LAST                                                       syscall.Errno = 0x0004011F
+	CLASS_E_NOAGGREGATION                                                     Handle        = 0x80040110
+	CLASS_E_CLASSNOTAVAILABLE                                                 Handle        = 0x80040111
+	CLASS_E_NOTLICENSED                                                       Handle        = 0x80040112
+	MARSHAL_E_FIRST                                                           syscall.Errno = 0x80040120
+	MARSHAL_E_LAST                                                            syscall.Errno = 0x8004012F
+	MARSHAL_S_FIRST                                                           syscall.Errno = 0x00040120
+	MARSHAL_S_LAST                                                            syscall.Errno = 0x0004012F
+	DATA_E_FIRST                                                              syscall.Errno = 0x80040130
+	DATA_E_LAST                                                               syscall.Errno = 0x8004013F
+	DATA_S_FIRST                                                              syscall.Errno = 0x00040130
+	DATA_S_LAST                                                               syscall.Errno = 0x0004013F
+	VIEW_E_FIRST                                                              syscall.Errno = 0x80040140
+	VIEW_E_LAST                                                               syscall.Errno = 0x8004014F
+	VIEW_S_FIRST                                                              syscall.Errno = 0x00040140
+	VIEW_S_LAST                                                               syscall.Errno = 0x0004014F
+	VIEW_E_DRAW                                                               Handle        = 0x80040140
+	REGDB_E_FIRST                                                             syscall.Errno = 0x80040150
+	REGDB_E_LAST                                                              syscall.Errno = 0x8004015F
+	REGDB_S_FIRST                                                             syscall.Errno = 0x00040150
+	REGDB_S_LAST                                                              syscall.Errno = 0x0004015F
+	REGDB_E_READREGDB                                                         Handle        = 0x80040150
+	REGDB_E_WRITEREGDB                                                        Handle        = 0x80040151
+	REGDB_E_KEYMISSING                                                        Handle        = 0x80040152
+	REGDB_E_INVALIDVALUE                                                      Handle        = 0x80040153
+	REGDB_E_CLASSNOTREG                                                       Handle        = 0x80040154
+	REGDB_E_IIDNOTREG                                                         Handle        = 0x80040155
+	REGDB_E_BADTHREADINGMODEL                                                 Handle        = 0x80040156
+	REGDB_E_PACKAGEPOLICYVIOLATION                                            Handle        = 0x80040157
+	CAT_E_FIRST                                                               syscall.Errno = 0x80040160
+	CAT_E_LAST                                                                syscall.Errno = 0x80040161
+	CAT_E_CATIDNOEXIST                                                        Handle        = 0x80040160
+	CAT_E_NODESCRIPTION                                                       Handle        = 0x80040161
+	CS_E_FIRST                                                                syscall.Errno = 0x80040164
+	CS_E_LAST                                                                 syscall.Errno = 0x8004016F
+	CS_E_PACKAGE_NOTFOUND                                                     Handle        = 0x80040164
+	CS_E_NOT_DELETABLE                                                        Handle        = 0x80040165
+	CS_E_CLASS_NOTFOUND                                                       Handle        = 0x80040166
+	CS_E_INVALID_VERSION                                                      Handle        = 0x80040167
+	CS_E_NO_CLASSSTORE                                                        Handle        = 0x80040168
+	CS_E_OBJECT_NOTFOUND                                                      Handle        = 0x80040169
+	CS_E_OBJECT_ALREADY_EXISTS                                                Handle        = 0x8004016A
+	CS_E_INVALID_PATH                                                         Handle        = 0x8004016B
+	CS_E_NETWORK_ERROR                                                        Handle        = 0x8004016C
+	CS_E_ADMIN_LIMIT_EXCEEDED                                                 Handle        = 0x8004016D
+	CS_E_SCHEMA_MISMATCH                                                      Handle        = 0x8004016E
+	CS_E_INTERNAL_ERROR                                                       Handle        = 0x8004016F
+	CACHE_E_FIRST                                                             syscall.Errno = 0x80040170
+	CACHE_E_LAST                                                              syscall.Errno = 0x8004017F
+	CACHE_S_FIRST                                                             syscall.Errno = 0x00040170
+	CACHE_S_LAST                                                              syscall.Errno = 0x0004017F
+	CACHE_E_NOCACHE_UPDATED                                                   Handle        = 0x80040170
+	OLEOBJ_E_FIRST                                                            syscall.Errno = 0x80040180
+	OLEOBJ_E_LAST                                                             syscall.Errno = 0x8004018F
+	OLEOBJ_S_FIRST                                                            syscall.Errno = 0x00040180
+	OLEOBJ_S_LAST                                                             syscall.Errno = 0x0004018F
+	OLEOBJ_E_NOVERBS                                                          Handle        = 0x80040180
+	OLEOBJ_E_INVALIDVERB                                                      Handle        = 0x80040181
+	CLIENTSITE_E_FIRST                                                        syscall.Errno = 0x80040190
+	CLIENTSITE_E_LAST                                                         syscall.Errno = 0x8004019F
+	CLIENTSITE_S_FIRST                                                        syscall.Errno = 0x00040190
+	CLIENTSITE_S_LAST                                                         syscall.Errno = 0x0004019F
+	INPLACE_E_NOTUNDOABLE                                                     Handle        = 0x800401A0
+	INPLACE_E_NOTOOLSPACE                                                     Handle        = 0x800401A1
+	INPLACE_E_FIRST                                                           syscall.Errno = 0x800401A0
+	INPLACE_E_LAST                                                            syscall.Errno = 0x800401AF
+	INPLACE_S_FIRST                                                           syscall.Errno = 0x000401A0
+	INPLACE_S_LAST                                                            syscall.Errno = 0x000401AF
+	ENUM_E_FIRST                                                              syscall.Errno = 0x800401B0
+	ENUM_E_LAST                                                               syscall.Errno = 0x800401BF
+	ENUM_S_FIRST                                                              syscall.Errno = 0x000401B0
+	ENUM_S_LAST                                                               syscall.Errno = 0x000401BF
+	CONVERT10_E_FIRST                                                         syscall.Errno = 0x800401C0
+	CONVERT10_E_LAST                                                          syscall.Errno = 0x800401CF
+	CONVERT10_S_FIRST                                                         syscall.Errno = 0x000401C0
+	CONVERT10_S_LAST                                                          syscall.Errno = 0x000401CF
+	CONVERT10_E_OLESTREAM_GET                                                 Handle        = 0x800401C0
+	CONVERT10_E_OLESTREAM_PUT                                                 Handle        = 0x800401C1
+	CONVERT10_E_OLESTREAM_FMT                                                 Handle        = 0x800401C2
+	CONVERT10_E_OLESTREAM_BITMAP_TO_DIB                                       Handle        = 0x800401C3
+	CONVERT10_E_STG_FMT                                                       Handle        = 0x800401C4
+	CONVERT10_E_STG_NO_STD_STREAM                                             Handle        = 0x800401C5
+	CONVERT10_E_STG_DIB_TO_BITMAP                                             Handle        = 0x800401C6
+	CLIPBRD_E_FIRST                                                           syscall.Errno = 0x800401D0
+	CLIPBRD_E_LAST                                                            syscall.Errno = 0x800401DF
+	CLIPBRD_S_FIRST                                                           syscall.Errno = 0x000401D0
+	CLIPBRD_S_LAST                                                            syscall.Errno = 0x000401DF
+	CLIPBRD_E_CANT_OPEN                                                       Handle        = 0x800401D0
+	CLIPBRD_E_CANT_EMPTY                                                      Handle        = 0x800401D1
+	CLIPBRD_E_CANT_SET                                                        Handle        = 0x800401D2
+	CLIPBRD_E_BAD_DATA                                                        Handle        = 0x800401D3
+	CLIPBRD_E_CANT_CLOSE                                                      Handle        = 0x800401D4
+	MK_E_FIRST                                                                syscall.Errno = 0x800401E0
+	MK_E_LAST                                                                 syscall.Errno = 0x800401EF
+	MK_S_FIRST                                                                syscall.Errno = 0x000401E0
+	MK_S_LAST                                                                 syscall.Errno = 0x000401EF
+	MK_E_CONNECTMANUALLY                                                      Handle        = 0x800401E0
+	MK_E_EXCEEDEDDEADLINE                                                     Handle        = 0x800401E1
+	MK_E_NEEDGENERIC                                                          Handle        = 0x800401E2
+	MK_E_UNAVAILABLE                                                          Handle        = 0x800401E3
+	MK_E_SYNTAX                                                               Handle        = 0x800401E4
+	MK_E_NOOBJECT                                                             Handle        = 0x800401E5
+	MK_E_INVALIDEXTENSION                                                     Handle        = 0x800401E6
+	MK_E_INTERMEDIATEINTERFACENOTSUPPORTED                                    Handle        = 0x800401E7
+	MK_E_NOTBINDABLE                                                          Handle        = 0x800401E8
+	MK_E_NOTBOUND                                                             Handle        = 0x800401E9
+	MK_E_CANTOPENFILE                                                         Handle        = 0x800401EA
+	MK_E_MUSTBOTHERUSER                                                       Handle        = 0x800401EB
+	MK_E_NOINVERSE                                                            Handle        = 0x800401EC
+	MK_E_NOSTORAGE                                                            Handle        = 0x800401ED
+	MK_E_NOPREFIX                                                             Handle        = 0x800401EE
+	MK_E_ENUMERATION_FAILED                                                   Handle        = 0x800401EF
+	CO_E_FIRST                                                                syscall.Errno = 0x800401F0
+	CO_E_LAST                                                                 syscall.Errno = 0x800401FF
+	CO_S_FIRST                                                                syscall.Errno = 0x000401F0
+	CO_S_LAST                                                                 syscall.Errno = 0x000401FF
+	CO_E_NOTINITIALIZED                                                       Handle        = 0x800401F0
+	CO_E_ALREADYINITIALIZED                                                   Handle        = 0x800401F1
+	CO_E_CANTDETERMINECLASS                                                   Handle        = 0x800401F2
+	CO_E_CLASSSTRING                                                          Handle        = 0x800401F3
+	CO_E_IIDSTRING                                                            Handle        = 0x800401F4
+	CO_E_APPNOTFOUND                                                          Handle        = 0x800401F5
+	CO_E_APPSINGLEUSE                                                         Handle        = 0x800401F6
+	CO_E_ERRORINAPP                                                           Handle        = 0x800401F7
+	CO_E_DLLNOTFOUND                                                          Handle        = 0x800401F8
+	CO_E_ERRORINDLL                                                           Handle        = 0x800401F9
+	CO_E_WRONGOSFORAPP                                                        Handle        = 0x800401FA
+	CO_E_OBJNOTREG                                                            Handle        = 0x800401FB
+	CO_E_OBJISREG                                                             Handle        = 0x800401FC
+	CO_E_OBJNOTCONNECTED                                                      Handle        = 0x800401FD
+	CO_E_APPDIDNTREG                                                          Handle        = 0x800401FE
+	CO_E_RELEASED                                                             Handle        = 0x800401FF
+	EVENT_E_FIRST                                                             syscall.Errno = 0x80040200
+	EVENT_E_LAST                                                              syscall.Errno = 0x8004021F
+	EVENT_S_FIRST                                                             syscall.Errno = 0x00040200
+	EVENT_S_LAST                                                              syscall.Errno = 0x0004021F
+	EVENT_S_SOME_SUBSCRIBERS_FAILED                                           Handle        = 0x00040200
+	EVENT_E_ALL_SUBSCRIBERS_FAILED                                            Handle        = 0x80040201
+	EVENT_S_NOSUBSCRIBERS                                                     Handle        = 0x00040202
+	EVENT_E_QUERYSYNTAX                                                       Handle        = 0x80040203
+	EVENT_E_QUERYFIELD                                                        Handle        = 0x80040204
+	EVENT_E_INTERNALEXCEPTION                                                 Handle        = 0x80040205
+	EVENT_E_INTERNALERROR                                                     Handle        = 0x80040206
+	EVENT_E_INVALID_PER_USER_SID                                              Handle        = 0x80040207
+	EVENT_E_USER_EXCEPTION                                                    Handle        = 0x80040208
+	EVENT_E_TOO_MANY_METHODS                                                  Handle        = 0x80040209
+	EVENT_E_MISSING_EVENTCLASS                                                Handle        = 0x8004020A
+	EVENT_E_NOT_ALL_REMOVED                                                   Handle        = 0x8004020B
+	EVENT_E_COMPLUS_NOT_INSTALLED                                             Handle        = 0x8004020C
+	EVENT_E_CANT_MODIFY_OR_DELETE_UNCONFIGURED_OBJECT                         Handle        = 0x8004020D
+	EVENT_E_CANT_MODIFY_OR_DELETE_CONFIGURED_OBJECT                           Handle        = 0x8004020E
+	EVENT_E_INVALID_EVENT_CLASS_PARTITION                                     Handle        = 0x8004020F
+	EVENT_E_PER_USER_SID_NOT_LOGGED_ON                                        Handle        = 0x80040210
+	TPC_E_INVALID_PROPERTY                                                    Handle        = 0x80040241
+	TPC_E_NO_DEFAULT_TABLET                                                   Handle        = 0x80040212
+	TPC_E_UNKNOWN_PROPERTY                                                    Handle        = 0x8004021B
+	TPC_E_INVALID_INPUT_RECT                                                  Handle        = 0x80040219
+	TPC_E_INVALID_STROKE                                                      Handle        = 0x80040222
+	TPC_E_INITIALIZE_FAIL                                                     Handle        = 0x80040223
+	TPC_E_NOT_RELEVANT                                                        Handle        = 0x80040232
+	TPC_E_INVALID_PACKET_DESCRIPTION                                          Handle        = 0x80040233
+	TPC_E_RECOGNIZER_NOT_REGISTERED                                           Handle        = 0x80040235
+	TPC_E_INVALID_RIGHTS                                                      Handle        = 0x80040236
+	TPC_E_OUT_OF_ORDER_CALL                                                   Handle        = 0x80040237
+	TPC_E_QUEUE_FULL                                                          Handle        = 0x80040238
+	TPC_E_INVALID_CONFIGURATION                                               Handle        = 0x80040239
+	TPC_E_INVALID_DATA_FROM_RECOGNIZER                                        Handle        = 0x8004023A
+	TPC_S_TRUNCATED                                                           Handle        = 0x00040252
+	TPC_S_INTERRUPTED                                                         Handle        = 0x00040253
+	TPC_S_NO_DATA_TO_PROCESS                                                  Handle        = 0x00040254
+	XACT_E_FIRST                                                              syscall.Errno = 0x8004D000
+	XACT_E_LAST                                                               syscall.Errno = 0x8004D02B
+	XACT_S_FIRST                                                              syscall.Errno = 0x0004D000
+	XACT_S_LAST                                                               syscall.Errno = 0x0004D010
+	XACT_E_ALREADYOTHERSINGLEPHASE                                            Handle        = 0x8004D000
+	XACT_E_CANTRETAIN                                                         Handle        = 0x8004D001
+	XACT_E_COMMITFAILED                                                       Handle        = 0x8004D002
+	XACT_E_COMMITPREVENTED                                                    Handle        = 0x8004D003
+	XACT_E_HEURISTICABORT                                                     Handle        = 0x8004D004
+	XACT_E_HEURISTICCOMMIT                                                    Handle        = 0x8004D005
+	XACT_E_HEURISTICDAMAGE                                                    Handle        = 0x8004D006
+	XACT_E_HEURISTICDANGER                                                    Handle        = 0x8004D007
+	XACT_E_ISOLATIONLEVEL                                                     Handle        = 0x8004D008
+	XACT_E_NOASYNC                                                            Handle        = 0x8004D009
+	XACT_E_NOENLIST                                                           Handle        = 0x8004D00A
+	XACT_E_NOISORETAIN                                                        Handle        = 0x8004D00B
+	XACT_E_NORESOURCE                                                         Handle        = 0x8004D00C
+	XACT_E_NOTCURRENT                                                         Handle        = 0x8004D00D
+	XACT_E_NOTRANSACTION                                                      Handle        = 0x8004D00E
+	XACT_E_NOTSUPPORTED                                                       Handle        = 0x8004D00F
+	XACT_E_UNKNOWNRMGRID                                                      Handle        = 0x8004D010
+	XACT_E_WRONGSTATE                                                         Handle        = 0x8004D011
+	XACT_E_WRONGUOW                                                           Handle        = 0x8004D012
+	XACT_E_XTIONEXISTS                                                        Handle        = 0x8004D013
+	XACT_E_NOIMPORTOBJECT                                                     Handle        = 0x8004D014
+	XACT_E_INVALIDCOOKIE                                                      Handle        = 0x8004D015
+	XACT_E_INDOUBT                                                            Handle        = 0x8004D016
+	XACT_E_NOTIMEOUT                                                          Handle        = 0x8004D017
+	XACT_E_ALREADYINPROGRESS                                                  Handle        = 0x8004D018
+	XACT_E_ABORTED                                                            Handle        = 0x8004D019
+	XACT_E_LOGFULL                                                            Handle        = 0x8004D01A
+	XACT_E_TMNOTAVAILABLE                                                     Handle        = 0x8004D01B
+	XACT_E_CONNECTION_DOWN                                                    Handle        = 0x8004D01C
+	XACT_E_CONNECTION_DENIED                                                  Handle        = 0x8004D01D
+	XACT_E_REENLISTTIMEOUT                                                    Handle        = 0x8004D01E
+	XACT_E_TIP_CONNECT_FAILED                                                 Handle        = 0x8004D01F
+	XACT_E_TIP_PROTOCOL_ERROR                                                 Handle        = 0x8004D020
+	XACT_E_TIP_PULL_FAILED                                                    Handle        = 0x8004D021
+	XACT_E_DEST_TMNOTAVAILABLE                                                Handle        = 0x8004D022
+	XACT_E_TIP_DISABLED                                                       Handle        = 0x8004D023
+	XACT_E_NETWORK_TX_DISABLED                                                Handle        = 0x8004D024
+	XACT_E_PARTNER_NETWORK_TX_DISABLED                                        Handle        = 0x8004D025
+	XACT_E_XA_TX_DISABLED                                                     Handle        = 0x8004D026
+	XACT_E_UNABLE_TO_READ_DTC_CONFIG                                          Handle        = 0x8004D027
+	XACT_E_UNABLE_TO_LOAD_DTC_PROXY                                           Handle        = 0x8004D028
+	XACT_E_ABORTING                                                           Handle        = 0x8004D029
+	XACT_E_PUSH_COMM_FAILURE                                                  Handle        = 0x8004D02A
+	XACT_E_PULL_COMM_FAILURE                                                  Handle        = 0x8004D02B
+	XACT_E_LU_TX_DISABLED                                                     Handle        = 0x8004D02C
+	XACT_E_CLERKNOTFOUND                                                      Handle        = 0x8004D080
+	XACT_E_CLERKEXISTS                                                        Handle        = 0x8004D081
+	XACT_E_RECOVERYINPROGRESS                                                 Handle        = 0x8004D082
+	XACT_E_TRANSACTIONCLOSED                                                  Handle        = 0x8004D083
+	XACT_E_INVALIDLSN                                                         Handle        = 0x8004D084
+	XACT_E_REPLAYREQUEST                                                      Handle        = 0x8004D085
+	XACT_S_ASYNC                                                              Handle        = 0x0004D000
+	XACT_S_DEFECT                                                             Handle        = 0x0004D001
+	XACT_S_READONLY                                                           Handle        = 0x0004D002
+	XACT_S_SOMENORETAIN                                                       Handle        = 0x0004D003
+	XACT_S_OKINFORM                                                           Handle        = 0x0004D004
+	XACT_S_MADECHANGESCONTENT                                                 Handle        = 0x0004D005
+	XACT_S_MADECHANGESINFORM                                                  Handle        = 0x0004D006
+	XACT_S_ALLNORETAIN                                                        Handle        = 0x0004D007
+	XACT_S_ABORTING                                                           Handle        = 0x0004D008
+	XACT_S_SINGLEPHASE                                                        Handle        = 0x0004D009
+	XACT_S_LOCALLY_OK                                                         Handle        = 0x0004D00A
+	XACT_S_LASTRESOURCEMANAGER                                                Handle        = 0x0004D010
+	CONTEXT_E_FIRST                                                           syscall.Errno = 0x8004E000
+	CONTEXT_E_LAST                                                            syscall.Errno = 0x8004E02F
+	CONTEXT_S_FIRST                                                           syscall.Errno = 0x0004E000
+	CONTEXT_S_LAST                                                            syscall.Errno = 0x0004E02F
+	CONTEXT_E_ABORTED                                                         Handle        = 0x8004E002
+	CONTEXT_E_ABORTING                                                        Handle        = 0x8004E003
+	CONTEXT_E_NOCONTEXT                                                       Handle        = 0x8004E004
+	CONTEXT_E_WOULD_DEADLOCK                                                  Handle        = 0x8004E005
+	CONTEXT_E_SYNCH_TIMEOUT                                                   Handle        = 0x8004E006
+	CONTEXT_E_OLDREF                                                          Handle        = 0x8004E007
+	CONTEXT_E_ROLENOTFOUND                                                    Handle        = 0x8004E00C
+	CONTEXT_E_TMNOTAVAILABLE                                                  Handle        = 0x8004E00F
+	CO_E_ACTIVATIONFAILED                                                     Handle        = 0x8004E021
+	CO_E_ACTIVATIONFAILED_EVENTLOGGED                                         Handle        = 0x8004E022
+	CO_E_ACTIVATIONFAILED_CATALOGERROR                                        Handle        = 0x8004E023
+	CO_E_ACTIVATIONFAILED_TIMEOUT                                             Handle        = 0x8004E024
+	CO_E_INITIALIZATIONFAILED                                                 Handle        = 0x8004E025
+	CONTEXT_E_NOJIT                                                           Handle        = 0x8004E026
+	CONTEXT_E_NOTRANSACTION                                                   Handle        = 0x8004E027
+	CO_E_THREADINGMODEL_CHANGED                                               Handle        = 0x8004E028
+	CO_E_NOIISINTRINSICS                                                      Handle        = 0x8004E029
+	CO_E_NOCOOKIES                                                            Handle        = 0x8004E02A
+	CO_E_DBERROR                                                              Handle        = 0x8004E02B
+	CO_E_NOTPOOLED                                                            Handle        = 0x8004E02C
+	CO_E_NOTCONSTRUCTED                                                       Handle        = 0x8004E02D
+	CO_E_NOSYNCHRONIZATION                                                    Handle        = 0x8004E02E
+	CO_E_ISOLEVELMISMATCH                                                     Handle        = 0x8004E02F
+	CO_E_CALL_OUT_OF_TX_SCOPE_NOT_ALLOWED                                     Handle        = 0x8004E030
+	CO_E_EXIT_TRANSACTION_SCOPE_NOT_CALLED                                    Handle        = 0x8004E031
+	OLE_S_USEREG                                                              Handle        = 0x00040000
+	OLE_S_STATIC                                                              Handle        = 0x00040001
+	OLE_S_MAC_CLIPFORMAT                                                      Handle        = 0x00040002
+	DRAGDROP_S_DROP                                                           Handle        = 0x00040100
+	DRAGDROP_S_CANCEL                                                         Handle        = 0x00040101
+	DRAGDROP_S_USEDEFAULTCURSORS                                              Handle        = 0x00040102
+	DATA_S_SAMEFORMATETC                                                      Handle        = 0x00040130
+	VIEW_S_ALREADY_FROZEN                                                     Handle        = 0x00040140
+	CACHE_S_FORMATETC_NOTSUPPORTED                                            Handle        = 0x00040170
+	CACHE_S_SAMECACHE                                                         Handle        = 0x00040171
+	CACHE_S_SOMECACHES_NOTUPDATED                                             Handle        = 0x00040172
+	OLEOBJ_S_INVALIDVERB                                                      Handle        = 0x00040180
+	OLEOBJ_S_CANNOT_DOVERB_NOW                                                Handle        = 0x00040181
+	OLEOBJ_S_INVALIDHWND                                                      Handle        = 0x00040182
+	INPLACE_S_TRUNCATED                                                       Handle        = 0x000401A0
+	CONVERT10_S_NO_PRESENTATION                                               Handle        = 0x000401C0
+	MK_S_REDUCED_TO_SELF                                                      Handle        = 0x000401E2
+	MK_S_ME                                                                   Handle        = 0x000401E4
+	MK_S_HIM                                                                  Handle        = 0x000401E5
+	MK_S_US                                                                   Handle        = 0x000401E6
+	MK_S_MONIKERALREADYREGISTERED                                             Handle        = 0x000401E7
+	SCHED_S_TASK_READY                                                        Handle        = 0x00041300
+	SCHED_S_TASK_RUNNING                                                      Handle        = 0x00041301
+	SCHED_S_TASK_DISABLED                                                     Handle        = 0x00041302
+	SCHED_S_TASK_HAS_NOT_RUN                                                  Handle        = 0x00041303
+	SCHED_S_TASK_NO_MORE_RUNS                                                 Handle        = 0x00041304
+	SCHED_S_TASK_NOT_SCHEDULED                                                Handle        = 0x00041305
+	SCHED_S_TASK_TERMINATED                                                   Handle        = 0x00041306
+	SCHED_S_TASK_NO_VALID_TRIGGERS                                            Handle        = 0x00041307
+	SCHED_S_EVENT_TRIGGER                                                     Handle        = 0x00041308
+	SCHED_E_TRIGGER_NOT_FOUND                                                 Handle        = 0x80041309
+	SCHED_E_TASK_NOT_READY                                                    Handle        = 0x8004130A
+	SCHED_E_TASK_NOT_RUNNING                                                  Handle        = 0x8004130B
+	SCHED_E_SERVICE_NOT_INSTALLED                                             Handle        = 0x8004130C
+	SCHED_E_CANNOT_OPEN_TASK                                                  Handle        = 0x8004130D
+	SCHED_E_INVALID_TASK                                                      Handle        = 0x8004130E
+	SCHED_E_ACCOUNT_INFORMATION_NOT_SET                                       Handle        = 0x8004130F
+	SCHED_E_ACCOUNT_NAME_NOT_FOUND                                            Handle        = 0x80041310
+	SCHED_E_ACCOUNT_DBASE_CORRUPT                                             Handle        = 0x80041311
+	SCHED_E_NO_SECURITY_SERVICES                                              Handle        = 0x80041312
+	SCHED_E_UNKNOWN_OBJECT_VERSION                                            Handle        = 0x80041313
+	SCHED_E_UNSUPPORTED_ACCOUNT_OPTION                                        Handle        = 0x80041314
+	SCHED_E_SERVICE_NOT_RUNNING                                               Handle        = 0x80041315
+	SCHED_E_UNEXPECTEDNODE                                                    Handle        = 0x80041316
+	SCHED_E_NAMESPACE                                                         Handle        = 0x80041317
+	SCHED_E_INVALIDVALUE                                                      Handle        = 0x80041318
+	SCHED_E_MISSINGNODE                                                       Handle        = 0x80041319
+	SCHED_E_MALFORMEDXML                                                      Handle        = 0x8004131A
+	SCHED_S_SOME_TRIGGERS_FAILED                                              Handle        = 0x0004131B
+	SCHED_S_BATCH_LOGON_PROBLEM                                               Handle        = 0x0004131C
+	SCHED_E_TOO_MANY_NODES                                                    Handle        = 0x8004131D
+	SCHED_E_PAST_END_BOUNDARY                                                 Handle        = 0x8004131E
+	SCHED_E_ALREADY_RUNNING                                                   Handle        = 0x8004131F
+	SCHED_E_USER_NOT_LOGGED_ON                                                Handle        = 0x80041320
+	SCHED_E_INVALID_TASK_HASH                                                 Handle        = 0x80041321
+	SCHED_E_SERVICE_NOT_AVAILABLE                                             Handle        = 0x80041322
+	SCHED_E_SERVICE_TOO_BUSY                                                  Handle        = 0x80041323
+	SCHED_E_TASK_ATTEMPTED                                                    Handle        = 0x80041324
+	SCHED_S_TASK_QUEUED                                                       Handle        = 0x00041325
+	SCHED_E_TASK_DISABLED                                                     Handle        = 0x80041326
+	SCHED_E_TASK_NOT_V1_COMPAT                                                Handle        = 0x80041327
+	SCHED_E_START_ON_DEMAND                                                   Handle        = 0x80041328
+	SCHED_E_TASK_NOT_UBPM_COMPAT                                              Handle        = 0x80041329
+	SCHED_E_DEPRECATED_FEATURE_USED                                           Handle        = 0x80041330
+	CO_E_CLASS_CREATE_FAILED                                                  Handle        = 0x80080001
+	CO_E_SCM_ERROR                                                            Handle        = 0x80080002
+	CO_E_SCM_RPC_FAILURE                                                      Handle        = 0x80080003
+	CO_E_BAD_PATH                                                             Handle        = 0x80080004
+	CO_E_SERVER_EXEC_FAILURE                                                  Handle        = 0x80080005
+	CO_E_OBJSRV_RPC_FAILURE                                                   Handle        = 0x80080006
+	MK_E_NO_NORMALIZED                                                        Handle        = 0x80080007
+	CO_E_SERVER_STOPPING                                                      Handle        = 0x80080008
+	MEM_E_INVALID_ROOT                                                        Handle        = 0x80080009
+	MEM_E_INVALID_LINK                                                        Handle        = 0x80080010
+	MEM_E_INVALID_SIZE                                                        Handle        = 0x80080011
+	CO_S_NOTALLINTERFACES                                                     Handle        = 0x00080012
+	CO_S_MACHINENAMENOTFOUND                                                  Handle        = 0x00080013
+	CO_E_MISSING_DISPLAYNAME                                                  Handle        = 0x80080015
+	CO_E_RUNAS_VALUE_MUST_BE_AAA                                              Handle        = 0x80080016
+	CO_E_ELEVATION_DISABLED                                                   Handle        = 0x80080017
+	APPX_E_PACKAGING_INTERNAL                                                 Handle        = 0x80080200
+	APPX_E_INTERLEAVING_NOT_ALLOWED                                           Handle        = 0x80080201
+	APPX_E_RELATIONSHIPS_NOT_ALLOWED                                          Handle        = 0x80080202
+	APPX_E_MISSING_REQUIRED_FILE                                              Handle        = 0x80080203
+	APPX_E_INVALID_MANIFEST                                                   Handle        = 0x80080204
+	APPX_E_INVALID_BLOCKMAP                                                   Handle        = 0x80080205
+	APPX_E_CORRUPT_CONTENT                                                    Handle        = 0x80080206
+	APPX_E_BLOCK_HASH_INVALID                                                 Handle        = 0x80080207
+	APPX_E_REQUESTED_RANGE_TOO_LARGE                                          Handle        = 0x80080208
+	APPX_E_INVALID_SIP_CLIENT_DATA                                            Handle        = 0x80080209
+	APPX_E_INVALID_KEY_INFO                                                   Handle        = 0x8008020A
+	APPX_E_INVALID_CONTENTGROUPMAP                                            Handle        = 0x8008020B
+	APPX_E_INVALID_APPINSTALLER                                               Handle        = 0x8008020C
+	APPX_E_DELTA_BASELINE_VERSION_MISMATCH                                    Handle        = 0x8008020D
+	APPX_E_DELTA_PACKAGE_MISSING_FILE                                         Handle        = 0x8008020E
+	APPX_E_INVALID_DELTA_PACKAGE                                              Handle        = 0x8008020F
+	APPX_E_DELTA_APPENDED_PACKAGE_NOT_ALLOWED                                 Handle        = 0x80080210
+	APPX_E_INVALID_PACKAGING_LAYOUT                                           Handle        = 0x80080211
+	APPX_E_INVALID_PACKAGESIGNCONFIG                                          Handle        = 0x80080212
+	APPX_E_RESOURCESPRI_NOT_ALLOWED                                           Handle        = 0x80080213
+	APPX_E_FILE_COMPRESSION_MISMATCH                                          Handle        = 0x80080214
+	APPX_E_INVALID_PAYLOAD_PACKAGE_EXTENSION                                  Handle        = 0x80080215
+	APPX_E_INVALID_ENCRYPTION_EXCLUSION_FILE_LIST                             Handle        = 0x80080216
+	BT_E_SPURIOUS_ACTIVATION                                                  Handle        = 0x80080300
+	DISP_E_UNKNOWNINTERFACE                                                   Handle        = 0x80020001
+	DISP_E_MEMBERNOTFOUND                                                     Handle        = 0x80020003
+	DISP_E_PARAMNOTFOUND                                                      Handle        = 0x80020004
+	DISP_E_TYPEMISMATCH                                                       Handle        = 0x80020005
+	DISP_E_UNKNOWNNAME                                                        Handle        = 0x80020006
+	DISP_E_NONAMEDARGS                                                        Handle        = 0x80020007
+	DISP_E_BADVARTYPE                                                         Handle        = 0x80020008
+	DISP_E_EXCEPTION                                                          Handle        = 0x80020009
+	DISP_E_OVERFLOW                                                           Handle        = 0x8002000A
+	DISP_E_BADINDEX                                                           Handle        = 0x8002000B
+	DISP_E_UNKNOWNLCID                                                        Handle        = 0x8002000C
+	DISP_E_ARRAYISLOCKED                                                      Handle        = 0x8002000D
+	DISP_E_BADPARAMCOUNT                                                      Handle        = 0x8002000E
+	DISP_E_PARAMNOTOPTIONAL                                                   Handle        = 0x8002000F
+	DISP_E_BADCALLEE                                                          Handle        = 0x80020010
+	DISP_E_NOTACOLLECTION                                                     Handle        = 0x80020011
+	DISP_E_DIVBYZERO                                                          Handle        = 0x80020012
+	DISP_E_BUFFERTOOSMALL                                                     Handle        = 0x80020013
+	TYPE_E_BUFFERTOOSMALL                                                     Handle        = 0x80028016
+	TYPE_E_FIELDNOTFOUND                                                      Handle        = 0x80028017
+	TYPE_E_INVDATAREAD                                                        Handle        = 0x80028018
+	TYPE_E_UNSUPFORMAT                                                        Handle        = 0x80028019
+	TYPE_E_REGISTRYACCESS                                                     Handle        = 0x8002801C
+	TYPE_E_LIBNOTREGISTERED                                                   Handle        = 0x8002801D
+	TYPE_E_UNDEFINEDTYPE                                                      Handle        = 0x80028027
+	TYPE_E_QUALIFIEDNAMEDISALLOWED                                            Handle        = 0x80028028
+	TYPE_E_INVALIDSTATE                                                       Handle        = 0x80028029
+	TYPE_E_WRONGTYPEKIND                                                      Handle        = 0x8002802A
+	TYPE_E_ELEMENTNOTFOUND                                                    Handle        = 0x8002802B
+	TYPE_E_AMBIGUOUSNAME                                                      Handle        = 0x8002802C
+	TYPE_E_NAMECONFLICT                                                       Handle        = 0x8002802D
+	TYPE_E_UNKNOWNLCID                                                        Handle        = 0x8002802E
+	TYPE_E_DLLFUNCTIONNOTFOUND                                                Handle        = 0x8002802F
+	TYPE_E_BADMODULEKIND                                                      Handle        = 0x800288BD
+	TYPE_E_SIZETOOBIG                                                         Handle        = 0x800288C5
+	TYPE_E_DUPLICATEID                                                        Handle        = 0x800288C6
+	TYPE_E_INVALIDID                                                          Handle        = 0x800288CF
+	TYPE_E_TYPEMISMATCH                                                       Handle        = 0x80028CA0
+	TYPE_E_OUTOFBOUNDS                                                        Handle        = 0x80028CA1
+	TYPE_E_IOERROR                                                            Handle        = 0x80028CA2
+	TYPE_E_CANTCREATETMPFILE                                                  Handle        = 0x80028CA3
+	TYPE_E_CANTLOADLIBRARY                                                    Handle        = 0x80029C4A
+	TYPE_E_INCONSISTENTPROPFUNCS                                              Handle        = 0x80029C83
+	TYPE_E_CIRCULARTYPE                                                       Handle        = 0x80029C84
+	STG_E_INVALIDFUNCTION                                                     Handle        = 0x80030001
+	STG_E_FILENOTFOUND                                                        Handle        = 0x80030002
+	STG_E_PATHNOTFOUND                                                        Handle        = 0x80030003
+	STG_E_TOOMANYOPENFILES                                                    Handle        = 0x80030004
+	STG_E_ACCESSDENIED                                                        Handle        = 0x80030005
+	STG_E_INVALIDHANDLE                                                       Handle        = 0x80030006
+	STG_E_INSUFFICIENTMEMORY                                                  Handle        = 0x80030008
+	STG_E_INVALIDPOINTER                                                      Handle        = 0x80030009
+	STG_E_NOMOREFILES                                                         Handle        = 0x80030012
+	STG_E_DISKISWRITEPROTECTED                                                Handle        = 0x80030013
+	STG_E_SEEKERROR                                                           Handle        = 0x80030019
+	STG_E_WRITEFAULT                                                          Handle        = 0x8003001D
+	STG_E_READFAULT                                                           Handle        = 0x8003001E
+	STG_E_SHAREVIOLATION                                                      Handle        = 0x80030020
+	STG_E_LOCKVIOLATION                                                       Handle        = 0x80030021
+	STG_E_FILEALREADYEXISTS                                                   Handle        = 0x80030050
+	STG_E_INVALIDPARAMETER                                                    Handle        = 0x80030057
+	STG_E_MEDIUMFULL                                                          Handle        = 0x80030070
+	STG_E_PROPSETMISMATCHED                                                   Handle        = 0x800300F0
+	STG_E_ABNORMALAPIEXIT                                                     Handle        = 0x800300FA
+	STG_E_INVALIDHEADER                                                       Handle        = 0x800300FB
+	STG_E_INVALIDNAME                                                         Handle        = 0x800300FC
+	STG_E_UNKNOWN                                                             Handle        = 0x800300FD
+	STG_E_UNIMPLEMENTEDFUNCTION                                               Handle        = 0x800300FE
+	STG_E_INVALIDFLAG                                                         Handle        = 0x800300FF
+	STG_E_INUSE                                                               Handle        = 0x80030100
+	STG_E_NOTCURRENT                                                          Handle        = 0x80030101
+	STG_E_REVERTED                                                            Handle        = 0x80030102
+	STG_E_CANTSAVE                                                            Handle        = 0x80030103
+	STG_E_OLDFORMAT                                                           Handle        = 0x80030104
+	STG_E_OLDDLL                                                              Handle        = 0x80030105
+	STG_E_SHAREREQUIRED                                                       Handle        = 0x80030106
+	STG_E_NOTFILEBASEDSTORAGE                                                 Handle        = 0x80030107
+	STG_E_EXTANTMARSHALLINGS                                                  Handle        = 0x80030108
+	STG_E_DOCFILECORRUPT                                                      Handle        = 0x80030109
+	STG_E_BADBASEADDRESS                                                      Handle        = 0x80030110
+	STG_E_DOCFILETOOLARGE                                                     Handle        = 0x80030111
+	STG_E_NOTSIMPLEFORMAT                                                     Handle        = 0x80030112
+	STG_E_INCOMPLETE                                                          Handle        = 0x80030201
+	STG_E_TERMINATED                                                          Handle        = 0x80030202
+	STG_S_CONVERTED                                                           Handle        = 0x00030200
+	STG_S_BLOCK                                                               Handle        = 0x00030201
+	STG_S_RETRYNOW                                                            Handle        = 0x00030202
+	STG_S_MONITORING                                                          Handle        = 0x00030203
+	STG_S_MULTIPLEOPENS                                                       Handle        = 0x00030204
+	STG_S_CONSOLIDATIONFAILED                                                 Handle        = 0x00030205
+	STG_S_CANNOTCONSOLIDATE                                                   Handle        = 0x00030206
+	STG_S_POWER_CYCLE_REQUIRED                                                Handle        = 0x00030207
+	STG_E_FIRMWARE_SLOT_INVALID                                               Handle        = 0x80030208
+	STG_E_FIRMWARE_IMAGE_INVALID                                              Handle        = 0x80030209
+	STG_E_DEVICE_UNRESPONSIVE                                                 Handle        = 0x8003020A
+	STG_E_STATUS_COPY_PROTECTION_FAILURE                                      Handle        = 0x80030305
+	STG_E_CSS_AUTHENTICATION_FAILURE                                          Handle        = 0x80030306
+	STG_E_CSS_KEY_NOT_PRESENT                                                 Handle        = 0x80030307
+	STG_E_CSS_KEY_NOT_ESTABLISHED                                             Handle        = 0x80030308
+	STG_E_CSS_SCRAMBLED_SECTOR                                                Handle        = 0x80030309
+	STG_E_CSS_REGION_MISMATCH                                                 Handle        = 0x8003030A
+	STG_E_RESETS_EXHAUSTED                                                    Handle        = 0x8003030B
+	RPC_E_CALL_REJECTED                                                       Handle        = 0x80010001
+	RPC_E_CALL_CANCELED                                                       Handle        = 0x80010002
+	RPC_E_CANTPOST_INSENDCALL                                                 Handle        = 0x80010003
+	RPC_E_CANTCALLOUT_INASYNCCALL                                             Handle        = 0x80010004
+	RPC_E_CANTCALLOUT_INEXTERNALCALL                                          Handle        = 0x80010005
+	RPC_E_CONNECTION_TERMINATED                                               Handle        = 0x80010006
+	RPC_E_SERVER_DIED                                                         Handle        = 0x80010007
+	RPC_E_CLIENT_DIED                                                         Handle        = 0x80010008
+	RPC_E_INVALID_DATAPACKET                                                  Handle        = 0x80010009
+	RPC_E_CANTTRANSMIT_CALL                                                   Handle        = 0x8001000A
+	RPC_E_CLIENT_CANTMARSHAL_DATA                                             Handle        = 0x8001000B
+	RPC_E_CLIENT_CANTUNMARSHAL_DATA                                           Handle        = 0x8001000C
+	RPC_E_SERVER_CANTMARSHAL_DATA                                             Handle        = 0x8001000D
+	RPC_E_SERVER_CANTUNMARSHAL_DATA                                           Handle        = 0x8001000E
+	RPC_E_INVALID_DATA                                                        Handle        = 0x8001000F
+	RPC_E_INVALID_PARAMETER                                                   Handle        = 0x80010010
+	RPC_E_CANTCALLOUT_AGAIN                                                   Handle        = 0x80010011
+	RPC_E_SERVER_DIED_DNE                                                     Handle        = 0x80010012
+	RPC_E_SYS_CALL_FAILED                                                     Handle        = 0x80010100
+	RPC_E_OUT_OF_RESOURCES                                                    Handle        = 0x80010101
+	RPC_E_ATTEMPTED_MULTITHREAD                                               Handle        = 0x80010102
+	RPC_E_NOT_REGISTERED                                                      Handle        = 0x80010103
+	RPC_E_FAULT                                                               Handle        = 0x80010104
+	RPC_E_SERVERFAULT                                                         Handle        = 0x80010105
+	RPC_E_CHANGED_MODE                                                        Handle        = 0x80010106
+	RPC_E_INVALIDMETHOD                                                       Handle        = 0x80010107
+	RPC_E_DISCONNECTED                                                        Handle        = 0x80010108
+	RPC_E_RETRY                                                               Handle        = 0x80010109
+	RPC_E_SERVERCALL_RETRYLATER                                               Handle        = 0x8001010A
+	RPC_E_SERVERCALL_REJECTED                                                 Handle        = 0x8001010B
+	RPC_E_INVALID_CALLDATA                                                    Handle        = 0x8001010C
+	RPC_E_CANTCALLOUT_ININPUTSYNCCALL                                         Handle        = 0x8001010D
+	RPC_E_WRONG_THREAD                                                        Handle        = 0x8001010E
+	RPC_E_THREAD_NOT_INIT                                                     Handle        = 0x8001010F
+	RPC_E_VERSION_MISMATCH                                                    Handle        = 0x80010110
+	RPC_E_INVALID_HEADER                                                      Handle        = 0x80010111
+	RPC_E_INVALID_EXTENSION                                                   Handle        = 0x80010112
+	RPC_E_INVALID_IPID                                                        Handle        = 0x80010113
+	RPC_E_INVALID_OBJECT                                                      Handle        = 0x80010114
+	RPC_S_CALLPENDING                                                         Handle        = 0x80010115
+	RPC_S_WAITONTIMER                                                         Handle        = 0x80010116
+	RPC_E_CALL_COMPLETE                                                       Handle        = 0x80010117
+	RPC_E_UNSECURE_CALL                                                       Handle        = 0x80010118
+	RPC_E_TOO_LATE                                                            Handle        = 0x80010119
+	RPC_E_NO_GOOD_SECURITY_PACKAGES                                           Handle        = 0x8001011A
+	RPC_E_ACCESS_DENIED                                                       Handle        = 0x8001011B
+	RPC_E_REMOTE_DISABLED                                                     Handle        = 0x8001011C
+	RPC_E_INVALID_OBJREF                                                      Handle        = 0x8001011D
+	RPC_E_NO_CONTEXT                                                          Handle        = 0x8001011E
+	RPC_E_TIMEOUT                                                             Handle        = 0x8001011F
+	RPC_E_NO_SYNC                                                             Handle        = 0x80010120
+	RPC_E_FULLSIC_REQUIRED                                                    Handle        = 0x80010121
+	RPC_E_INVALID_STD_NAME                                                    Handle        = 0x80010122
+	CO_E_FAILEDTOIMPERSONATE                                                  Handle        = 0x80010123
+	CO_E_FAILEDTOGETSECCTX                                                    Handle        = 0x80010124
+	CO_E_FAILEDTOOPENTHREADTOKEN                                              Handle        = 0x80010125
+	CO_E_FAILEDTOGETTOKENINFO                                                 Handle        = 0x80010126
+	CO_E_TRUSTEEDOESNTMATCHCLIENT                                             Handle        = 0x80010127
+	CO_E_FAILEDTOQUERYCLIENTBLANKET                                           Handle        = 0x80010128
+	CO_E_FAILEDTOSETDACL                                                      Handle        = 0x80010129
+	CO_E_ACCESSCHECKFAILED                                                    Handle        = 0x8001012A
+	CO_E_NETACCESSAPIFAILED                                                   Handle        = 0x8001012B
+	CO_E_WRONGTRUSTEENAMESYNTAX                                               Handle        = 0x8001012C
+	CO_E_INVALIDSID                                                           Handle        = 0x8001012D
+	CO_E_CONVERSIONFAILED                                                     Handle        = 0x8001012E
+	CO_E_NOMATCHINGSIDFOUND                                                   Handle        = 0x8001012F
+	CO_E_LOOKUPACCSIDFAILED                                                   Handle        = 0x80010130
+	CO_E_NOMATCHINGNAMEFOUND                                                  Handle        = 0x80010131
+	CO_E_LOOKUPACCNAMEFAILED                                                  Handle        = 0x80010132
+	CO_E_SETSERLHNDLFAILED                                                    Handle        = 0x80010133
+	CO_E_FAILEDTOGETWINDIR                                                    Handle        = 0x80010134
+	CO_E_PATHTOOLONG                                                          Handle        = 0x80010135
+	CO_E_FAILEDTOGENUUID                                                      Handle        = 0x80010136
+	CO_E_FAILEDTOCREATEFILE                                                   Handle        = 0x80010137
+	CO_E_FAILEDTOCLOSEHANDLE                                                  Handle        = 0x80010138
+	CO_E_EXCEEDSYSACLLIMIT                                                    Handle        = 0x80010139
+	CO_E_ACESINWRONGORDER                                                     Handle        = 0x8001013A
+	CO_E_INCOMPATIBLESTREAMVERSION                                            Handle        = 0x8001013B
+	CO_E_FAILEDTOOPENPROCESSTOKEN                                             Handle        = 0x8001013C
+	CO_E_DECODEFAILED                                                         Handle        = 0x8001013D
+	CO_E_ACNOTINITIALIZED                                                     Handle        = 0x8001013F
+	CO_E_CANCEL_DISABLED                                                      Handle        = 0x80010140
+	RPC_E_UNEXPECTED                                                          Handle        = 0x8001FFFF
+	ERROR_AUDITING_DISABLED                                                   Handle        = 0xC0090001
+	ERROR_ALL_SIDS_FILTERED                                                   Handle        = 0xC0090002
+	ERROR_BIZRULES_NOT_ENABLED                                                Handle        = 0xC0090003
+	NTE_BAD_UID                                                               Handle        = 0x80090001
+	NTE_BAD_HASH                                                              Handle        = 0x80090002
+	NTE_BAD_KEY                                                               Handle        = 0x80090003
+	NTE_BAD_LEN                                                               Handle        = 0x80090004
+	NTE_BAD_DATA                                                              Handle        = 0x80090005
+	NTE_BAD_SIGNATURE                                                         Handle        = 0x80090006
+	NTE_BAD_VER                                                               Handle        = 0x80090007
+	NTE_BAD_ALGID                                                             Handle        = 0x80090008
+	NTE_BAD_FLAGS                                                             Handle        = 0x80090009
+	NTE_BAD_TYPE                                                              Handle        = 0x8009000A
+	NTE_BAD_KEY_STATE                                                         Handle        = 0x8009000B
+	NTE_BAD_HASH_STATE                                                        Handle        = 0x8009000C
+	NTE_NO_KEY                                                                Handle        = 0x8009000D
+	NTE_NO_MEMORY                                                             Handle        = 0x8009000E
+	NTE_EXISTS                                                                Handle        = 0x8009000F
+	NTE_PERM                                                                  Handle        = 0x80090010
+	NTE_NOT_FOUND                                                             Handle        = 0x80090011
+	NTE_DOUBLE_ENCRYPT                                                        Handle        = 0x80090012
+	NTE_BAD_PROVIDER                                                          Handle        = 0x80090013
+	NTE_BAD_PROV_TYPE                                                         Handle        = 0x80090014
+	NTE_BAD_PUBLIC_KEY                                                        Handle        = 0x80090015
+	NTE_BAD_KEYSET                                                            Handle        = 0x80090016
+	NTE_PROV_TYPE_NOT_DEF                                                     Handle        = 0x80090017
+	NTE_PROV_TYPE_ENTRY_BAD                                                   Handle        = 0x80090018
+	NTE_KEYSET_NOT_DEF                                                        Handle        = 0x80090019
+	NTE_KEYSET_ENTRY_BAD                                                      Handle        = 0x8009001A
+	NTE_PROV_TYPE_NO_MATCH                                                    Handle        = 0x8009001B
+	NTE_SIGNATURE_FILE_BAD                                                    Handle        = 0x8009001C
+	NTE_PROVIDER_DLL_FAIL                                                     Handle        = 0x8009001D
+	NTE_PROV_DLL_NOT_FOUND                                                    Handle        = 0x8009001E
+	NTE_BAD_KEYSET_PARAM                                                      Handle        = 0x8009001F
+	NTE_FAIL                                                                  Handle        = 0x80090020
+	NTE_SYS_ERR                                                               Handle        = 0x80090021
+	NTE_SILENT_CONTEXT                                                        Handle        = 0x80090022
+	NTE_TOKEN_KEYSET_STORAGE_FULL                                             Handle        = 0x80090023
+	NTE_TEMPORARY_PROFILE                                                     Handle        = 0x80090024
+	NTE_FIXEDPARAMETER                                                        Handle        = 0x80090025
+	NTE_INVALID_HANDLE                                                        Handle        = 0x80090026
+	NTE_INVALID_PARAMETER                                                     Handle        = 0x80090027
+	NTE_BUFFER_TOO_SMALL                                                      Handle        = 0x80090028
+	NTE_NOT_SUPPORTED                                                         Handle        = 0x80090029
+	NTE_NO_MORE_ITEMS                                                         Handle        = 0x8009002A
+	NTE_BUFFERS_OVERLAP                                                       Handle        = 0x8009002B
+	NTE_DECRYPTION_FAILURE                                                    Handle        = 0x8009002C
+	NTE_INTERNAL_ERROR                                                        Handle        = 0x8009002D
+	NTE_UI_REQUIRED                                                           Handle        = 0x8009002E
+	NTE_HMAC_NOT_SUPPORTED                                                    Handle        = 0x8009002F
+	NTE_DEVICE_NOT_READY                                                      Handle        = 0x80090030
+	NTE_AUTHENTICATION_IGNORED                                                Handle        = 0x80090031
+	NTE_VALIDATION_FAILED                                                     Handle        = 0x80090032
+	NTE_INCORRECT_PASSWORD                                                    Handle        = 0x80090033
+	NTE_ENCRYPTION_FAILURE                                                    Handle        = 0x80090034
+	NTE_DEVICE_NOT_FOUND                                                      Handle        = 0x80090035
+	NTE_USER_CANCELLED                                                        Handle        = 0x80090036
+	NTE_PASSWORD_CHANGE_REQUIRED                                              Handle        = 0x80090037
+	NTE_NOT_ACTIVE_CONSOLE                                                    Handle        = 0x80090038
+	SEC_E_INSUFFICIENT_MEMORY                                                 Handle        = 0x80090300
+	SEC_E_INVALID_HANDLE                                                      Handle        = 0x80090301
+	SEC_E_UNSUPPORTED_FUNCTION                                                Handle        = 0x80090302
+	SEC_E_TARGET_UNKNOWN                                                      Handle        = 0x80090303
+	SEC_E_INTERNAL_ERROR                                                      Handle        = 0x80090304
+	SEC_E_SECPKG_NOT_FOUND                                                    Handle        = 0x80090305
+	SEC_E_NOT_OWNER                                                           Handle        = 0x80090306
+	SEC_E_CANNOT_INSTALL                                                      Handle        = 0x80090307
+	SEC_E_INVALID_TOKEN                                                       Handle        = 0x80090308
+	SEC_E_CANNOT_PACK                                                         Handle        = 0x80090309
+	SEC_E_QOP_NOT_SUPPORTED                                                   Handle        = 0x8009030A
+	SEC_E_NO_IMPERSONATION                                                    Handle        = 0x8009030B
+	SEC_E_LOGON_DENIED                                                        Handle        = 0x8009030C
+	SEC_E_UNKNOWN_CREDENTIALS                                                 Handle        = 0x8009030D
+	SEC_E_NO_CREDENTIALS                                                      Handle        = 0x8009030E
+	SEC_E_MESSAGE_ALTERED                                                     Handle        = 0x8009030F
+	SEC_E_OUT_OF_SEQUENCE                                                     Handle        = 0x80090310
+	SEC_E_NO_AUTHENTICATING_AUTHORITY                                         Handle        = 0x80090311
+	SEC_I_CONTINUE_NEEDED                                                     Handle        = 0x00090312
+	SEC_I_COMPLETE_NEEDED                                                     Handle        = 0x00090313
+	SEC_I_COMPLETE_AND_CONTINUE                                               Handle        = 0x00090314
+	SEC_I_LOCAL_LOGON                                                         Handle        = 0x00090315
+	SEC_I_GENERIC_EXTENSION_RECEIVED                                          Handle        = 0x00090316
+	SEC_E_BAD_PKGID                                                           Handle        = 0x80090316
+	SEC_E_CONTEXT_EXPIRED                                                     Handle        = 0x80090317
+	SEC_I_CONTEXT_EXPIRED                                                     Handle        = 0x00090317
+	SEC_E_INCOMPLETE_MESSAGE                                                  Handle        = 0x80090318
+	SEC_E_INCOMPLETE_CREDENTIALS                                              Handle        = 0x80090320
+	SEC_E_BUFFER_TOO_SMALL                                                    Handle        = 0x80090321
+	SEC_I_INCOMPLETE_CREDENTIALS                                              Handle        = 0x00090320
+	SEC_I_RENEGOTIATE                                                         Handle        = 0x00090321
+	SEC_E_WRONG_PRINCIPAL                                                     Handle        = 0x80090322
+	SEC_I_NO_LSA_CONTEXT                                                      Handle        = 0x00090323
+	SEC_E_TIME_SKEW                                                           Handle        = 0x80090324
+	SEC_E_UNTRUSTED_ROOT                                                      Handle        = 0x80090325
+	SEC_E_ILLEGAL_MESSAGE                                                     Handle        = 0x80090326
+	SEC_E_CERT_UNKNOWN                                                        Handle        = 0x80090327
+	SEC_E_CERT_EXPIRED                                                        Handle        = 0x80090328
+	SEC_E_ENCRYPT_FAILURE                                                     Handle        = 0x80090329
+	SEC_E_DECRYPT_FAILURE                                                     Handle        = 0x80090330
+	SEC_E_ALGORITHM_MISMATCH                                                  Handle        = 0x80090331
+	SEC_E_SECURITY_QOS_FAILED                                                 Handle        = 0x80090332
+	SEC_E_UNFINISHED_CONTEXT_DELETED                                          Handle        = 0x80090333
+	SEC_E_NO_TGT_REPLY                                                        Handle        = 0x80090334
+	SEC_E_NO_IP_ADDRESSES                                                     Handle        = 0x80090335
+	SEC_E_WRONG_CREDENTIAL_HANDLE                                             Handle        = 0x80090336
+	SEC_E_CRYPTO_SYSTEM_INVALID                                               Handle        = 0x80090337
+	SEC_E_MAX_REFERRALS_EXCEEDED                                              Handle        = 0x80090338
+	SEC_E_MUST_BE_KDC                                                         Handle        = 0x80090339
+	SEC_E_STRONG_CRYPTO_NOT_SUPPORTED                                         Handle        = 0x8009033A
+	SEC_E_TOO_MANY_PRINCIPALS                                                 Handle        = 0x8009033B
+	SEC_E_NO_PA_DATA                                                          Handle        = 0x8009033C
+	SEC_E_PKINIT_NAME_MISMATCH                                                Handle        = 0x8009033D
+	SEC_E_SMARTCARD_LOGON_REQUIRED                                            Handle        = 0x8009033E
+	SEC_E_SHUTDOWN_IN_PROGRESS                                                Handle        = 0x8009033F
+	SEC_E_KDC_INVALID_REQUEST                                                 Handle        = 0x80090340
+	SEC_E_KDC_UNABLE_TO_REFER                                                 Handle        = 0x80090341
+	SEC_E_KDC_UNKNOWN_ETYPE                                                   Handle        = 0x80090342
+	SEC_E_UNSUPPORTED_PREAUTH                                                 Handle        = 0x80090343
+	SEC_E_DELEGATION_REQUIRED                                                 Handle        = 0x80090345
+	SEC_E_BAD_BINDINGS                                                        Handle        = 0x80090346
+	SEC_E_MULTIPLE_ACCOUNTS                                                   Handle        = 0x80090347
+	SEC_E_NO_KERB_KEY                                                         Handle        = 0x80090348
+	SEC_E_CERT_WRONG_USAGE                                                    Handle        = 0x80090349
+	SEC_E_DOWNGRADE_DETECTED                                                  Handle        = 0x80090350
+	SEC_E_SMARTCARD_CERT_REVOKED                                              Handle        = 0x80090351
+	SEC_E_ISSUING_CA_UNTRUSTED                                                Handle        = 0x80090352
+	SEC_E_REVOCATION_OFFLINE_C                                                Handle        = 0x80090353
+	SEC_E_PKINIT_CLIENT_FAILURE                                               Handle        = 0x80090354
+	SEC_E_SMARTCARD_CERT_EXPIRED                                              Handle        = 0x80090355
+	SEC_E_NO_S4U_PROT_SUPPORT                                                 Handle        = 0x80090356
+	SEC_E_CROSSREALM_DELEGATION_FAILURE                                       Handle        = 0x80090357
+	SEC_E_REVOCATION_OFFLINE_KDC                                              Handle        = 0x80090358
+	SEC_E_ISSUING_CA_UNTRUSTED_KDC                                            Handle        = 0x80090359
+	SEC_E_KDC_CERT_EXPIRED                                                    Handle        = 0x8009035A
+	SEC_E_KDC_CERT_REVOKED                                                    Handle        = 0x8009035B
+	SEC_I_SIGNATURE_NEEDED                                                    Handle        = 0x0009035C
+	SEC_E_INVALID_PARAMETER                                                   Handle        = 0x8009035D
+	SEC_E_DELEGATION_POLICY                                                   Handle        = 0x8009035E
+	SEC_E_POLICY_NLTM_ONLY                                                    Handle        = 0x8009035F
+	SEC_I_NO_RENEGOTIATION                                                    Handle        = 0x00090360
+	SEC_E_NO_CONTEXT                                                          Handle        = 0x80090361
+	SEC_E_PKU2U_CERT_FAILURE                                                  Handle        = 0x80090362
+	SEC_E_MUTUAL_AUTH_FAILED                                                  Handle        = 0x80090363
+	SEC_I_MESSAGE_FRAGMENT                                                    Handle        = 0x00090364
+	SEC_E_ONLY_HTTPS_ALLOWED                                                  Handle        = 0x80090365
+	SEC_I_CONTINUE_NEEDED_MESSAGE_OK                                          Handle        = 0x00090366
+	SEC_E_APPLICATION_PROTOCOL_MISMATCH                                       Handle        = 0x80090367
+	SEC_I_ASYNC_CALL_PENDING                                                  Handle        = 0x00090368
+	SEC_E_INVALID_UPN_NAME                                                    Handle        = 0x80090369
+	SEC_E_EXT_BUFFER_TOO_SMALL                                                Handle        = 0x8009036A
+	SEC_E_INSUFFICIENT_BUFFERS                                                Handle        = 0x8009036B
+	SEC_E_NO_SPM                                                                            = SEC_E_INTERNAL_ERROR
+	SEC_E_NOT_SUPPORTED                                                                     = SEC_E_UNSUPPORTED_FUNCTION
+	CRYPT_E_MSG_ERROR                                                         Handle        = 0x80091001
+	CRYPT_E_UNKNOWN_ALGO                                                      Handle        = 0x80091002
+	CRYPT_E_OID_FORMAT                                                        Handle        = 0x80091003
+	CRYPT_E_INVALID_MSG_TYPE                                                  Handle        = 0x80091004
+	CRYPT_E_UNEXPECTED_ENCODING                                               Handle        = 0x80091005
+	CRYPT_E_AUTH_ATTR_MISSING                                                 Handle        = 0x80091006
+	CRYPT_E_HASH_VALUE                                                        Handle        = 0x80091007
+	CRYPT_E_INVALID_INDEX                                                     Handle        = 0x80091008
+	CRYPT_E_ALREADY_DECRYPTED                                                 Handle        = 0x80091009
+	CRYPT_E_NOT_DECRYPTED                                                     Handle        = 0x8009100A
+	CRYPT_E_RECIPIENT_NOT_FOUND                                               Handle        = 0x8009100B
+	CRYPT_E_CONTROL_TYPE                                                      Handle        = 0x8009100C
+	CRYPT_E_ISSUER_SERIALNUMBER                                               Handle        = 0x8009100D
+	CRYPT_E_SIGNER_NOT_FOUND                                                  Handle        = 0x8009100E
+	CRYPT_E_ATTRIBUTES_MISSING                                                Handle        = 0x8009100F
+	CRYPT_E_STREAM_MSG_NOT_READY                                              Handle        = 0x80091010
+	CRYPT_E_STREAM_INSUFFICIENT_DATA                                          Handle        = 0x80091011
+	CRYPT_I_NEW_PROTECTION_REQUIRED                                           Handle        = 0x00091012
+	CRYPT_E_BAD_LEN                                                           Handle        = 0x80092001
+	CRYPT_E_BAD_ENCODE                                                        Handle        = 0x80092002
+	CRYPT_E_FILE_ERROR                                                        Handle        = 0x80092003
+	CRYPT_E_NOT_FOUND                                                         Handle        = 0x80092004
+	CRYPT_E_EXISTS                                                            Handle        = 0x80092005
+	CRYPT_E_NO_PROVIDER                                                       Handle        = 0x80092006
+	CRYPT_E_SELF_SIGNED                                                       Handle        = 0x80092007
+	CRYPT_E_DELETED_PREV                                                      Handle        = 0x80092008
+	CRYPT_E_NO_MATCH                                                          Handle        = 0x80092009
+	CRYPT_E_UNEXPECTED_MSG_TYPE                                               Handle        = 0x8009200A
+	CRYPT_E_NO_KEY_PROPERTY                                                   Handle        = 0x8009200B
+	CRYPT_E_NO_DECRYPT_CERT                                                   Handle        = 0x8009200C
+	CRYPT_E_BAD_MSG                                                           Handle        = 0x8009200D
+	CRYPT_E_NO_SIGNER                                                         Handle        = 0x8009200E
+	CRYPT_E_PENDING_CLOSE                                                     Handle        = 0x8009200F
+	CRYPT_E_REVOKED                                                           Handle        = 0x80092010
+	CRYPT_E_NO_REVOCATION_DLL                                                 Handle        = 0x80092011
+	CRYPT_E_NO_REVOCATION_CHECK                                               Handle        = 0x80092012
+	CRYPT_E_REVOCATION_OFFLINE                                                Handle        = 0x80092013
+	CRYPT_E_NOT_IN_REVOCATION_DATABASE                                        Handle        = 0x80092014
+	CRYPT_E_INVALID_NUMERIC_STRING                                            Handle        = 0x80092020
+	CRYPT_E_INVALID_PRINTABLE_STRING                                          Handle        = 0x80092021
+	CRYPT_E_INVALID_IA5_STRING                                                Handle        = 0x80092022
+	CRYPT_E_INVALID_X500_STRING                                               Handle        = 0x80092023
+	CRYPT_E_NOT_CHAR_STRING                                                   Handle        = 0x80092024
+	CRYPT_E_FILERESIZED                                                       Handle        = 0x80092025
+	CRYPT_E_SECURITY_SETTINGS                                                 Handle        = 0x80092026
+	CRYPT_E_NO_VERIFY_USAGE_DLL                                               Handle        = 0x80092027
+	CRYPT_E_NO_VERIFY_USAGE_CHECK                                             Handle        = 0x80092028
+	CRYPT_E_VERIFY_USAGE_OFFLINE                                              Handle        = 0x80092029
+	CRYPT_E_NOT_IN_CTL                                                        Handle        = 0x8009202A
+	CRYPT_E_NO_TRUSTED_SIGNER                                                 Handle        = 0x8009202B
+	CRYPT_E_MISSING_PUBKEY_PARA                                               Handle        = 0x8009202C
+	CRYPT_E_OBJECT_LOCATOR_OBJECT_NOT_FOUND                                   Handle        = 0x8009202D
+	CRYPT_E_OSS_ERROR                                                         Handle        = 0x80093000
+	OSS_MORE_BUF                                                              Handle        = 0x80093001
+	OSS_NEGATIVE_UINTEGER                                                     Handle        = 0x80093002
+	OSS_PDU_RANGE                                                             Handle        = 0x80093003
+	OSS_MORE_INPUT                                                            Handle        = 0x80093004
+	OSS_DATA_ERROR                                                            Handle        = 0x80093005
+	OSS_BAD_ARG                                                               Handle        = 0x80093006
+	OSS_BAD_VERSION                                                           Handle        = 0x80093007
+	OSS_OUT_MEMORY                                                            Handle        = 0x80093008
+	OSS_PDU_MISMATCH                                                          Handle        = 0x80093009
+	OSS_LIMITED                                                               Handle        = 0x8009300A
+	OSS_BAD_PTR                                                               Handle        = 0x8009300B
+	OSS_BAD_TIME                                                              Handle        = 0x8009300C
+	OSS_INDEFINITE_NOT_SUPPORTED                                              Handle        = 0x8009300D
+	OSS_MEM_ERROR                                                             Handle        = 0x8009300E
+	OSS_BAD_TABLE                                                             Handle        = 0x8009300F
+	OSS_TOO_LONG                                                              Handle        = 0x80093010
+	OSS_CONSTRAINT_VIOLATED                                                   Handle        = 0x80093011
+	OSS_FATAL_ERROR                                                           Handle        = 0x80093012
+	OSS_ACCESS_SERIALIZATION_ERROR                                            Handle        = 0x80093013
+	OSS_NULL_TBL                                                              Handle        = 0x80093014
+	OSS_NULL_FCN                                                              Handle        = 0x80093015
+	OSS_BAD_ENCRULES                                                          Handle        = 0x80093016
+	OSS_UNAVAIL_ENCRULES                                                      Handle        = 0x80093017
+	OSS_CANT_OPEN_TRACE_WINDOW                                                Handle        = 0x80093018
+	OSS_UNIMPLEMENTED                                                         Handle        = 0x80093019
+	OSS_OID_DLL_NOT_LINKED                                                    Handle        = 0x8009301A
+	OSS_CANT_OPEN_TRACE_FILE                                                  Handle        = 0x8009301B
+	OSS_TRACE_FILE_ALREADY_OPEN                                               Handle        = 0x8009301C
+	OSS_TABLE_MISMATCH                                                        Handle        = 0x8009301D
+	OSS_TYPE_NOT_SUPPORTED                                                    Handle        = 0x8009301E
+	OSS_REAL_DLL_NOT_LINKED                                                   Handle        = 0x8009301F
+	OSS_REAL_CODE_NOT_LINKED                                                  Handle        = 0x80093020
+	OSS_OUT_OF_RANGE                                                          Handle        = 0x80093021
+	OSS_COPIER_DLL_NOT_LINKED                                                 Handle        = 0x80093022
+	OSS_CONSTRAINT_DLL_NOT_LINKED                                             Handle        = 0x80093023
+	OSS_COMPARATOR_DLL_NOT_LINKED                                             Handle        = 0x80093024
+	OSS_COMPARATOR_CODE_NOT_LINKED                                            Handle        = 0x80093025
+	OSS_MEM_MGR_DLL_NOT_LINKED                                                Handle        = 0x80093026
+	OSS_PDV_DLL_NOT_LINKED                                                    Handle        = 0x80093027
+	OSS_PDV_CODE_NOT_LINKED                                                   Handle        = 0x80093028
+	OSS_API_DLL_NOT_LINKED                                                    Handle        = 0x80093029
+	OSS_BERDER_DLL_NOT_LINKED                                                 Handle        = 0x8009302A
+	OSS_PER_DLL_NOT_LINKED                                                    Handle        = 0x8009302B
+	OSS_OPEN_TYPE_ERROR                                                       Handle        = 0x8009302C
+	OSS_MUTEX_NOT_CREATED                                                     Handle        = 0x8009302D
+	OSS_CANT_CLOSE_TRACE_FILE                                                 Handle        = 0x8009302E
+	CRYPT_E_ASN1_ERROR                                                        Handle        = 0x80093100
+	CRYPT_E_ASN1_INTERNAL                                                     Handle        = 0x80093101
+	CRYPT_E_ASN1_EOD                                                          Handle        = 0x80093102
+	CRYPT_E_ASN1_CORRUPT                                                      Handle        = 0x80093103
+	CRYPT_E_ASN1_LARGE                                                        Handle        = 0x80093104
+	CRYPT_E_ASN1_CONSTRAINT                                                   Handle        = 0x80093105
+	CRYPT_E_ASN1_MEMORY                                                       Handle        = 0x80093106
+	CRYPT_E_ASN1_OVERFLOW                                                     Handle        = 0x80093107
+	CRYPT_E_ASN1_BADPDU                                                       Handle        = 0x80093108
+	CRYPT_E_ASN1_BADARGS                                                      Handle        = 0x80093109
+	CRYPT_E_ASN1_BADREAL                                                      Handle        = 0x8009310A
+	CRYPT_E_ASN1_BADTAG                                                       Handle        = 0x8009310B
+	CRYPT_E_ASN1_CHOICE                                                       Handle        = 0x8009310C
+	CRYPT_E_ASN1_RULE                                                         Handle        = 0x8009310D
+	CRYPT_E_ASN1_UTF8                                                         Handle        = 0x8009310E
+	CRYPT_E_ASN1_PDU_TYPE                                                     Handle        = 0x80093133
+	CRYPT_E_ASN1_NYI                                                          Handle        = 0x80093134
+	CRYPT_E_ASN1_EXTENDED                                                     Handle        = 0x80093201
+	CRYPT_E_ASN1_NOEOD                                                        Handle        = 0x80093202
+	CERTSRV_E_BAD_REQUESTSUBJECT                                              Handle        = 0x80094001
+	CERTSRV_E_NO_REQUEST                                                      Handle        = 0x80094002
+	CERTSRV_E_BAD_REQUESTSTATUS                                               Handle        = 0x80094003
+	CERTSRV_E_PROPERTY_EMPTY                                                  Handle        = 0x80094004
+	CERTSRV_E_INVALID_CA_CERTIFICATE                                          Handle        = 0x80094005
+	CERTSRV_E_SERVER_SUSPENDED                                                Handle        = 0x80094006
+	CERTSRV_E_ENCODING_LENGTH                                                 Handle        = 0x80094007
+	CERTSRV_E_ROLECONFLICT                                                    Handle        = 0x80094008
+	CERTSRV_E_RESTRICTEDOFFICER                                               Handle        = 0x80094009
+	CERTSRV_E_KEY_ARCHIVAL_NOT_CONFIGURED                                     Handle        = 0x8009400A
+	CERTSRV_E_NO_VALID_KRA                                                    Handle        = 0x8009400B
+	CERTSRV_E_BAD_REQUEST_KEY_ARCHIVAL                                        Handle        = 0x8009400C
+	CERTSRV_E_NO_CAADMIN_DEFINED                                              Handle        = 0x8009400D
+	CERTSRV_E_BAD_RENEWAL_CERT_ATTRIBUTE                                      Handle        = 0x8009400E
+	CERTSRV_E_NO_DB_SESSIONS                                                  Handle        = 0x8009400F
+	CERTSRV_E_ALIGNMENT_FAULT                                                 Handle        = 0x80094010
+	CERTSRV_E_ENROLL_DENIED                                                   Handle        = 0x80094011
+	CERTSRV_E_TEMPLATE_DENIED                                                 Handle        = 0x80094012
+	CERTSRV_E_DOWNLEVEL_DC_SSL_OR_UPGRADE                                     Handle        = 0x80094013
+	CERTSRV_E_ADMIN_DENIED_REQUEST                                            Handle        = 0x80094014
+	CERTSRV_E_NO_POLICY_SERVER                                                Handle        = 0x80094015
+	CERTSRV_E_WEAK_SIGNATURE_OR_KEY                                           Handle        = 0x80094016
+	CERTSRV_E_KEY_ATTESTATION_NOT_SUPPORTED                                   Handle        = 0x80094017
+	CERTSRV_E_ENCRYPTION_CERT_REQUIRED                                        Handle        = 0x80094018
+	CERTSRV_E_UNSUPPORTED_CERT_TYPE                                           Handle        = 0x80094800
+	CERTSRV_E_NO_CERT_TYPE                                                    Handle        = 0x80094801
+	CERTSRV_E_TEMPLATE_CONFLICT                                               Handle        = 0x80094802
+	CERTSRV_E_SUBJECT_ALT_NAME_REQUIRED                                       Handle        = 0x80094803
+	CERTSRV_E_ARCHIVED_KEY_REQUIRED                                           Handle        = 0x80094804
+	CERTSRV_E_SMIME_REQUIRED                                                  Handle        = 0x80094805
+	CERTSRV_E_BAD_RENEWAL_SUBJECT                                             Handle        = 0x80094806
+	CERTSRV_E_BAD_TEMPLATE_VERSION                                            Handle        = 0x80094807
+	CERTSRV_E_TEMPLATE_POLICY_REQUIRED                                        Handle        = 0x80094808
+	CERTSRV_E_SIGNATURE_POLICY_REQUIRED                                       Handle        = 0x80094809
+	CERTSRV_E_SIGNATURE_COUNT                                                 Handle        = 0x8009480A
+	CERTSRV_E_SIGNATURE_REJECTED                                              Handle        = 0x8009480B
+	CERTSRV_E_ISSUANCE_POLICY_REQUIRED                                        Handle        = 0x8009480C
+	CERTSRV_E_SUBJECT_UPN_REQUIRED                                            Handle        = 0x8009480D
+	CERTSRV_E_SUBJECT_DIRECTORY_GUID_REQUIRED                                 Handle        = 0x8009480E
+	CERTSRV_E_SUBJECT_DNS_REQUIRED                                            Handle        = 0x8009480F
+	CERTSRV_E_ARCHIVED_KEY_UNEXPECTED                                         Handle        = 0x80094810
+	CERTSRV_E_KEY_LENGTH                                                      Handle        = 0x80094811
+	CERTSRV_E_SUBJECT_EMAIL_REQUIRED                                          Handle        = 0x80094812
+	CERTSRV_E_UNKNOWN_CERT_TYPE                                               Handle        = 0x80094813
+	CERTSRV_E_CERT_TYPE_OVERLAP                                               Handle        = 0x80094814
+	CERTSRV_E_TOO_MANY_SIGNATURES                                             Handle        = 0x80094815
+	CERTSRV_E_RENEWAL_BAD_PUBLIC_KEY                                          Handle        = 0x80094816
+	CERTSRV_E_INVALID_EK                                                      Handle        = 0x80094817
+	CERTSRV_E_INVALID_IDBINDING                                               Handle        = 0x80094818
+	CERTSRV_E_INVALID_ATTESTATION                                             Handle        = 0x80094819
+	CERTSRV_E_KEY_ATTESTATION                                                 Handle        = 0x8009481A
+	CERTSRV_E_CORRUPT_KEY_ATTESTATION                                         Handle        = 0x8009481B
+	CERTSRV_E_EXPIRED_CHALLENGE                                               Handle        = 0x8009481C
+	CERTSRV_E_INVALID_RESPONSE                                                Handle        = 0x8009481D
+	CERTSRV_E_INVALID_REQUESTID                                               Handle        = 0x8009481E
+	CERTSRV_E_REQUEST_PRECERTIFICATE_MISMATCH                                 Handle        = 0x8009481F
+	CERTSRV_E_PENDING_CLIENT_RESPONSE                                         Handle        = 0x80094820
+	XENROLL_E_KEY_NOT_EXPORTABLE                                              Handle        = 0x80095000
+	XENROLL_E_CANNOT_ADD_ROOT_CERT                                            Handle        = 0x80095001
+	XENROLL_E_RESPONSE_KA_HASH_NOT_FOUND                                      Handle        = 0x80095002
+	XENROLL_E_RESPONSE_UNEXPECTED_KA_HASH                                     Handle        = 0x80095003
+	XENROLL_E_RESPONSE_KA_HASH_MISMATCH                                       Handle        = 0x80095004
+	XENROLL_E_KEYSPEC_SMIME_MISMATCH                                          Handle        = 0x80095005
+	TRUST_E_SYSTEM_ERROR                                                      Handle        = 0x80096001
+	TRUST_E_NO_SIGNER_CERT                                                    Handle        = 0x80096002
+	TRUST_E_COUNTER_SIGNER                                                    Handle        = 0x80096003
+	TRUST_E_CERT_SIGNATURE                                                    Handle        = 0x80096004
+	TRUST_E_TIME_STAMP                                                        Handle        = 0x80096005
+	TRUST_E_BAD_DIGEST                                                        Handle        = 0x80096010
+	TRUST_E_MALFORMED_SIGNATURE                                               Handle        = 0x80096011
+	TRUST_E_BASIC_CONSTRAINTS                                                 Handle        = 0x80096019
+	TRUST_E_FINANCIAL_CRITERIA                                                Handle        = 0x8009601E
+	MSSIPOTF_E_OUTOFMEMRANGE                                                  Handle        = 0x80097001
+	MSSIPOTF_E_CANTGETOBJECT                                                  Handle        = 0x80097002
+	MSSIPOTF_E_NOHEADTABLE                                                    Handle        = 0x80097003
+	MSSIPOTF_E_BAD_MAGICNUMBER                                                Handle        = 0x80097004
+	MSSIPOTF_E_BAD_OFFSET_TABLE                                               Handle        = 0x80097005
+	MSSIPOTF_E_TABLE_TAGORDER                                                 Handle        = 0x80097006
+	MSSIPOTF_E_TABLE_LONGWORD                                                 Handle        = 0x80097007
+	MSSIPOTF_E_BAD_FIRST_TABLE_PLACEMENT                                      Handle        = 0x80097008
+	MSSIPOTF_E_TABLES_OVERLAP                                                 Handle        = 0x80097009
+	MSSIPOTF_E_TABLE_PADBYTES                                                 Handle        = 0x8009700A
+	MSSIPOTF_E_FILETOOSMALL                                                   Handle        = 0x8009700B
+	MSSIPOTF_E_TABLE_CHECKSUM                                                 Handle        = 0x8009700C
+	MSSIPOTF_E_FILE_CHECKSUM                                                  Handle        = 0x8009700D
+	MSSIPOTF_E_FAILED_POLICY                                                  Handle        = 0x80097010
+	MSSIPOTF_E_FAILED_HINTS_CHECK                                             Handle        = 0x80097011
+	MSSIPOTF_E_NOT_OPENTYPE                                                   Handle        = 0x80097012
+	MSSIPOTF_E_FILE                                                           Handle        = 0x80097013
+	MSSIPOTF_E_CRYPT                                                          Handle        = 0x80097014
+	MSSIPOTF_E_BADVERSION                                                     Handle        = 0x80097015
+	MSSIPOTF_E_DSIG_STRUCTURE                                                 Handle        = 0x80097016
+	MSSIPOTF_E_PCONST_CHECK                                                   Handle        = 0x80097017
+	MSSIPOTF_E_STRUCTURE                                                      Handle        = 0x80097018
+	ERROR_CRED_REQUIRES_CONFIRMATION                                          Handle        = 0x80097019
+	NTE_OP_OK                                                                 syscall.Errno = 0
+	TRUST_E_PROVIDER_UNKNOWN                                                  Handle        = 0x800B0001
+	TRUST_E_ACTION_UNKNOWN                                                    Handle        = 0x800B0002
+	TRUST_E_SUBJECT_FORM_UNKNOWN                                              Handle        = 0x800B0003
+	TRUST_E_SUBJECT_NOT_TRUSTED                                               Handle        = 0x800B0004
+	DIGSIG_E_ENCODE                                                           Handle        = 0x800B0005
+	DIGSIG_E_DECODE                                                           Handle        = 0x800B0006
+	DIGSIG_E_EXTENSIBILITY                                                    Handle        = 0x800B0007
+	DIGSIG_E_CRYPTO                                                           Handle        = 0x800B0008
+	PERSIST_E_SIZEDEFINITE                                                    Handle        = 0x800B0009
+	PERSIST_E_SIZEINDEFINITE                                                  Handle        = 0x800B000A
+	PERSIST_E_NOTSELFSIZING                                                   Handle        = 0x800B000B
+	TRUST_E_NOSIGNATURE                                                       Handle        = 0x800B0100
+	CERT_E_EXPIRED                                                            Handle        = 0x800B0101
+	CERT_E_VALIDITYPERIODNESTING                                              Handle        = 0x800B0102
+	CERT_E_ROLE                                                               Handle        = 0x800B0103
+	CERT_E_PATHLENCONST                                                       Handle        = 0x800B0104
+	CERT_E_CRITICAL                                                           Handle        = 0x800B0105
+	CERT_E_PURPOSE                                                            Handle        = 0x800B0106
+	CERT_E_ISSUERCHAINING                                                     Handle        = 0x800B0107
+	CERT_E_MALFORMED                                                          Handle        = 0x800B0108
+	CERT_E_UNTRUSTEDROOT                                                      Handle        = 0x800B0109
+	CERT_E_CHAINING                                                           Handle        = 0x800B010A
+	TRUST_E_FAIL                                                              Handle        = 0x800B010B
+	CERT_E_REVOKED                                                            Handle        = 0x800B010C
+	CERT_E_UNTRUSTEDTESTROOT                                                  Handle        = 0x800B010D
+	CERT_E_REVOCATION_FAILURE                                                 Handle        = 0x800B010E
+	CERT_E_CN_NO_MATCH                                                        Handle        = 0x800B010F
+	CERT_E_WRONG_USAGE                                                        Handle        = 0x800B0110
+	TRUST_E_EXPLICIT_DISTRUST                                                 Handle        = 0x800B0111
+	CERT_E_UNTRUSTEDCA                                                        Handle        = 0x800B0112
+	CERT_E_INVALID_POLICY                                                     Handle        = 0x800B0113
+	CERT_E_INVALID_NAME                                                       Handle        = 0x800B0114
+	SPAPI_E_EXPECTED_SECTION_NAME                                             Handle        = 0x800F0000
+	SPAPI_E_BAD_SECTION_NAME_LINE                                             Handle        = 0x800F0001
+	SPAPI_E_SECTION_NAME_TOO_LONG                                             Handle        = 0x800F0002
+	SPAPI_E_GENERAL_SYNTAX                                                    Handle        = 0x800F0003
+	SPAPI_E_WRONG_INF_STYLE                                                   Handle        = 0x800F0100
+	SPAPI_E_SECTION_NOT_FOUND                                                 Handle        = 0x800F0101
+	SPAPI_E_LINE_NOT_FOUND                                                    Handle        = 0x800F0102
+	SPAPI_E_NO_BACKUP                                                         Handle        = 0x800F0103
+	SPAPI_E_NO_ASSOCIATED_CLASS                                               Handle        = 0x800F0200
+	SPAPI_E_CLASS_MISMATCH                                                    Handle        = 0x800F0201
+	SPAPI_E_DUPLICATE_FOUND                                                   Handle        = 0x800F0202
+	SPAPI_E_NO_DRIVER_SELECTED                                                Handle        = 0x800F0203
+	SPAPI_E_KEY_DOES_NOT_EXIST                                                Handle        = 0x800F0204
+	SPAPI_E_INVALID_DEVINST_NAME                                              Handle        = 0x800F0205
+	SPAPI_E_INVALID_CLASS                                                     Handle        = 0x800F0206
+	SPAPI_E_DEVINST_ALREADY_EXISTS                                            Handle        = 0x800F0207
+	SPAPI_E_DEVINFO_NOT_REGISTERED                                            Handle        = 0x800F0208
+	SPAPI_E_INVALID_REG_PROPERTY                                              Handle        = 0x800F0209
+	SPAPI_E_NO_INF                                                            Handle        = 0x800F020A
+	SPAPI_E_NO_SUCH_DEVINST                                                   Handle        = 0x800F020B
+	SPAPI_E_CANT_LOAD_CLASS_ICON                                              Handle        = 0x800F020C
+	SPAPI_E_INVALID_CLASS_INSTALLER                                           Handle        = 0x800F020D
+	SPAPI_E_DI_DO_DEFAULT                                                     Handle        = 0x800F020E
+	SPAPI_E_DI_NOFILECOPY                                                     Handle        = 0x800F020F
+	SPAPI_E_INVALID_HWPROFILE                                                 Handle        = 0x800F0210
+	SPAPI_E_NO_DEVICE_SELECTED                                                Handle        = 0x800F0211
+	SPAPI_E_DEVINFO_LIST_LOCKED                                               Handle        = 0x800F0212
+	SPAPI_E_DEVINFO_DATA_LOCKED                                               Handle        = 0x800F0213
+	SPAPI_E_DI_BAD_PATH                                                       Handle        = 0x800F0214
+	SPAPI_E_NO_CLASSINSTALL_PARAMS                                            Handle        = 0x800F0215
+	SPAPI_E_FILEQUEUE_LOCKED                                                  Handle        = 0x800F0216
+	SPAPI_E_BAD_SERVICE_INSTALLSECT                                           Handle        = 0x800F0217
+	SPAPI_E_NO_CLASS_DRIVER_LIST                                              Handle        = 0x800F0218
+	SPAPI_E_NO_ASSOCIATED_SERVICE                                             Handle        = 0x800F0219
+	SPAPI_E_NO_DEFAULT_DEVICE_INTERFACE                                       Handle        = 0x800F021A
+	SPAPI_E_DEVICE_INTERFACE_ACTIVE                                           Handle        = 0x800F021B
+	SPAPI_E_DEVICE_INTERFACE_REMOVED                                          Handle        = 0x800F021C
+	SPAPI_E_BAD_INTERFACE_INSTALLSECT                                         Handle        = 0x800F021D
+	SPAPI_E_NO_SUCH_INTERFACE_CLASS                                           Handle        = 0x800F021E
+	SPAPI_E_INVALID_REFERENCE_STRING                                          Handle        = 0x800F021F
+	SPAPI_E_INVALID_MACHINENAME                                               Handle        = 0x800F0220
+	SPAPI_E_REMOTE_COMM_FAILURE                                               Handle        = 0x800F0221
+	SPAPI_E_MACHINE_UNAVAILABLE                                               Handle        = 0x800F0222
+	SPAPI_E_NO_CONFIGMGR_SERVICES                                             Handle        = 0x800F0223
+	SPAPI_E_INVALID_PROPPAGE_PROVIDER                                         Handle        = 0x800F0224
+	SPAPI_E_NO_SUCH_DEVICE_INTERFACE                                          Handle        = 0x800F0225
+	SPAPI_E_DI_POSTPROCESSING_REQUIRED                                        Handle        = 0x800F0226
+	SPAPI_E_INVALID_COINSTALLER                                               Handle        = 0x800F0227
+	SPAPI_E_NO_COMPAT_DRIVERS                                                 Handle        = 0x800F0228
+	SPAPI_E_NO_DEVICE_ICON                                                    Handle        = 0x800F0229
+	SPAPI_E_INVALID_INF_LOGCONFIG                                             Handle        = 0x800F022A
+	SPAPI_E_DI_DONT_INSTALL                                                   Handle        = 0x800F022B
+	SPAPI_E_INVALID_FILTER_DRIVER                                             Handle        = 0x800F022C
+	SPAPI_E_NON_WINDOWS_NT_DRIVER                                             Handle        = 0x800F022D
+	SPAPI_E_NON_WINDOWS_DRIVER                                                Handle        = 0x800F022E
+	SPAPI_E_NO_CATALOG_FOR_OEM_INF                                            Handle        = 0x800F022F
+	SPAPI_E_DEVINSTALL_QUEUE_NONNATIVE                                        Handle        = 0x800F0230
+	SPAPI_E_NOT_DISABLEABLE                                                   Handle        = 0x800F0231
+	SPAPI_E_CANT_REMOVE_DEVINST                                               Handle        = 0x800F0232
+	SPAPI_E_INVALID_TARGET                                                    Handle        = 0x800F0233
+	SPAPI_E_DRIVER_NONNATIVE                                                  Handle        = 0x800F0234
+	SPAPI_E_IN_WOW64                                                          Handle        = 0x800F0235
+	SPAPI_E_SET_SYSTEM_RESTORE_POINT                                          Handle        = 0x800F0236
+	SPAPI_E_INCORRECTLY_COPIED_INF                                            Handle        = 0x800F0237
+	SPAPI_E_SCE_DISABLED                                                      Handle        = 0x800F0238
+	SPAPI_E_UNKNOWN_EXCEPTION                                                 Handle        = 0x800F0239
+	SPAPI_E_PNP_REGISTRY_ERROR                                                Handle        = 0x800F023A
+	SPAPI_E_REMOTE_REQUEST_UNSUPPORTED                                        Handle        = 0x800F023B
+	SPAPI_E_NOT_AN_INSTALLED_OEM_INF                                          Handle        = 0x800F023C
+	SPAPI_E_INF_IN_USE_BY_DEVICES                                             Handle        = 0x800F023D
+	SPAPI_E_DI_FUNCTION_OBSOLETE                                              Handle        = 0x800F023E
+	SPAPI_E_NO_AUTHENTICODE_CATALOG                                           Handle        = 0x800F023F
+	SPAPI_E_AUTHENTICODE_DISALLOWED                                           Handle        = 0x800F0240
+	SPAPI_E_AUTHENTICODE_TRUSTED_PUBLISHER                                    Handle        = 0x800F0241
+	SPAPI_E_AUTHENTICODE_TRUST_NOT_ESTABLISHED                                Handle        = 0x800F0242
+	SPAPI_E_AUTHENTICODE_PUBLISHER_NOT_TRUSTED                                Handle        = 0x800F0243
+	SPAPI_E_SIGNATURE_OSATTRIBUTE_MISMATCH                                    Handle        = 0x800F0244
+	SPAPI_E_ONLY_VALIDATE_VIA_AUTHENTICODE                                    Handle        = 0x800F0245
+	SPAPI_E_DEVICE_INSTALLER_NOT_READY                                        Handle        = 0x800F0246
+	SPAPI_E_DRIVER_STORE_ADD_FAILED                                           Handle        = 0x800F0247
+	SPAPI_E_DEVICE_INSTALL_BLOCKED                                            Handle        = 0x800F0248
+	SPAPI_E_DRIVER_INSTALL_BLOCKED                                            Handle        = 0x800F0249
+	SPAPI_E_WRONG_INF_TYPE                                                    Handle        = 0x800F024A
+	SPAPI_E_FILE_HASH_NOT_IN_CATALOG                                          Handle        = 0x800F024B
+	SPAPI_E_DRIVER_STORE_DELETE_FAILED                                        Handle        = 0x800F024C
+	SPAPI_E_UNRECOVERABLE_STACK_OVERFLOW                                      Handle        = 0x800F0300
+	SPAPI_E_ERROR_NOT_INSTALLED                                               Handle        = 0x800F1000
+	SCARD_S_SUCCESS                                                                         = S_OK
+	SCARD_F_INTERNAL_ERROR                                                    Handle        = 0x80100001
+	SCARD_E_CANCELLED                                                         Handle        = 0x80100002
+	SCARD_E_INVALID_HANDLE                                                    Handle        = 0x80100003
+	SCARD_E_INVALID_PARAMETER                                                 Handle        = 0x80100004
+	SCARD_E_INVALID_TARGET                                                    Handle        = 0x80100005
+	SCARD_E_NO_MEMORY                                                         Handle        = 0x80100006
+	SCARD_F_WAITED_TOO_LONG                                                   Handle        = 0x80100007
+	SCARD_E_INSUFFICIENT_BUFFER                                               Handle        = 0x80100008
+	SCARD_E_UNKNOWN_READER                                                    Handle        = 0x80100009
+	SCARD_E_TIMEOUT                                                           Handle        = 0x8010000A
+	SCARD_E_SHARING_VIOLATION                                                 Handle        = 0x8010000B
+	SCARD_E_NO_SMARTCARD                                                      Handle        = 0x8010000C
+	SCARD_E_UNKNOWN_CARD                                                      Handle        = 0x8010000D
+	SCARD_E_CANT_DISPOSE                                                      Handle        = 0x8010000E
+	SCARD_E_PROTO_MISMATCH                                                    Handle        = 0x8010000F
+	SCARD_E_NOT_READY                                                         Handle        = 0x80100010
+	SCARD_E_INVALID_VALUE                                                     Handle        = 0x80100011
+	SCARD_E_SYSTEM_CANCELLED                                                  Handle        = 0x80100012
+	SCARD_F_COMM_ERROR                                                        Handle        = 0x80100013
+	SCARD_F_UNKNOWN_ERROR                                                     Handle        = 0x80100014
+	SCARD_E_INVALID_ATR                                                       Handle        = 0x80100015
+	SCARD_E_NOT_TRANSACTED                                                    Handle        = 0x80100016
+	SCARD_E_READER_UNAVAILABLE                                                Handle        = 0x80100017
+	SCARD_P_SHUTDOWN                                                          Handle        = 0x80100018
+	SCARD_E_PCI_TOO_SMALL                                                     Handle        = 0x80100019
+	SCARD_E_READER_UNSUPPORTED                                                Handle        = 0x8010001A
+	SCARD_E_DUPLICATE_READER                                                  Handle        = 0x8010001B
+	SCARD_E_CARD_UNSUPPORTED                                                  Handle        = 0x8010001C
+	SCARD_E_NO_SERVICE                                                        Handle        = 0x8010001D
+	SCARD_E_SERVICE_STOPPED                                                   Handle        = 0x8010001E
+	SCARD_E_UNEXPECTED                                                        Handle        = 0x8010001F
+	SCARD_E_ICC_INSTALLATION                                                  Handle        = 0x80100020
+	SCARD_E_ICC_CREATEORDER                                                   Handle        = 0x80100021
+	SCARD_E_UNSUPPORTED_FEATURE                                               Handle        = 0x80100022
+	SCARD_E_DIR_NOT_FOUND                                                     Handle        = 0x80100023
+	SCARD_E_FILE_NOT_FOUND                                                    Handle        = 0x80100024
+	SCARD_E_NO_DIR                                                            Handle        = 0x80100025
+	SCARD_E_NO_FILE                                                           Handle        = 0x80100026
+	SCARD_E_NO_ACCESS                                                         Handle        = 0x80100027
+	SCARD_E_WRITE_TOO_MANY                                                    Handle        = 0x80100028
+	SCARD_E_BAD_SEEK                                                          Handle        = 0x80100029
+	SCARD_E_INVALID_CHV                                                       Handle        = 0x8010002A
+	SCARD_E_UNKNOWN_RES_MNG                                                   Handle        = 0x8010002B
+	SCARD_E_NO_SUCH_CERTIFICATE                                               Handle        = 0x8010002C
+	SCARD_E_CERTIFICATE_UNAVAILABLE                                           Handle        = 0x8010002D
+	SCARD_E_NO_READERS_AVAILABLE                                              Handle        = 0x8010002E
+	SCARD_E_COMM_DATA_LOST                                                    Handle        = 0x8010002F
+	SCARD_E_NO_KEY_CONTAINER                                                  Handle        = 0x80100030
+	SCARD_E_SERVER_TOO_BUSY                                                   Handle        = 0x80100031
+	SCARD_E_PIN_CACHE_EXPIRED                                                 Handle        = 0x80100032
+	SCARD_E_NO_PIN_CACHE                                                      Handle        = 0x80100033
+	SCARD_E_READ_ONLY_CARD                                                    Handle        = 0x80100034
+	SCARD_W_UNSUPPORTED_CARD                                                  Handle        = 0x80100065
+	SCARD_W_UNRESPONSIVE_CARD                                                 Handle        = 0x80100066
+	SCARD_W_UNPOWERED_CARD                                                    Handle        = 0x80100067
+	SCARD_W_RESET_CARD                                                        Handle        = 0x80100068
+	SCARD_W_REMOVED_CARD                                                      Handle        = 0x80100069
+	SCARD_W_SECURITY_VIOLATION                                                Handle        = 0x8010006A
+	SCARD_W_WRONG_CHV                                                         Handle        = 0x8010006B
+	SCARD_W_CHV_BLOCKED                                                       Handle        = 0x8010006C
+	SCARD_W_EOF                                                               Handle        = 0x8010006D
+	SCARD_W_CANCELLED_BY_USER                                                 Handle        = 0x8010006E
+	SCARD_W_CARD_NOT_AUTHENTICATED                                            Handle        = 0x8010006F
+	SCARD_W_CACHE_ITEM_NOT_FOUND                                              Handle        = 0x80100070
+	SCARD_W_CACHE_ITEM_STALE                                                  Handle        = 0x80100071
+	SCARD_W_CACHE_ITEM_TOO_BIG                                                Handle        = 0x80100072
+	COMADMIN_E_OBJECTERRORS                                                   Handle        = 0x80110401
+	COMADMIN_E_OBJECTINVALID                                                  Handle        = 0x80110402
+	COMADMIN_E_KEYMISSING                                                     Handle        = 0x80110403
+	COMADMIN_E_ALREADYINSTALLED                                               Handle        = 0x80110404
+	COMADMIN_E_APP_FILE_WRITEFAIL                                             Handle        = 0x80110407
+	COMADMIN_E_APP_FILE_READFAIL                                              Handle        = 0x80110408
+	COMADMIN_E_APP_FILE_VERSION                                               Handle        = 0x80110409
+	COMADMIN_E_BADPATH                                                        Handle        = 0x8011040A
+	COMADMIN_E_APPLICATIONEXISTS                                              Handle        = 0x8011040B
+	COMADMIN_E_ROLEEXISTS                                                     Handle        = 0x8011040C
+	COMADMIN_E_CANTCOPYFILE                                                   Handle        = 0x8011040D
+	COMADMIN_E_NOUSER                                                         Handle        = 0x8011040F
+	COMADMIN_E_INVALIDUSERIDS                                                 Handle        = 0x80110410
+	COMADMIN_E_NOREGISTRYCLSID                                                Handle        = 0x80110411
+	COMADMIN_E_BADREGISTRYPROGID                                              Handle        = 0x80110412
+	COMADMIN_E_AUTHENTICATIONLEVEL                                            Handle        = 0x80110413
+	COMADMIN_E_USERPASSWDNOTVALID                                             Handle        = 0x80110414
+	COMADMIN_E_CLSIDORIIDMISMATCH                                             Handle        = 0x80110418
+	COMADMIN_E_REMOTEINTERFACE                                                Handle        = 0x80110419
+	COMADMIN_E_DLLREGISTERSERVER                                              Handle        = 0x8011041A
+	COMADMIN_E_NOSERVERSHARE                                                  Handle        = 0x8011041B
+	COMADMIN_E_DLLLOADFAILED                                                  Handle        = 0x8011041D
+	COMADMIN_E_BADREGISTRYLIBID                                               Handle        = 0x8011041E
+	COMADMIN_E_APPDIRNOTFOUND                                                 Handle        = 0x8011041F
+	COMADMIN_E_REGISTRARFAILED                                                Handle        = 0x80110423
+	COMADMIN_E_COMPFILE_DOESNOTEXIST                                          Handle        = 0x80110424
+	COMADMIN_E_COMPFILE_LOADDLLFAIL                                           Handle        = 0x80110425
+	COMADMIN_E_COMPFILE_GETCLASSOBJ                                           Handle        = 0x80110426
+	COMADMIN_E_COMPFILE_CLASSNOTAVAIL                                         Handle        = 0x80110427
+	COMADMIN_E_COMPFILE_BADTLB                                                Handle        = 0x80110428
+	COMADMIN_E_COMPFILE_NOTINSTALLABLE                                        Handle        = 0x80110429
+	COMADMIN_E_NOTCHANGEABLE                                                  Handle        = 0x8011042A
+	COMADMIN_E_NOTDELETEABLE                                                  Handle        = 0x8011042B
+	COMADMIN_E_SESSION                                                        Handle        = 0x8011042C
+	COMADMIN_E_COMP_MOVE_LOCKED                                               Handle        = 0x8011042D
+	COMADMIN_E_COMP_MOVE_BAD_DEST                                             Handle        = 0x8011042E
+	COMADMIN_E_REGISTERTLB                                                    Handle        = 0x80110430
+	COMADMIN_E_SYSTEMAPP                                                      Handle        = 0x80110433
+	COMADMIN_E_COMPFILE_NOREGISTRAR                                           Handle        = 0x80110434
+	COMADMIN_E_COREQCOMPINSTALLED                                             Handle        = 0x80110435
+	COMADMIN_E_SERVICENOTINSTALLED                                            Handle        = 0x80110436
+	COMADMIN_E_PROPERTYSAVEFAILED                                             Handle        = 0x80110437
+	COMADMIN_E_OBJECTEXISTS                                                   Handle        = 0x80110438
+	COMADMIN_E_COMPONENTEXISTS                                                Handle        = 0x80110439
+	COMADMIN_E_REGFILE_CORRUPT                                                Handle        = 0x8011043B
+	COMADMIN_E_PROPERTY_OVERFLOW                                              Handle        = 0x8011043C
+	COMADMIN_E_NOTINREGISTRY                                                  Handle        = 0x8011043E
+	COMADMIN_E_OBJECTNOTPOOLABLE                                              Handle        = 0x8011043F
+	COMADMIN_E_APPLID_MATCHES_CLSID                                           Handle        = 0x80110446
+	COMADMIN_E_ROLE_DOES_NOT_EXIST                                            Handle        = 0x80110447
+	COMADMIN_E_START_APP_NEEDS_COMPONENTS                                     Handle        = 0x80110448
+	COMADMIN_E_REQUIRES_DIFFERENT_PLATFORM                                    Handle        = 0x80110449
+	COMADMIN_E_CAN_NOT_EXPORT_APP_PROXY                                       Handle        = 0x8011044A
+	COMADMIN_E_CAN_NOT_START_APP                                              Handle        = 0x8011044B
+	COMADMIN_E_CAN_NOT_EXPORT_SYS_APP                                         Handle        = 0x8011044C
+	COMADMIN_E_CANT_SUBSCRIBE_TO_COMPONENT                                    Handle        = 0x8011044D
+	COMADMIN_E_EVENTCLASS_CANT_BE_SUBSCRIBER                                  Handle        = 0x8011044E
+	COMADMIN_E_LIB_APP_PROXY_INCOMPATIBLE                                     Handle        = 0x8011044F
+	COMADMIN_E_BASE_PARTITION_ONLY                                            Handle        = 0x80110450
+	COMADMIN_E_START_APP_DISABLED                                             Handle        = 0x80110451
+	COMADMIN_E_CAT_DUPLICATE_PARTITION_NAME                                   Handle        = 0x80110457
+	COMADMIN_E_CAT_INVALID_PARTITION_NAME                                     Handle        = 0x80110458
+	COMADMIN_E_CAT_PARTITION_IN_USE                                           Handle        = 0x80110459
+	COMADMIN_E_FILE_PARTITION_DUPLICATE_FILES                                 Handle        = 0x8011045A
+	COMADMIN_E_CAT_IMPORTED_COMPONENTS_NOT_ALLOWED                            Handle        = 0x8011045B
+	COMADMIN_E_AMBIGUOUS_APPLICATION_NAME                                     Handle        = 0x8011045C
+	COMADMIN_E_AMBIGUOUS_PARTITION_NAME                                       Handle        = 0x8011045D
+	COMADMIN_E_REGDB_NOTINITIALIZED                                           Handle        = 0x80110472
+	COMADMIN_E_REGDB_NOTOPEN                                                  Handle        = 0x80110473
+	COMADMIN_E_REGDB_SYSTEMERR                                                Handle        = 0x80110474
+	COMADMIN_E_REGDB_ALREADYRUNNING                                           Handle        = 0x80110475
+	COMADMIN_E_MIG_VERSIONNOTSUPPORTED                                        Handle        = 0x80110480
+	COMADMIN_E_MIG_SCHEMANOTFOUND                                             Handle        = 0x80110481
+	COMADMIN_E_CAT_BITNESSMISMATCH                                            Handle        = 0x80110482
+	COMADMIN_E_CAT_UNACCEPTABLEBITNESS                                        Handle        = 0x80110483
+	COMADMIN_E_CAT_WRONGAPPBITNESS                                            Handle        = 0x80110484
+	COMADMIN_E_CAT_PAUSE_RESUME_NOT_SUPPORTED                                 Handle        = 0x80110485
+	COMADMIN_E_CAT_SERVERFAULT                                                Handle        = 0x80110486
+	COMQC_E_APPLICATION_NOT_QUEUED                                            Handle        = 0x80110600
+	COMQC_E_NO_QUEUEABLE_INTERFACES                                           Handle        = 0x80110601
+	COMQC_E_QUEUING_SERVICE_NOT_AVAILABLE                                     Handle        = 0x80110602
+	COMQC_E_NO_IPERSISTSTREAM                                                 Handle        = 0x80110603
+	COMQC_E_BAD_MESSAGE                                                       Handle        = 0x80110604
+	COMQC_E_UNAUTHENTICATED                                                   Handle        = 0x80110605
+	COMQC_E_UNTRUSTED_ENQUEUER                                                Handle        = 0x80110606
+	MSDTC_E_DUPLICATE_RESOURCE                                                Handle        = 0x80110701
+	COMADMIN_E_OBJECT_PARENT_MISSING                                          Handle        = 0x80110808
+	COMADMIN_E_OBJECT_DOES_NOT_EXIST                                          Handle        = 0x80110809
+	COMADMIN_E_APP_NOT_RUNNING                                                Handle        = 0x8011080A
+	COMADMIN_E_INVALID_PARTITION                                              Handle        = 0x8011080B
+	COMADMIN_E_SVCAPP_NOT_POOLABLE_OR_RECYCLABLE                              Handle        = 0x8011080D
+	COMADMIN_E_USER_IN_SET                                                    Handle        = 0x8011080E
+	COMADMIN_E_CANTRECYCLELIBRARYAPPS                                         Handle        = 0x8011080F
+	COMADMIN_E_CANTRECYCLESERVICEAPPS                                         Handle        = 0x80110811
+	COMADMIN_E_PROCESSALREADYRECYCLED                                         Handle        = 0x80110812
+	COMADMIN_E_PAUSEDPROCESSMAYNOTBERECYCLED                                  Handle        = 0x80110813
+	COMADMIN_E_CANTMAKEINPROCSERVICE                                          Handle        = 0x80110814
+	COMADMIN_E_PROGIDINUSEBYCLSID                                             Handle        = 0x80110815
+	COMADMIN_E_DEFAULT_PARTITION_NOT_IN_SET                                   Handle        = 0x80110816
+	COMADMIN_E_RECYCLEDPROCESSMAYNOTBEPAUSED                                  Handle        = 0x80110817
+	COMADMIN_E_PARTITION_ACCESSDENIED                                         Handle        = 0x80110818
+	COMADMIN_E_PARTITION_MSI_ONLY                                             Handle        = 0x80110819
+	COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_1_0_FORMAT                          Handle        = 0x8011081A
+	COMADMIN_E_LEGACYCOMPS_NOT_ALLOWED_IN_NONBASE_PARTITIONS                  Handle        = 0x8011081B
+	COMADMIN_E_COMP_MOVE_SOURCE                                               Handle        = 0x8011081C
+	COMADMIN_E_COMP_MOVE_DEST                                                 Handle        = 0x8011081D
+	COMADMIN_E_COMP_MOVE_PRIVATE                                              Handle        = 0x8011081E
+	COMADMIN_E_BASEPARTITION_REQUIRED_IN_SET                                  Handle        = 0x8011081F
+	COMADMIN_E_CANNOT_ALIAS_EVENTCLASS                                        Handle        = 0x80110820
+	COMADMIN_E_PRIVATE_ACCESSDENIED                                           Handle        = 0x80110821
+	COMADMIN_E_SAFERINVALID                                                   Handle        = 0x80110822
+	COMADMIN_E_REGISTRY_ACCESSDENIED                                          Handle        = 0x80110823
+	COMADMIN_E_PARTITIONS_DISABLED                                            Handle        = 0x80110824
+	WER_S_REPORT_DEBUG                                                        Handle        = 0x001B0000
+	WER_S_REPORT_UPLOADED                                                     Handle        = 0x001B0001
+	WER_S_REPORT_QUEUED                                                       Handle        = 0x001B0002
+	WER_S_DISABLED                                                            Handle        = 0x001B0003
+	WER_S_SUSPENDED_UPLOAD                                                    Handle        = 0x001B0004
+	WER_S_DISABLED_QUEUE                                                      Handle        = 0x001B0005
+	WER_S_DISABLED_ARCHIVE                                                    Handle        = 0x001B0006
+	WER_S_REPORT_ASYNC                                                        Handle        = 0x001B0007
+	WER_S_IGNORE_ASSERT_INSTANCE                                              Handle        = 0x001B0008
+	WER_S_IGNORE_ALL_ASSERTS                                                  Handle        = 0x001B0009
+	WER_S_ASSERT_CONTINUE                                                     Handle        = 0x001B000A
+	WER_S_THROTTLED                                                           Handle        = 0x001B000B
+	WER_S_REPORT_UPLOADED_CAB                                                 Handle        = 0x001B000C
+	WER_E_CRASH_FAILURE                                                       Handle        = 0x801B8000
+	WER_E_CANCELED                                                            Handle        = 0x801B8001
+	WER_E_NETWORK_FAILURE                                                     Handle        = 0x801B8002
+	WER_E_NOT_INITIALIZED                                                     Handle        = 0x801B8003
+	WER_E_ALREADY_REPORTING                                                   Handle        = 0x801B8004
+	WER_E_DUMP_THROTTLED                                                      Handle        = 0x801B8005
+	WER_E_INSUFFICIENT_CONSENT                                                Handle        = 0x801B8006
+	WER_E_TOO_HEAVY                                                           Handle        = 0x801B8007
+	ERROR_FLT_IO_COMPLETE                                                     Handle        = 0x001F0001
+	ERROR_FLT_NO_HANDLER_DEFINED                                              Handle        = 0x801F0001
+	ERROR_FLT_CONTEXT_ALREADY_DEFINED                                         Handle        = 0x801F0002
+	ERROR_FLT_INVALID_ASYNCHRONOUS_REQUEST                                    Handle        = 0x801F0003
+	ERROR_FLT_DISALLOW_FAST_IO                                                Handle        = 0x801F0004
+	ERROR_FLT_INVALID_NAME_REQUEST                                            Handle        = 0x801F0005
+	ERROR_FLT_NOT_SAFE_TO_POST_OPERATION                                      Handle        = 0x801F0006
+	ERROR_FLT_NOT_INITIALIZED                                                 Handle        = 0x801F0007
+	ERROR_FLT_FILTER_NOT_READY                                                Handle        = 0x801F0008
+	ERROR_FLT_POST_OPERATION_CLEANUP                                          Handle        = 0x801F0009
+	ERROR_FLT_INTERNAL_ERROR                                                  Handle        = 0x801F000A
+	ERROR_FLT_DELETING_OBJECT                                                 Handle        = 0x801F000B
+	ERROR_FLT_MUST_BE_NONPAGED_POOL                                           Handle        = 0x801F000C
+	ERROR_FLT_DUPLICATE_ENTRY                                                 Handle        = 0x801F000D
+	ERROR_FLT_CBDQ_DISABLED                                                   Handle        = 0x801F000E
+	ERROR_FLT_DO_NOT_ATTACH                                                   Handle        = 0x801F000F
+	ERROR_FLT_DO_NOT_DETACH                                                   Handle        = 0x801F0010
+	ERROR_FLT_INSTANCE_ALTITUDE_COLLISION                                     Handle        = 0x801F0011
+	ERROR_FLT_INSTANCE_NAME_COLLISION                                         Handle        = 0x801F0012
+	ERROR_FLT_FILTER_NOT_FOUND                                                Handle        = 0x801F0013
+	ERROR_FLT_VOLUME_NOT_FOUND                                                Handle        = 0x801F0014
+	ERROR_FLT_INSTANCE_NOT_FOUND                                              Handle        = 0x801F0015
+	ERROR_FLT_CONTEXT_ALLOCATION_NOT_FOUND                                    Handle        = 0x801F0016
+	ERROR_FLT_INVALID_CONTEXT_REGISTRATION                                    Handle        = 0x801F0017
+	ERROR_FLT_NAME_CACHE_MISS                                                 Handle        = 0x801F0018
+	ERROR_FLT_NO_DEVICE_OBJECT                                                Handle        = 0x801F0019
+	ERROR_FLT_VOLUME_ALREADY_MOUNTED                                          Handle        = 0x801F001A
+	ERROR_FLT_ALREADY_ENLISTED                                                Handle        = 0x801F001B
+	ERROR_FLT_CONTEXT_ALREADY_LINKED                                          Handle        = 0x801F001C
+	ERROR_FLT_NO_WAITER_FOR_REPLY                                             Handle        = 0x801F0020
+	ERROR_FLT_REGISTRATION_BUSY                                               Handle        = 0x801F0023
+	ERROR_HUNG_DISPLAY_DRIVER_THREAD                                          Handle        = 0x80260001
+	DWM_E_COMPOSITIONDISABLED                                                 Handle        = 0x80263001
+	DWM_E_REMOTING_NOT_SUPPORTED                                              Handle        = 0x80263002
+	DWM_E_NO_REDIRECTION_SURFACE_AVAILABLE                                    Handle        = 0x80263003
+	DWM_E_NOT_QUEUING_PRESENTS                                                Handle        = 0x80263004
+	DWM_E_ADAPTER_NOT_FOUND                                                   Handle        = 0x80263005
+	DWM_S_GDI_REDIRECTION_SURFACE                                             Handle        = 0x00263005
+	DWM_E_TEXTURE_TOO_LARGE                                                   Handle        = 0x80263007
+	DWM_S_GDI_REDIRECTION_SURFACE_BLT_VIA_GDI                                 Handle        = 0x00263008
+	ERROR_MONITOR_NO_DESCRIPTOR                                               Handle        = 0x00261001
+	ERROR_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT                                   Handle        = 0x00261002
+	ERROR_MONITOR_INVALID_DESCRIPTOR_CHECKSUM                                 Handle        = 0xC0261003
+	ERROR_MONITOR_INVALID_STANDARD_TIMING_BLOCK                               Handle        = 0xC0261004
+	ERROR_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED                           Handle        = 0xC0261005
+	ERROR_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK                          Handle        = 0xC0261006
+	ERROR_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK                          Handle        = 0xC0261007
+	ERROR_MONITOR_NO_MORE_DESCRIPTOR_DATA                                     Handle        = 0xC0261008
+	ERROR_MONITOR_INVALID_DETAILED_TIMING_BLOCK                               Handle        = 0xC0261009
+	ERROR_MONITOR_INVALID_MANUFACTURE_DATE                                    Handle        = 0xC026100A
+	ERROR_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER                                   Handle        = 0xC0262000
+	ERROR_GRAPHICS_INSUFFICIENT_DMA_BUFFER                                    Handle        = 0xC0262001
+	ERROR_GRAPHICS_INVALID_DISPLAY_ADAPTER                                    Handle        = 0xC0262002
+	ERROR_GRAPHICS_ADAPTER_WAS_RESET                                          Handle        = 0xC0262003
+	ERROR_GRAPHICS_INVALID_DRIVER_MODEL                                       Handle        = 0xC0262004
+	ERROR_GRAPHICS_PRESENT_MODE_CHANGED                                       Handle        = 0xC0262005
+	ERROR_GRAPHICS_PRESENT_OCCLUDED                                           Handle        = 0xC0262006
+	ERROR_GRAPHICS_PRESENT_DENIED                                             Handle        = 0xC0262007
+	ERROR_GRAPHICS_CANNOTCOLORCONVERT                                         Handle        = 0xC0262008
+	ERROR_GRAPHICS_DRIVER_MISMATCH                                            Handle        = 0xC0262009
+	ERROR_GRAPHICS_PARTIAL_DATA_POPULATED                                     Handle        = 0x4026200A
+	ERROR_GRAPHICS_PRESENT_REDIRECTION_DISABLED                               Handle        = 0xC026200B
+	ERROR_GRAPHICS_PRESENT_UNOCCLUDED                                         Handle        = 0xC026200C
+	ERROR_GRAPHICS_WINDOWDC_NOT_AVAILABLE                                     Handle        = 0xC026200D
+	ERROR_GRAPHICS_WINDOWLESS_PRESENT_DISABLED                                Handle        = 0xC026200E
+	ERROR_GRAPHICS_PRESENT_INVALID_WINDOW                                     Handle        = 0xC026200F
+	ERROR_GRAPHICS_PRESENT_BUFFER_NOT_BOUND                                   Handle        = 0xC0262010
+	ERROR_GRAPHICS_VAIL_STATE_CHANGED                                         Handle        = 0xC0262011
+	ERROR_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN                         Handle        = 0xC0262012
+	ERROR_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED                            Handle        = 0xC0262013
+	ERROR_GRAPHICS_NO_VIDEO_MEMORY                                            Handle        = 0xC0262100
+	ERROR_GRAPHICS_CANT_LOCK_MEMORY                                           Handle        = 0xC0262101
+	ERROR_GRAPHICS_ALLOCATION_BUSY                                            Handle        = 0xC0262102
+	ERROR_GRAPHICS_TOO_MANY_REFERENCES                                        Handle        = 0xC0262103
+	ERROR_GRAPHICS_TRY_AGAIN_LATER                                            Handle        = 0xC0262104
+	ERROR_GRAPHICS_TRY_AGAIN_NOW                                              Handle        = 0xC0262105
+	ERROR_GRAPHICS_ALLOCATION_INVALID                                         Handle        = 0xC0262106
+	ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE                           Handle        = 0xC0262107
+	ERROR_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED                           Handle        = 0xC0262108
+	ERROR_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION                               Handle        = 0xC0262109
+	ERROR_GRAPHICS_INVALID_ALLOCATION_USAGE                                   Handle        = 0xC0262110
+	ERROR_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION                              Handle        = 0xC0262111
+	ERROR_GRAPHICS_ALLOCATION_CLOSED                                          Handle        = 0xC0262112
+	ERROR_GRAPHICS_INVALID_ALLOCATION_INSTANCE                                Handle        = 0xC0262113
+	ERROR_GRAPHICS_INVALID_ALLOCATION_HANDLE                                  Handle        = 0xC0262114
+	ERROR_GRAPHICS_WRONG_ALLOCATION_DEVICE                                    Handle        = 0xC0262115
+	ERROR_GRAPHICS_ALLOCATION_CONTENT_LOST                                    Handle        = 0xC0262116
+	ERROR_GRAPHICS_GPU_EXCEPTION_ON_DEVICE                                    Handle        = 0xC0262200
+	ERROR_GRAPHICS_SKIP_ALLOCATION_PREPARATION                                Handle        = 0x40262201
+	ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY                                     Handle        = 0xC0262300
+	ERROR_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED                               Handle        = 0xC0262301
+	ERROR_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED                     Handle        = 0xC0262302
+	ERROR_GRAPHICS_INVALID_VIDPN                                              Handle        = 0xC0262303
+	ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE                               Handle        = 0xC0262304
+	ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET                               Handle        = 0xC0262305
+	ERROR_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED                               Handle        = 0xC0262306
+	ERROR_GRAPHICS_MODE_NOT_PINNED                                            Handle        = 0x00262307
+	ERROR_GRAPHICS_INVALID_VIDPN_SOURCEMODESET                                Handle        = 0xC0262308
+	ERROR_GRAPHICS_INVALID_VIDPN_TARGETMODESET                                Handle        = 0xC0262309
+	ERROR_GRAPHICS_INVALID_FREQUENCY                                          Handle        = 0xC026230A
+	ERROR_GRAPHICS_INVALID_ACTIVE_REGION                                      Handle        = 0xC026230B
+	ERROR_GRAPHICS_INVALID_TOTAL_REGION                                       Handle        = 0xC026230C
+	ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE                          Handle        = 0xC0262310
+	ERROR_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE                          Handle        = 0xC0262311
+	ERROR_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET                             Handle        = 0xC0262312
+	ERROR_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY                                   Handle        = 0xC0262313
+	ERROR_GRAPHICS_MODE_ALREADY_IN_MODESET                                    Handle        = 0xC0262314
+	ERROR_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET                              Handle        = 0xC0262315
+	ERROR_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET                              Handle        = 0xC0262316
+	ERROR_GRAPHICS_SOURCE_ALREADY_IN_SET                                      Handle        = 0xC0262317
+	ERROR_GRAPHICS_TARGET_ALREADY_IN_SET                                      Handle        = 0xC0262318
+	ERROR_GRAPHICS_INVALID_VIDPN_PRESENT_PATH                                 Handle        = 0xC0262319
+	ERROR_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY                              Handle        = 0xC026231A
+	ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET                          Handle        = 0xC026231B
+	ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE                             Handle        = 0xC026231C
+	ERROR_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET                                  Handle        = 0xC026231D
+	ERROR_GRAPHICS_NO_PREFERRED_MODE                                          Handle        = 0x0026231E
+	ERROR_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET                              Handle        = 0xC026231F
+	ERROR_GRAPHICS_STALE_MODESET                                              Handle        = 0xC0262320
+	ERROR_GRAPHICS_INVALID_MONITOR_SOURCEMODESET                              Handle        = 0xC0262321
+	ERROR_GRAPHICS_INVALID_MONITOR_SOURCE_MODE                                Handle        = 0xC0262322
+	ERROR_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN                            Handle        = 0xC0262323
+	ERROR_GRAPHICS_MODE_ID_MUST_BE_UNIQUE                                     Handle        = 0xC0262324
+	ERROR_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION            Handle        = 0xC0262325
+	ERROR_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES                    Handle        = 0xC0262326
+	ERROR_GRAPHICS_PATH_NOT_IN_TOPOLOGY                                       Handle        = 0xC0262327
+	ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE                      Handle        = 0xC0262328
+	ERROR_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET                      Handle        = 0xC0262329
+	ERROR_GRAPHICS_INVALID_MONITORDESCRIPTORSET                               Handle        = 0xC026232A
+	ERROR_GRAPHICS_INVALID_MONITORDESCRIPTOR                                  Handle        = 0xC026232B
+	ERROR_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET                               Handle        = 0xC026232C
+	ERROR_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET                           Handle        = 0xC026232D
+	ERROR_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE                        Handle        = 0xC026232E
+	ERROR_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE                           Handle        = 0xC026232F
+	ERROR_GRAPHICS_RESOURCES_NOT_RELATED                                      Handle        = 0xC0262330
+	ERROR_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE                                   Handle        = 0xC0262331
+	ERROR_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE                                   Handle        = 0xC0262332
+	ERROR_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET                                  Handle        = 0xC0262333
+	ERROR_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER               Handle        = 0xC0262334
+	ERROR_GRAPHICS_NO_VIDPNMGR                                                Handle        = 0xC0262335
+	ERROR_GRAPHICS_NO_ACTIVE_VIDPN                                            Handle        = 0xC0262336
+	ERROR_GRAPHICS_STALE_VIDPN_TOPOLOGY                                       Handle        = 0xC0262337
+	ERROR_GRAPHICS_MONITOR_NOT_CONNECTED                                      Handle        = 0xC0262338
+	ERROR_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY                                     Handle        = 0xC0262339
+	ERROR_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE                                Handle        = 0xC026233A
+	ERROR_GRAPHICS_INVALID_VISIBLEREGION_SIZE                                 Handle        = 0xC026233B
+	ERROR_GRAPHICS_INVALID_STRIDE                                             Handle        = 0xC026233C
+	ERROR_GRAPHICS_INVALID_PIXELFORMAT                                        Handle        = 0xC026233D
+	ERROR_GRAPHICS_INVALID_COLORBASIS                                         Handle        = 0xC026233E
+	ERROR_GRAPHICS_INVALID_PIXELVALUEACCESSMODE                               Handle        = 0xC026233F
+	ERROR_GRAPHICS_TARGET_NOT_IN_TOPOLOGY                                     Handle        = 0xC0262340
+	ERROR_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT                         Handle        = 0xC0262341
+	ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE                                        Handle        = 0xC0262342
+	ERROR_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN                                   Handle        = 0xC0262343
+	ERROR_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL                            Handle        = 0xC0262344
+	ERROR_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION               Handle        = 0xC0262345
+	ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED         Handle        = 0xC0262346
+	ERROR_GRAPHICS_INVALID_GAMMA_RAMP                                         Handle        = 0xC0262347
+	ERROR_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED                                   Handle        = 0xC0262348
+	ERROR_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED                                Handle        = 0xC0262349
+	ERROR_GRAPHICS_MODE_NOT_IN_MODESET                                        Handle        = 0xC026234A
+	ERROR_GRAPHICS_DATASET_IS_EMPTY                                           Handle        = 0x0026234B
+	ERROR_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET                                Handle        = 0x0026234C
+	ERROR_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON               Handle        = 0xC026234D
+	ERROR_GRAPHICS_INVALID_PATH_CONTENT_TYPE                                  Handle        = 0xC026234E
+	ERROR_GRAPHICS_INVALID_COPYPROTECTION_TYPE                                Handle        = 0xC026234F
+	ERROR_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS                          Handle        = 0xC0262350
+	ERROR_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED            Handle        = 0x00262351
+	ERROR_GRAPHICS_INVALID_SCANLINE_ORDERING                                  Handle        = 0xC0262352
+	ERROR_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED                               Handle        = 0xC0262353
+	ERROR_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS                           Handle        = 0xC0262354
+	ERROR_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT                                Handle        = 0xC0262355
+	ERROR_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM                             Handle        = 0xC0262356
+	ERROR_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN                          Handle        = 0xC0262357
+	ERROR_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT                  Handle        = 0xC0262358
+	ERROR_GRAPHICS_MAX_NUM_PATHS_REACHED                                      Handle        = 0xC0262359
+	ERROR_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION                         Handle        = 0xC026235A
+	ERROR_GRAPHICS_INVALID_CLIENT_TYPE                                        Handle        = 0xC026235B
+	ERROR_GRAPHICS_CLIENTVIDPN_NOT_SET                                        Handle        = 0xC026235C
+	ERROR_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED                          Handle        = 0xC0262400
+	ERROR_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED                             Handle        = 0xC0262401
+	ERROR_GRAPHICS_UNKNOWN_CHILD_STATUS                                       Handle        = 0x4026242F
+	ERROR_GRAPHICS_NOT_A_LINKED_ADAPTER                                       Handle        = 0xC0262430
+	ERROR_GRAPHICS_LEADLINK_NOT_ENUMERATED                                    Handle        = 0xC0262431
+	ERROR_GRAPHICS_CHAINLINKS_NOT_ENUMERATED                                  Handle        = 0xC0262432
+	ERROR_GRAPHICS_ADAPTER_CHAIN_NOT_READY                                    Handle        = 0xC0262433
+	ERROR_GRAPHICS_CHAINLINKS_NOT_STARTED                                     Handle        = 0xC0262434
+	ERROR_GRAPHICS_CHAINLINKS_NOT_POWERED_ON                                  Handle        = 0xC0262435
+	ERROR_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE                             Handle        = 0xC0262436
+	ERROR_GRAPHICS_LEADLINK_START_DEFERRED                                    Handle        = 0x40262437
+	ERROR_GRAPHICS_NOT_POST_DEVICE_DRIVER                                     Handle        = 0xC0262438
+	ERROR_GRAPHICS_POLLING_TOO_FREQUENTLY                                     Handle        = 0x40262439
+	ERROR_GRAPHICS_START_DEFERRED                                             Handle        = 0x4026243A
+	ERROR_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED                                Handle        = 0xC026243B
+	ERROR_GRAPHICS_DEPENDABLE_CHILD_STATUS                                    Handle        = 0x4026243C
+	ERROR_GRAPHICS_OPM_NOT_SUPPORTED                                          Handle        = 0xC0262500
+	ERROR_GRAPHICS_COPP_NOT_SUPPORTED                                         Handle        = 0xC0262501
+	ERROR_GRAPHICS_UAB_NOT_SUPPORTED                                          Handle        = 0xC0262502
+	ERROR_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS                           Handle        = 0xC0262503
+	ERROR_GRAPHICS_OPM_NO_VIDEO_OUTPUTS_EXIST                                 Handle        = 0xC0262505
+	ERROR_GRAPHICS_OPM_INTERNAL_ERROR                                         Handle        = 0xC026250B
+	ERROR_GRAPHICS_OPM_INVALID_HANDLE                                         Handle        = 0xC026250C
+	ERROR_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH                             Handle        = 0xC026250E
+	ERROR_GRAPHICS_OPM_SPANNING_MODE_ENABLED                                  Handle        = 0xC026250F
+	ERROR_GRAPHICS_OPM_THEATER_MODE_ENABLED                                   Handle        = 0xC0262510
+	ERROR_GRAPHICS_PVP_HFS_FAILED                                             Handle        = 0xC0262511
+	ERROR_GRAPHICS_OPM_INVALID_SRM                                            Handle        = 0xC0262512
+	ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP                           Handle        = 0xC0262513
+	ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP                            Handle        = 0xC0262514
+	ERROR_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA                          Handle        = 0xC0262515
+	ERROR_GRAPHICS_OPM_HDCP_SRM_NEVER_SET                                     Handle        = 0xC0262516
+	ERROR_GRAPHICS_OPM_RESOLUTION_TOO_HIGH                                    Handle        = 0xC0262517
+	ERROR_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE                       Handle        = 0xC0262518
+	ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_NO_LONGER_EXISTS                          Handle        = 0xC026251A
+	ERROR_GRAPHICS_OPM_SESSION_TYPE_CHANGE_IN_PROGRESS                        Handle        = 0xC026251B
+	ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS              Handle        = 0xC026251C
+	ERROR_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST                            Handle        = 0xC026251D
+	ERROR_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR                                  Handle        = 0xC026251E
+	ERROR_GRAPHICS_OPM_VIDEO_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS               Handle        = 0xC026251F
+	ERROR_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED                                Handle        = 0xC0262520
+	ERROR_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST                          Handle        = 0xC0262521
+	ERROR_GRAPHICS_I2C_NOT_SUPPORTED                                          Handle        = 0xC0262580
+	ERROR_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST                                  Handle        = 0xC0262581
+	ERROR_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA                                Handle        = 0xC0262582
+	ERROR_GRAPHICS_I2C_ERROR_RECEIVING_DATA                                   Handle        = 0xC0262583
+	ERROR_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED                                    Handle        = 0xC0262584
+	ERROR_GRAPHICS_DDCCI_INVALID_DATA                                         Handle        = 0xC0262585
+	ERROR_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE          Handle        = 0xC0262586
+	ERROR_GRAPHICS_MCA_INVALID_CAPABILITIES_STRING                            Handle        = 0xC0262587
+	ERROR_GRAPHICS_MCA_INTERNAL_ERROR                                         Handle        = 0xC0262588
+	ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND                              Handle        = 0xC0262589
+	ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH                               Handle        = 0xC026258A
+	ERROR_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM                             Handle        = 0xC026258B
+	ERROR_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE                            Handle        = 0xC026258C
+	ERROR_GRAPHICS_MONITOR_NO_LONGER_EXISTS                                   Handle        = 0xC026258D
+	ERROR_GRAPHICS_DDCCI_CURRENT_CURRENT_VALUE_GREATER_THAN_MAXIMUM_VALUE     Handle        = 0xC02625D8
+	ERROR_GRAPHICS_MCA_INVALID_VCP_VERSION                                    Handle        = 0xC02625D9
+	ERROR_GRAPHICS_MCA_MONITOR_VIOLATES_MCCS_SPECIFICATION                    Handle        = 0xC02625DA
+	ERROR_GRAPHICS_MCA_MCCS_VERSION_MISMATCH                                  Handle        = 0xC02625DB
+	ERROR_GRAPHICS_MCA_UNSUPPORTED_MCCS_VERSION                               Handle        = 0xC02625DC
+	ERROR_GRAPHICS_MCA_INVALID_TECHNOLOGY_TYPE_RETURNED                       Handle        = 0xC02625DE
+	ERROR_GRAPHICS_MCA_UNSUPPORTED_COLOR_TEMPERATURE                          Handle        = 0xC02625DF
+	ERROR_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED                             Handle        = 0xC02625E0
+	ERROR_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME                      Handle        = 0xC02625E1
+	ERROR_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP                     Handle        = 0xC02625E2
+	ERROR_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED                            Handle        = 0xC02625E3
+	ERROR_GRAPHICS_INVALID_POINTER                                            Handle        = 0xC02625E4
+	ERROR_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE                   Handle        = 0xC02625E5
+	ERROR_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL                                  Handle        = 0xC02625E6
+	ERROR_GRAPHICS_INTERNAL_ERROR                                             Handle        = 0xC02625E7
+	ERROR_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS                            Handle        = 0xC02605E8
+	NAP_E_INVALID_PACKET                                                      Handle        = 0x80270001
+	NAP_E_MISSING_SOH                                                         Handle        = 0x80270002
+	NAP_E_CONFLICTING_ID                                                      Handle        = 0x80270003
+	NAP_E_NO_CACHED_SOH                                                       Handle        = 0x80270004
+	NAP_E_STILL_BOUND                                                         Handle        = 0x80270005
+	NAP_E_NOT_REGISTERED                                                      Handle        = 0x80270006
+	NAP_E_NOT_INITIALIZED                                                     Handle        = 0x80270007
+	NAP_E_MISMATCHED_ID                                                       Handle        = 0x80270008
+	NAP_E_NOT_PENDING                                                         Handle        = 0x80270009
+	NAP_E_ID_NOT_FOUND                                                        Handle        = 0x8027000A
+	NAP_E_MAXSIZE_TOO_SMALL                                                   Handle        = 0x8027000B
+	NAP_E_SERVICE_NOT_RUNNING                                                 Handle        = 0x8027000C
+	NAP_S_CERT_ALREADY_PRESENT                                                Handle        = 0x0027000D
+	NAP_E_ENTITY_DISABLED                                                     Handle        = 0x8027000E
+	NAP_E_NETSH_GROUPPOLICY_ERROR                                             Handle        = 0x8027000F
+	NAP_E_TOO_MANY_CALLS                                                      Handle        = 0x80270010
+	NAP_E_SHV_CONFIG_EXISTED                                                  Handle        = 0x80270011
+	NAP_E_SHV_CONFIG_NOT_FOUND                                                Handle        = 0x80270012
+	NAP_E_SHV_TIMEOUT                                                         Handle        = 0x80270013
+	TPM_E_ERROR_MASK                                                          Handle        = 0x80280000
+	TPM_E_AUTHFAIL                                                            Handle        = 0x80280001
+	TPM_E_BADINDEX                                                            Handle        = 0x80280002
+	TPM_E_BAD_PARAMETER                                                       Handle        = 0x80280003
+	TPM_E_AUDITFAILURE                                                        Handle        = 0x80280004
+	TPM_E_CLEAR_DISABLED                                                      Handle        = 0x80280005
+	TPM_E_DEACTIVATED                                                         Handle        = 0x80280006
+	TPM_E_DISABLED                                                            Handle        = 0x80280007
+	TPM_E_DISABLED_CMD                                                        Handle        = 0x80280008
+	TPM_E_FAIL                                                                Handle        = 0x80280009
+	TPM_E_BAD_ORDINAL                                                         Handle        = 0x8028000A
+	TPM_E_INSTALL_DISABLED                                                    Handle        = 0x8028000B
+	TPM_E_INVALID_KEYHANDLE                                                   Handle        = 0x8028000C
+	TPM_E_KEYNOTFOUND                                                         Handle        = 0x8028000D
+	TPM_E_INAPPROPRIATE_ENC                                                   Handle        = 0x8028000E
+	TPM_E_MIGRATEFAIL                                                         Handle        = 0x8028000F
+	TPM_E_INVALID_PCR_INFO                                                    Handle        = 0x80280010
+	TPM_E_NOSPACE                                                             Handle        = 0x80280011
+	TPM_E_NOSRK                                                               Handle        = 0x80280012
+	TPM_E_NOTSEALED_BLOB                                                      Handle        = 0x80280013
+	TPM_E_OWNER_SET                                                           Handle        = 0x80280014
+	TPM_E_RESOURCES                                                           Handle        = 0x80280015
+	TPM_E_SHORTRANDOM                                                         Handle        = 0x80280016
+	TPM_E_SIZE                                                                Handle        = 0x80280017
+	TPM_E_WRONGPCRVAL                                                         Handle        = 0x80280018
+	TPM_E_BAD_PARAM_SIZE                                                      Handle        = 0x80280019
+	TPM_E_SHA_THREAD                                                          Handle        = 0x8028001A
+	TPM_E_SHA_ERROR                                                           Handle        = 0x8028001B
+	TPM_E_FAILEDSELFTEST                                                      Handle        = 0x8028001C
+	TPM_E_AUTH2FAIL                                                           Handle        = 0x8028001D
+	TPM_E_BADTAG                                                              Handle        = 0x8028001E
+	TPM_E_IOERROR                                                             Handle        = 0x8028001F
+	TPM_E_ENCRYPT_ERROR                                                       Handle        = 0x80280020
+	TPM_E_DECRYPT_ERROR                                                       Handle        = 0x80280021
+	TPM_E_INVALID_AUTHHANDLE                                                  Handle        = 0x80280022
+	TPM_E_NO_ENDORSEMENT                                                      Handle        = 0x80280023
+	TPM_E_INVALID_KEYUSAGE                                                    Handle        = 0x80280024
+	TPM_E_WRONG_ENTITYTYPE                                                    Handle        = 0x80280025
+	TPM_E_INVALID_POSTINIT                                                    Handle        = 0x80280026
+	TPM_E_INAPPROPRIATE_SIG                                                   Handle        = 0x80280027
+	TPM_E_BAD_KEY_PROPERTY                                                    Handle        = 0x80280028
+	TPM_E_BAD_MIGRATION                                                       Handle        = 0x80280029
+	TPM_E_BAD_SCHEME                                                          Handle        = 0x8028002A
+	TPM_E_BAD_DATASIZE                                                        Handle        = 0x8028002B
+	TPM_E_BAD_MODE                                                            Handle        = 0x8028002C
+	TPM_E_BAD_PRESENCE                                                        Handle        = 0x8028002D
+	TPM_E_BAD_VERSION                                                         Handle        = 0x8028002E
+	TPM_E_NO_WRAP_TRANSPORT                                                   Handle        = 0x8028002F
+	TPM_E_AUDITFAIL_UNSUCCESSFUL                                              Handle        = 0x80280030
+	TPM_E_AUDITFAIL_SUCCESSFUL                                                Handle        = 0x80280031
+	TPM_E_NOTRESETABLE                                                        Handle        = 0x80280032
+	TPM_E_NOTLOCAL                                                            Handle        = 0x80280033
+	TPM_E_BAD_TYPE                                                            Handle        = 0x80280034
+	TPM_E_INVALID_RESOURCE                                                    Handle        = 0x80280035
+	TPM_E_NOTFIPS                                                             Handle        = 0x80280036
+	TPM_E_INVALID_FAMILY                                                      Handle        = 0x80280037
+	TPM_E_NO_NV_PERMISSION                                                    Handle        = 0x80280038
+	TPM_E_REQUIRES_SIGN                                                       Handle        = 0x80280039
+	TPM_E_KEY_NOTSUPPORTED                                                    Handle        = 0x8028003A
+	TPM_E_AUTH_CONFLICT                                                       Handle        = 0x8028003B
+	TPM_E_AREA_LOCKED                                                         Handle        = 0x8028003C
+	TPM_E_BAD_LOCALITY                                                        Handle        = 0x8028003D
+	TPM_E_READ_ONLY                                                           Handle        = 0x8028003E
+	TPM_E_PER_NOWRITE                                                         Handle        = 0x8028003F
+	TPM_E_FAMILYCOUNT                                                         Handle        = 0x80280040
+	TPM_E_WRITE_LOCKED                                                        Handle        = 0x80280041
+	TPM_E_BAD_ATTRIBUTES                                                      Handle        = 0x80280042
+	TPM_E_INVALID_STRUCTURE                                                   Handle        = 0x80280043
+	TPM_E_KEY_OWNER_CONTROL                                                   Handle        = 0x80280044
+	TPM_E_BAD_COUNTER                                                         Handle        = 0x80280045
+	TPM_E_NOT_FULLWRITE                                                       Handle        = 0x80280046
+	TPM_E_CONTEXT_GAP                                                         Handle        = 0x80280047
+	TPM_E_MAXNVWRITES                                                         Handle        = 0x80280048
+	TPM_E_NOOPERATOR                                                          Handle        = 0x80280049
+	TPM_E_RESOURCEMISSING                                                     Handle        = 0x8028004A
+	TPM_E_DELEGATE_LOCK                                                       Handle        = 0x8028004B
+	TPM_E_DELEGATE_FAMILY                                                     Handle        = 0x8028004C
+	TPM_E_DELEGATE_ADMIN                                                      Handle        = 0x8028004D
+	TPM_E_TRANSPORT_NOTEXCLUSIVE                                              Handle        = 0x8028004E
+	TPM_E_OWNER_CONTROL                                                       Handle        = 0x8028004F
+	TPM_E_DAA_RESOURCES                                                       Handle        = 0x80280050
+	TPM_E_DAA_INPUT_DATA0                                                     Handle        = 0x80280051
+	TPM_E_DAA_INPUT_DATA1                                                     Handle        = 0x80280052
+	TPM_E_DAA_ISSUER_SETTINGS                                                 Handle        = 0x80280053
+	TPM_E_DAA_TPM_SETTINGS                                                    Handle        = 0x80280054
+	TPM_E_DAA_STAGE                                                           Handle        = 0x80280055
+	TPM_E_DAA_ISSUER_VALIDITY                                                 Handle        = 0x80280056
+	TPM_E_DAA_WRONG_W                                                         Handle        = 0x80280057
+	TPM_E_BAD_HANDLE                                                          Handle        = 0x80280058
+	TPM_E_BAD_DELEGATE                                                        Handle        = 0x80280059
+	TPM_E_BADCONTEXT                                                          Handle        = 0x8028005A
+	TPM_E_TOOMANYCONTEXTS                                                     Handle        = 0x8028005B
+	TPM_E_MA_TICKET_SIGNATURE                                                 Handle        = 0x8028005C
+	TPM_E_MA_DESTINATION                                                      Handle        = 0x8028005D
+	TPM_E_MA_SOURCE                                                           Handle        = 0x8028005E
+	TPM_E_MA_AUTHORITY                                                        Handle        = 0x8028005F
+	TPM_E_PERMANENTEK                                                         Handle        = 0x80280061
+	TPM_E_BAD_SIGNATURE                                                       Handle        = 0x80280062
+	TPM_E_NOCONTEXTSPACE                                                      Handle        = 0x80280063
+	TPM_20_E_ASYMMETRIC                                                       Handle        = 0x80280081
+	TPM_20_E_ATTRIBUTES                                                       Handle        = 0x80280082
+	TPM_20_E_HASH                                                             Handle        = 0x80280083
+	TPM_20_E_VALUE                                                            Handle        = 0x80280084
+	TPM_20_E_HIERARCHY                                                        Handle        = 0x80280085
+	TPM_20_E_KEY_SIZE                                                         Handle        = 0x80280087
+	TPM_20_E_MGF                                                              Handle        = 0x80280088
+	TPM_20_E_MODE                                                             Handle        = 0x80280089
+	TPM_20_E_TYPE                                                             Handle        = 0x8028008A
+	TPM_20_E_HANDLE                                                           Handle        = 0x8028008B
+	TPM_20_E_KDF                                                              Handle        = 0x8028008C
+	TPM_20_E_RANGE                                                            Handle        = 0x8028008D
+	TPM_20_E_AUTH_FAIL                                                        Handle        = 0x8028008E
+	TPM_20_E_NONCE                                                            Handle        = 0x8028008F
+	TPM_20_E_PP                                                               Handle        = 0x80280090
+	TPM_20_E_SCHEME                                                           Handle        = 0x80280092
+	TPM_20_E_SIZE                                                             Handle        = 0x80280095
+	TPM_20_E_SYMMETRIC                                                        Handle        = 0x80280096
+	TPM_20_E_TAG                                                              Handle        = 0x80280097
+	TPM_20_E_SELECTOR                                                         Handle        = 0x80280098
+	TPM_20_E_INSUFFICIENT                                                     Handle        = 0x8028009A
+	TPM_20_E_SIGNATURE                                                        Handle        = 0x8028009B
+	TPM_20_E_KEY                                                              Handle        = 0x8028009C
+	TPM_20_E_POLICY_FAIL                                                      Handle        = 0x8028009D
+	TPM_20_E_INTEGRITY                                                        Handle        = 0x8028009F
+	TPM_20_E_TICKET                                                           Handle        = 0x802800A0
+	TPM_20_E_RESERVED_BITS                                                    Handle        = 0x802800A1
+	TPM_20_E_BAD_AUTH                                                         Handle        = 0x802800A2
+	TPM_20_E_EXPIRED                                                          Handle        = 0x802800A3
+	TPM_20_E_POLICY_CC                                                        Handle        = 0x802800A4
+	TPM_20_E_BINDING                                                          Handle        = 0x802800A5
+	TPM_20_E_CURVE                                                            Handle        = 0x802800A6
+	TPM_20_E_ECC_POINT                                                        Handle        = 0x802800A7
+	TPM_20_E_INITIALIZE                                                       Handle        = 0x80280100
+	TPM_20_E_FAILURE                                                          Handle        = 0x80280101
+	TPM_20_E_SEQUENCE                                                         Handle        = 0x80280103
+	TPM_20_E_PRIVATE                                                          Handle        = 0x8028010B
+	TPM_20_E_HMAC                                                             Handle        = 0x80280119
+	TPM_20_E_DISABLED                                                         Handle        = 0x80280120
+	TPM_20_E_EXCLUSIVE                                                        Handle        = 0x80280121
+	TPM_20_E_ECC_CURVE                                                        Handle        = 0x80280123
+	TPM_20_E_AUTH_TYPE                                                        Handle        = 0x80280124
+	TPM_20_E_AUTH_MISSING                                                     Handle        = 0x80280125
+	TPM_20_E_POLICY                                                           Handle        = 0x80280126
+	TPM_20_E_PCR                                                              Handle        = 0x80280127
+	TPM_20_E_PCR_CHANGED                                                      Handle        = 0x80280128
+	TPM_20_E_UPGRADE                                                          Handle        = 0x8028012D
+	TPM_20_E_TOO_MANY_CONTEXTS                                                Handle        = 0x8028012E
+	TPM_20_E_AUTH_UNAVAILABLE                                                 Handle        = 0x8028012F
+	TPM_20_E_REBOOT                                                           Handle        = 0x80280130
+	TPM_20_E_UNBALANCED                                                       Handle        = 0x80280131
+	TPM_20_E_COMMAND_SIZE                                                     Handle        = 0x80280142
+	TPM_20_E_COMMAND_CODE                                                     Handle        = 0x80280143
+	TPM_20_E_AUTHSIZE                                                         Handle        = 0x80280144
+	TPM_20_E_AUTH_CONTEXT                                                     Handle        = 0x80280145
+	TPM_20_E_NV_RANGE                                                         Handle        = 0x80280146
+	TPM_20_E_NV_SIZE                                                          Handle        = 0x80280147
+	TPM_20_E_NV_LOCKED                                                        Handle        = 0x80280148
+	TPM_20_E_NV_AUTHORIZATION                                                 Handle        = 0x80280149
+	TPM_20_E_NV_UNINITIALIZED                                                 Handle        = 0x8028014A
+	TPM_20_E_NV_SPACE                                                         Handle        = 0x8028014B
+	TPM_20_E_NV_DEFINED                                                       Handle        = 0x8028014C
+	TPM_20_E_BAD_CONTEXT                                                      Handle        = 0x80280150
+	TPM_20_E_CPHASH                                                           Handle        = 0x80280151
+	TPM_20_E_PARENT                                                           Handle        = 0x80280152
+	TPM_20_E_NEEDS_TEST                                                       Handle        = 0x80280153
+	TPM_20_E_NO_RESULT                                                        Handle        = 0x80280154
+	TPM_20_E_SENSITIVE                                                        Handle        = 0x80280155
+	TPM_E_COMMAND_BLOCKED                                                     Handle        = 0x80280400
+	TPM_E_INVALID_HANDLE                                                      Handle        = 0x80280401
+	TPM_E_DUPLICATE_VHANDLE                                                   Handle        = 0x80280402
+	TPM_E_EMBEDDED_COMMAND_BLOCKED                                            Handle        = 0x80280403
+	TPM_E_EMBEDDED_COMMAND_UNSUPPORTED                                        Handle        = 0x80280404
+	TPM_E_RETRY                                                               Handle        = 0x80280800
+	TPM_E_NEEDS_SELFTEST                                                      Handle        = 0x80280801
+	TPM_E_DOING_SELFTEST                                                      Handle        = 0x80280802
+	TPM_E_DEFEND_LOCK_RUNNING                                                 Handle        = 0x80280803
+	TPM_20_E_CONTEXT_GAP                                                      Handle        = 0x80280901
+	TPM_20_E_OBJECT_MEMORY                                                    Handle        = 0x80280902
+	TPM_20_E_SESSION_MEMORY                                                   Handle        = 0x80280903
+	TPM_20_E_MEMORY                                                           Handle        = 0x80280904
+	TPM_20_E_SESSION_HANDLES                                                  Handle        = 0x80280905
+	TPM_20_E_OBJECT_HANDLES                                                   Handle        = 0x80280906
+	TPM_20_E_LOCALITY                                                         Handle        = 0x80280907
+	TPM_20_E_YIELDED                                                          Handle        = 0x80280908
+	TPM_20_E_CANCELED                                                         Handle        = 0x80280909
+	TPM_20_E_TESTING                                                          Handle        = 0x8028090A
+	TPM_20_E_NV_RATE                                                          Handle        = 0x80280920
+	TPM_20_E_LOCKOUT                                                          Handle        = 0x80280921
+	TPM_20_E_RETRY                                                            Handle        = 0x80280922
+	TPM_20_E_NV_UNAVAILABLE                                                   Handle        = 0x80280923
+	TBS_E_INTERNAL_ERROR                                                      Handle        = 0x80284001
+	TBS_E_BAD_PARAMETER                                                       Handle        = 0x80284002
+	TBS_E_INVALID_OUTPUT_POINTER                                              Handle        = 0x80284003
+	TBS_E_INVALID_CONTEXT                                                     Handle        = 0x80284004
+	TBS_E_INSUFFICIENT_BUFFER                                                 Handle        = 0x80284005
+	TBS_E_IOERROR                                                             Handle        = 0x80284006
+	TBS_E_INVALID_CONTEXT_PARAM                                               Handle        = 0x80284007
+	TBS_E_SERVICE_NOT_RUNNING                                                 Handle        = 0x80284008
+	TBS_E_TOO_MANY_TBS_CONTEXTS                                               Handle        = 0x80284009
+	TBS_E_TOO_MANY_RESOURCES                                                  Handle        = 0x8028400A
+	TBS_E_SERVICE_START_PENDING                                               Handle        = 0x8028400B
+	TBS_E_PPI_NOT_SUPPORTED                                                   Handle        = 0x8028400C
+	TBS_E_COMMAND_CANCELED                                                    Handle        = 0x8028400D
+	TBS_E_BUFFER_TOO_LARGE                                                    Handle        = 0x8028400E
+	TBS_E_TPM_NOT_FOUND                                                       Handle        = 0x8028400F
+	TBS_E_SERVICE_DISABLED                                                    Handle        = 0x80284010
+	TBS_E_NO_EVENT_LOG                                                        Handle        = 0x80284011
+	TBS_E_ACCESS_DENIED                                                       Handle        = 0x80284012
+	TBS_E_PROVISIONING_NOT_ALLOWED                                            Handle        = 0x80284013
+	TBS_E_PPI_FUNCTION_UNSUPPORTED                                            Handle        = 0x80284014
+	TBS_E_OWNERAUTH_NOT_FOUND                                                 Handle        = 0x80284015
+	TBS_E_PROVISIONING_INCOMPLETE                                             Handle        = 0x80284016
+	TPMAPI_E_INVALID_STATE                                                    Handle        = 0x80290100
+	TPMAPI_E_NOT_ENOUGH_DATA                                                  Handle        = 0x80290101
+	TPMAPI_E_TOO_MUCH_DATA                                                    Handle        = 0x80290102
+	TPMAPI_E_INVALID_OUTPUT_POINTER                                           Handle        = 0x80290103
+	TPMAPI_E_INVALID_PARAMETER                                                Handle        = 0x80290104
+	TPMAPI_E_OUT_OF_MEMORY                                                    Handle        = 0x80290105
+	TPMAPI_E_BUFFER_TOO_SMALL                                                 Handle        = 0x80290106
+	TPMAPI_E_INTERNAL_ERROR                                                   Handle        = 0x80290107
+	TPMAPI_E_ACCESS_DENIED                                                    Handle        = 0x80290108
+	TPMAPI_E_AUTHORIZATION_FAILED                                             Handle        = 0x80290109
+	TPMAPI_E_INVALID_CONTEXT_HANDLE                                           Handle        = 0x8029010A
+	TPMAPI_E_TBS_COMMUNICATION_ERROR                                          Handle        = 0x8029010B
+	TPMAPI_E_TPM_COMMAND_ERROR                                                Handle        = 0x8029010C
+	TPMAPI_E_MESSAGE_TOO_LARGE                                                Handle        = 0x8029010D
+	TPMAPI_E_INVALID_ENCODING                                                 Handle        = 0x8029010E
+	TPMAPI_E_INVALID_KEY_SIZE                                                 Handle        = 0x8029010F
+	TPMAPI_E_ENCRYPTION_FAILED                                                Handle        = 0x80290110
+	TPMAPI_E_INVALID_KEY_PARAMS                                               Handle        = 0x80290111
+	TPMAPI_E_INVALID_MIGRATION_AUTHORIZATION_BLOB                             Handle        = 0x80290112
+	TPMAPI_E_INVALID_PCR_INDEX                                                Handle        = 0x80290113
+	TPMAPI_E_INVALID_DELEGATE_BLOB                                            Handle        = 0x80290114
+	TPMAPI_E_INVALID_CONTEXT_PARAMS                                           Handle        = 0x80290115
+	TPMAPI_E_INVALID_KEY_BLOB                                                 Handle        = 0x80290116
+	TPMAPI_E_INVALID_PCR_DATA                                                 Handle        = 0x80290117
+	TPMAPI_E_INVALID_OWNER_AUTH                                               Handle        = 0x80290118
+	TPMAPI_E_FIPS_RNG_CHECK_FAILED                                            Handle        = 0x80290119
+	TPMAPI_E_EMPTY_TCG_LOG                                                    Handle        = 0x8029011A
+	TPMAPI_E_INVALID_TCG_LOG_ENTRY                                            Handle        = 0x8029011B
+	TPMAPI_E_TCG_SEPARATOR_ABSENT                                             Handle        = 0x8029011C
+	TPMAPI_E_TCG_INVALID_DIGEST_ENTRY                                         Handle        = 0x8029011D
+	TPMAPI_E_POLICY_DENIES_OPERATION                                          Handle        = 0x8029011E
+	TPMAPI_E_NV_BITS_NOT_DEFINED                                              Handle        = 0x8029011F
+	TPMAPI_E_NV_BITS_NOT_READY                                                Handle        = 0x80290120
+	TPMAPI_E_SEALING_KEY_NOT_AVAILABLE                                        Handle        = 0x80290121
+	TPMAPI_E_NO_AUTHORIZATION_CHAIN_FOUND                                     Handle        = 0x80290122
+	TPMAPI_E_SVN_COUNTER_NOT_AVAILABLE                                        Handle        = 0x80290123
+	TPMAPI_E_OWNER_AUTH_NOT_NULL                                              Handle        = 0x80290124
+	TPMAPI_E_ENDORSEMENT_AUTH_NOT_NULL                                        Handle        = 0x80290125
+	TPMAPI_E_AUTHORIZATION_REVOKED                                            Handle        = 0x80290126
+	TPMAPI_E_MALFORMED_AUTHORIZATION_KEY                                      Handle        = 0x80290127
+	TPMAPI_E_AUTHORIZING_KEY_NOT_SUPPORTED                                    Handle        = 0x80290128
+	TPMAPI_E_INVALID_AUTHORIZATION_SIGNATURE                                  Handle        = 0x80290129
+	TPMAPI_E_MALFORMED_AUTHORIZATION_POLICY                                   Handle        = 0x8029012A
+	TPMAPI_E_MALFORMED_AUTHORIZATION_OTHER                                    Handle        = 0x8029012B
+	TPMAPI_E_SEALING_KEY_CHANGED                                              Handle        = 0x8029012C
+	TBSIMP_E_BUFFER_TOO_SMALL                                                 Handle        = 0x80290200
+	TBSIMP_E_CLEANUP_FAILED                                                   Handle        = 0x80290201
+	TBSIMP_E_INVALID_CONTEXT_HANDLE                                           Handle        = 0x80290202
+	TBSIMP_E_INVALID_CONTEXT_PARAM                                            Handle        = 0x80290203
+	TBSIMP_E_TPM_ERROR                                                        Handle        = 0x80290204
+	TBSIMP_E_HASH_BAD_KEY                                                     Handle        = 0x80290205
+	TBSIMP_E_DUPLICATE_VHANDLE                                                Handle        = 0x80290206
+	TBSIMP_E_INVALID_OUTPUT_POINTER                                           Handle        = 0x80290207
+	TBSIMP_E_INVALID_PARAMETER                                                Handle        = 0x80290208
+	TBSIMP_E_RPC_INIT_FAILED                                                  Handle        = 0x80290209
+	TBSIMP_E_SCHEDULER_NOT_RUNNING                                            Handle        = 0x8029020A
+	TBSIMP_E_COMMAND_CANCELED                                                 Handle        = 0x8029020B
+	TBSIMP_E_OUT_OF_MEMORY                                                    Handle        = 0x8029020C
+	TBSIMP_E_LIST_NO_MORE_ITEMS                                               Handle        = 0x8029020D
+	TBSIMP_E_LIST_NOT_FOUND                                                   Handle        = 0x8029020E
+	TBSIMP_E_NOT_ENOUGH_SPACE                                                 Handle        = 0x8029020F
+	TBSIMP_E_NOT_ENOUGH_TPM_CONTEXTS                                          Handle        = 0x80290210
+	TBSIMP_E_COMMAND_FAILED                                                   Handle        = 0x80290211
+	TBSIMP_E_UNKNOWN_ORDINAL                                                  Handle        = 0x80290212
+	TBSIMP_E_RESOURCE_EXPIRED                                                 Handle        = 0x80290213
+	TBSIMP_E_INVALID_RESOURCE                                                 Handle        = 0x80290214
+	TBSIMP_E_NOTHING_TO_UNLOAD                                                Handle        = 0x80290215
+	TBSIMP_E_HASH_TABLE_FULL                                                  Handle        = 0x80290216
+	TBSIMP_E_TOO_MANY_TBS_CONTEXTS                                            Handle        = 0x80290217
+	TBSIMP_E_TOO_MANY_RESOURCES                                               Handle        = 0x80290218
+	TBSIMP_E_PPI_NOT_SUPPORTED                                                Handle        = 0x80290219
+	TBSIMP_E_TPM_INCOMPATIBLE                                                 Handle        = 0x8029021A
+	TBSIMP_E_NO_EVENT_LOG                                                     Handle        = 0x8029021B
+	TPM_E_PPI_ACPI_FAILURE                                                    Handle        = 0x80290300
+	TPM_E_PPI_USER_ABORT                                                      Handle        = 0x80290301
+	TPM_E_PPI_BIOS_FAILURE                                                    Handle        = 0x80290302
+	TPM_E_PPI_NOT_SUPPORTED                                                   Handle        = 0x80290303
+	TPM_E_PPI_BLOCKED_IN_BIOS                                                 Handle        = 0x80290304
+	TPM_E_PCP_ERROR_MASK                                                      Handle        = 0x80290400
+	TPM_E_PCP_DEVICE_NOT_READY                                                Handle        = 0x80290401
+	TPM_E_PCP_INVALID_HANDLE                                                  Handle        = 0x80290402
+	TPM_E_PCP_INVALID_PARAMETER                                               Handle        = 0x80290403
+	TPM_E_PCP_FLAG_NOT_SUPPORTED                                              Handle        = 0x80290404
+	TPM_E_PCP_NOT_SUPPORTED                                                   Handle        = 0x80290405
+	TPM_E_PCP_BUFFER_TOO_SMALL                                                Handle        = 0x80290406
+	TPM_E_PCP_INTERNAL_ERROR                                                  Handle        = 0x80290407
+	TPM_E_PCP_AUTHENTICATION_FAILED                                           Handle        = 0x80290408
+	TPM_E_PCP_AUTHENTICATION_IGNORED                                          Handle        = 0x80290409
+	TPM_E_PCP_POLICY_NOT_FOUND                                                Handle        = 0x8029040A
+	TPM_E_PCP_PROFILE_NOT_FOUND                                               Handle        = 0x8029040B
+	TPM_E_PCP_VALIDATION_FAILED                                               Handle        = 0x8029040C
+	TPM_E_PCP_WRONG_PARENT                                                    Handle        = 0x8029040E
+	TPM_E_KEY_NOT_LOADED                                                      Handle        = 0x8029040F
+	TPM_E_NO_KEY_CERTIFICATION                                                Handle        = 0x80290410
+	TPM_E_KEY_NOT_FINALIZED                                                   Handle        = 0x80290411
+	TPM_E_ATTESTATION_CHALLENGE_NOT_SET                                       Handle        = 0x80290412
+	TPM_E_NOT_PCR_BOUND                                                       Handle        = 0x80290413
+	TPM_E_KEY_ALREADY_FINALIZED                                               Handle        = 0x80290414
+	TPM_E_KEY_USAGE_POLICY_NOT_SUPPORTED                                      Handle        = 0x80290415
+	TPM_E_KEY_USAGE_POLICY_INVALID                                            Handle        = 0x80290416
+	TPM_E_SOFT_KEY_ERROR                                                      Handle        = 0x80290417
+	TPM_E_KEY_NOT_AUTHENTICATED                                               Handle        = 0x80290418
+	TPM_E_PCP_KEY_NOT_AIK                                                     Handle        = 0x80290419
+	TPM_E_KEY_NOT_SIGNING_KEY                                                 Handle        = 0x8029041A
+	TPM_E_LOCKED_OUT                                                          Handle        = 0x8029041B
+	TPM_E_CLAIM_TYPE_NOT_SUPPORTED                                            Handle        = 0x8029041C
+	TPM_E_VERSION_NOT_SUPPORTED                                               Handle        = 0x8029041D
+	TPM_E_BUFFER_LENGTH_MISMATCH                                              Handle        = 0x8029041E
+	TPM_E_PCP_IFX_RSA_KEY_CREATION_BLOCKED                                    Handle        = 0x8029041F
+	TPM_E_PCP_TICKET_MISSING                                                  Handle        = 0x80290420
+	TPM_E_PCP_RAW_POLICY_NOT_SUPPORTED                                        Handle        = 0x80290421
+	TPM_E_PCP_KEY_HANDLE_INVALIDATED                                          Handle        = 0x80290422
+	TPM_E_PCP_UNSUPPORTED_PSS_SALT                                            Handle        = 0x40290423
+	TPM_E_ZERO_EXHAUST_ENABLED                                                Handle        = 0x80290500
+	PLA_E_DCS_NOT_FOUND                                                       Handle        = 0x80300002
+	PLA_E_DCS_IN_USE                                                          Handle        = 0x803000AA
+	PLA_E_TOO_MANY_FOLDERS                                                    Handle        = 0x80300045
+	PLA_E_NO_MIN_DISK                                                         Handle        = 0x80300070
+	PLA_E_DCS_ALREADY_EXISTS                                                  Handle        = 0x803000B7
+	PLA_S_PROPERTY_IGNORED                                                    Handle        = 0x00300100
+	PLA_E_PROPERTY_CONFLICT                                                   Handle        = 0x80300101
+	PLA_E_DCS_SINGLETON_REQUIRED                                              Handle        = 0x80300102
+	PLA_E_CREDENTIALS_REQUIRED                                                Handle        = 0x80300103
+	PLA_E_DCS_NOT_RUNNING                                                     Handle        = 0x80300104
+	PLA_E_CONFLICT_INCL_EXCL_API                                              Handle        = 0x80300105
+	PLA_E_NETWORK_EXE_NOT_VALID                                               Handle        = 0x80300106
+	PLA_E_EXE_ALREADY_CONFIGURED                                              Handle        = 0x80300107
+	PLA_E_EXE_PATH_NOT_VALID                                                  Handle        = 0x80300108
+	PLA_E_DC_ALREADY_EXISTS                                                   Handle        = 0x80300109
+	PLA_E_DCS_START_WAIT_TIMEOUT                                              Handle        = 0x8030010A
+	PLA_E_DC_START_WAIT_TIMEOUT                                               Handle        = 0x8030010B
+	PLA_E_REPORT_WAIT_TIMEOUT                                                 Handle        = 0x8030010C
+	PLA_E_NO_DUPLICATES                                                       Handle        = 0x8030010D
+	PLA_E_EXE_FULL_PATH_REQUIRED                                              Handle        = 0x8030010E
+	PLA_E_INVALID_SESSION_NAME                                                Handle        = 0x8030010F
+	PLA_E_PLA_CHANNEL_NOT_ENABLED                                             Handle        = 0x80300110
+	PLA_E_TASKSCHED_CHANNEL_NOT_ENABLED                                       Handle        = 0x80300111
+	PLA_E_RULES_MANAGER_FAILED                                                Handle        = 0x80300112
+	PLA_E_CABAPI_FAILURE                                                      Handle        = 0x80300113
+	FVE_E_LOCKED_VOLUME                                                       Handle        = 0x80310000
+	FVE_E_NOT_ENCRYPTED                                                       Handle        = 0x80310001
+	FVE_E_NO_TPM_BIOS                                                         Handle        = 0x80310002
+	FVE_E_NO_MBR_METRIC                                                       Handle        = 0x80310003
+	FVE_E_NO_BOOTSECTOR_METRIC                                                Handle        = 0x80310004
+	FVE_E_NO_BOOTMGR_METRIC                                                   Handle        = 0x80310005
+	FVE_E_WRONG_BOOTMGR                                                       Handle        = 0x80310006
+	FVE_E_SECURE_KEY_REQUIRED                                                 Handle        = 0x80310007
+	FVE_E_NOT_ACTIVATED                                                       Handle        = 0x80310008
+	FVE_E_ACTION_NOT_ALLOWED                                                  Handle        = 0x80310009
+	FVE_E_AD_SCHEMA_NOT_INSTALLED                                             Handle        = 0x8031000A
+	FVE_E_AD_INVALID_DATATYPE                                                 Handle        = 0x8031000B
+	FVE_E_AD_INVALID_DATASIZE                                                 Handle        = 0x8031000C
+	FVE_E_AD_NO_VALUES                                                        Handle        = 0x8031000D
+	FVE_E_AD_ATTR_NOT_SET                                                     Handle        = 0x8031000E
+	FVE_E_AD_GUID_NOT_FOUND                                                   Handle        = 0x8031000F
+	FVE_E_BAD_INFORMATION                                                     Handle        = 0x80310010
+	FVE_E_TOO_SMALL                                                           Handle        = 0x80310011
+	FVE_E_SYSTEM_VOLUME                                                       Handle        = 0x80310012
+	FVE_E_FAILED_WRONG_FS                                                     Handle        = 0x80310013
+	FVE_E_BAD_PARTITION_SIZE                                                  Handle        = 0x80310014
+	FVE_E_NOT_SUPPORTED                                                       Handle        = 0x80310015
+	FVE_E_BAD_DATA                                                            Handle        = 0x80310016
+	FVE_E_VOLUME_NOT_BOUND                                                    Handle        = 0x80310017
+	FVE_E_TPM_NOT_OWNED                                                       Handle        = 0x80310018
+	FVE_E_NOT_DATA_VOLUME                                                     Handle        = 0x80310019
+	FVE_E_AD_INSUFFICIENT_BUFFER                                              Handle        = 0x8031001A
+	FVE_E_CONV_READ                                                           Handle        = 0x8031001B
+	FVE_E_CONV_WRITE                                                          Handle        = 0x8031001C
+	FVE_E_KEY_REQUIRED                                                        Handle        = 0x8031001D
+	FVE_E_CLUSTERING_NOT_SUPPORTED                                            Handle        = 0x8031001E
+	FVE_E_VOLUME_BOUND_ALREADY                                                Handle        = 0x8031001F
+	FVE_E_OS_NOT_PROTECTED                                                    Handle        = 0x80310020
+	FVE_E_PROTECTION_DISABLED                                                 Handle        = 0x80310021
+	FVE_E_RECOVERY_KEY_REQUIRED                                               Handle        = 0x80310022
+	FVE_E_FOREIGN_VOLUME                                                      Handle        = 0x80310023
+	FVE_E_OVERLAPPED_UPDATE                                                   Handle        = 0x80310024
+	FVE_E_TPM_SRK_AUTH_NOT_ZERO                                               Handle        = 0x80310025
+	FVE_E_FAILED_SECTOR_SIZE                                                  Handle        = 0x80310026
+	FVE_E_FAILED_AUTHENTICATION                                               Handle        = 0x80310027
+	FVE_E_NOT_OS_VOLUME                                                       Handle        = 0x80310028
+	FVE_E_AUTOUNLOCK_ENABLED                                                  Handle        = 0x80310029
+	FVE_E_WRONG_BOOTSECTOR                                                    Handle        = 0x8031002A
+	FVE_E_WRONG_SYSTEM_FS                                                     Handle        = 0x8031002B
+	FVE_E_POLICY_PASSWORD_REQUIRED                                            Handle        = 0x8031002C
+	FVE_E_CANNOT_SET_FVEK_ENCRYPTED                                           Handle        = 0x8031002D
+	FVE_E_CANNOT_ENCRYPT_NO_KEY                                               Handle        = 0x8031002E
+	FVE_E_BOOTABLE_CDDVD                                                      Handle        = 0x80310030
+	FVE_E_PROTECTOR_EXISTS                                                    Handle        = 0x80310031
+	FVE_E_RELATIVE_PATH                                                       Handle        = 0x80310032
+	FVE_E_PROTECTOR_NOT_FOUND                                                 Handle        = 0x80310033
+	FVE_E_INVALID_KEY_FORMAT                                                  Handle        = 0x80310034
+	FVE_E_INVALID_PASSWORD_FORMAT                                             Handle        = 0x80310035
+	FVE_E_FIPS_RNG_CHECK_FAILED                                               Handle        = 0x80310036
+	FVE_E_FIPS_PREVENTS_RECOVERY_PASSWORD                                     Handle        = 0x80310037
+	FVE_E_FIPS_PREVENTS_EXTERNAL_KEY_EXPORT                                   Handle        = 0x80310038
+	FVE_E_NOT_DECRYPTED                                                       Handle        = 0x80310039
+	FVE_E_INVALID_PROTECTOR_TYPE                                              Handle        = 0x8031003A
+	FVE_E_NO_PROTECTORS_TO_TEST                                               Handle        = 0x8031003B
+	FVE_E_KEYFILE_NOT_FOUND                                                   Handle        = 0x8031003C
+	FVE_E_KEYFILE_INVALID                                                     Handle        = 0x8031003D
+	FVE_E_KEYFILE_NO_VMK                                                      Handle        = 0x8031003E
+	FVE_E_TPM_DISABLED                                                        Handle        = 0x8031003F
+	FVE_E_NOT_ALLOWED_IN_SAFE_MODE                                            Handle        = 0x80310040
+	FVE_E_TPM_INVALID_PCR                                                     Handle        = 0x80310041
+	FVE_E_TPM_NO_VMK                                                          Handle        = 0x80310042
+	FVE_E_PIN_INVALID                                                         Handle        = 0x80310043
+	FVE_E_AUTH_INVALID_APPLICATION                                            Handle        = 0x80310044
+	FVE_E_AUTH_INVALID_CONFIG                                                 Handle        = 0x80310045
+	FVE_E_FIPS_DISABLE_PROTECTION_NOT_ALLOWED                                 Handle        = 0x80310046
+	FVE_E_FS_NOT_EXTENDED                                                     Handle        = 0x80310047
+	FVE_E_FIRMWARE_TYPE_NOT_SUPPORTED                                         Handle        = 0x80310048
+	FVE_E_NO_LICENSE                                                          Handle        = 0x80310049
+	FVE_E_NOT_ON_STACK                                                        Handle        = 0x8031004A
+	FVE_E_FS_MOUNTED                                                          Handle        = 0x8031004B
+	FVE_E_TOKEN_NOT_IMPERSONATED                                              Handle        = 0x8031004C
+	FVE_E_DRY_RUN_FAILED                                                      Handle        = 0x8031004D
+	FVE_E_REBOOT_REQUIRED                                                     Handle        = 0x8031004E
+	FVE_E_DEBUGGER_ENABLED                                                    Handle        = 0x8031004F
+	FVE_E_RAW_ACCESS                                                          Handle        = 0x80310050
+	FVE_E_RAW_BLOCKED                                                         Handle        = 0x80310051
+	FVE_E_BCD_APPLICATIONS_PATH_INCORRECT                                     Handle        = 0x80310052
+	FVE_E_NOT_ALLOWED_IN_VERSION                                              Handle        = 0x80310053
+	FVE_E_NO_AUTOUNLOCK_MASTER_KEY                                            Handle        = 0x80310054
+	FVE_E_MOR_FAILED                                                          Handle        = 0x80310055
+	FVE_E_HIDDEN_VOLUME                                                       Handle        = 0x80310056
+	FVE_E_TRANSIENT_STATE                                                     Handle        = 0x80310057
+	FVE_E_PUBKEY_NOT_ALLOWED                                                  Handle        = 0x80310058
+	FVE_E_VOLUME_HANDLE_OPEN                                                  Handle        = 0x80310059
+	FVE_E_NO_FEATURE_LICENSE                                                  Handle        = 0x8031005A
+	FVE_E_INVALID_STARTUP_OPTIONS                                             Handle        = 0x8031005B
+	FVE_E_POLICY_RECOVERY_PASSWORD_NOT_ALLOWED                                Handle        = 0x8031005C
+	FVE_E_POLICY_RECOVERY_PASSWORD_REQUIRED                                   Handle        = 0x8031005D
+	FVE_E_POLICY_RECOVERY_KEY_NOT_ALLOWED                                     Handle        = 0x8031005E
+	FVE_E_POLICY_RECOVERY_KEY_REQUIRED                                        Handle        = 0x8031005F
+	FVE_E_POLICY_STARTUP_PIN_NOT_ALLOWED                                      Handle        = 0x80310060
+	FVE_E_POLICY_STARTUP_PIN_REQUIRED                                         Handle        = 0x80310061
+	FVE_E_POLICY_STARTUP_KEY_NOT_ALLOWED                                      Handle        = 0x80310062
+	FVE_E_POLICY_STARTUP_KEY_REQUIRED                                         Handle        = 0x80310063
+	FVE_E_POLICY_STARTUP_PIN_KEY_NOT_ALLOWED                                  Handle        = 0x80310064
+	FVE_E_POLICY_STARTUP_PIN_KEY_REQUIRED                                     Handle        = 0x80310065
+	FVE_E_POLICY_STARTUP_TPM_NOT_ALLOWED                                      Handle        = 0x80310066
+	FVE_E_POLICY_STARTUP_TPM_REQUIRED                                         Handle        = 0x80310067
+	FVE_E_POLICY_INVALID_PIN_LENGTH                                           Handle        = 0x80310068
+	FVE_E_KEY_PROTECTOR_NOT_SUPPORTED                                         Handle        = 0x80310069
+	FVE_E_POLICY_PASSPHRASE_NOT_ALLOWED                                       Handle        = 0x8031006A
+	FVE_E_POLICY_PASSPHRASE_REQUIRED                                          Handle        = 0x8031006B
+	FVE_E_FIPS_PREVENTS_PASSPHRASE                                            Handle        = 0x8031006C
+	FVE_E_OS_VOLUME_PASSPHRASE_NOT_ALLOWED                                    Handle        = 0x8031006D
+	FVE_E_INVALID_BITLOCKER_OID                                               Handle        = 0x8031006E
+	FVE_E_VOLUME_TOO_SMALL                                                    Handle        = 0x8031006F
+	FVE_E_DV_NOT_SUPPORTED_ON_FS                                              Handle        = 0x80310070
+	FVE_E_DV_NOT_ALLOWED_BY_GP                                                Handle        = 0x80310071
+	FVE_E_POLICY_USER_CERTIFICATE_NOT_ALLOWED                                 Handle        = 0x80310072
+	FVE_E_POLICY_USER_CERTIFICATE_REQUIRED                                    Handle        = 0x80310073
+	FVE_E_POLICY_USER_CERT_MUST_BE_HW                                         Handle        = 0x80310074
+	FVE_E_POLICY_USER_CONFIGURE_FDV_AUTOUNLOCK_NOT_ALLOWED                    Handle        = 0x80310075
+	FVE_E_POLICY_USER_CONFIGURE_RDV_AUTOUNLOCK_NOT_ALLOWED                    Handle        = 0x80310076
+	FVE_E_POLICY_USER_CONFIGURE_RDV_NOT_ALLOWED                               Handle        = 0x80310077
+	FVE_E_POLICY_USER_ENABLE_RDV_NOT_ALLOWED                                  Handle        = 0x80310078
+	FVE_E_POLICY_USER_DISABLE_RDV_NOT_ALLOWED                                 Handle        = 0x80310079
+	FVE_E_POLICY_INVALID_PASSPHRASE_LENGTH                                    Handle        = 0x80310080
+	FVE_E_POLICY_PASSPHRASE_TOO_SIMPLE                                        Handle        = 0x80310081
+	FVE_E_RECOVERY_PARTITION                                                  Handle        = 0x80310082
+	FVE_E_POLICY_CONFLICT_FDV_RK_OFF_AUK_ON                                   Handle        = 0x80310083
+	FVE_E_POLICY_CONFLICT_RDV_RK_OFF_AUK_ON                                   Handle        = 0x80310084
+	FVE_E_NON_BITLOCKER_OID                                                   Handle        = 0x80310085
+	FVE_E_POLICY_PROHIBITS_SELFSIGNED                                         Handle        = 0x80310086
+	FVE_E_POLICY_CONFLICT_RO_AND_STARTUP_KEY_REQUIRED                         Handle        = 0x80310087
+	FVE_E_CONV_RECOVERY_FAILED                                                Handle        = 0x80310088
+	FVE_E_VIRTUALIZED_SPACE_TOO_BIG                                           Handle        = 0x80310089
+	FVE_E_POLICY_CONFLICT_OSV_RP_OFF_ADB_ON                                   Handle        = 0x80310090
+	FVE_E_POLICY_CONFLICT_FDV_RP_OFF_ADB_ON                                   Handle        = 0x80310091
+	FVE_E_POLICY_CONFLICT_RDV_RP_OFF_ADB_ON                                   Handle        = 0x80310092
+	FVE_E_NON_BITLOCKER_KU                                                    Handle        = 0x80310093
+	FVE_E_PRIVATEKEY_AUTH_FAILED                                              Handle        = 0x80310094
+	FVE_E_REMOVAL_OF_DRA_FAILED                                               Handle        = 0x80310095
+	FVE_E_OPERATION_NOT_SUPPORTED_ON_VISTA_VOLUME                             Handle        = 0x80310096
+	FVE_E_CANT_LOCK_AUTOUNLOCK_ENABLED_VOLUME                                 Handle        = 0x80310097
+	FVE_E_FIPS_HASH_KDF_NOT_ALLOWED                                           Handle        = 0x80310098
+	FVE_E_ENH_PIN_INVALID                                                     Handle        = 0x80310099
+	FVE_E_INVALID_PIN_CHARS                                                   Handle        = 0x8031009A
+	FVE_E_INVALID_DATUM_TYPE                                                  Handle        = 0x8031009B
+	FVE_E_EFI_ONLY                                                            Handle        = 0x8031009C
+	FVE_E_MULTIPLE_NKP_CERTS                                                  Handle        = 0x8031009D
+	FVE_E_REMOVAL_OF_NKP_FAILED                                               Handle        = 0x8031009E
+	FVE_E_INVALID_NKP_CERT                                                    Handle        = 0x8031009F
+	FVE_E_NO_EXISTING_PIN                                                     Handle        = 0x803100A0
+	FVE_E_PROTECTOR_CHANGE_PIN_MISMATCH                                       Handle        = 0x803100A1
+	FVE_E_PIN_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED                         Handle        = 0x803100A2
+	FVE_E_PROTECTOR_CHANGE_MAX_PIN_CHANGE_ATTEMPTS_REACHED                    Handle        = 0x803100A3
+	FVE_E_POLICY_PASSPHRASE_REQUIRES_ASCII                                    Handle        = 0x803100A4
+	FVE_E_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE                           Handle        = 0x803100A5
+	FVE_E_WIPE_NOT_ALLOWED_ON_TP_STORAGE                                      Handle        = 0x803100A6
+	FVE_E_KEY_LENGTH_NOT_SUPPORTED_BY_EDRIVE                                  Handle        = 0x803100A7
+	FVE_E_NO_EXISTING_PASSPHRASE                                              Handle        = 0x803100A8
+	FVE_E_PROTECTOR_CHANGE_PASSPHRASE_MISMATCH                                Handle        = 0x803100A9
+	FVE_E_PASSPHRASE_TOO_LONG                                                 Handle        = 0x803100AA
+	FVE_E_NO_PASSPHRASE_WITH_TPM                                              Handle        = 0x803100AB
+	FVE_E_NO_TPM_WITH_PASSPHRASE                                              Handle        = 0x803100AC
+	FVE_E_NOT_ALLOWED_ON_CSV_STACK                                            Handle        = 0x803100AD
+	FVE_E_NOT_ALLOWED_ON_CLUSTER                                              Handle        = 0x803100AE
+	FVE_E_EDRIVE_NO_FAILOVER_TO_SW                                            Handle        = 0x803100AF
+	FVE_E_EDRIVE_BAND_IN_USE                                                  Handle        = 0x803100B0
+	FVE_E_EDRIVE_DISALLOWED_BY_GP                                             Handle        = 0x803100B1
+	FVE_E_EDRIVE_INCOMPATIBLE_VOLUME                                          Handle        = 0x803100B2
+	FVE_E_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING                             Handle        = 0x803100B3
+	FVE_E_EDRIVE_DV_NOT_SUPPORTED                                             Handle        = 0x803100B4
+	FVE_E_NO_PREBOOT_KEYBOARD_DETECTED                                        Handle        = 0x803100B5
+	FVE_E_NO_PREBOOT_KEYBOARD_OR_WINRE_DETECTED                               Handle        = 0x803100B6
+	FVE_E_POLICY_REQUIRES_STARTUP_PIN_ON_TOUCH_DEVICE                         Handle        = 0x803100B7
+	FVE_E_POLICY_REQUIRES_RECOVERY_PASSWORD_ON_TOUCH_DEVICE                   Handle        = 0x803100B8
+	FVE_E_WIPE_CANCEL_NOT_APPLICABLE                                          Handle        = 0x803100B9
+	FVE_E_SECUREBOOT_DISABLED                                                 Handle        = 0x803100BA
+	FVE_E_SECUREBOOT_CONFIGURATION_INVALID                                    Handle        = 0x803100BB
+	FVE_E_EDRIVE_DRY_RUN_FAILED                                               Handle        = 0x803100BC
+	FVE_E_SHADOW_COPY_PRESENT                                                 Handle        = 0x803100BD
+	FVE_E_POLICY_INVALID_ENHANCED_BCD_SETTINGS                                Handle        = 0x803100BE
+	FVE_E_EDRIVE_INCOMPATIBLE_FIRMWARE                                        Handle        = 0x803100BF
+	FVE_E_PROTECTOR_CHANGE_MAX_PASSPHRASE_CHANGE_ATTEMPTS_REACHED             Handle        = 0x803100C0
+	FVE_E_PASSPHRASE_PROTECTOR_CHANGE_BY_STD_USER_DISALLOWED                  Handle        = 0x803100C1
+	FVE_E_LIVEID_ACCOUNT_SUSPENDED                                            Handle        = 0x803100C2
+	FVE_E_LIVEID_ACCOUNT_BLOCKED                                              Handle        = 0x803100C3
+	FVE_E_NOT_PROVISIONED_ON_ALL_VOLUMES                                      Handle        = 0x803100C4
+	FVE_E_DE_FIXED_DATA_NOT_SUPPORTED                                         Handle        = 0x803100C5
+	FVE_E_DE_HARDWARE_NOT_COMPLIANT                                           Handle        = 0x803100C6
+	FVE_E_DE_WINRE_NOT_CONFIGURED                                             Handle        = 0x803100C7
+	FVE_E_DE_PROTECTION_SUSPENDED                                             Handle        = 0x803100C8
+	FVE_E_DE_OS_VOLUME_NOT_PROTECTED                                          Handle        = 0x803100C9
+	FVE_E_DE_DEVICE_LOCKEDOUT                                                 Handle        = 0x803100CA
+	FVE_E_DE_PROTECTION_NOT_YET_ENABLED                                       Handle        = 0x803100CB
+	FVE_E_INVALID_PIN_CHARS_DETAILED                                          Handle        = 0x803100CC
+	FVE_E_DEVICE_LOCKOUT_COUNTER_UNAVAILABLE                                  Handle        = 0x803100CD
+	FVE_E_DEVICELOCKOUT_COUNTER_MISMATCH                                      Handle        = 0x803100CE
+	FVE_E_BUFFER_TOO_LARGE                                                    Handle        = 0x803100CF
+	FVE_E_NO_SUCH_CAPABILITY_ON_TARGET                                        Handle        = 0x803100D0
+	FVE_E_DE_PREVENTED_FOR_OS                                                 Handle        = 0x803100D1
+	FVE_E_DE_VOLUME_OPTED_OUT                                                 Handle        = 0x803100D2
+	FVE_E_DE_VOLUME_NOT_SUPPORTED                                             Handle        = 0x803100D3
+	FVE_E_EOW_NOT_SUPPORTED_IN_VERSION                                        Handle        = 0x803100D4
+	FVE_E_ADBACKUP_NOT_ENABLED                                                Handle        = 0x803100D5
+	FVE_E_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT                                  Handle        = 0x803100D6
+	FVE_E_NOT_DE_VOLUME                                                       Handle        = 0x803100D7
+	FVE_E_PROTECTION_CANNOT_BE_DISABLED                                       Handle        = 0x803100D8
+	FVE_E_OSV_KSR_NOT_ALLOWED                                                 Handle        = 0x803100D9
+	FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_OS_DRIVE                          Handle        = 0x803100DA
+	FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_FIXED_DRIVE                       Handle        = 0x803100DB
+	FVE_E_AD_BACKUP_REQUIRED_POLICY_NOT_SET_REMOVABLE_DRIVE                   Handle        = 0x803100DC
+	FVE_E_KEY_ROTATION_NOT_SUPPORTED                                          Handle        = 0x803100DD
+	FVE_E_EXECUTE_REQUEST_SENT_TOO_SOON                                       Handle        = 0x803100DE
+	FVE_E_KEY_ROTATION_NOT_ENABLED                                            Handle        = 0x803100DF
+	FVE_E_DEVICE_NOT_JOINED                                                   Handle        = 0x803100E0
+	FWP_E_CALLOUT_NOT_FOUND                                                   Handle        = 0x80320001
+	FWP_E_CONDITION_NOT_FOUND                                                 Handle        = 0x80320002
+	FWP_E_FILTER_NOT_FOUND                                                    Handle        = 0x80320003
+	FWP_E_LAYER_NOT_FOUND                                                     Handle        = 0x80320004
+	FWP_E_PROVIDER_NOT_FOUND                                                  Handle        = 0x80320005
+	FWP_E_PROVIDER_CONTEXT_NOT_FOUND                                          Handle        = 0x80320006
+	FWP_E_SUBLAYER_NOT_FOUND                                                  Handle        = 0x80320007
+	FWP_E_NOT_FOUND                                                           Handle        = 0x80320008
+	FWP_E_ALREADY_EXISTS                                                      Handle        = 0x80320009
+	FWP_E_IN_USE                                                              Handle        = 0x8032000A
+	FWP_E_DYNAMIC_SESSION_IN_PROGRESS                                         Handle        = 0x8032000B
+	FWP_E_WRONG_SESSION                                                       Handle        = 0x8032000C
+	FWP_E_NO_TXN_IN_PROGRESS                                                  Handle        = 0x8032000D
+	FWP_E_TXN_IN_PROGRESS                                                     Handle        = 0x8032000E
+	FWP_E_TXN_ABORTED                                                         Handle        = 0x8032000F
+	FWP_E_SESSION_ABORTED                                                     Handle        = 0x80320010
+	FWP_E_INCOMPATIBLE_TXN                                                    Handle        = 0x80320011
+	FWP_E_TIMEOUT                                                             Handle        = 0x80320012
+	FWP_E_NET_EVENTS_DISABLED                                                 Handle        = 0x80320013
+	FWP_E_INCOMPATIBLE_LAYER                                                  Handle        = 0x80320014
+	FWP_E_KM_CLIENTS_ONLY                                                     Handle        = 0x80320015
+	FWP_E_LIFETIME_MISMATCH                                                   Handle        = 0x80320016
+	FWP_E_BUILTIN_OBJECT                                                      Handle        = 0x80320017
+	FWP_E_TOO_MANY_CALLOUTS                                                   Handle        = 0x80320018
+	FWP_E_NOTIFICATION_DROPPED                                                Handle        = 0x80320019
+	FWP_E_TRAFFIC_MISMATCH                                                    Handle        = 0x8032001A
+	FWP_E_INCOMPATIBLE_SA_STATE                                               Handle        = 0x8032001B
+	FWP_E_NULL_POINTER                                                        Handle        = 0x8032001C
+	FWP_E_INVALID_ENUMERATOR                                                  Handle        = 0x8032001D
+	FWP_E_INVALID_FLAGS                                                       Handle        = 0x8032001E
+	FWP_E_INVALID_NET_MASK                                                    Handle        = 0x8032001F
+	FWP_E_INVALID_RANGE                                                       Handle        = 0x80320020
+	FWP_E_INVALID_INTERVAL                                                    Handle        = 0x80320021
+	FWP_E_ZERO_LENGTH_ARRAY                                                   Handle        = 0x80320022
+	FWP_E_NULL_DISPLAY_NAME                                                   Handle        = 0x80320023
+	FWP_E_INVALID_ACTION_TYPE                                                 Handle        = 0x80320024
+	FWP_E_INVALID_WEIGHT                                                      Handle        = 0x80320025
+	FWP_E_MATCH_TYPE_MISMATCH                                                 Handle        = 0x80320026
+	FWP_E_TYPE_MISMATCH                                                       Handle        = 0x80320027
+	FWP_E_OUT_OF_BOUNDS                                                       Handle        = 0x80320028
+	FWP_E_RESERVED                                                            Handle        = 0x80320029
+	FWP_E_DUPLICATE_CONDITION                                                 Handle        = 0x8032002A
+	FWP_E_DUPLICATE_KEYMOD                                                    Handle        = 0x8032002B
+	FWP_E_ACTION_INCOMPATIBLE_WITH_LAYER                                      Handle        = 0x8032002C
+	FWP_E_ACTION_INCOMPATIBLE_WITH_SUBLAYER                                   Handle        = 0x8032002D
+	FWP_E_CONTEXT_INCOMPATIBLE_WITH_LAYER                                     Handle        = 0x8032002E
+	FWP_E_CONTEXT_INCOMPATIBLE_WITH_CALLOUT                                   Handle        = 0x8032002F
+	FWP_E_INCOMPATIBLE_AUTH_METHOD                                            Handle        = 0x80320030
+	FWP_E_INCOMPATIBLE_DH_GROUP                                               Handle        = 0x80320031
+	FWP_E_EM_NOT_SUPPORTED                                                    Handle        = 0x80320032
+	FWP_E_NEVER_MATCH                                                         Handle        = 0x80320033
+	FWP_E_PROVIDER_CONTEXT_MISMATCH                                           Handle        = 0x80320034
+	FWP_E_INVALID_PARAMETER                                                   Handle        = 0x80320035
+	FWP_E_TOO_MANY_SUBLAYERS                                                  Handle        = 0x80320036
+	FWP_E_CALLOUT_NOTIFICATION_FAILED                                         Handle        = 0x80320037
+	FWP_E_INVALID_AUTH_TRANSFORM                                              Handle        = 0x80320038
+	FWP_E_INVALID_CIPHER_TRANSFORM                                            Handle        = 0x80320039
+	FWP_E_INCOMPATIBLE_CIPHER_TRANSFORM                                       Handle        = 0x8032003A
+	FWP_E_INVALID_TRANSFORM_COMBINATION                                       Handle        = 0x8032003B
+	FWP_E_DUPLICATE_AUTH_METHOD                                               Handle        = 0x8032003C
+	FWP_E_INVALID_TUNNEL_ENDPOINT                                             Handle        = 0x8032003D
+	FWP_E_L2_DRIVER_NOT_READY                                                 Handle        = 0x8032003E
+	FWP_E_KEY_DICTATOR_ALREADY_REGISTERED                                     Handle        = 0x8032003F
+	FWP_E_KEY_DICTATION_INVALID_KEYING_MATERIAL                               Handle        = 0x80320040
+	FWP_E_CONNECTIONS_DISABLED                                                Handle        = 0x80320041
+	FWP_E_INVALID_DNS_NAME                                                    Handle        = 0x80320042
+	FWP_E_STILL_ON                                                            Handle        = 0x80320043
+	FWP_E_IKEEXT_NOT_RUNNING                                                  Handle        = 0x80320044
+	FWP_E_DROP_NOICMP                                                         Handle        = 0x80320104
+	WS_S_ASYNC                                                                Handle        = 0x003D0000
+	WS_S_END                                                                  Handle        = 0x003D0001
+	WS_E_INVALID_FORMAT                                                       Handle        = 0x803D0000
+	WS_E_OBJECT_FAULTED                                                       Handle        = 0x803D0001
+	WS_E_NUMERIC_OVERFLOW                                                     Handle        = 0x803D0002
+	WS_E_INVALID_OPERATION                                                    Handle        = 0x803D0003
+	WS_E_OPERATION_ABORTED                                                    Handle        = 0x803D0004
+	WS_E_ENDPOINT_ACCESS_DENIED                                               Handle        = 0x803D0005
+	WS_E_OPERATION_TIMED_OUT                                                  Handle        = 0x803D0006
+	WS_E_OPERATION_ABANDONED                                                  Handle        = 0x803D0007
+	WS_E_QUOTA_EXCEEDED                                                       Handle        = 0x803D0008
+	WS_E_NO_TRANSLATION_AVAILABLE                                             Handle        = 0x803D0009
+	WS_E_SECURITY_VERIFICATION_FAILURE                                        Handle        = 0x803D000A
+	WS_E_ADDRESS_IN_USE                                                       Handle        = 0x803D000B
+	WS_E_ADDRESS_NOT_AVAILABLE                                                Handle        = 0x803D000C
+	WS_E_ENDPOINT_NOT_FOUND                                                   Handle        = 0x803D000D
+	WS_E_ENDPOINT_NOT_AVAILABLE                                               Handle        = 0x803D000E
+	WS_E_ENDPOINT_FAILURE                                                     Handle        = 0x803D000F
+	WS_E_ENDPOINT_UNREACHABLE                                                 Handle        = 0x803D0010
+	WS_E_ENDPOINT_ACTION_NOT_SUPPORTED                                        Handle        = 0x803D0011
+	WS_E_ENDPOINT_TOO_BUSY                                                    Handle        = 0x803D0012
+	WS_E_ENDPOINT_FAULT_RECEIVED                                              Handle        = 0x803D0013
+	WS_E_ENDPOINT_DISCONNECTED                                                Handle        = 0x803D0014
+	WS_E_PROXY_FAILURE                                                        Handle        = 0x803D0015
+	WS_E_PROXY_ACCESS_DENIED                                                  Handle        = 0x803D0016
+	WS_E_NOT_SUPPORTED                                                        Handle        = 0x803D0017
+	WS_E_PROXY_REQUIRES_BASIC_AUTH                                            Handle        = 0x803D0018
+	WS_E_PROXY_REQUIRES_DIGEST_AUTH                                           Handle        = 0x803D0019
+	WS_E_PROXY_REQUIRES_NTLM_AUTH                                             Handle        = 0x803D001A
+	WS_E_PROXY_REQUIRES_NEGOTIATE_AUTH                                        Handle        = 0x803D001B
+	WS_E_SERVER_REQUIRES_BASIC_AUTH                                           Handle        = 0x803D001C
+	WS_E_SERVER_REQUIRES_DIGEST_AUTH                                          Handle        = 0x803D001D
+	WS_E_SERVER_REQUIRES_NTLM_AUTH                                            Handle        = 0x803D001E
+	WS_E_SERVER_REQUIRES_NEGOTIATE_AUTH                                       Handle        = 0x803D001F
+	WS_E_INVALID_ENDPOINT_URL                                                 Handle        = 0x803D0020
+	WS_E_OTHER                                                                Handle        = 0x803D0021
+	WS_E_SECURITY_TOKEN_EXPIRED                                               Handle        = 0x803D0022
+	WS_E_SECURITY_SYSTEM_FAILURE                                              Handle        = 0x803D0023
+	ERROR_NDIS_INTERFACE_CLOSING                                              syscall.Errno = 0x80340002
+	ERROR_NDIS_BAD_VERSION                                                    syscall.Errno = 0x80340004
+	ERROR_NDIS_BAD_CHARACTERISTICS                                            syscall.Errno = 0x80340005
+	ERROR_NDIS_ADAPTER_NOT_FOUND                                              syscall.Errno = 0x80340006
+	ERROR_NDIS_OPEN_FAILED                                                    syscall.Errno = 0x80340007
+	ERROR_NDIS_DEVICE_FAILED                                                  syscall.Errno = 0x80340008
+	ERROR_NDIS_MULTICAST_FULL                                                 syscall.Errno = 0x80340009
+	ERROR_NDIS_MULTICAST_EXISTS                                               syscall.Errno = 0x8034000A
+	ERROR_NDIS_MULTICAST_NOT_FOUND                                            syscall.Errno = 0x8034000B
+	ERROR_NDIS_REQUEST_ABORTED                                                syscall.Errno = 0x8034000C
+	ERROR_NDIS_RESET_IN_PROGRESS                                              syscall.Errno = 0x8034000D
+	ERROR_NDIS_NOT_SUPPORTED                                                  syscall.Errno = 0x803400BB
+	ERROR_NDIS_INVALID_PACKET                                                 syscall.Errno = 0x8034000F
+	ERROR_NDIS_ADAPTER_NOT_READY                                              syscall.Errno = 0x80340011
+	ERROR_NDIS_INVALID_LENGTH                                                 syscall.Errno = 0x80340014
+	ERROR_NDIS_INVALID_DATA                                                   syscall.Errno = 0x80340015
+	ERROR_NDIS_BUFFER_TOO_SHORT                                               syscall.Errno = 0x80340016
+	ERROR_NDIS_INVALID_OID                                                    syscall.Errno = 0x80340017
+	ERROR_NDIS_ADAPTER_REMOVED                                                syscall.Errno = 0x80340018
+	ERROR_NDIS_UNSUPPORTED_MEDIA                                              syscall.Errno = 0x80340019
+	ERROR_NDIS_GROUP_ADDRESS_IN_USE                                           syscall.Errno = 0x8034001A
+	ERROR_NDIS_FILE_NOT_FOUND                                                 syscall.Errno = 0x8034001B
+	ERROR_NDIS_ERROR_READING_FILE                                             syscall.Errno = 0x8034001C
+	ERROR_NDIS_ALREADY_MAPPED                                                 syscall.Errno = 0x8034001D
+	ERROR_NDIS_RESOURCE_CONFLICT                                              syscall.Errno = 0x8034001E
+	ERROR_NDIS_MEDIA_DISCONNECTED                                             syscall.Errno = 0x8034001F
+	ERROR_NDIS_INVALID_ADDRESS                                                syscall.Errno = 0x80340022
+	ERROR_NDIS_INVALID_DEVICE_REQUEST                                         syscall.Errno = 0x80340010
+	ERROR_NDIS_PAUSED                                                         syscall.Errno = 0x8034002A
+	ERROR_NDIS_INTERFACE_NOT_FOUND                                            syscall.Errno = 0x8034002B
+	ERROR_NDIS_UNSUPPORTED_REVISION                                           syscall.Errno = 0x8034002C
+	ERROR_NDIS_INVALID_PORT                                                   syscall.Errno = 0x8034002D
+	ERROR_NDIS_INVALID_PORT_STATE                                             syscall.Errno = 0x8034002E
+	ERROR_NDIS_LOW_POWER_STATE                                                syscall.Errno = 0x8034002F
+	ERROR_NDIS_REINIT_REQUIRED                                                syscall.Errno = 0x80340030
+	ERROR_NDIS_NO_QUEUES                                                      syscall.Errno = 0x80340031
+	ERROR_NDIS_DOT11_AUTO_CONFIG_ENABLED                                      syscall.Errno = 0x80342000
+	ERROR_NDIS_DOT11_MEDIA_IN_USE                                             syscall.Errno = 0x80342001
+	ERROR_NDIS_DOT11_POWER_STATE_INVALID                                      syscall.Errno = 0x80342002
+	ERROR_NDIS_PM_WOL_PATTERN_LIST_FULL                                       syscall.Errno = 0x80342003
+	ERROR_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL                                  syscall.Errno = 0x80342004
+	ERROR_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE                       syscall.Errno = 0x80342005
+	ERROR_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE                          syscall.Errno = 0x80342006
+	ERROR_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED                                   syscall.Errno = 0x80342007
+	ERROR_NDIS_DOT11_AP_BAND_NOT_ALLOWED                                      syscall.Errno = 0x80342008
+	ERROR_NDIS_INDICATION_REQUIRED                                            syscall.Errno = 0x00340001
+	ERROR_NDIS_OFFLOAD_POLICY                                                 syscall.Errno = 0xC034100F
+	ERROR_NDIS_OFFLOAD_CONNECTION_REJECTED                                    syscall.Errno = 0xC0341012
+	ERROR_NDIS_OFFLOAD_PATH_REJECTED                                          syscall.Errno = 0xC0341013
+	ERROR_HV_INVALID_HYPERCALL_CODE                                           syscall.Errno = 0xC0350002
+	ERROR_HV_INVALID_HYPERCALL_INPUT                                          syscall.Errno = 0xC0350003
+	ERROR_HV_INVALID_ALIGNMENT                                                syscall.Errno = 0xC0350004
+	ERROR_HV_INVALID_PARAMETER                                                syscall.Errno = 0xC0350005
+	ERROR_HV_ACCESS_DENIED                                                    syscall.Errno = 0xC0350006
+	ERROR_HV_INVALID_PARTITION_STATE                                          syscall.Errno = 0xC0350007
+	ERROR_HV_OPERATION_DENIED                                                 syscall.Errno = 0xC0350008
+	ERROR_HV_UNKNOWN_PROPERTY                                                 syscall.Errno = 0xC0350009
+	ERROR_HV_PROPERTY_VALUE_OUT_OF_RANGE                                      syscall.Errno = 0xC035000A
+	ERROR_HV_INSUFFICIENT_MEMORY                                              syscall.Errno = 0xC035000B
+	ERROR_HV_PARTITION_TOO_DEEP                                               syscall.Errno = 0xC035000C
+	ERROR_HV_INVALID_PARTITION_ID                                             syscall.Errno = 0xC035000D
+	ERROR_HV_INVALID_VP_INDEX                                                 syscall.Errno = 0xC035000E
+	ERROR_HV_INVALID_PORT_ID                                                  syscall.Errno = 0xC0350011
+	ERROR_HV_INVALID_CONNECTION_ID                                            syscall.Errno = 0xC0350012
+	ERROR_HV_INSUFFICIENT_BUFFERS                                             syscall.Errno = 0xC0350013
+	ERROR_HV_NOT_ACKNOWLEDGED                                                 syscall.Errno = 0xC0350014
+	ERROR_HV_INVALID_VP_STATE                                                 syscall.Errno = 0xC0350015
+	ERROR_HV_ACKNOWLEDGED                                                     syscall.Errno = 0xC0350016
+	ERROR_HV_INVALID_SAVE_RESTORE_STATE                                       syscall.Errno = 0xC0350017
+	ERROR_HV_INVALID_SYNIC_STATE                                              syscall.Errno = 0xC0350018
+	ERROR_HV_OBJECT_IN_USE                                                    syscall.Errno = 0xC0350019
+	ERROR_HV_INVALID_PROXIMITY_DOMAIN_INFO                                    syscall.Errno = 0xC035001A
+	ERROR_HV_NO_DATA                                                          syscall.Errno = 0xC035001B
+	ERROR_HV_INACTIVE                                                         syscall.Errno = 0xC035001C
+	ERROR_HV_NO_RESOURCES                                                     syscall.Errno = 0xC035001D
+	ERROR_HV_FEATURE_UNAVAILABLE                                              syscall.Errno = 0xC035001E
+	ERROR_HV_INSUFFICIENT_BUFFER                                              syscall.Errno = 0xC0350033
+	ERROR_HV_INSUFFICIENT_DEVICE_DOMAINS                                      syscall.Errno = 0xC0350038
+	ERROR_HV_CPUID_FEATURE_VALIDATION                                         syscall.Errno = 0xC035003C
+	ERROR_HV_CPUID_XSAVE_FEATURE_VALIDATION                                   syscall.Errno = 0xC035003D
+	ERROR_HV_PROCESSOR_STARTUP_TIMEOUT                                        syscall.Errno = 0xC035003E
+	ERROR_HV_SMX_ENABLED                                                      syscall.Errno = 0xC035003F
+	ERROR_HV_INVALID_LP_INDEX                                                 syscall.Errno = 0xC0350041
+	ERROR_HV_INVALID_REGISTER_VALUE                                           syscall.Errno = 0xC0350050
+	ERROR_HV_INVALID_VTL_STATE                                                syscall.Errno = 0xC0350051
+	ERROR_HV_NX_NOT_DETECTED                                                  syscall.Errno = 0xC0350055
+	ERROR_HV_INVALID_DEVICE_ID                                                syscall.Errno = 0xC0350057
+	ERROR_HV_INVALID_DEVICE_STATE                                             syscall.Errno = 0xC0350058
+	ERROR_HV_PENDING_PAGE_REQUESTS                                            syscall.Errno = 0x00350059
+	ERROR_HV_PAGE_REQUEST_INVALID                                             syscall.Errno = 0xC0350060
+	ERROR_HV_INVALID_CPU_GROUP_ID                                             syscall.Errno = 0xC035006F
+	ERROR_HV_INVALID_CPU_GROUP_STATE                                          syscall.Errno = 0xC0350070
+	ERROR_HV_OPERATION_FAILED                                                 syscall.Errno = 0xC0350071
+	ERROR_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE                              syscall.Errno = 0xC0350072
+	ERROR_HV_INSUFFICIENT_ROOT_MEMORY                                         syscall.Errno = 0xC0350073
+	ERROR_HV_NOT_PRESENT                                                      syscall.Errno = 0xC0351000
+	ERROR_VID_DUPLICATE_HANDLER                                               syscall.Errno = 0xC0370001
+	ERROR_VID_TOO_MANY_HANDLERS                                               syscall.Errno = 0xC0370002
+	ERROR_VID_QUEUE_FULL                                                      syscall.Errno = 0xC0370003
+	ERROR_VID_HANDLER_NOT_PRESENT                                             syscall.Errno = 0xC0370004
+	ERROR_VID_INVALID_OBJECT_NAME                                             syscall.Errno = 0xC0370005
+	ERROR_VID_PARTITION_NAME_TOO_LONG                                         syscall.Errno = 0xC0370006
+	ERROR_VID_MESSAGE_QUEUE_NAME_TOO_LONG                                     syscall.Errno = 0xC0370007
+	ERROR_VID_PARTITION_ALREADY_EXISTS                                        syscall.Errno = 0xC0370008
+	ERROR_VID_PARTITION_DOES_NOT_EXIST                                        syscall.Errno = 0xC0370009
+	ERROR_VID_PARTITION_NAME_NOT_FOUND                                        syscall.Errno = 0xC037000A
+	ERROR_VID_MESSAGE_QUEUE_ALREADY_EXISTS                                    syscall.Errno = 0xC037000B
+	ERROR_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT                                    syscall.Errno = 0xC037000C
+	ERROR_VID_MB_STILL_REFERENCED                                             syscall.Errno = 0xC037000D
+	ERROR_VID_CHILD_GPA_PAGE_SET_CORRUPTED                                    syscall.Errno = 0xC037000E
+	ERROR_VID_INVALID_NUMA_SETTINGS                                           syscall.Errno = 0xC037000F
+	ERROR_VID_INVALID_NUMA_NODE_INDEX                                         syscall.Errno = 0xC0370010
+	ERROR_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED                           syscall.Errno = 0xC0370011
+	ERROR_VID_INVALID_MEMORY_BLOCK_HANDLE                                     syscall.Errno = 0xC0370012
+	ERROR_VID_PAGE_RANGE_OVERFLOW                                             syscall.Errno = 0xC0370013
+	ERROR_VID_INVALID_MESSAGE_QUEUE_HANDLE                                    syscall.Errno = 0xC0370014
+	ERROR_VID_INVALID_GPA_RANGE_HANDLE                                        syscall.Errno = 0xC0370015
+	ERROR_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE                              syscall.Errno = 0xC0370016
+	ERROR_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED                                syscall.Errno = 0xC0370017
+	ERROR_VID_INVALID_PPM_HANDLE                                              syscall.Errno = 0xC0370018
+	ERROR_VID_MBPS_ARE_LOCKED                                                 syscall.Errno = 0xC0370019
+	ERROR_VID_MESSAGE_QUEUE_CLOSED                                            syscall.Errno = 0xC037001A
+	ERROR_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED                                syscall.Errno = 0xC037001B
+	ERROR_VID_STOP_PENDING                                                    syscall.Errno = 0xC037001C
+	ERROR_VID_INVALID_PROCESSOR_STATE                                         syscall.Errno = 0xC037001D
+	ERROR_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT                                 syscall.Errno = 0xC037001E
+	ERROR_VID_KM_INTERFACE_ALREADY_INITIALIZED                                syscall.Errno = 0xC037001F
+	ERROR_VID_MB_PROPERTY_ALREADY_SET_RESET                                   syscall.Errno = 0xC0370020
+	ERROR_VID_MMIO_RANGE_DESTROYED                                            syscall.Errno = 0xC0370021
+	ERROR_VID_INVALID_CHILD_GPA_PAGE_SET                                      syscall.Errno = 0xC0370022
+	ERROR_VID_RESERVE_PAGE_SET_IS_BEING_USED                                  syscall.Errno = 0xC0370023
+	ERROR_VID_RESERVE_PAGE_SET_TOO_SMALL                                      syscall.Errno = 0xC0370024
+	ERROR_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE                          syscall.Errno = 0xC0370025
+	ERROR_VID_MBP_COUNT_EXCEEDED_LIMIT                                        syscall.Errno = 0xC0370026
+	ERROR_VID_SAVED_STATE_CORRUPT                                             syscall.Errno = 0xC0370027
+	ERROR_VID_SAVED_STATE_UNRECOGNIZED_ITEM                                   syscall.Errno = 0xC0370028
+	ERROR_VID_SAVED_STATE_INCOMPATIBLE                                        syscall.Errno = 0xC0370029
+	ERROR_VID_VTL_ACCESS_DENIED                                               syscall.Errno = 0xC037002A
+	ERROR_VMCOMPUTE_TERMINATED_DURING_START                                   syscall.Errno = 0xC0370100
+	ERROR_VMCOMPUTE_IMAGE_MISMATCH                                            syscall.Errno = 0xC0370101
+	ERROR_VMCOMPUTE_HYPERV_NOT_INSTALLED                                      syscall.Errno = 0xC0370102
+	ERROR_VMCOMPUTE_OPERATION_PENDING                                         syscall.Errno = 0xC0370103
+	ERROR_VMCOMPUTE_TOO_MANY_NOTIFICATIONS                                    syscall.Errno = 0xC0370104
+	ERROR_VMCOMPUTE_INVALID_STATE                                             syscall.Errno = 0xC0370105
+	ERROR_VMCOMPUTE_UNEXPECTED_EXIT                                           syscall.Errno = 0xC0370106
+	ERROR_VMCOMPUTE_TERMINATED                                                syscall.Errno = 0xC0370107
+	ERROR_VMCOMPUTE_CONNECT_FAILED                                            syscall.Errno = 0xC0370108
+	ERROR_VMCOMPUTE_TIMEOUT                                                   syscall.Errno = 0xC0370109
+	ERROR_VMCOMPUTE_CONNECTION_CLOSED                                         syscall.Errno = 0xC037010A
+	ERROR_VMCOMPUTE_UNKNOWN_MESSAGE                                           syscall.Errno = 0xC037010B
+	ERROR_VMCOMPUTE_UNSUPPORTED_PROTOCOL_VERSION                              syscall.Errno = 0xC037010C
+	ERROR_VMCOMPUTE_INVALID_JSON                                              syscall.Errno = 0xC037010D
+	ERROR_VMCOMPUTE_SYSTEM_NOT_FOUND                                          syscall.Errno = 0xC037010E
+	ERROR_VMCOMPUTE_SYSTEM_ALREADY_EXISTS                                     syscall.Errno = 0xC037010F
+	ERROR_VMCOMPUTE_SYSTEM_ALREADY_STOPPED                                    syscall.Errno = 0xC0370110
+	ERROR_VMCOMPUTE_PROTOCOL_ERROR                                            syscall.Errno = 0xC0370111
+	ERROR_VMCOMPUTE_INVALID_LAYER                                             syscall.Errno = 0xC0370112
+	ERROR_VMCOMPUTE_WINDOWS_INSIDER_REQUIRED                                  syscall.Errno = 0xC0370113
+	HCS_E_TERMINATED_DURING_START                                             Handle        = 0x80370100
+	HCS_E_IMAGE_MISMATCH                                                      Handle        = 0x80370101
+	HCS_E_HYPERV_NOT_INSTALLED                                                Handle        = 0x80370102
+	HCS_E_INVALID_STATE                                                       Handle        = 0x80370105
+	HCS_E_UNEXPECTED_EXIT                                                     Handle        = 0x80370106
+	HCS_E_TERMINATED                                                          Handle        = 0x80370107
+	HCS_E_CONNECT_FAILED                                                      Handle        = 0x80370108
+	HCS_E_CONNECTION_TIMEOUT                                                  Handle        = 0x80370109
+	HCS_E_CONNECTION_CLOSED                                                   Handle        = 0x8037010A
+	HCS_E_UNKNOWN_MESSAGE                                                     Handle        = 0x8037010B
+	HCS_E_UNSUPPORTED_PROTOCOL_VERSION                                        Handle        = 0x8037010C
+	HCS_E_INVALID_JSON                                                        Handle        = 0x8037010D
+	HCS_E_SYSTEM_NOT_FOUND                                                    Handle        = 0x8037010E
+	HCS_E_SYSTEM_ALREADY_EXISTS                                               Handle        = 0x8037010F
+	HCS_E_SYSTEM_ALREADY_STOPPED                                              Handle        = 0x80370110
+	HCS_E_PROTOCOL_ERROR                                                      Handle        = 0x80370111
+	HCS_E_INVALID_LAYER                                                       Handle        = 0x80370112
+	HCS_E_WINDOWS_INSIDER_REQUIRED                                            Handle        = 0x80370113
+	HCS_E_SERVICE_NOT_AVAILABLE                                               Handle        = 0x80370114
+	HCS_E_OPERATION_NOT_STARTED                                               Handle        = 0x80370115
+	HCS_E_OPERATION_ALREADY_STARTED                                           Handle        = 0x80370116
+	HCS_E_OPERATION_PENDING                                                   Handle        = 0x80370117
+	HCS_E_OPERATION_TIMEOUT                                                   Handle        = 0x80370118
+	HCS_E_OPERATION_SYSTEM_CALLBACK_ALREADY_SET                               Handle        = 0x80370119
+	HCS_E_OPERATION_RESULT_ALLOCATION_FAILED                                  Handle        = 0x8037011A
+	HCS_E_ACCESS_DENIED                                                       Handle        = 0x8037011B
+	HCS_E_GUEST_CRITICAL_ERROR                                                Handle        = 0x8037011C
+	ERROR_VNET_VIRTUAL_SWITCH_NAME_NOT_FOUND                                  syscall.Errno = 0xC0370200
+	ERROR_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED                               syscall.Errno = 0x80370001
+	WHV_E_UNKNOWN_CAPABILITY                                                  Handle        = 0x80370300
+	WHV_E_INSUFFICIENT_BUFFER                                                 Handle        = 0x80370301
+	WHV_E_UNKNOWN_PROPERTY                                                    Handle        = 0x80370302
+	WHV_E_UNSUPPORTED_HYPERVISOR_CONFIG                                       Handle        = 0x80370303
+	WHV_E_INVALID_PARTITION_CONFIG                                            Handle        = 0x80370304
+	WHV_E_GPA_RANGE_NOT_FOUND                                                 Handle        = 0x80370305
+	WHV_E_VP_ALREADY_EXISTS                                                   Handle        = 0x80370306
+	WHV_E_VP_DOES_NOT_EXIST                                                   Handle        = 0x80370307
+	WHV_E_INVALID_VP_STATE                                                    Handle        = 0x80370308
+	WHV_E_INVALID_VP_REGISTER_NAME                                            Handle        = 0x80370309
+	ERROR_VSMB_SAVED_STATE_FILE_NOT_FOUND                                     syscall.Errno = 0xC0370400
+	ERROR_VSMB_SAVED_STATE_CORRUPT                                            syscall.Errno = 0xC0370401
+	ERROR_VOLMGR_INCOMPLETE_REGENERATION                                      syscall.Errno = 0x80380001
+	ERROR_VOLMGR_INCOMPLETE_DISK_MIGRATION                                    syscall.Errno = 0x80380002
+	ERROR_VOLMGR_DATABASE_FULL                                                syscall.Errno = 0xC0380001
+	ERROR_VOLMGR_DISK_CONFIGURATION_CORRUPTED                                 syscall.Errno = 0xC0380002
+	ERROR_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC                               syscall.Errno = 0xC0380003
+	ERROR_VOLMGR_PACK_CONFIG_UPDATE_FAILED                                    syscall.Errno = 0xC0380004
+	ERROR_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME                              syscall.Errno = 0xC0380005
+	ERROR_VOLMGR_DISK_DUPLICATE                                               syscall.Errno = 0xC0380006
+	ERROR_VOLMGR_DISK_DYNAMIC                                                 syscall.Errno = 0xC0380007
+	ERROR_VOLMGR_DISK_ID_INVALID                                              syscall.Errno = 0xC0380008
+	ERROR_VOLMGR_DISK_INVALID                                                 syscall.Errno = 0xC0380009
+	ERROR_VOLMGR_DISK_LAST_VOTER                                              syscall.Errno = 0xC038000A
+	ERROR_VOLMGR_DISK_LAYOUT_INVALID                                          syscall.Errno = 0xC038000B
+	ERROR_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS               syscall.Errno = 0xC038000C
+	ERROR_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED                             syscall.Errno = 0xC038000D
+	ERROR_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL                             syscall.Errno = 0xC038000E
+	ERROR_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS               syscall.Errno = 0xC038000F
+	ERROR_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS                              syscall.Errno = 0xC0380010
+	ERROR_VOLMGR_DISK_MISSING                                                 syscall.Errno = 0xC0380011
+	ERROR_VOLMGR_DISK_NOT_EMPTY                                               syscall.Errno = 0xC0380012
+	ERROR_VOLMGR_DISK_NOT_ENOUGH_SPACE                                        syscall.Errno = 0xC0380013
+	ERROR_VOLMGR_DISK_REVECTORING_FAILED                                      syscall.Errno = 0xC0380014
+	ERROR_VOLMGR_DISK_SECTOR_SIZE_INVALID                                     syscall.Errno = 0xC0380015
+	ERROR_VOLMGR_DISK_SET_NOT_CONTAINED                                       syscall.Errno = 0xC0380016
+	ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS                                syscall.Errno = 0xC0380017
+	ERROR_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES                                 syscall.Errno = 0xC0380018
+	ERROR_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED                                   syscall.Errno = 0xC0380019
+	ERROR_VOLMGR_EXTENT_ALREADY_USED                                          syscall.Errno = 0xC038001A
+	ERROR_VOLMGR_EXTENT_NOT_CONTIGUOUS                                        syscall.Errno = 0xC038001B
+	ERROR_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION                                  syscall.Errno = 0xC038001C
+	ERROR_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED                                    syscall.Errno = 0xC038001D
+	ERROR_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION                                syscall.Errno = 0xC038001E
+	ERROR_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH                           syscall.Errno = 0xC038001F
+	ERROR_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED                                 syscall.Errno = 0xC0380020
+	ERROR_VOLMGR_INTERLEAVE_LENGTH_INVALID                                    syscall.Errno = 0xC0380021
+	ERROR_VOLMGR_MAXIMUM_REGISTERED_USERS                                     syscall.Errno = 0xC0380022
+	ERROR_VOLMGR_MEMBER_IN_SYNC                                               syscall.Errno = 0xC0380023
+	ERROR_VOLMGR_MEMBER_INDEX_DUPLICATE                                       syscall.Errno = 0xC0380024
+	ERROR_VOLMGR_MEMBER_INDEX_INVALID                                         syscall.Errno = 0xC0380025
+	ERROR_VOLMGR_MEMBER_MISSING                                               syscall.Errno = 0xC0380026
+	ERROR_VOLMGR_MEMBER_NOT_DETACHED                                          syscall.Errno = 0xC0380027
+	ERROR_VOLMGR_MEMBER_REGENERATING                                          syscall.Errno = 0xC0380028
+	ERROR_VOLMGR_ALL_DISKS_FAILED                                             syscall.Errno = 0xC0380029
+	ERROR_VOLMGR_NO_REGISTERED_USERS                                          syscall.Errno = 0xC038002A
+	ERROR_VOLMGR_NO_SUCH_USER                                                 syscall.Errno = 0xC038002B
+	ERROR_VOLMGR_NOTIFICATION_RESET                                           syscall.Errno = 0xC038002C
+	ERROR_VOLMGR_NUMBER_OF_MEMBERS_INVALID                                    syscall.Errno = 0xC038002D
+	ERROR_VOLMGR_NUMBER_OF_PLEXES_INVALID                                     syscall.Errno = 0xC038002E
+	ERROR_VOLMGR_PACK_DUPLICATE                                               syscall.Errno = 0xC038002F
+	ERROR_VOLMGR_PACK_ID_INVALID                                              syscall.Errno = 0xC0380030
+	ERROR_VOLMGR_PACK_INVALID                                                 syscall.Errno = 0xC0380031
+	ERROR_VOLMGR_PACK_NAME_INVALID                                            syscall.Errno = 0xC0380032
+	ERROR_VOLMGR_PACK_OFFLINE                                                 syscall.Errno = 0xC0380033
+	ERROR_VOLMGR_PACK_HAS_QUORUM                                              syscall.Errno = 0xC0380034
+	ERROR_VOLMGR_PACK_WITHOUT_QUORUM                                          syscall.Errno = 0xC0380035
+	ERROR_VOLMGR_PARTITION_STYLE_INVALID                                      syscall.Errno = 0xC0380036
+	ERROR_VOLMGR_PARTITION_UPDATE_FAILED                                      syscall.Errno = 0xC0380037
+	ERROR_VOLMGR_PLEX_IN_SYNC                                                 syscall.Errno = 0xC0380038
+	ERROR_VOLMGR_PLEX_INDEX_DUPLICATE                                         syscall.Errno = 0xC0380039
+	ERROR_VOLMGR_PLEX_INDEX_INVALID                                           syscall.Errno = 0xC038003A
+	ERROR_VOLMGR_PLEX_LAST_ACTIVE                                             syscall.Errno = 0xC038003B
+	ERROR_VOLMGR_PLEX_MISSING                                                 syscall.Errno = 0xC038003C
+	ERROR_VOLMGR_PLEX_REGENERATING                                            syscall.Errno = 0xC038003D
+	ERROR_VOLMGR_PLEX_TYPE_INVALID                                            syscall.Errno = 0xC038003E
+	ERROR_VOLMGR_PLEX_NOT_RAID5                                               syscall.Errno = 0xC038003F
+	ERROR_VOLMGR_PLEX_NOT_SIMPLE                                              syscall.Errno = 0xC0380040
+	ERROR_VOLMGR_STRUCTURE_SIZE_INVALID                                       syscall.Errno = 0xC0380041
+	ERROR_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS                               syscall.Errno = 0xC0380042
+	ERROR_VOLMGR_TRANSACTION_IN_PROGRESS                                      syscall.Errno = 0xC0380043
+	ERROR_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE                                syscall.Errno = 0xC0380044
+	ERROR_VOLMGR_VOLUME_CONTAINS_MISSING_DISK                                 syscall.Errno = 0xC0380045
+	ERROR_VOLMGR_VOLUME_ID_INVALID                                            syscall.Errno = 0xC0380046
+	ERROR_VOLMGR_VOLUME_LENGTH_INVALID                                        syscall.Errno = 0xC0380047
+	ERROR_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE                       syscall.Errno = 0xC0380048
+	ERROR_VOLMGR_VOLUME_NOT_MIRRORED                                          syscall.Errno = 0xC0380049
+	ERROR_VOLMGR_VOLUME_NOT_RETAINED                                          syscall.Errno = 0xC038004A
+	ERROR_VOLMGR_VOLUME_OFFLINE                                               syscall.Errno = 0xC038004B
+	ERROR_VOLMGR_VOLUME_RETAINED                                              syscall.Errno = 0xC038004C
+	ERROR_VOLMGR_NUMBER_OF_EXTENTS_INVALID                                    syscall.Errno = 0xC038004D
+	ERROR_VOLMGR_DIFFERENT_SECTOR_SIZE                                        syscall.Errno = 0xC038004E
+	ERROR_VOLMGR_BAD_BOOT_DISK                                                syscall.Errno = 0xC038004F
+	ERROR_VOLMGR_PACK_CONFIG_OFFLINE                                          syscall.Errno = 0xC0380050
+	ERROR_VOLMGR_PACK_CONFIG_ONLINE                                           syscall.Errno = 0xC0380051
+	ERROR_VOLMGR_NOT_PRIMARY_PACK                                             syscall.Errno = 0xC0380052
+	ERROR_VOLMGR_PACK_LOG_UPDATE_FAILED                                       syscall.Errno = 0xC0380053
+	ERROR_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID                              syscall.Errno = 0xC0380054
+	ERROR_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID                            syscall.Errno = 0xC0380055
+	ERROR_VOLMGR_VOLUME_MIRRORED                                              syscall.Errno = 0xC0380056
+	ERROR_VOLMGR_PLEX_NOT_SIMPLE_SPANNED                                      syscall.Errno = 0xC0380057
+	ERROR_VOLMGR_NO_VALID_LOG_COPIES                                          syscall.Errno = 0xC0380058
+	ERROR_VOLMGR_PRIMARY_PACK_PRESENT                                         syscall.Errno = 0xC0380059
+	ERROR_VOLMGR_NUMBER_OF_DISKS_INVALID                                      syscall.Errno = 0xC038005A
+	ERROR_VOLMGR_MIRROR_NOT_SUPPORTED                                         syscall.Errno = 0xC038005B
+	ERROR_VOLMGR_RAID5_NOT_SUPPORTED                                          syscall.Errno = 0xC038005C
+	ERROR_BCD_NOT_ALL_ENTRIES_IMPORTED                                        syscall.Errno = 0x80390001
+	ERROR_BCD_TOO_MANY_ELEMENTS                                               syscall.Errno = 0xC0390002
+	ERROR_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED                                    syscall.Errno = 0x80390003
+	ERROR_VHD_DRIVE_FOOTER_MISSING                                            syscall.Errno = 0xC03A0001
+	ERROR_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH                                  syscall.Errno = 0xC03A0002
+	ERROR_VHD_DRIVE_FOOTER_CORRUPT                                            syscall.Errno = 0xC03A0003
+	ERROR_VHD_FORMAT_UNKNOWN                                                  syscall.Errno = 0xC03A0004
+	ERROR_VHD_FORMAT_UNSUPPORTED_VERSION                                      syscall.Errno = 0xC03A0005
+	ERROR_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH                                 syscall.Errno = 0xC03A0006
+	ERROR_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION                               syscall.Errno = 0xC03A0007
+	ERROR_VHD_SPARSE_HEADER_CORRUPT                                           syscall.Errno = 0xC03A0008
+	ERROR_VHD_BLOCK_ALLOCATION_FAILURE                                        syscall.Errno = 0xC03A0009
+	ERROR_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT                                  syscall.Errno = 0xC03A000A
+	ERROR_VHD_INVALID_BLOCK_SIZE                                              syscall.Errno = 0xC03A000B
+	ERROR_VHD_BITMAP_MISMATCH                                                 syscall.Errno = 0xC03A000C
+	ERROR_VHD_PARENT_VHD_NOT_FOUND                                            syscall.Errno = 0xC03A000D
+	ERROR_VHD_CHILD_PARENT_ID_MISMATCH                                        syscall.Errno = 0xC03A000E
+	ERROR_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH                                 syscall.Errno = 0xC03A000F
+	ERROR_VHD_METADATA_READ_FAILURE                                           syscall.Errno = 0xC03A0010
+	ERROR_VHD_METADATA_WRITE_FAILURE                                          syscall.Errno = 0xC03A0011
+	ERROR_VHD_INVALID_SIZE                                                    syscall.Errno = 0xC03A0012
+	ERROR_VHD_INVALID_FILE_SIZE                                               syscall.Errno = 0xC03A0013
+	ERROR_VIRTDISK_PROVIDER_NOT_FOUND                                         syscall.Errno = 0xC03A0014
+	ERROR_VIRTDISK_NOT_VIRTUAL_DISK                                           syscall.Errno = 0xC03A0015
+	ERROR_VHD_PARENT_VHD_ACCESS_DENIED                                        syscall.Errno = 0xC03A0016
+	ERROR_VHD_CHILD_PARENT_SIZE_MISMATCH                                      syscall.Errno = 0xC03A0017
+	ERROR_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED                               syscall.Errno = 0xC03A0018
+	ERROR_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT                              syscall.Errno = 0xC03A0019
+	ERROR_VIRTUAL_DISK_LIMITATION                                             syscall.Errno = 0xC03A001A
+	ERROR_VHD_INVALID_TYPE                                                    syscall.Errno = 0xC03A001B
+	ERROR_VHD_INVALID_STATE                                                   syscall.Errno = 0xC03A001C
+	ERROR_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE                               syscall.Errno = 0xC03A001D
+	ERROR_VIRTDISK_DISK_ALREADY_OWNED                                         syscall.Errno = 0xC03A001E
+	ERROR_VIRTDISK_DISK_ONLINE_AND_WRITABLE                                   syscall.Errno = 0xC03A001F
+	ERROR_CTLOG_TRACKING_NOT_INITIALIZED                                      syscall.Errno = 0xC03A0020
+	ERROR_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE                                 syscall.Errno = 0xC03A0021
+	ERROR_CTLOG_VHD_CHANGED_OFFLINE                                           syscall.Errno = 0xC03A0022
+	ERROR_CTLOG_INVALID_TRACKING_STATE                                        syscall.Errno = 0xC03A0023
+	ERROR_CTLOG_INCONSISTENT_TRACKING_FILE                                    syscall.Errno = 0xC03A0024
+	ERROR_VHD_RESIZE_WOULD_TRUNCATE_DATA                                      syscall.Errno = 0xC03A0025
+	ERROR_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE                          syscall.Errno = 0xC03A0026
+	ERROR_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE                        syscall.Errno = 0xC03A0027
+	ERROR_VHD_METADATA_FULL                                                   syscall.Errno = 0xC03A0028
+	ERROR_VHD_INVALID_CHANGE_TRACKING_ID                                      syscall.Errno = 0xC03A0029
+	ERROR_VHD_CHANGE_TRACKING_DISABLED                                        syscall.Errno = 0xC03A002A
+	ERROR_VHD_MISSING_CHANGE_TRACKING_INFORMATION                             syscall.Errno = 0xC03A0030
+	ERROR_QUERY_STORAGE_ERROR                                                 syscall.Errno = 0x803A0001
+	HCN_E_NETWORK_NOT_FOUND                                                   Handle        = 0x803B0001
+	HCN_E_ENDPOINT_NOT_FOUND                                                  Handle        = 0x803B0002
+	HCN_E_LAYER_NOT_FOUND                                                     Handle        = 0x803B0003
+	HCN_E_SWITCH_NOT_FOUND                                                    Handle        = 0x803B0004
+	HCN_E_SUBNET_NOT_FOUND                                                    Handle        = 0x803B0005
+	HCN_E_ADAPTER_NOT_FOUND                                                   Handle        = 0x803B0006
+	HCN_E_PORT_NOT_FOUND                                                      Handle        = 0x803B0007
+	HCN_E_POLICY_NOT_FOUND                                                    Handle        = 0x803B0008
+	HCN_E_VFP_PORTSETTING_NOT_FOUND                                           Handle        = 0x803B0009
+	HCN_E_INVALID_NETWORK                                                     Handle        = 0x803B000A
+	HCN_E_INVALID_NETWORK_TYPE                                                Handle        = 0x803B000B
+	HCN_E_INVALID_ENDPOINT                                                    Handle        = 0x803B000C
+	HCN_E_INVALID_POLICY                                                      Handle        = 0x803B000D
+	HCN_E_INVALID_POLICY_TYPE                                                 Handle        = 0x803B000E
+	HCN_E_INVALID_REMOTE_ENDPOINT_OPERATION                                   Handle        = 0x803B000F
+	HCN_E_NETWORK_ALREADY_EXISTS                                              Handle        = 0x803B0010
+	HCN_E_LAYER_ALREADY_EXISTS                                                Handle        = 0x803B0011
+	HCN_E_POLICY_ALREADY_EXISTS                                               Handle        = 0x803B0012
+	HCN_E_PORT_ALREADY_EXISTS                                                 Handle        = 0x803B0013
+	HCN_E_ENDPOINT_ALREADY_ATTACHED                                           Handle        = 0x803B0014
+	HCN_E_REQUEST_UNSUPPORTED                                                 Handle        = 0x803B0015
+	HCN_E_MAPPING_NOT_SUPPORTED                                               Handle        = 0x803B0016
+	HCN_E_DEGRADED_OPERATION                                                  Handle        = 0x803B0017
+	HCN_E_SHARED_SWITCH_MODIFICATION                                          Handle        = 0x803B0018
+	HCN_E_GUID_CONVERSION_FAILURE                                             Handle        = 0x803B0019
+	HCN_E_REGKEY_FAILURE                                                      Handle        = 0x803B001A
+	HCN_E_INVALID_JSON                                                        Handle        = 0x803B001B
+	HCN_E_INVALID_JSON_REFERENCE                                              Handle        = 0x803B001C
+	HCN_E_ENDPOINT_SHARING_DISABLED                                           Handle        = 0x803B001D
+	HCN_E_INVALID_IP                                                          Handle        = 0x803B001E
+	HCN_E_SWITCH_EXTENSION_NOT_FOUND                                          Handle        = 0x803B001F
+	HCN_E_MANAGER_STOPPED                                                     Handle        = 0x803B0020
+	GCN_E_MODULE_NOT_FOUND                                                    Handle        = 0x803B0021
+	GCN_E_NO_REQUEST_HANDLERS                                                 Handle        = 0x803B0022
+	GCN_E_REQUEST_UNSUPPORTED                                                 Handle        = 0x803B0023
+	GCN_E_RUNTIMEKEYS_FAILED                                                  Handle        = 0x803B0024
+	GCN_E_NETADAPTER_TIMEOUT                                                  Handle        = 0x803B0025
+	GCN_E_NETADAPTER_NOT_FOUND                                                Handle        = 0x803B0026
+	GCN_E_NETCOMPARTMENT_NOT_FOUND                                            Handle        = 0x803B0027
+	GCN_E_NETINTERFACE_NOT_FOUND                                              Handle        = 0x803B0028
+	GCN_E_DEFAULTNAMESPACE_EXISTS                                             Handle        = 0x803B0029
+	HCN_E_ICS_DISABLED                                                        Handle        = 0x803B002A
+	HCN_E_ENDPOINT_NAMESPACE_ALREADY_EXISTS                                   Handle        = 0x803B002B
+	HCN_E_ENTITY_HAS_REFERENCES                                               Handle        = 0x803B002C
+	HCN_E_INVALID_INTERNAL_PORT                                               Handle        = 0x803B002D
+	HCN_E_NAMESPACE_ATTACH_FAILED                                             Handle        = 0x803B002E
+	HCN_E_ADDR_INVALID_OR_RESERVED                                            Handle        = 0x803B002F
+	SDIAG_E_CANCELLED                                                         syscall.Errno = 0x803C0100
+	SDIAG_E_SCRIPT                                                            syscall.Errno = 0x803C0101
+	SDIAG_E_POWERSHELL                                                        syscall.Errno = 0x803C0102
+	SDIAG_E_MANAGEDHOST                                                       syscall.Errno = 0x803C0103
+	SDIAG_E_NOVERIFIER                                                        syscall.Errno = 0x803C0104
+	SDIAG_S_CANNOTRUN                                                         syscall.Errno = 0x003C0105
+	SDIAG_E_DISABLED                                                          syscall.Errno = 0x803C0106
+	SDIAG_E_TRUST                                                             syscall.Errno = 0x803C0107
+	SDIAG_E_CANNOTRUN                                                         syscall.Errno = 0x803C0108
+	SDIAG_E_VERSION                                                           syscall.Errno = 0x803C0109
+	SDIAG_E_RESOURCE                                                          syscall.Errno = 0x803C010A
+	SDIAG_E_ROOTCAUSE                                                         syscall.Errno = 0x803C010B
+	WPN_E_CHANNEL_CLOSED                                                      Handle        = 0x803E0100
+	WPN_E_CHANNEL_REQUEST_NOT_COMPLETE                                        Handle        = 0x803E0101
+	WPN_E_INVALID_APP                                                         Handle        = 0x803E0102
+	WPN_E_OUTSTANDING_CHANNEL_REQUEST                                         Handle        = 0x803E0103
+	WPN_E_DUPLICATE_CHANNEL                                                   Handle        = 0x803E0104
+	WPN_E_PLATFORM_UNAVAILABLE                                                Handle        = 0x803E0105
+	WPN_E_NOTIFICATION_POSTED                                                 Handle        = 0x803E0106
+	WPN_E_NOTIFICATION_HIDDEN                                                 Handle        = 0x803E0107
+	WPN_E_NOTIFICATION_NOT_POSTED                                             Handle        = 0x803E0108
+	WPN_E_CLOUD_DISABLED                                                      Handle        = 0x803E0109
+	WPN_E_CLOUD_INCAPABLE                                                     Handle        = 0x803E0110
+	WPN_E_CLOUD_AUTH_UNAVAILABLE                                              Handle        = 0x803E011A
+	WPN_E_CLOUD_SERVICE_UNAVAILABLE                                           Handle        = 0x803E011B
+	WPN_E_FAILED_LOCK_SCREEN_UPDATE_INTIALIZATION                             Handle        = 0x803E011C
+	WPN_E_NOTIFICATION_DISABLED                                               Handle        = 0x803E0111
+	WPN_E_NOTIFICATION_INCAPABLE                                              Handle        = 0x803E0112
+	WPN_E_INTERNET_INCAPABLE                                                  Handle        = 0x803E0113
+	WPN_E_NOTIFICATION_TYPE_DISABLED                                          Handle        = 0x803E0114
+	WPN_E_NOTIFICATION_SIZE                                                   Handle        = 0x803E0115
+	WPN_E_TAG_SIZE                                                            Handle        = 0x803E0116
+	WPN_E_ACCESS_DENIED                                                       Handle        = 0x803E0117
+	WPN_E_DUPLICATE_REGISTRATION                                              Handle        = 0x803E0118
+	WPN_E_PUSH_NOTIFICATION_INCAPABLE                                         Handle        = 0x803E0119
+	WPN_E_DEV_ID_SIZE                                                         Handle        = 0x803E0120
+	WPN_E_TAG_ALPHANUMERIC                                                    Handle        = 0x803E012A
+	WPN_E_INVALID_HTTP_STATUS_CODE                                            Handle        = 0x803E012B
+	WPN_E_OUT_OF_SESSION                                                      Handle        = 0x803E0200
+	WPN_E_POWER_SAVE                                                          Handle        = 0x803E0201
+	WPN_E_IMAGE_NOT_FOUND_IN_CACHE                                            Handle        = 0x803E0202
+	WPN_E_ALL_URL_NOT_COMPLETED                                               Handle        = 0x803E0203
+	WPN_E_INVALID_CLOUD_IMAGE                                                 Handle        = 0x803E0204
+	WPN_E_NOTIFICATION_ID_MATCHED                                             Handle        = 0x803E0205
+	WPN_E_CALLBACK_ALREADY_REGISTERED                                         Handle        = 0x803E0206
+	WPN_E_TOAST_NOTIFICATION_DROPPED                                          Handle        = 0x803E0207
+	WPN_E_STORAGE_LOCKED                                                      Handle        = 0x803E0208
+	WPN_E_GROUP_SIZE                                                          Handle        = 0x803E0209
+	WPN_E_GROUP_ALPHANUMERIC                                                  Handle        = 0x803E020A
+	WPN_E_CLOUD_DISABLED_FOR_APP                                              Handle        = 0x803E020B
+	E_MBN_CONTEXT_NOT_ACTIVATED                                               Handle        = 0x80548201
+	E_MBN_BAD_SIM                                                             Handle        = 0x80548202
+	E_MBN_DATA_CLASS_NOT_AVAILABLE                                            Handle        = 0x80548203
+	E_MBN_INVALID_ACCESS_STRING                                               Handle        = 0x80548204
+	E_MBN_MAX_ACTIVATED_CONTEXTS                                              Handle        = 0x80548205
+	E_MBN_PACKET_SVC_DETACHED                                                 Handle        = 0x80548206
+	E_MBN_PROVIDER_NOT_VISIBLE                                                Handle        = 0x80548207
+	E_MBN_RADIO_POWER_OFF                                                     Handle        = 0x80548208
+	E_MBN_SERVICE_NOT_ACTIVATED                                               Handle        = 0x80548209
+	E_MBN_SIM_NOT_INSERTED                                                    Handle        = 0x8054820A
+	E_MBN_VOICE_CALL_IN_PROGRESS                                              Handle        = 0x8054820B
+	E_MBN_INVALID_CACHE                                                       Handle        = 0x8054820C
+	E_MBN_NOT_REGISTERED                                                      Handle        = 0x8054820D
+	E_MBN_PROVIDERS_NOT_FOUND                                                 Handle        = 0x8054820E
+	E_MBN_PIN_NOT_SUPPORTED                                                   Handle        = 0x8054820F
+	E_MBN_PIN_REQUIRED                                                        Handle        = 0x80548210
+	E_MBN_PIN_DISABLED                                                        Handle        = 0x80548211
+	E_MBN_FAILURE                                                             Handle        = 0x80548212
+	E_MBN_INVALID_PROFILE                                                     Handle        = 0x80548218
+	E_MBN_DEFAULT_PROFILE_EXIST                                               Handle        = 0x80548219
+	E_MBN_SMS_ENCODING_NOT_SUPPORTED                                          Handle        = 0x80548220
+	E_MBN_SMS_FILTER_NOT_SUPPORTED                                            Handle        = 0x80548221
+	E_MBN_SMS_INVALID_MEMORY_INDEX                                            Handle        = 0x80548222
+	E_MBN_SMS_LANG_NOT_SUPPORTED                                              Handle        = 0x80548223
+	E_MBN_SMS_MEMORY_FAILURE                                                  Handle        = 0x80548224
+	E_MBN_SMS_NETWORK_TIMEOUT                                                 Handle        = 0x80548225
+	E_MBN_SMS_UNKNOWN_SMSC_ADDRESS                                            Handle        = 0x80548226
+	E_MBN_SMS_FORMAT_NOT_SUPPORTED                                            Handle        = 0x80548227
+	E_MBN_SMS_OPERATION_NOT_ALLOWED                                           Handle        = 0x80548228
+	E_MBN_SMS_MEMORY_FULL                                                     Handle        = 0x80548229
+	PEER_E_IPV6_NOT_INSTALLED                                                 Handle        = 0x80630001
+	PEER_E_NOT_INITIALIZED                                                    Handle        = 0x80630002
+	PEER_E_CANNOT_START_SERVICE                                               Handle        = 0x80630003
+	PEER_E_NOT_LICENSED                                                       Handle        = 0x80630004
+	PEER_E_INVALID_GRAPH                                                      Handle        = 0x80630010
+	PEER_E_DBNAME_CHANGED                                                     Handle        = 0x80630011
+	PEER_E_DUPLICATE_GRAPH                                                    Handle        = 0x80630012
+	PEER_E_GRAPH_NOT_READY                                                    Handle        = 0x80630013
+	PEER_E_GRAPH_SHUTTING_DOWN                                                Handle        = 0x80630014
+	PEER_E_GRAPH_IN_USE                                                       Handle        = 0x80630015
+	PEER_E_INVALID_DATABASE                                                   Handle        = 0x80630016
+	PEER_E_TOO_MANY_ATTRIBUTES                                                Handle        = 0x80630017
+	PEER_E_CONNECTION_NOT_FOUND                                               Handle        = 0x80630103
+	PEER_E_CONNECT_SELF                                                       Handle        = 0x80630106
+	PEER_E_ALREADY_LISTENING                                                  Handle        = 0x80630107
+	PEER_E_NODE_NOT_FOUND                                                     Handle        = 0x80630108
+	PEER_E_CONNECTION_FAILED                                                  Handle        = 0x80630109
+	PEER_E_CONNECTION_NOT_AUTHENTICATED                                       Handle        = 0x8063010A
+	PEER_E_CONNECTION_REFUSED                                                 Handle        = 0x8063010B
+	PEER_E_CLASSIFIER_TOO_LONG                                                Handle        = 0x80630201
+	PEER_E_TOO_MANY_IDENTITIES                                                Handle        = 0x80630202
+	PEER_E_NO_KEY_ACCESS                                                      Handle        = 0x80630203
+	PEER_E_GROUPS_EXIST                                                       Handle        = 0x80630204
+	PEER_E_RECORD_NOT_FOUND                                                   Handle        = 0x80630301
+	PEER_E_DATABASE_ACCESSDENIED                                              Handle        = 0x80630302
+	PEER_E_DBINITIALIZATION_FAILED                                            Handle        = 0x80630303
+	PEER_E_MAX_RECORD_SIZE_EXCEEDED                                           Handle        = 0x80630304
+	PEER_E_DATABASE_ALREADY_PRESENT                                           Handle        = 0x80630305
+	PEER_E_DATABASE_NOT_PRESENT                                               Handle        = 0x80630306
+	PEER_E_IDENTITY_NOT_FOUND                                                 Handle        = 0x80630401
+	PEER_E_EVENT_HANDLE_NOT_FOUND                                             Handle        = 0x80630501
+	PEER_E_INVALID_SEARCH                                                     Handle        = 0x80630601
+	PEER_E_INVALID_ATTRIBUTES                                                 Handle        = 0x80630602
+	PEER_E_INVITATION_NOT_TRUSTED                                             Handle        = 0x80630701
+	PEER_E_CHAIN_TOO_LONG                                                     Handle        = 0x80630703
+	PEER_E_INVALID_TIME_PERIOD                                                Handle        = 0x80630705
+	PEER_E_CIRCULAR_CHAIN_DETECTED                                            Handle        = 0x80630706
+	PEER_E_CERT_STORE_CORRUPTED                                               Handle        = 0x80630801
+	PEER_E_NO_CLOUD                                                           Handle        = 0x80631001
+	PEER_E_CLOUD_NAME_AMBIGUOUS                                               Handle        = 0x80631005
+	PEER_E_INVALID_RECORD                                                     Handle        = 0x80632010
+	PEER_E_NOT_AUTHORIZED                                                     Handle        = 0x80632020
+	PEER_E_PASSWORD_DOES_NOT_MEET_POLICY                                      Handle        = 0x80632021
+	PEER_E_DEFERRED_VALIDATION                                                Handle        = 0x80632030
+	PEER_E_INVALID_GROUP_PROPERTIES                                           Handle        = 0x80632040
+	PEER_E_INVALID_PEER_NAME                                                  Handle        = 0x80632050
+	PEER_E_INVALID_CLASSIFIER                                                 Handle        = 0x80632060
+	PEER_E_INVALID_FRIENDLY_NAME                                              Handle        = 0x80632070
+	PEER_E_INVALID_ROLE_PROPERTY                                              Handle        = 0x80632071
+	PEER_E_INVALID_CLASSIFIER_PROPERTY                                        Handle        = 0x80632072
+	PEER_E_INVALID_RECORD_EXPIRATION                                          Handle        = 0x80632080
+	PEER_E_INVALID_CREDENTIAL_INFO                                            Handle        = 0x80632081
+	PEER_E_INVALID_CREDENTIAL                                                 Handle        = 0x80632082
+	PEER_E_INVALID_RECORD_SIZE                                                Handle        = 0x80632083
+	PEER_E_UNSUPPORTED_VERSION                                                Handle        = 0x80632090
+	PEER_E_GROUP_NOT_READY                                                    Handle        = 0x80632091
+	PEER_E_GROUP_IN_USE                                                       Handle        = 0x80632092
+	PEER_E_INVALID_GROUP                                                      Handle        = 0x80632093
+	PEER_E_NO_MEMBERS_FOUND                                                   Handle        = 0x80632094
+	PEER_E_NO_MEMBER_CONNECTIONS                                              Handle        = 0x80632095
+	PEER_E_UNABLE_TO_LISTEN                                                   Handle        = 0x80632096
+	PEER_E_IDENTITY_DELETED                                                   Handle        = 0x806320A0
+	PEER_E_SERVICE_NOT_AVAILABLE                                              Handle        = 0x806320A1
+	PEER_E_CONTACT_NOT_FOUND                                                  Handle        = 0x80636001
+	PEER_S_GRAPH_DATA_CREATED                                                 Handle        = 0x00630001
+	PEER_S_NO_EVENT_DATA                                                      Handle        = 0x00630002
+	PEER_S_ALREADY_CONNECTED                                                  Handle        = 0x00632000
+	PEER_S_SUBSCRIPTION_EXISTS                                                Handle        = 0x00636000
+	PEER_S_NO_CONNECTIVITY                                                    Handle        = 0x00630005
+	PEER_S_ALREADY_A_MEMBER                                                   Handle        = 0x00630006
+	PEER_E_CANNOT_CONVERT_PEER_NAME                                           Handle        = 0x80634001
+	PEER_E_INVALID_PEER_HOST_NAME                                             Handle        = 0x80634002
+	PEER_E_NO_MORE                                                            Handle        = 0x80634003
+	PEER_E_PNRP_DUPLICATE_PEER_NAME                                           Handle        = 0x80634005
+	PEER_E_INVITE_CANCELLED                                                   Handle        = 0x80637000
+	PEER_E_INVITE_RESPONSE_NOT_AVAILABLE                                      Handle        = 0x80637001
+	PEER_E_NOT_SIGNED_IN                                                      Handle        = 0x80637003
+	PEER_E_PRIVACY_DECLINED                                                   Handle        = 0x80637004
+	PEER_E_TIMEOUT                                                            Handle        = 0x80637005
+	PEER_E_INVALID_ADDRESS                                                    Handle        = 0x80637007
+	PEER_E_FW_EXCEPTION_DISABLED                                              Handle        = 0x80637008
+	PEER_E_FW_BLOCKED_BY_POLICY                                               Handle        = 0x80637009
+	PEER_E_FW_BLOCKED_BY_SHIELDS_UP                                           Handle        = 0x8063700A
+	PEER_E_FW_DECLINED                                                        Handle        = 0x8063700B
+	UI_E_CREATE_FAILED                                                        Handle        = 0x802A0001
+	UI_E_SHUTDOWN_CALLED                                                      Handle        = 0x802A0002
+	UI_E_ILLEGAL_REENTRANCY                                                   Handle        = 0x802A0003
+	UI_E_OBJECT_SEALED                                                        Handle        = 0x802A0004
+	UI_E_VALUE_NOT_SET                                                        Handle        = 0x802A0005
+	UI_E_VALUE_NOT_DETERMINED                                                 Handle        = 0x802A0006
+	UI_E_INVALID_OUTPUT                                                       Handle        = 0x802A0007
+	UI_E_BOOLEAN_EXPECTED                                                     Handle        = 0x802A0008
+	UI_E_DIFFERENT_OWNER                                                      Handle        = 0x802A0009
+	UI_E_AMBIGUOUS_MATCH                                                      Handle        = 0x802A000A
+	UI_E_FP_OVERFLOW                                                          Handle        = 0x802A000B
+	UI_E_WRONG_THREAD                                                         Handle        = 0x802A000C
+	UI_E_STORYBOARD_ACTIVE                                                    Handle        = 0x802A0101
+	UI_E_STORYBOARD_NOT_PLAYING                                               Handle        = 0x802A0102
+	UI_E_START_KEYFRAME_AFTER_END                                             Handle        = 0x802A0103
+	UI_E_END_KEYFRAME_NOT_DETERMINED                                          Handle        = 0x802A0104
+	UI_E_LOOPS_OVERLAP                                                        Handle        = 0x802A0105
+	UI_E_TRANSITION_ALREADY_USED                                              Handle        = 0x802A0106
+	UI_E_TRANSITION_NOT_IN_STORYBOARD                                         Handle        = 0x802A0107
+	UI_E_TRANSITION_ECLIPSED                                                  Handle        = 0x802A0108
+	UI_E_TIME_BEFORE_LAST_UPDATE                                              Handle        = 0x802A0109
+	UI_E_TIMER_CLIENT_ALREADY_CONNECTED                                       Handle        = 0x802A010A
+	UI_E_INVALID_DIMENSION                                                    Handle        = 0x802A010B
+	UI_E_PRIMITIVE_OUT_OF_BOUNDS                                              Handle        = 0x802A010C
+	UI_E_WINDOW_CLOSED                                                        Handle        = 0x802A0201
+	E_BLUETOOTH_ATT_INVALID_HANDLE                                            Handle        = 0x80650001
+	E_BLUETOOTH_ATT_READ_NOT_PERMITTED                                        Handle        = 0x80650002
+	E_BLUETOOTH_ATT_WRITE_NOT_PERMITTED                                       Handle        = 0x80650003
+	E_BLUETOOTH_ATT_INVALID_PDU                                               Handle        = 0x80650004
+	E_BLUETOOTH_ATT_INSUFFICIENT_AUTHENTICATION                               Handle        = 0x80650005
+	E_BLUETOOTH_ATT_REQUEST_NOT_SUPPORTED                                     Handle        = 0x80650006
+	E_BLUETOOTH_ATT_INVALID_OFFSET                                            Handle        = 0x80650007
+	E_BLUETOOTH_ATT_INSUFFICIENT_AUTHORIZATION                                Handle        = 0x80650008
+	E_BLUETOOTH_ATT_PREPARE_QUEUE_FULL                                        Handle        = 0x80650009
+	E_BLUETOOTH_ATT_ATTRIBUTE_NOT_FOUND                                       Handle        = 0x8065000A
+	E_BLUETOOTH_ATT_ATTRIBUTE_NOT_LONG                                        Handle        = 0x8065000B
+	E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE                          Handle        = 0x8065000C
+	E_BLUETOOTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH                            Handle        = 0x8065000D
+	E_BLUETOOTH_ATT_UNLIKELY                                                  Handle        = 0x8065000E
+	E_BLUETOOTH_ATT_INSUFFICIENT_ENCRYPTION                                   Handle        = 0x8065000F
+	E_BLUETOOTH_ATT_UNSUPPORTED_GROUP_TYPE                                    Handle        = 0x80650010
+	E_BLUETOOTH_ATT_INSUFFICIENT_RESOURCES                                    Handle        = 0x80650011
+	E_BLUETOOTH_ATT_UNKNOWN_ERROR                                             Handle        = 0x80651000
+	E_AUDIO_ENGINE_NODE_NOT_FOUND                                             Handle        = 0x80660001
+	E_HDAUDIO_EMPTY_CONNECTION_LIST                                           Handle        = 0x80660002
+	E_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED                                   Handle        = 0x80660003
+	E_HDAUDIO_NO_LOGICAL_DEVICES_CREATED                                      Handle        = 0x80660004
+	E_HDAUDIO_NULL_LINKED_LIST_ENTRY                                          Handle        = 0x80660005
+	STATEREPOSITORY_E_CONCURRENCY_LOCKING_FAILURE                             Handle        = 0x80670001
+	STATEREPOSITORY_E_STATEMENT_INPROGRESS                                    Handle        = 0x80670002
+	STATEREPOSITORY_E_CONFIGURATION_INVALID                                   Handle        = 0x80670003
+	STATEREPOSITORY_E_UNKNOWN_SCHEMA_VERSION                                  Handle        = 0x80670004
+	STATEREPOSITORY_ERROR_DICTIONARY_CORRUPTED                                Handle        = 0x80670005
+	STATEREPOSITORY_E_BLOCKED                                                 Handle        = 0x80670006
+	STATEREPOSITORY_E_BUSY_RETRY                                              Handle        = 0x80670007
+	STATEREPOSITORY_E_BUSY_RECOVERY_RETRY                                     Handle        = 0x80670008
+	STATEREPOSITORY_E_LOCKED_RETRY                                            Handle        = 0x80670009
+	STATEREPOSITORY_E_LOCKED_SHAREDCACHE_RETRY                                Handle        = 0x8067000A
+	STATEREPOSITORY_E_TRANSACTION_REQUIRED                                    Handle        = 0x8067000B
+	STATEREPOSITORY_E_BUSY_TIMEOUT_EXCEEDED                                   Handle        = 0x8067000C
+	STATEREPOSITORY_E_BUSY_RECOVERY_TIMEOUT_EXCEEDED                          Handle        = 0x8067000D
+	STATEREPOSITORY_E_LOCKED_TIMEOUT_EXCEEDED                                 Handle        = 0x8067000E
+	STATEREPOSITORY_E_LOCKED_SHAREDCACHE_TIMEOUT_EXCEEDED                     Handle        = 0x8067000F
+	STATEREPOSITORY_E_SERVICE_STOP_IN_PROGRESS                                Handle        = 0x80670010
+	STATEREPOSTORY_E_NESTED_TRANSACTION_NOT_SUPPORTED                         Handle        = 0x80670011
+	STATEREPOSITORY_ERROR_CACHE_CORRUPTED                                     Handle        = 0x80670012
+	STATEREPOSITORY_TRANSACTION_CALLER_ID_CHANGED                             Handle        = 0x00670013
+	STATEREPOSITORY_TRANSACTION_IN_PROGRESS                                   Handle        = 0x00670014
+	ERROR_SPACES_POOL_WAS_DELETED                                             Handle        = 0x00E70001
+	ERROR_SPACES_FAULT_DOMAIN_TYPE_INVALID                                    Handle        = 0x80E70001
+	ERROR_SPACES_INTERNAL_ERROR                                               Handle        = 0x80E70002
+	ERROR_SPACES_RESILIENCY_TYPE_INVALID                                      Handle        = 0x80E70003
+	ERROR_SPACES_DRIVE_SECTOR_SIZE_INVALID                                    Handle        = 0x80E70004
+	ERROR_SPACES_DRIVE_REDUNDANCY_INVALID                                     Handle        = 0x80E70006
+	ERROR_SPACES_NUMBER_OF_DATA_COPIES_INVALID                                Handle        = 0x80E70007
+	ERROR_SPACES_PARITY_LAYOUT_INVALID                                        Handle        = 0x80E70008
+	ERROR_SPACES_INTERLEAVE_LENGTH_INVALID                                    Handle        = 0x80E70009
+	ERROR_SPACES_NUMBER_OF_COLUMNS_INVALID                                    Handle        = 0x80E7000A
+	ERROR_SPACES_NOT_ENOUGH_DRIVES                                            Handle        = 0x80E7000B
+	ERROR_SPACES_EXTENDED_ERROR                                               Handle        = 0x80E7000C
+	ERROR_SPACES_PROVISIONING_TYPE_INVALID                                    Handle        = 0x80E7000D
+	ERROR_SPACES_ALLOCATION_SIZE_INVALID                                      Handle        = 0x80E7000E
+	ERROR_SPACES_ENCLOSURE_AWARE_INVALID                                      Handle        = 0x80E7000F
+	ERROR_SPACES_WRITE_CACHE_SIZE_INVALID                                     Handle        = 0x80E70010
+	ERROR_SPACES_NUMBER_OF_GROUPS_INVALID                                     Handle        = 0x80E70011
+	ERROR_SPACES_DRIVE_OPERATIONAL_STATE_INVALID                              Handle        = 0x80E70012
+	ERROR_SPACES_ENTRY_INCOMPLETE                                             Handle        = 0x80E70013
+	ERROR_SPACES_ENTRY_INVALID                                                Handle        = 0x80E70014
+	ERROR_VOLSNAP_BOOTFILE_NOT_VALID                                          Handle        = 0x80820001
+	ERROR_VOLSNAP_ACTIVATION_TIMEOUT                                          Handle        = 0x80820002
+	ERROR_TIERING_NOT_SUPPORTED_ON_VOLUME                                     Handle        = 0x80830001
+	ERROR_TIERING_VOLUME_DISMOUNT_IN_PROGRESS                                 Handle        = 0x80830002
+	ERROR_TIERING_STORAGE_TIER_NOT_FOUND                                      Handle        = 0x80830003
+	ERROR_TIERING_INVALID_FILE_ID                                             Handle        = 0x80830004
+	ERROR_TIERING_WRONG_CLUSTER_NODE                                          Handle        = 0x80830005
+	ERROR_TIERING_ALREADY_PROCESSING                                          Handle        = 0x80830006
+	ERROR_TIERING_CANNOT_PIN_OBJECT                                           Handle        = 0x80830007
+	ERROR_TIERING_FILE_IS_NOT_PINNED                                          Handle        = 0x80830008
+	ERROR_NOT_A_TIERED_VOLUME                                                 Handle        = 0x80830009
+	ERROR_ATTRIBUTE_NOT_PRESENT                                               Handle        = 0x8083000A
+	ERROR_SECCORE_INVALID_COMMAND                                             Handle        = 0xC0E80000
+	ERROR_NO_APPLICABLE_APP_LICENSES_FOUND                                    Handle        = 0xC0EA0001
+	ERROR_CLIP_LICENSE_NOT_FOUND                                              Handle        = 0xC0EA0002
+	ERROR_CLIP_DEVICE_LICENSE_MISSING                                         Handle        = 0xC0EA0003
+	ERROR_CLIP_LICENSE_INVALID_SIGNATURE                                      Handle        = 0xC0EA0004
+	ERROR_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID                           Handle        = 0xC0EA0005
+	ERROR_CLIP_LICENSE_EXPIRED                                                Handle        = 0xC0EA0006
+	ERROR_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE                               Handle        = 0xC0EA0007
+	ERROR_CLIP_LICENSE_NOT_SIGNED                                             Handle        = 0xC0EA0008
+	ERROR_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE                           Handle        = 0xC0EA0009
+	ERROR_CLIP_LICENSE_DEVICE_ID_MISMATCH                                     Handle        = 0xC0EA000A
+	DXGI_STATUS_OCCLUDED                                                      Handle        = 0x087A0001
+	DXGI_STATUS_CLIPPED                                                       Handle        = 0x087A0002
+	DXGI_STATUS_NO_REDIRECTION                                                Handle        = 0x087A0004
+	DXGI_STATUS_NO_DESKTOP_ACCESS                                             Handle        = 0x087A0005
+	DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE                                  Handle        = 0x087A0006
+	DXGI_STATUS_MODE_CHANGED                                                  Handle        = 0x087A0007
+	DXGI_STATUS_MODE_CHANGE_IN_PROGRESS                                       Handle        = 0x087A0008
+	DXGI_ERROR_INVALID_CALL                                                   Handle        = 0x887A0001
+	DXGI_ERROR_NOT_FOUND                                                      Handle        = 0x887A0002
+	DXGI_ERROR_MORE_DATA                                                      Handle        = 0x887A0003
+	DXGI_ERROR_UNSUPPORTED                                                    Handle        = 0x887A0004
+	DXGI_ERROR_DEVICE_REMOVED                                                 Handle        = 0x887A0005
+	DXGI_ERROR_DEVICE_HUNG                                                    Handle        = 0x887A0006
+	DXGI_ERROR_DEVICE_RESET                                                   Handle        = 0x887A0007
+	DXGI_ERROR_WAS_STILL_DRAWING                                              Handle        = 0x887A000A
+	DXGI_ERROR_FRAME_STATISTICS_DISJOINT                                      Handle        = 0x887A000B
+	DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE                                   Handle        = 0x887A000C
+	DXGI_ERROR_DRIVER_INTERNAL_ERROR                                          Handle        = 0x887A0020
+	DXGI_ERROR_NONEXCLUSIVE                                                   Handle        = 0x887A0021
+	DXGI_ERROR_NOT_CURRENTLY_AVAILABLE                                        Handle        = 0x887A0022
+	DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED                                     Handle        = 0x887A0023
+	DXGI_ERROR_REMOTE_OUTOFMEMORY                                             Handle        = 0x887A0024
+	DXGI_ERROR_ACCESS_LOST                                                    Handle        = 0x887A0026
+	DXGI_ERROR_WAIT_TIMEOUT                                                   Handle        = 0x887A0027
+	DXGI_ERROR_SESSION_DISCONNECTED                                           Handle        = 0x887A0028
+	DXGI_ERROR_RESTRICT_TO_OUTPUT_STALE                                       Handle        = 0x887A0029
+	DXGI_ERROR_CANNOT_PROTECT_CONTENT                                         Handle        = 0x887A002A
+	DXGI_ERROR_ACCESS_DENIED                                                  Handle        = 0x887A002B
+	DXGI_ERROR_NAME_ALREADY_EXISTS                                            Handle        = 0x887A002C
+	DXGI_ERROR_SDK_COMPONENT_MISSING                                          Handle        = 0x887A002D
+	DXGI_ERROR_NOT_CURRENT                                                    Handle        = 0x887A002E
+	DXGI_ERROR_HW_PROTECTION_OUTOFMEMORY                                      Handle        = 0x887A0030
+	DXGI_ERROR_DYNAMIC_CODE_POLICY_VIOLATION                                  Handle        = 0x887A0031
+	DXGI_ERROR_NON_COMPOSITED_UI                                              Handle        = 0x887A0032
+	DXGI_STATUS_UNOCCLUDED                                                    Handle        = 0x087A0009
+	DXGI_STATUS_DDA_WAS_STILL_DRAWING                                         Handle        = 0x087A000A
+	DXGI_ERROR_MODE_CHANGE_IN_PROGRESS                                        Handle        = 0x887A0025
+	DXGI_STATUS_PRESENT_REQUIRED                                              Handle        = 0x087A002F
+	DXGI_ERROR_CACHE_CORRUPT                                                  Handle        = 0x887A0033
+	DXGI_ERROR_CACHE_FULL                                                     Handle        = 0x887A0034
+	DXGI_ERROR_CACHE_HASH_COLLISION                                           Handle        = 0x887A0035
+	DXGI_ERROR_ALREADY_EXISTS                                                 Handle        = 0x887A0036
+	DXGI_DDI_ERR_WASSTILLDRAWING                                              Handle        = 0x887B0001
+	DXGI_DDI_ERR_UNSUPPORTED                                                  Handle        = 0x887B0002
+	DXGI_DDI_ERR_NONEXCLUSIVE                                                 Handle        = 0x887B0003
+	D3D10_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS                                 Handle        = 0x88790001
+	D3D10_ERROR_FILE_NOT_FOUND                                                Handle        = 0x88790002
+	D3D11_ERROR_TOO_MANY_UNIQUE_STATE_OBJECTS                                 Handle        = 0x887C0001
+	D3D11_ERROR_FILE_NOT_FOUND                                                Handle        = 0x887C0002
+	D3D11_ERROR_TOO_MANY_UNIQUE_VIEW_OBJECTS                                  Handle        = 0x887C0003
+	D3D11_ERROR_DEFERRED_CONTEXT_MAP_WITHOUT_INITIAL_DISCARD                  Handle        = 0x887C0004
+	D3D12_ERROR_ADAPTER_NOT_FOUND                                             Handle        = 0x887E0001
+	D3D12_ERROR_DRIVER_VERSION_MISMATCH                                       Handle        = 0x887E0002
+	D2DERR_WRONG_STATE                                                        Handle        = 0x88990001
+	D2DERR_NOT_INITIALIZED                                                    Handle        = 0x88990002
+	D2DERR_UNSUPPORTED_OPERATION                                              Handle        = 0x88990003
+	D2DERR_SCANNER_FAILED                                                     Handle        = 0x88990004
+	D2DERR_SCREEN_ACCESS_DENIED                                               Handle        = 0x88990005
+	D2DERR_DISPLAY_STATE_INVALID                                              Handle        = 0x88990006
+	D2DERR_ZERO_VECTOR                                                        Handle        = 0x88990007
+	D2DERR_INTERNAL_ERROR                                                     Handle        = 0x88990008
+	D2DERR_DISPLAY_FORMAT_NOT_SUPPORTED                                       Handle        = 0x88990009
+	D2DERR_INVALID_CALL                                                       Handle        = 0x8899000A
+	D2DERR_NO_HARDWARE_DEVICE                                                 Handle        = 0x8899000B
+	D2DERR_RECREATE_TARGET                                                    Handle        = 0x8899000C
+	D2DERR_TOO_MANY_SHADER_ELEMENTS                                           Handle        = 0x8899000D
+	D2DERR_SHADER_COMPILE_FAILED                                              Handle        = 0x8899000E
+	D2DERR_MAX_TEXTURE_SIZE_EXCEEDED                                          Handle        = 0x8899000F
+	D2DERR_UNSUPPORTED_VERSION                                                Handle        = 0x88990010
+	D2DERR_BAD_NUMBER                                                         Handle        = 0x88990011
+	D2DERR_WRONG_FACTORY                                                      Handle        = 0x88990012
+	D2DERR_LAYER_ALREADY_IN_USE                                               Handle        = 0x88990013
+	D2DERR_POP_CALL_DID_NOT_MATCH_PUSH                                        Handle        = 0x88990014
+	D2DERR_WRONG_RESOURCE_DOMAIN                                              Handle        = 0x88990015
+	D2DERR_PUSH_POP_UNBALANCED                                                Handle        = 0x88990016
+	D2DERR_RENDER_TARGET_HAS_LAYER_OR_CLIPRECT                                Handle        = 0x88990017
+	D2DERR_INCOMPATIBLE_BRUSH_TYPES                                           Handle        = 0x88990018
+	D2DERR_WIN32_ERROR                                                        Handle        = 0x88990019
+	D2DERR_TARGET_NOT_GDI_COMPATIBLE                                          Handle        = 0x8899001A
+	D2DERR_TEXT_EFFECT_IS_WRONG_TYPE                                          Handle        = 0x8899001B
+	D2DERR_TEXT_RENDERER_NOT_RELEASED                                         Handle        = 0x8899001C
+	D2DERR_EXCEEDS_MAX_BITMAP_SIZE                                            Handle        = 0x8899001D
+	D2DERR_INVALID_GRAPH_CONFIGURATION                                        Handle        = 0x8899001E
+	D2DERR_INVALID_INTERNAL_GRAPH_CONFIGURATION                               Handle        = 0x8899001F
+	D2DERR_CYCLIC_GRAPH                                                       Handle        = 0x88990020
+	D2DERR_BITMAP_CANNOT_DRAW                                                 Handle        = 0x88990021
+	D2DERR_OUTSTANDING_BITMAP_REFERENCES                                      Handle        = 0x88990022
+	D2DERR_ORIGINAL_TARGET_NOT_BOUND                                          Handle        = 0x88990023
+	D2DERR_INVALID_TARGET                                                     Handle        = 0x88990024
+	D2DERR_BITMAP_BOUND_AS_TARGET                                             Handle        = 0x88990025
+	D2DERR_INSUFFICIENT_DEVICE_CAPABILITIES                                   Handle        = 0x88990026
+	D2DERR_INTERMEDIATE_TOO_LARGE                                             Handle        = 0x88990027
+	D2DERR_EFFECT_IS_NOT_REGISTERED                                           Handle        = 0x88990028
+	D2DERR_INVALID_PROPERTY                                                   Handle        = 0x88990029
+	D2DERR_NO_SUBPROPERTIES                                                   Handle        = 0x8899002A
+	D2DERR_PRINT_JOB_CLOSED                                                   Handle        = 0x8899002B
+	D2DERR_PRINT_FORMAT_NOT_SUPPORTED                                         Handle        = 0x8899002C
+	D2DERR_TOO_MANY_TRANSFORM_INPUTS                                          Handle        = 0x8899002D
+	D2DERR_INVALID_GLYPH_IMAGE                                                Handle        = 0x8899002E
+	DWRITE_E_FILEFORMAT                                                       Handle        = 0x88985000
+	DWRITE_E_UNEXPECTED                                                       Handle        = 0x88985001
+	DWRITE_E_NOFONT                                                           Handle        = 0x88985002
+	DWRITE_E_FILENOTFOUND                                                     Handle        = 0x88985003
+	DWRITE_E_FILEACCESS                                                       Handle        = 0x88985004
+	DWRITE_E_FONTCOLLECTIONOBSOLETE                                           Handle        = 0x88985005
+	DWRITE_E_ALREADYREGISTERED                                                Handle        = 0x88985006
+	DWRITE_E_CACHEFORMAT                                                      Handle        = 0x88985007
+	DWRITE_E_CACHEVERSION                                                     Handle        = 0x88985008
+	DWRITE_E_UNSUPPORTEDOPERATION                                             Handle        = 0x88985009
+	DWRITE_E_TEXTRENDERERINCOMPATIBLE                                         Handle        = 0x8898500A
+	DWRITE_E_FLOWDIRECTIONCONFLICTS                                           Handle        = 0x8898500B
+	DWRITE_E_NOCOLOR                                                          Handle        = 0x8898500C
+	DWRITE_E_REMOTEFONT                                                       Handle        = 0x8898500D
+	DWRITE_E_DOWNLOADCANCELLED                                                Handle        = 0x8898500E
+	DWRITE_E_DOWNLOADFAILED                                                   Handle        = 0x8898500F
+	DWRITE_E_TOOMANYDOWNLOADS                                                 Handle        = 0x88985010
+	WINCODEC_ERR_WRONGSTATE                                                   Handle        = 0x88982F04
+	WINCODEC_ERR_VALUEOUTOFRANGE                                              Handle        = 0x88982F05
+	WINCODEC_ERR_UNKNOWNIMAGEFORMAT                                           Handle        = 0x88982F07
+	WINCODEC_ERR_UNSUPPORTEDVERSION                                           Handle        = 0x88982F0B
+	WINCODEC_ERR_NOTINITIALIZED                                               Handle        = 0x88982F0C
+	WINCODEC_ERR_ALREADYLOCKED                                                Handle        = 0x88982F0D
+	WINCODEC_ERR_PROPERTYNOTFOUND                                             Handle        = 0x88982F40
+	WINCODEC_ERR_PROPERTYNOTSUPPORTED                                         Handle        = 0x88982F41
+	WINCODEC_ERR_PROPERTYSIZE                                                 Handle        = 0x88982F42
+	WINCODEC_ERR_CODECPRESENT                                                 Handle        = 0x88982F43
+	WINCODEC_ERR_CODECNOTHUMBNAIL                                             Handle        = 0x88982F44
+	WINCODEC_ERR_PALETTEUNAVAILABLE                                           Handle        = 0x88982F45
+	WINCODEC_ERR_CODECTOOMANYSCANLINES                                        Handle        = 0x88982F46
+	WINCODEC_ERR_INTERNALERROR                                                Handle        = 0x88982F48
+	WINCODEC_ERR_SOURCERECTDOESNOTMATCHDIMENSIONS                             Handle        = 0x88982F49
+	WINCODEC_ERR_COMPONENTNOTFOUND                                            Handle        = 0x88982F50
+	WINCODEC_ERR_IMAGESIZEOUTOFRANGE                                          Handle        = 0x88982F51
+	WINCODEC_ERR_TOOMUCHMETADATA                                              Handle        = 0x88982F52
+	WINCODEC_ERR_BADIMAGE                                                     Handle        = 0x88982F60
+	WINCODEC_ERR_BADHEADER                                                    Handle        = 0x88982F61
+	WINCODEC_ERR_FRAMEMISSING                                                 Handle        = 0x88982F62
+	WINCODEC_ERR_BADMETADATAHEADER                                            Handle        = 0x88982F63
+	WINCODEC_ERR_BADSTREAMDATA                                                Handle        = 0x88982F70
+	WINCODEC_ERR_STREAMWRITE                                                  Handle        = 0x88982F71
+	WINCODEC_ERR_STREAMREAD                                                   Handle        = 0x88982F72
+	WINCODEC_ERR_STREAMNOTAVAILABLE                                           Handle        = 0x88982F73
+	WINCODEC_ERR_UNSUPPORTEDPIXELFORMAT                                       Handle        = 0x88982F80
+	WINCODEC_ERR_UNSUPPORTEDOPERATION                                         Handle        = 0x88982F81
+	WINCODEC_ERR_INVALIDREGISTRATION                                          Handle        = 0x88982F8A
+	WINCODEC_ERR_COMPONENTINITIALIZEFAILURE                                   Handle        = 0x88982F8B
+	WINCODEC_ERR_INSUFFICIENTBUFFER                                           Handle        = 0x88982F8C
+	WINCODEC_ERR_DUPLICATEMETADATAPRESENT                                     Handle        = 0x88982F8D
+	WINCODEC_ERR_PROPERTYUNEXPECTEDTYPE                                       Handle        = 0x88982F8E
+	WINCODEC_ERR_UNEXPECTEDSIZE                                               Handle        = 0x88982F8F
+	WINCODEC_ERR_INVALIDQUERYREQUEST                                          Handle        = 0x88982F90
+	WINCODEC_ERR_UNEXPECTEDMETADATATYPE                                       Handle        = 0x88982F91
+	WINCODEC_ERR_REQUESTONLYVALIDATMETADATAROOT                               Handle        = 0x88982F92
+	WINCODEC_ERR_INVALIDQUERYCHARACTER                                        Handle        = 0x88982F93
+	WINCODEC_ERR_WIN32ERROR                                                   Handle        = 0x88982F94
+	WINCODEC_ERR_INVALIDPROGRESSIVELEVEL                                      Handle        = 0x88982F95
+	WINCODEC_ERR_INVALIDJPEGSCANINDEX                                         Handle        = 0x88982F96
+	MILERR_OBJECTBUSY                                                         Handle        = 0x88980001
+	MILERR_INSUFFICIENTBUFFER                                                 Handle        = 0x88980002
+	MILERR_WIN32ERROR                                                         Handle        = 0x88980003
+	MILERR_SCANNER_FAILED                                                     Handle        = 0x88980004
+	MILERR_SCREENACCESSDENIED                                                 Handle        = 0x88980005
+	MILERR_DISPLAYSTATEINVALID                                                Handle        = 0x88980006
+	MILERR_NONINVERTIBLEMATRIX                                                Handle        = 0x88980007
+	MILERR_ZEROVECTOR                                                         Handle        = 0x88980008
+	MILERR_TERMINATED                                                         Handle        = 0x88980009
+	MILERR_BADNUMBER                                                          Handle        = 0x8898000A
+	MILERR_INTERNALERROR                                                      Handle        = 0x88980080
+	MILERR_DISPLAYFORMATNOTSUPPORTED                                          Handle        = 0x88980084
+	MILERR_INVALIDCALL                                                        Handle        = 0x88980085
+	MILERR_ALREADYLOCKED                                                      Handle        = 0x88980086
+	MILERR_NOTLOCKED                                                          Handle        = 0x88980087
+	MILERR_DEVICECANNOTRENDERTEXT                                             Handle        = 0x88980088
+	MILERR_GLYPHBITMAPMISSED                                                  Handle        = 0x88980089
+	MILERR_MALFORMEDGLYPHCACHE                                                Handle        = 0x8898008A
+	MILERR_GENERIC_IGNORE                                                     Handle        = 0x8898008B
+	MILERR_MALFORMED_GUIDELINE_DATA                                           Handle        = 0x8898008C
+	MILERR_NO_HARDWARE_DEVICE                                                 Handle        = 0x8898008D
+	MILERR_NEED_RECREATE_AND_PRESENT                                          Handle        = 0x8898008E
+	MILERR_ALREADY_INITIALIZED                                                Handle        = 0x8898008F
+	MILERR_MISMATCHED_SIZE                                                    Handle        = 0x88980090
+	MILERR_NO_REDIRECTION_SURFACE_AVAILABLE                                   Handle        = 0x88980091
+	MILERR_REMOTING_NOT_SUPPORTED                                             Handle        = 0x88980092
+	MILERR_QUEUED_PRESENT_NOT_SUPPORTED                                       Handle        = 0x88980093
+	MILERR_NOT_QUEUING_PRESENTS                                               Handle        = 0x88980094
+	MILERR_NO_REDIRECTION_SURFACE_RETRY_LATER                                 Handle        = 0x88980095
+	MILERR_TOOMANYSHADERELEMNTS                                               Handle        = 0x88980096
+	MILERR_MROW_READLOCK_FAILED                                               Handle        = 0x88980097
+	MILERR_MROW_UPDATE_FAILED                                                 Handle        = 0x88980098
+	MILERR_SHADER_COMPILE_FAILED                                              Handle        = 0x88980099
+	MILERR_MAX_TEXTURE_SIZE_EXCEEDED                                          Handle        = 0x8898009A
+	MILERR_QPC_TIME_WENT_BACKWARD                                             Handle        = 0x8898009B
+	MILERR_DXGI_ENUMERATION_OUT_OF_SYNC                                       Handle        = 0x8898009D
+	MILERR_ADAPTER_NOT_FOUND                                                  Handle        = 0x8898009E
+	MILERR_COLORSPACE_NOT_SUPPORTED                                           Handle        = 0x8898009F
+	MILERR_PREFILTER_NOT_SUPPORTED                                            Handle        = 0x889800A0
+	MILERR_DISPLAYID_ACCESS_DENIED                                            Handle        = 0x889800A1
+	UCEERR_INVALIDPACKETHEADER                                                Handle        = 0x88980400
+	UCEERR_UNKNOWNPACKET                                                      Handle        = 0x88980401
+	UCEERR_ILLEGALPACKET                                                      Handle        = 0x88980402
+	UCEERR_MALFORMEDPACKET                                                    Handle        = 0x88980403
+	UCEERR_ILLEGALHANDLE                                                      Handle        = 0x88980404
+	UCEERR_HANDLELOOKUPFAILED                                                 Handle        = 0x88980405
+	UCEERR_RENDERTHREADFAILURE                                                Handle        = 0x88980406
+	UCEERR_CTXSTACKFRSTTARGETNULL                                             Handle        = 0x88980407
+	UCEERR_CONNECTIONIDLOOKUPFAILED                                           Handle        = 0x88980408
+	UCEERR_BLOCKSFULL                                                         Handle        = 0x88980409
+	UCEERR_MEMORYFAILURE                                                      Handle        = 0x8898040A
+	UCEERR_PACKETRECORDOUTOFRANGE                                             Handle        = 0x8898040B
+	UCEERR_ILLEGALRECORDTYPE                                                  Handle        = 0x8898040C
+	UCEERR_OUTOFHANDLES                                                       Handle        = 0x8898040D
+	UCEERR_UNCHANGABLE_UPDATE_ATTEMPTED                                       Handle        = 0x8898040E
+	UCEERR_NO_MULTIPLE_WORKER_THREADS                                         Handle        = 0x8898040F
+	UCEERR_REMOTINGNOTSUPPORTED                                               Handle        = 0x88980410
+	UCEERR_MISSINGENDCOMMAND                                                  Handle        = 0x88980411
+	UCEERR_MISSINGBEGINCOMMAND                                                Handle        = 0x88980412
+	UCEERR_CHANNELSYNCTIMEDOUT                                                Handle        = 0x88980413
+	UCEERR_CHANNELSYNCABANDONED                                               Handle        = 0x88980414
+	UCEERR_UNSUPPORTEDTRANSPORTVERSION                                        Handle        = 0x88980415
+	UCEERR_TRANSPORTUNAVAILABLE                                               Handle        = 0x88980416
+	UCEERR_FEEDBACK_UNSUPPORTED                                               Handle        = 0x88980417
+	UCEERR_COMMANDTRANSPORTDENIED                                             Handle        = 0x88980418
+	UCEERR_GRAPHICSSTREAMUNAVAILABLE                                          Handle        = 0x88980419
+	UCEERR_GRAPHICSSTREAMALREADYOPEN                                          Handle        = 0x88980420
+	UCEERR_TRANSPORTDISCONNECTED                                              Handle        = 0x88980421
+	UCEERR_TRANSPORTOVERLOADED                                                Handle        = 0x88980422
+	UCEERR_PARTITION_ZOMBIED                                                  Handle        = 0x88980423
+	MILAVERR_NOCLOCK                                                          Handle        = 0x88980500
+	MILAVERR_NOMEDIATYPE                                                      Handle        = 0x88980501
+	MILAVERR_NOVIDEOMIXER                                                     Handle        = 0x88980502
+	MILAVERR_NOVIDEOPRESENTER                                                 Handle        = 0x88980503
+	MILAVERR_NOREADYFRAMES                                                    Handle        = 0x88980504
+	MILAVERR_MODULENOTLOADED                                                  Handle        = 0x88980505
+	MILAVERR_WMPFACTORYNOTREGISTERED                                          Handle        = 0x88980506
+	MILAVERR_INVALIDWMPVERSION                                                Handle        = 0x88980507
+	MILAVERR_INSUFFICIENTVIDEORESOURCES                                       Handle        = 0x88980508
+	MILAVERR_VIDEOACCELERATIONNOTAVAILABLE                                    Handle        = 0x88980509
+	MILAVERR_REQUESTEDTEXTURETOOBIG                                           Handle        = 0x8898050A
+	MILAVERR_SEEKFAILED                                                       Handle        = 0x8898050B
+	MILAVERR_UNEXPECTEDWMPFAILURE                                             Handle        = 0x8898050C
+	MILAVERR_MEDIAPLAYERCLOSED                                                Handle        = 0x8898050D
+	MILAVERR_UNKNOWNHARDWAREERROR                                             Handle        = 0x8898050E
+	MILEFFECTSERR_UNKNOWNPROPERTY                                             Handle        = 0x8898060E
+	MILEFFECTSERR_EFFECTNOTPARTOFGROUP                                        Handle        = 0x8898060F
+	MILEFFECTSERR_NOINPUTSOURCEATTACHED                                       Handle        = 0x88980610
+	MILEFFECTSERR_CONNECTORNOTCONNECTED                                       Handle        = 0x88980611
+	MILEFFECTSERR_CONNECTORNOTASSOCIATEDWITHEFFECT                            Handle        = 0x88980612
+	MILEFFECTSERR_RESERVED                                                    Handle        = 0x88980613
+	MILEFFECTSERR_CYCLEDETECTED                                               Handle        = 0x88980614
+	MILEFFECTSERR_EFFECTINMORETHANONEGRAPH                                    Handle        = 0x88980615
+	MILEFFECTSERR_EFFECTALREADYINAGRAPH                                       Handle        = 0x88980616
+	MILEFFECTSERR_EFFECTHASNOCHILDREN                                         Handle        = 0x88980617
+	MILEFFECTSERR_ALREADYATTACHEDTOLISTENER                                   Handle        = 0x88980618
+	MILEFFECTSERR_NOTAFFINETRANSFORM                                          Handle        = 0x88980619
+	MILEFFECTSERR_EMPTYBOUNDS                                                 Handle        = 0x8898061A
+	MILEFFECTSERR_OUTPUTSIZETOOLARGE                                          Handle        = 0x8898061B
+	DWMERR_STATE_TRANSITION_FAILED                                            Handle        = 0x88980700
+	DWMERR_THEME_FAILED                                                       Handle        = 0x88980701
+	DWMERR_CATASTROPHIC_FAILURE                                               Handle        = 0x88980702
+	DCOMPOSITION_ERROR_WINDOW_ALREADY_COMPOSED                                Handle        = 0x88980800
+	DCOMPOSITION_ERROR_SURFACE_BEING_RENDERED                                 Handle        = 0x88980801
+	DCOMPOSITION_ERROR_SURFACE_NOT_BEING_RENDERED                             Handle        = 0x88980802
+	ONL_E_INVALID_AUTHENTICATION_TARGET                                       Handle        = 0x80860001
+	ONL_E_ACCESS_DENIED_BY_TOU                                                Handle        = 0x80860002
+	ONL_E_INVALID_APPLICATION                                                 Handle        = 0x80860003
+	ONL_E_PASSWORD_UPDATE_REQUIRED                                            Handle        = 0x80860004
+	ONL_E_ACCOUNT_UPDATE_REQUIRED                                             Handle        = 0x80860005
+	ONL_E_FORCESIGNIN                                                         Handle        = 0x80860006
+	ONL_E_ACCOUNT_LOCKED                                                      Handle        = 0x80860007
+	ONL_E_PARENTAL_CONSENT_REQUIRED                                           Handle        = 0x80860008
+	ONL_E_EMAIL_VERIFICATION_REQUIRED                                         Handle        = 0x80860009
+	ONL_E_ACCOUNT_SUSPENDED_COMPROIMISE                                       Handle        = 0x8086000A
+	ONL_E_ACCOUNT_SUSPENDED_ABUSE                                             Handle        = 0x8086000B
+	ONL_E_ACTION_REQUIRED                                                     Handle        = 0x8086000C
+	ONL_CONNECTION_COUNT_LIMIT                                                Handle        = 0x8086000D
+	ONL_E_CONNECTED_ACCOUNT_CAN_NOT_SIGNOUT                                   Handle        = 0x8086000E
+	ONL_E_USER_AUTHENTICATION_REQUIRED                                        Handle        = 0x8086000F
+	ONL_E_REQUEST_THROTTLED                                                   Handle        = 0x80860010
+	FA_E_MAX_PERSISTED_ITEMS_REACHED                                          Handle        = 0x80270220
+	FA_E_HOMEGROUP_NOT_AVAILABLE                                              Handle        = 0x80270222
+	E_MONITOR_RESOLUTION_TOO_LOW                                              Handle        = 0x80270250
+	E_ELEVATED_ACTIVATION_NOT_SUPPORTED                                       Handle        = 0x80270251
+	E_UAC_DISABLED                                                            Handle        = 0x80270252
+	E_FULL_ADMIN_NOT_SUPPORTED                                                Handle        = 0x80270253
+	E_APPLICATION_NOT_REGISTERED                                              Handle        = 0x80270254
+	E_MULTIPLE_EXTENSIONS_FOR_APPLICATION                                     Handle        = 0x80270255
+	E_MULTIPLE_PACKAGES_FOR_FAMILY                                            Handle        = 0x80270256
+	E_APPLICATION_MANAGER_NOT_RUNNING                                         Handle        = 0x80270257
+	S_STORE_LAUNCHED_FOR_REMEDIATION                                          Handle        = 0x00270258
+	S_APPLICATION_ACTIVATION_ERROR_HANDLED_BY_DIALOG                          Handle        = 0x00270259
+	E_APPLICATION_ACTIVATION_TIMED_OUT                                        Handle        = 0x8027025A
+	E_APPLICATION_ACTIVATION_EXEC_FAILURE                                     Handle        = 0x8027025B
+	E_APPLICATION_TEMPORARY_LICENSE_ERROR                                     Handle        = 0x8027025C
+	E_APPLICATION_TRIAL_LICENSE_EXPIRED                                       Handle        = 0x8027025D
+	E_SKYDRIVE_ROOT_TARGET_FILE_SYSTEM_NOT_SUPPORTED                          Handle        = 0x80270260
+	E_SKYDRIVE_ROOT_TARGET_OVERLAP                                            Handle        = 0x80270261
+	E_SKYDRIVE_ROOT_TARGET_CANNOT_INDEX                                       Handle        = 0x80270262
+	E_SKYDRIVE_FILE_NOT_UPLOADED                                              Handle        = 0x80270263
+	E_SKYDRIVE_UPDATE_AVAILABILITY_FAIL                                       Handle        = 0x80270264
+	E_SKYDRIVE_ROOT_TARGET_VOLUME_ROOT_NOT_SUPPORTED                          Handle        = 0x80270265
+	E_SYNCENGINE_FILE_SIZE_OVER_LIMIT                                         Handle        = 0x8802B001
+	E_SYNCENGINE_FILE_SIZE_EXCEEDS_REMAINING_QUOTA                            Handle        = 0x8802B002
+	E_SYNCENGINE_UNSUPPORTED_FILE_NAME                                        Handle        = 0x8802B003
+	E_SYNCENGINE_FOLDER_ITEM_COUNT_LIMIT_EXCEEDED                             Handle        = 0x8802B004
+	E_SYNCENGINE_FILE_SYNC_PARTNER_ERROR                                      Handle        = 0x8802B005
+	E_SYNCENGINE_SYNC_PAUSED_BY_SERVICE                                       Handle        = 0x8802B006
+	E_SYNCENGINE_FILE_IDENTIFIER_UNKNOWN                                      Handle        = 0x8802C002
+	E_SYNCENGINE_SERVICE_AUTHENTICATION_FAILED                                Handle        = 0x8802C003
+	E_SYNCENGINE_UNKNOWN_SERVICE_ERROR                                        Handle        = 0x8802C004
+	E_SYNCENGINE_SERVICE_RETURNED_UNEXPECTED_SIZE                             Handle        = 0x8802C005
+	E_SYNCENGINE_REQUEST_BLOCKED_BY_SERVICE                                   Handle        = 0x8802C006
+	E_SYNCENGINE_REQUEST_BLOCKED_DUE_TO_CLIENT_ERROR                          Handle        = 0x8802C007
+	E_SYNCENGINE_FOLDER_INACCESSIBLE                                          Handle        = 0x8802D001
+	E_SYNCENGINE_UNSUPPORTED_FOLDER_NAME                                      Handle        = 0x8802D002
+	E_SYNCENGINE_UNSUPPORTED_MARKET                                           Handle        = 0x8802D003
+	E_SYNCENGINE_PATH_LENGTH_LIMIT_EXCEEDED                                   Handle        = 0x8802D004
+	E_SYNCENGINE_REMOTE_PATH_LENGTH_LIMIT_EXCEEDED                            Handle        = 0x8802D005
+	E_SYNCENGINE_CLIENT_UPDATE_NEEDED                                         Handle        = 0x8802D006
+	E_SYNCENGINE_PROXY_AUTHENTICATION_REQUIRED                                Handle        = 0x8802D007
+	E_SYNCENGINE_STORAGE_SERVICE_PROVISIONING_FAILED                          Handle        = 0x8802D008
+	E_SYNCENGINE_UNSUPPORTED_REPARSE_POINT                                    Handle        = 0x8802D009
+	E_SYNCENGINE_STORAGE_SERVICE_BLOCKED                                      Handle        = 0x8802D00A
+	E_SYNCENGINE_FOLDER_IN_REDIRECTION                                        Handle        = 0x8802D00B
+	EAS_E_POLICY_NOT_MANAGED_BY_OS                                            Handle        = 0x80550001
+	EAS_E_POLICY_COMPLIANT_WITH_ACTIONS                                       Handle        = 0x80550002
+	EAS_E_REQUESTED_POLICY_NOT_ENFORCEABLE                                    Handle        = 0x80550003
+	EAS_E_CURRENT_USER_HAS_BLANK_PASSWORD                                     Handle        = 0x80550004
+	EAS_E_REQUESTED_POLICY_PASSWORD_EXPIRATION_INCOMPATIBLE                   Handle        = 0x80550005
+	EAS_E_USER_CANNOT_CHANGE_PASSWORD                                         Handle        = 0x80550006
+	EAS_E_ADMINS_HAVE_BLANK_PASSWORD                                          Handle        = 0x80550007
+	EAS_E_ADMINS_CANNOT_CHANGE_PASSWORD                                       Handle        = 0x80550008
+	EAS_E_LOCAL_CONTROLLED_USERS_CANNOT_CHANGE_PASSWORD                       Handle        = 0x80550009
+	EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CONNECTED_ADMINS                Handle        = 0x8055000A
+	EAS_E_CONNECTED_ADMINS_NEED_TO_CHANGE_PASSWORD                            Handle        = 0x8055000B
+	EAS_E_PASSWORD_POLICY_NOT_ENFORCEABLE_FOR_CURRENT_CONNECTED_USER          Handle        = 0x8055000C
+	EAS_E_CURRENT_CONNECTED_USER_NEED_TO_CHANGE_PASSWORD                      Handle        = 0x8055000D
+	WEB_E_UNSUPPORTED_FORMAT                                                  Handle        = 0x83750001
+	WEB_E_INVALID_XML                                                         Handle        = 0x83750002
+	WEB_E_MISSING_REQUIRED_ELEMENT                                            Handle        = 0x83750003
+	WEB_E_MISSING_REQUIRED_ATTRIBUTE                                          Handle        = 0x83750004
+	WEB_E_UNEXPECTED_CONTENT                                                  Handle        = 0x83750005
+	WEB_E_RESOURCE_TOO_LARGE                                                  Handle        = 0x83750006
+	WEB_E_INVALID_JSON_STRING                                                 Handle        = 0x83750007
+	WEB_E_INVALID_JSON_NUMBER                                                 Handle        = 0x83750008
+	WEB_E_JSON_VALUE_NOT_FOUND                                                Handle        = 0x83750009
+	HTTP_E_STATUS_UNEXPECTED                                                  Handle        = 0x80190001
+	HTTP_E_STATUS_UNEXPECTED_REDIRECTION                                      Handle        = 0x80190003
+	HTTP_E_STATUS_UNEXPECTED_CLIENT_ERROR                                     Handle        = 0x80190004
+	HTTP_E_STATUS_UNEXPECTED_SERVER_ERROR                                     Handle        = 0x80190005
+	HTTP_E_STATUS_AMBIGUOUS                                                   Handle        = 0x8019012C
+	HTTP_E_STATUS_MOVED                                                       Handle        = 0x8019012D
+	HTTP_E_STATUS_REDIRECT                                                    Handle        = 0x8019012E
+	HTTP_E_STATUS_REDIRECT_METHOD                                             Handle        = 0x8019012F
+	HTTP_E_STATUS_NOT_MODIFIED                                                Handle        = 0x80190130
+	HTTP_E_STATUS_USE_PROXY                                                   Handle        = 0x80190131
+	HTTP_E_STATUS_REDIRECT_KEEP_VERB                                          Handle        = 0x80190133
+	HTTP_E_STATUS_BAD_REQUEST                                                 Handle        = 0x80190190
+	HTTP_E_STATUS_DENIED                                                      Handle        = 0x80190191
+	HTTP_E_STATUS_PAYMENT_REQ                                                 Handle        = 0x80190192
+	HTTP_E_STATUS_FORBIDDEN                                                   Handle        = 0x80190193
+	HTTP_E_STATUS_NOT_FOUND                                                   Handle        = 0x80190194
+	HTTP_E_STATUS_BAD_METHOD                                                  Handle        = 0x80190195
+	HTTP_E_STATUS_NONE_ACCEPTABLE                                             Handle        = 0x80190196
+	HTTP_E_STATUS_PROXY_AUTH_REQ                                              Handle        = 0x80190197
+	HTTP_E_STATUS_REQUEST_TIMEOUT                                             Handle        = 0x80190198
+	HTTP_E_STATUS_CONFLICT                                                    Handle        = 0x80190199
+	HTTP_E_STATUS_GONE                                                        Handle        = 0x8019019A
+	HTTP_E_STATUS_LENGTH_REQUIRED                                             Handle        = 0x8019019B
+	HTTP_E_STATUS_PRECOND_FAILED                                              Handle        = 0x8019019C
+	HTTP_E_STATUS_REQUEST_TOO_LARGE                                           Handle        = 0x8019019D
+	HTTP_E_STATUS_URI_TOO_LONG                                                Handle        = 0x8019019E
+	HTTP_E_STATUS_UNSUPPORTED_MEDIA                                           Handle        = 0x8019019F
+	HTTP_E_STATUS_RANGE_NOT_SATISFIABLE                                       Handle        = 0x801901A0
+	HTTP_E_STATUS_EXPECTATION_FAILED                                          Handle        = 0x801901A1
+	HTTP_E_STATUS_SERVER_ERROR                                                Handle        = 0x801901F4
+	HTTP_E_STATUS_NOT_SUPPORTED                                               Handle        = 0x801901F5
+	HTTP_E_STATUS_BAD_GATEWAY                                                 Handle        = 0x801901F6
+	HTTP_E_STATUS_SERVICE_UNAVAIL                                             Handle        = 0x801901F7
+	HTTP_E_STATUS_GATEWAY_TIMEOUT                                             Handle        = 0x801901F8
+	HTTP_E_STATUS_VERSION_NOT_SUP                                             Handle        = 0x801901F9
+	E_INVALID_PROTOCOL_OPERATION                                              Handle        = 0x83760001
+	E_INVALID_PROTOCOL_FORMAT                                                 Handle        = 0x83760002
+	E_PROTOCOL_EXTENSIONS_NOT_SUPPORTED                                       Handle        = 0x83760003
+	E_SUBPROTOCOL_NOT_SUPPORTED                                               Handle        = 0x83760004
+	E_PROTOCOL_VERSION_NOT_SUPPORTED                                          Handle        = 0x83760005
+	INPUT_E_OUT_OF_ORDER                                                      Handle        = 0x80400000
+	INPUT_E_REENTRANCY                                                        Handle        = 0x80400001
+	INPUT_E_MULTIMODAL                                                        Handle        = 0x80400002
+	INPUT_E_PACKET                                                            Handle        = 0x80400003
+	INPUT_E_FRAME                                                             Handle        = 0x80400004
+	INPUT_E_HISTORY                                                           Handle        = 0x80400005
+	INPUT_E_DEVICE_INFO                                                       Handle        = 0x80400006
+	INPUT_E_TRANSFORM                                                         Handle        = 0x80400007
+	INPUT_E_DEVICE_PROPERTY                                                   Handle        = 0x80400008
+	INET_E_INVALID_URL                                                        Handle        = 0x800C0002
+	INET_E_NO_SESSION                                                         Handle        = 0x800C0003
+	INET_E_CANNOT_CONNECT                                                     Handle        = 0x800C0004
+	INET_E_RESOURCE_NOT_FOUND                                                 Handle        = 0x800C0005
+	INET_E_OBJECT_NOT_FOUND                                                   Handle        = 0x800C0006
+	INET_E_DATA_NOT_AVAILABLE                                                 Handle        = 0x800C0007
+	INET_E_DOWNLOAD_FAILURE                                                   Handle        = 0x800C0008
+	INET_E_AUTHENTICATION_REQUIRED                                            Handle        = 0x800C0009
+	INET_E_NO_VALID_MEDIA                                                     Handle        = 0x800C000A
+	INET_E_CONNECTION_TIMEOUT                                                 Handle        = 0x800C000B
+	INET_E_INVALID_REQUEST                                                    Handle        = 0x800C000C
+	INET_E_UNKNOWN_PROTOCOL                                                   Handle        = 0x800C000D
+	INET_E_SECURITY_PROBLEM                                                   Handle        = 0x800C000E
+	INET_E_CANNOT_LOAD_DATA                                                   Handle        = 0x800C000F
+	INET_E_CANNOT_INSTANTIATE_OBJECT                                          Handle        = 0x800C0010
+	INET_E_INVALID_CERTIFICATE                                                Handle        = 0x800C0019
+	INET_E_REDIRECT_FAILED                                                    Handle        = 0x800C0014
+	INET_E_REDIRECT_TO_DIR                                                    Handle        = 0x800C0015
+	ERROR_DBG_CREATE_PROCESS_FAILURE_LOCKDOWN                                 Handle        = 0x80B00001
+	ERROR_DBG_ATTACH_PROCESS_FAILURE_LOCKDOWN                                 Handle        = 0x80B00002
+	ERROR_DBG_CONNECT_SERVER_FAILURE_LOCKDOWN                                 Handle        = 0x80B00003
+	ERROR_DBG_START_SERVER_FAILURE_LOCKDOWN                                   Handle        = 0x80B00004
+	ERROR_IO_PREEMPTED                                                        Handle        = 0x89010001
+	JSCRIPT_E_CANTEXECUTE                                                     Handle        = 0x89020001
+	WEP_E_NOT_PROVISIONED_ON_ALL_VOLUMES                                      Handle        = 0x88010001
+	WEP_E_FIXED_DATA_NOT_SUPPORTED                                            Handle        = 0x88010002
+	WEP_E_HARDWARE_NOT_COMPLIANT                                              Handle        = 0x88010003
+	WEP_E_LOCK_NOT_CONFIGURED                                                 Handle        = 0x88010004
+	WEP_E_PROTECTION_SUSPENDED                                                Handle        = 0x88010005
+	WEP_E_NO_LICENSE                                                          Handle        = 0x88010006
+	WEP_E_OS_NOT_PROTECTED                                                    Handle        = 0x88010007
+	WEP_E_UNEXPECTED_FAIL                                                     Handle        = 0x88010008
+	WEP_E_BUFFER_TOO_LARGE                                                    Handle        = 0x88010009
+	ERROR_SVHDX_ERROR_STORED                                                  Handle        = 0xC05C0000
+	ERROR_SVHDX_ERROR_NOT_AVAILABLE                                           Handle        = 0xC05CFF00
+	ERROR_SVHDX_UNIT_ATTENTION_AVAILABLE                                      Handle        = 0xC05CFF01
+	ERROR_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED                          Handle        = 0xC05CFF02
+	ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED                         Handle        = 0xC05CFF03
+	ERROR_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED                          Handle        = 0xC05CFF04
+	ERROR_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED                        Handle        = 0xC05CFF05
+	ERROR_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED                   Handle        = 0xC05CFF06
+	ERROR_SVHDX_RESERVATION_CONFLICT                                          Handle        = 0xC05CFF07
+	ERROR_SVHDX_WRONG_FILE_TYPE                                               Handle        = 0xC05CFF08
+	ERROR_SVHDX_VERSION_MISMATCH                                              Handle        = 0xC05CFF09
+	ERROR_VHD_SHARED                                                          Handle        = 0xC05CFF0A
+	ERROR_SVHDX_NO_INITIATOR                                                  Handle        = 0xC05CFF0B
+	ERROR_VHDSET_BACKING_STORAGE_NOT_FOUND                                    Handle        = 0xC05CFF0C
+	ERROR_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP                               Handle        = 0xC05D0000
+	ERROR_SMB_BAD_CLUSTER_DIALECT                                             Handle        = 0xC05D0001
+	WININET_E_OUT_OF_HANDLES                                                  Handle        = 0x80072EE1
+	WININET_E_TIMEOUT                                                         Handle        = 0x80072EE2
+	WININET_E_EXTENDED_ERROR                                                  Handle        = 0x80072EE3
+	WININET_E_INTERNAL_ERROR                                                  Handle        = 0x80072EE4
+	WININET_E_INVALID_URL                                                     Handle        = 0x80072EE5
+	WININET_E_UNRECOGNIZED_SCHEME                                             Handle        = 0x80072EE6
+	WININET_E_NAME_NOT_RESOLVED                                               Handle        = 0x80072EE7
+	WININET_E_PROTOCOL_NOT_FOUND                                              Handle        = 0x80072EE8
+	WININET_E_INVALID_OPTION                                                  Handle        = 0x80072EE9
+	WININET_E_BAD_OPTION_LENGTH                                               Handle        = 0x80072EEA
+	WININET_E_OPTION_NOT_SETTABLE                                             Handle        = 0x80072EEB
+	WININET_E_SHUTDOWN                                                        Handle        = 0x80072EEC
+	WININET_E_INCORRECT_USER_NAME                                             Handle        = 0x80072EED
+	WININET_E_INCORRECT_PASSWORD                                              Handle        = 0x80072EEE
+	WININET_E_LOGIN_FAILURE                                                   Handle        = 0x80072EEF
+	WININET_E_INVALID_OPERATION                                               Handle        = 0x80072EF0
+	WININET_E_OPERATION_CANCELLED                                             Handle        = 0x80072EF1
+	WININET_E_INCORRECT_HANDLE_TYPE                                           Handle        = 0x80072EF2
+	WININET_E_INCORRECT_HANDLE_STATE                                          Handle        = 0x80072EF3
+	WININET_E_NOT_PROXY_REQUEST                                               Handle        = 0x80072EF4
+	WININET_E_REGISTRY_VALUE_NOT_FOUND                                        Handle        = 0x80072EF5
+	WININET_E_BAD_REGISTRY_PARAMETER                                          Handle        = 0x80072EF6
+	WININET_E_NO_DIRECT_ACCESS                                                Handle        = 0x80072EF7
+	WININET_E_NO_CONTEXT                                                      Handle        = 0x80072EF8
+	WININET_E_NO_CALLBACK                                                     Handle        = 0x80072EF9
+	WININET_E_REQUEST_PENDING                                                 Handle        = 0x80072EFA
+	WININET_E_INCORRECT_FORMAT                                                Handle        = 0x80072EFB
+	WININET_E_ITEM_NOT_FOUND                                                  Handle        = 0x80072EFC
+	WININET_E_CANNOT_CONNECT                                                  Handle        = 0x80072EFD
+	WININET_E_CONNECTION_ABORTED                                              Handle        = 0x80072EFE
+	WININET_E_CONNECTION_RESET                                                Handle        = 0x80072EFF
+	WININET_E_FORCE_RETRY                                                     Handle        = 0x80072F00
+	WININET_E_INVALID_PROXY_REQUEST                                           Handle        = 0x80072F01
+	WININET_E_NEED_UI                                                         Handle        = 0x80072F02
+	WININET_E_HANDLE_EXISTS                                                   Handle        = 0x80072F04
+	WININET_E_SEC_CERT_DATE_INVALID                                           Handle        = 0x80072F05
+	WININET_E_SEC_CERT_CN_INVALID                                             Handle        = 0x80072F06
+	WININET_E_HTTP_TO_HTTPS_ON_REDIR                                          Handle        = 0x80072F07
+	WININET_E_HTTPS_TO_HTTP_ON_REDIR                                          Handle        = 0x80072F08
+	WININET_E_MIXED_SECURITY                                                  Handle        = 0x80072F09
+	WININET_E_CHG_POST_IS_NON_SECURE                                          Handle        = 0x80072F0A
+	WININET_E_POST_IS_NON_SECURE                                              Handle        = 0x80072F0B
+	WININET_E_CLIENT_AUTH_CERT_NEEDED                                         Handle        = 0x80072F0C
+	WININET_E_INVALID_CA                                                      Handle        = 0x80072F0D
+	WININET_E_CLIENT_AUTH_NOT_SETUP                                           Handle        = 0x80072F0E
+	WININET_E_ASYNC_THREAD_FAILED                                             Handle        = 0x80072F0F
+	WININET_E_REDIRECT_SCHEME_CHANGE                                          Handle        = 0x80072F10
+	WININET_E_DIALOG_PENDING                                                  Handle        = 0x80072F11
+	WININET_E_RETRY_DIALOG                                                    Handle        = 0x80072F12
+	WININET_E_NO_NEW_CONTAINERS                                               Handle        = 0x80072F13
+	WININET_E_HTTPS_HTTP_SUBMIT_REDIR                                         Handle        = 0x80072F14
+	WININET_E_SEC_CERT_ERRORS                                                 Handle        = 0x80072F17
+	WININET_E_SEC_CERT_REV_FAILED                                             Handle        = 0x80072F19
+	WININET_E_HEADER_NOT_FOUND                                                Handle        = 0x80072F76
+	WININET_E_DOWNLEVEL_SERVER                                                Handle        = 0x80072F77
+	WININET_E_INVALID_SERVER_RESPONSE                                         Handle        = 0x80072F78
+	WININET_E_INVALID_HEADER                                                  Handle        = 0x80072F79
+	WININET_E_INVALID_QUERY_REQUEST                                           Handle        = 0x80072F7A
+	WININET_E_HEADER_ALREADY_EXISTS                                           Handle        = 0x80072F7B
+	WININET_E_REDIRECT_FAILED                                                 Handle        = 0x80072F7C
+	WININET_E_SECURITY_CHANNEL_ERROR                                          Handle        = 0x80072F7D
+	WININET_E_UNABLE_TO_CACHE_FILE                                            Handle        = 0x80072F7E
+	WININET_E_TCPIP_NOT_INSTALLED                                             Handle        = 0x80072F7F
+	WININET_E_DISCONNECTED                                                    Handle        = 0x80072F83
+	WININET_E_SERVER_UNREACHABLE                                              Handle        = 0x80072F84
+	WININET_E_PROXY_SERVER_UNREACHABLE                                        Handle        = 0x80072F85
+	WININET_E_BAD_AUTO_PROXY_SCRIPT                                           Handle        = 0x80072F86
+	WININET_E_UNABLE_TO_DOWNLOAD_SCRIPT                                       Handle        = 0x80072F87
+	WININET_E_SEC_INVALID_CERT                                                Handle        = 0x80072F89
+	WININET_E_SEC_CERT_REVOKED                                                Handle        = 0x80072F8A
+	WININET_E_FAILED_DUETOSECURITYCHECK                                       Handle        = 0x80072F8B
+	WININET_E_NOT_INITIALIZED                                                 Handle        = 0x80072F8C
+	WININET_E_LOGIN_FAILURE_DISPLAY_ENTITY_BODY                               Handle        = 0x80072F8E
+	WININET_E_DECODING_FAILED                                                 Handle        = 0x80072F8F
+	WININET_E_NOT_REDIRECTED                                                  Handle        = 0x80072F80
+	WININET_E_COOKIE_NEEDS_CONFIRMATION                                       Handle        = 0x80072F81
+	WININET_E_COOKIE_DECLINED                                                 Handle        = 0x80072F82
+	WININET_E_REDIRECT_NEEDS_CONFIRMATION                                     Handle        = 0x80072F88
+	SQLITE_E_ERROR                                                            Handle        = 0x87AF0001
+	SQLITE_E_INTERNAL                                                         Handle        = 0x87AF0002
+	SQLITE_E_PERM                                                             Handle        = 0x87AF0003
+	SQLITE_E_ABORT                                                            Handle        = 0x87AF0004
+	SQLITE_E_BUSY                                                             Handle        = 0x87AF0005
+	SQLITE_E_LOCKED                                                           Handle        = 0x87AF0006
+	SQLITE_E_NOMEM                                                            Handle        = 0x87AF0007
+	SQLITE_E_READONLY                                                         Handle        = 0x87AF0008
+	SQLITE_E_INTERRUPT                                                        Handle        = 0x87AF0009
+	SQLITE_E_IOERR                                                            Handle        = 0x87AF000A
+	SQLITE_E_CORRUPT                                                          Handle        = 0x87AF000B
+	SQLITE_E_NOTFOUND                                                         Handle        = 0x87AF000C
+	SQLITE_E_FULL                                                             Handle        = 0x87AF000D
+	SQLITE_E_CANTOPEN                                                         Handle        = 0x87AF000E
+	SQLITE_E_PROTOCOL                                                         Handle        = 0x87AF000F
+	SQLITE_E_EMPTY                                                            Handle        = 0x87AF0010
+	SQLITE_E_SCHEMA                                                           Handle        = 0x87AF0011
+	SQLITE_E_TOOBIG                                                           Handle        = 0x87AF0012
+	SQLITE_E_CONSTRAINT                                                       Handle        = 0x87AF0013
+	SQLITE_E_MISMATCH                                                         Handle        = 0x87AF0014
+	SQLITE_E_MISUSE                                                           Handle        = 0x87AF0015
+	SQLITE_E_NOLFS                                                            Handle        = 0x87AF0016
+	SQLITE_E_AUTH                                                             Handle        = 0x87AF0017
+	SQLITE_E_FORMAT                                                           Handle        = 0x87AF0018
+	SQLITE_E_RANGE                                                            Handle        = 0x87AF0019
+	SQLITE_E_NOTADB                                                           Handle        = 0x87AF001A
+	SQLITE_E_NOTICE                                                           Handle        = 0x87AF001B
+	SQLITE_E_WARNING                                                          Handle        = 0x87AF001C
+	SQLITE_E_ROW                                                              Handle        = 0x87AF0064
+	SQLITE_E_DONE                                                             Handle        = 0x87AF0065
+	SQLITE_E_IOERR_READ                                                       Handle        = 0x87AF010A
+	SQLITE_E_IOERR_SHORT_READ                                                 Handle        = 0x87AF020A
+	SQLITE_E_IOERR_WRITE                                                      Handle        = 0x87AF030A
+	SQLITE_E_IOERR_FSYNC                                                      Handle        = 0x87AF040A
+	SQLITE_E_IOERR_DIR_FSYNC                                                  Handle        = 0x87AF050A
+	SQLITE_E_IOERR_TRUNCATE                                                   Handle        = 0x87AF060A
+	SQLITE_E_IOERR_FSTAT                                                      Handle        = 0x87AF070A
+	SQLITE_E_IOERR_UNLOCK                                                     Handle        = 0x87AF080A
+	SQLITE_E_IOERR_RDLOCK                                                     Handle        = 0x87AF090A
+	SQLITE_E_IOERR_DELETE                                                     Handle        = 0x87AF0A0A
+	SQLITE_E_IOERR_BLOCKED                                                    Handle        = 0x87AF0B0A
+	SQLITE_E_IOERR_NOMEM                                                      Handle        = 0x87AF0C0A
+	SQLITE_E_IOERR_ACCESS                                                     Handle        = 0x87AF0D0A
+	SQLITE_E_IOERR_CHECKRESERVEDLOCK                                          Handle        = 0x87AF0E0A
+	SQLITE_E_IOERR_LOCK                                                       Handle        = 0x87AF0F0A
+	SQLITE_E_IOERR_CLOSE                                                      Handle        = 0x87AF100A
+	SQLITE_E_IOERR_DIR_CLOSE                                                  Handle        = 0x87AF110A
+	SQLITE_E_IOERR_SHMOPEN                                                    Handle        = 0x87AF120A
+	SQLITE_E_IOERR_SHMSIZE                                                    Handle        = 0x87AF130A
+	SQLITE_E_IOERR_SHMLOCK                                                    Handle        = 0x87AF140A
+	SQLITE_E_IOERR_SHMMAP                                                     Handle        = 0x87AF150A
+	SQLITE_E_IOERR_SEEK                                                       Handle        = 0x87AF160A
+	SQLITE_E_IOERR_DELETE_NOENT                                               Handle        = 0x87AF170A
+	SQLITE_E_IOERR_MMAP                                                       Handle        = 0x87AF180A
+	SQLITE_E_IOERR_GETTEMPPATH                                                Handle        = 0x87AF190A
+	SQLITE_E_IOERR_CONVPATH                                                   Handle        = 0x87AF1A0A
+	SQLITE_E_IOERR_VNODE                                                      Handle        = 0x87AF1A02
+	SQLITE_E_IOERR_AUTH                                                       Handle        = 0x87AF1A03
+	SQLITE_E_LOCKED_SHAREDCACHE                                               Handle        = 0x87AF0106
+	SQLITE_E_BUSY_RECOVERY                                                    Handle        = 0x87AF0105
+	SQLITE_E_BUSY_SNAPSHOT                                                    Handle        = 0x87AF0205
+	SQLITE_E_CANTOPEN_NOTEMPDIR                                               Handle        = 0x87AF010E
+	SQLITE_E_CANTOPEN_ISDIR                                                   Handle        = 0x87AF020E
+	SQLITE_E_CANTOPEN_FULLPATH                                                Handle        = 0x87AF030E
+	SQLITE_E_CANTOPEN_CONVPATH                                                Handle        = 0x87AF040E
+	SQLITE_E_CORRUPT_VTAB                                                     Handle        = 0x87AF010B
+	SQLITE_E_READONLY_RECOVERY                                                Handle        = 0x87AF0108
+	SQLITE_E_READONLY_CANTLOCK                                                Handle        = 0x87AF0208
+	SQLITE_E_READONLY_ROLLBACK                                                Handle        = 0x87AF0308
+	SQLITE_E_READONLY_DBMOVED                                                 Handle        = 0x87AF0408
+	SQLITE_E_ABORT_ROLLBACK                                                   Handle        = 0x87AF0204
+	SQLITE_E_CONSTRAINT_CHECK                                                 Handle        = 0x87AF0113
+	SQLITE_E_CONSTRAINT_COMMITHOOK                                            Handle        = 0x87AF0213
+	SQLITE_E_CONSTRAINT_FOREIGNKEY                                            Handle        = 0x87AF0313
+	SQLITE_E_CONSTRAINT_FUNCTION                                              Handle        = 0x87AF0413
+	SQLITE_E_CONSTRAINT_NOTNULL                                               Handle        = 0x87AF0513
+	SQLITE_E_CONSTRAINT_PRIMARYKEY                                            Handle        = 0x87AF0613
+	SQLITE_E_CONSTRAINT_TRIGGER                                               Handle        = 0x87AF0713
+	SQLITE_E_CONSTRAINT_UNIQUE                                                Handle        = 0x87AF0813
+	SQLITE_E_CONSTRAINT_VTAB                                                  Handle        = 0x87AF0913
+	SQLITE_E_CONSTRAINT_ROWID                                                 Handle        = 0x87AF0A13
+	SQLITE_E_NOTICE_RECOVER_WAL                                               Handle        = 0x87AF011B
+	SQLITE_E_NOTICE_RECOVER_ROLLBACK                                          Handle        = 0x87AF021B
+	SQLITE_E_WARNING_AUTOINDEX                                                Handle        = 0x87AF011C
+	UTC_E_TOGGLE_TRACE_STARTED                                                Handle        = 0x87C51001
+	UTC_E_ALTERNATIVE_TRACE_CANNOT_PREEMPT                                    Handle        = 0x87C51002
+	UTC_E_AOT_NOT_RUNNING                                                     Handle        = 0x87C51003
+	UTC_E_SCRIPT_TYPE_INVALID                                                 Handle        = 0x87C51004
+	UTC_E_SCENARIODEF_NOT_FOUND                                               Handle        = 0x87C51005
+	UTC_E_TRACEPROFILE_NOT_FOUND                                              Handle        = 0x87C51006
+	UTC_E_FORWARDER_ALREADY_ENABLED                                           Handle        = 0x87C51007
+	UTC_E_FORWARDER_ALREADY_DISABLED                                          Handle        = 0x87C51008
+	UTC_E_EVENTLOG_ENTRY_MALFORMED                                            Handle        = 0x87C51009
+	UTC_E_DIAGRULES_SCHEMAVERSION_MISMATCH                                    Handle        = 0x87C5100A
+	UTC_E_SCRIPT_TERMINATED                                                   Handle        = 0x87C5100B
+	UTC_E_INVALID_CUSTOM_FILTER                                               Handle        = 0x87C5100C
+	UTC_E_TRACE_NOT_RUNNING                                                   Handle        = 0x87C5100D
+	UTC_E_REESCALATED_TOO_QUICKLY                                             Handle        = 0x87C5100E
+	UTC_E_ESCALATION_ALREADY_RUNNING                                          Handle        = 0x87C5100F
+	UTC_E_PERFTRACK_ALREADY_TRACING                                           Handle        = 0x87C51010
+	UTC_E_REACHED_MAX_ESCALATIONS                                             Handle        = 0x87C51011
+	UTC_E_FORWARDER_PRODUCER_MISMATCH                                         Handle        = 0x87C51012
+	UTC_E_INTENTIONAL_SCRIPT_FAILURE                                          Handle        = 0x87C51013
+	UTC_E_SQM_INIT_FAILED                                                     Handle        = 0x87C51014
+	UTC_E_NO_WER_LOGGER_SUPPORTED                                             Handle        = 0x87C51015
+	UTC_E_TRACERS_DONT_EXIST                                                  Handle        = 0x87C51016
+	UTC_E_WINRT_INIT_FAILED                                                   Handle        = 0x87C51017
+	UTC_E_SCENARIODEF_SCHEMAVERSION_MISMATCH                                  Handle        = 0x87C51018
+	UTC_E_INVALID_FILTER                                                      Handle        = 0x87C51019
+	UTC_E_EXE_TERMINATED                                                      Handle        = 0x87C5101A
+	UTC_E_ESCALATION_NOT_AUTHORIZED                                           Handle        = 0x87C5101B
+	UTC_E_SETUP_NOT_AUTHORIZED                                                Handle        = 0x87C5101C
+	UTC_E_CHILD_PROCESS_FAILED                                                Handle        = 0x87C5101D
+	UTC_E_COMMAND_LINE_NOT_AUTHORIZED                                         Handle        = 0x87C5101E
+	UTC_E_CANNOT_LOAD_SCENARIO_EDITOR_XML                                     Handle        = 0x87C5101F
+	UTC_E_ESCALATION_TIMED_OUT                                                Handle        = 0x87C51020
+	UTC_E_SETUP_TIMED_OUT                                                     Handle        = 0x87C51021
+	UTC_E_TRIGGER_MISMATCH                                                    Handle        = 0x87C51022
+	UTC_E_TRIGGER_NOT_FOUND                                                   Handle        = 0x87C51023
+	UTC_E_SIF_NOT_SUPPORTED                                                   Handle        = 0x87C51024
+	UTC_E_DELAY_TERMINATED                                                    Handle        = 0x87C51025
+	UTC_E_DEVICE_TICKET_ERROR                                                 Handle        = 0x87C51026
+	UTC_E_TRACE_BUFFER_LIMIT_EXCEEDED                                         Handle        = 0x87C51027
+	UTC_E_API_RESULT_UNAVAILABLE                                              Handle        = 0x87C51028
+	UTC_E_RPC_TIMEOUT                                                         Handle        = 0x87C51029
+	UTC_E_RPC_WAIT_FAILED                                                     Handle        = 0x87C5102A
+	UTC_E_API_BUSY                                                            Handle        = 0x87C5102B
+	UTC_E_TRACE_MIN_DURATION_REQUIREMENT_NOT_MET                              Handle        = 0x87C5102C
+	UTC_E_EXCLUSIVITY_NOT_AVAILABLE                                           Handle        = 0x87C5102D
+	UTC_E_GETFILE_FILE_PATH_NOT_APPROVED                                      Handle        = 0x87C5102E
+	UTC_E_ESCALATION_DIRECTORY_ALREADY_EXISTS                                 Handle        = 0x87C5102F
+	UTC_E_TIME_TRIGGER_ON_START_INVALID                                       Handle        = 0x87C51030
+	UTC_E_TIME_TRIGGER_ONLY_VALID_ON_SINGLE_TRANSITION                        Handle        = 0x87C51031
+	UTC_E_TIME_TRIGGER_INVALID_TIME_RANGE                                     Handle        = 0x87C51032
+	UTC_E_MULTIPLE_TIME_TRIGGER_ON_SINGLE_STATE                               Handle        = 0x87C51033
+	UTC_E_BINARY_MISSING                                                      Handle        = 0x87C51034
+	UTC_E_NETWORK_CAPTURE_NOT_ALLOWED                                         Handle        = 0x87C51035
+	UTC_E_FAILED_TO_RESOLVE_CONTAINER_ID                                      Handle        = 0x87C51036
+	UTC_E_UNABLE_TO_RESOLVE_SESSION                                           Handle        = 0x87C51037
+	UTC_E_THROTTLED                                                           Handle        = 0x87C51038
+	UTC_E_UNAPPROVED_SCRIPT                                                   Handle        = 0x87C51039
+	UTC_E_SCRIPT_MISSING                                                      Handle        = 0x87C5103A
+	UTC_E_SCENARIO_THROTTLED                                                  Handle        = 0x87C5103B
+	UTC_E_API_NOT_SUPPORTED                                                   Handle        = 0x87C5103C
+	UTC_E_GETFILE_EXTERNAL_PATH_NOT_APPROVED                                  Handle        = 0x87C5103D
+	UTC_E_TRY_GET_SCENARIO_TIMEOUT_EXCEEDED                                   Handle        = 0x87C5103E
+	UTC_E_CERT_REV_FAILED                                                     Handle        = 0x87C5103F
+	UTC_E_FAILED_TO_START_NDISCAP                                             Handle        = 0x87C51040
+	UTC_E_KERNELDUMP_LIMIT_REACHED                                            Handle        = 0x87C51041
+	UTC_E_MISSING_AGGREGATE_EVENT_TAG                                         Handle        = 0x87C51042
+	UTC_E_INVALID_AGGREGATION_STRUCT                                          Handle        = 0x87C51043
+	UTC_E_ACTION_NOT_SUPPORTED_IN_DESTINATION                                 Handle        = 0x87C51044
+	UTC_E_FILTER_MISSING_ATTRIBUTE                                            Handle        = 0x87C51045
+	UTC_E_FILTER_INVALID_TYPE                                                 Handle        = 0x87C51046
+	UTC_E_FILTER_VARIABLE_NOT_FOUND                                           Handle        = 0x87C51047
+	UTC_E_FILTER_FUNCTION_RESTRICTED                                          Handle        = 0x87C51048
+	UTC_E_FILTER_VERSION_MISMATCH                                             Handle        = 0x87C51049
+	UTC_E_FILTER_INVALID_FUNCTION                                             Handle        = 0x87C51050
+	UTC_E_FILTER_INVALID_FUNCTION_PARAMS                                      Handle        = 0x87C51051
+	UTC_E_FILTER_INVALID_COMMAND                                              Handle        = 0x87C51052
+	UTC_E_FILTER_ILLEGAL_EVAL                                                 Handle        = 0x87C51053
+	UTC_E_TTTRACER_RETURNED_ERROR                                             Handle        = 0x87C51054
+	UTC_E_AGENT_DIAGNOSTICS_TOO_LARGE                                         Handle        = 0x87C51055
+	UTC_E_FAILED_TO_RECEIVE_AGENT_DIAGNOSTICS                                 Handle        = 0x87C51056
+	UTC_E_SCENARIO_HAS_NO_ACTIONS                                             Handle        = 0x87C51057
+	UTC_E_TTTRACER_STORAGE_FULL                                               Handle        = 0x87C51058
+	UTC_E_INSUFFICIENT_SPACE_TO_START_TRACE                                   Handle        = 0x87C51059
+	UTC_E_ESCALATION_CANCELLED_AT_SHUTDOWN                                    Handle        = 0x87C5105A
+	UTC_E_GETFILEINFOACTION_FILE_NOT_APPROVED                                 Handle        = 0x87C5105B
+	UTC_E_SETREGKEYACTION_TYPE_NOT_APPROVED                                   Handle        = 0x87C5105C
+	WINML_ERR_INVALID_DEVICE                                                  Handle        = 0x88900001
+	WINML_ERR_INVALID_BINDING                                                 Handle        = 0x88900002
+	WINML_ERR_VALUE_NOTFOUND                                                  Handle        = 0x88900003
+	WINML_ERR_SIZE_MISMATCH                                                   Handle        = 0x88900004
+	STATUS_WAIT_0                                                             NTStatus      = 0x00000000
+	STATUS_SUCCESS                                                            NTStatus      = 0x00000000
+	STATUS_WAIT_1                                                             NTStatus      = 0x00000001
+	STATUS_WAIT_2                                                             NTStatus      = 0x00000002
+	STATUS_WAIT_3                                                             NTStatus      = 0x00000003
+	STATUS_WAIT_63                                                            NTStatus      = 0x0000003F
+	STATUS_ABANDONED                                                          NTStatus      = 0x00000080
+	STATUS_ABANDONED_WAIT_0                                                   NTStatus      = 0x00000080
+	STATUS_ABANDONED_WAIT_63                                                  NTStatus      = 0x000000BF
+	STATUS_USER_APC                                                           NTStatus      = 0x000000C0
+	STATUS_ALREADY_COMPLETE                                                   NTStatus      = 0x000000FF
+	STATUS_KERNEL_APC                                                         NTStatus      = 0x00000100
+	STATUS_ALERTED                                                            NTStatus      = 0x00000101
+	STATUS_TIMEOUT                                                            NTStatus      = 0x00000102
+	STATUS_PENDING                                                            NTStatus      = 0x00000103
+	STATUS_REPARSE                                                            NTStatus      = 0x00000104
+	STATUS_MORE_ENTRIES                                                       NTStatus      = 0x00000105
+	STATUS_NOT_ALL_ASSIGNED                                                   NTStatus      = 0x00000106
+	STATUS_SOME_NOT_MAPPED                                                    NTStatus      = 0x00000107
+	STATUS_OPLOCK_BREAK_IN_PROGRESS                                           NTStatus      = 0x00000108
+	STATUS_VOLUME_MOUNTED                                                     NTStatus      = 0x00000109
+	STATUS_RXACT_COMMITTED                                                    NTStatus      = 0x0000010A
+	STATUS_NOTIFY_CLEANUP                                                     NTStatus      = 0x0000010B
+	STATUS_NOTIFY_ENUM_DIR                                                    NTStatus      = 0x0000010C
+	STATUS_NO_QUOTAS_FOR_ACCOUNT                                              NTStatus      = 0x0000010D
+	STATUS_PRIMARY_TRANSPORT_CONNECT_FAILED                                   NTStatus      = 0x0000010E
+	STATUS_PAGE_FAULT_TRANSITION                                              NTStatus      = 0x00000110
+	STATUS_PAGE_FAULT_DEMAND_ZERO                                             NTStatus      = 0x00000111
+	STATUS_PAGE_FAULT_COPY_ON_WRITE                                           NTStatus      = 0x00000112
+	STATUS_PAGE_FAULT_GUARD_PAGE                                              NTStatus      = 0x00000113
+	STATUS_PAGE_FAULT_PAGING_FILE                                             NTStatus      = 0x00000114
+	STATUS_CACHE_PAGE_LOCKED                                                  NTStatus      = 0x00000115
+	STATUS_CRASH_DUMP                                                         NTStatus      = 0x00000116
+	STATUS_BUFFER_ALL_ZEROS                                                   NTStatus      = 0x00000117
+	STATUS_REPARSE_OBJECT                                                     NTStatus      = 0x00000118
+	STATUS_RESOURCE_REQUIREMENTS_CHANGED                                      NTStatus      = 0x00000119
+	STATUS_TRANSLATION_COMPLETE                                               NTStatus      = 0x00000120
+	STATUS_DS_MEMBERSHIP_EVALUATED_LOCALLY                                    NTStatus      = 0x00000121
+	STATUS_NOTHING_TO_TERMINATE                                               NTStatus      = 0x00000122
+	STATUS_PROCESS_NOT_IN_JOB                                                 NTStatus      = 0x00000123
+	STATUS_PROCESS_IN_JOB                                                     NTStatus      = 0x00000124
+	STATUS_VOLSNAP_HIBERNATE_READY                                            NTStatus      = 0x00000125
+	STATUS_FSFILTER_OP_COMPLETED_SUCCESSFULLY                                 NTStatus      = 0x00000126
+	STATUS_INTERRUPT_VECTOR_ALREADY_CONNECTED                                 NTStatus      = 0x00000127
+	STATUS_INTERRUPT_STILL_CONNECTED                                          NTStatus      = 0x00000128
+	STATUS_PROCESS_CLONED                                                     NTStatus      = 0x00000129
+	STATUS_FILE_LOCKED_WITH_ONLY_READERS                                      NTStatus      = 0x0000012A
+	STATUS_FILE_LOCKED_WITH_WRITERS                                           NTStatus      = 0x0000012B
+	STATUS_VALID_IMAGE_HASH                                                   NTStatus      = 0x0000012C
+	STATUS_VALID_CATALOG_HASH                                                 NTStatus      = 0x0000012D
+	STATUS_VALID_STRONG_CODE_HASH                                             NTStatus      = 0x0000012E
+	STATUS_GHOSTED                                                            NTStatus      = 0x0000012F
+	STATUS_DATA_OVERWRITTEN                                                   NTStatus      = 0x00000130
+	STATUS_RESOURCEMANAGER_READ_ONLY                                          NTStatus      = 0x00000202
+	STATUS_RING_PREVIOUSLY_EMPTY                                              NTStatus      = 0x00000210
+	STATUS_RING_PREVIOUSLY_FULL                                               NTStatus      = 0x00000211
+	STATUS_RING_PREVIOUSLY_ABOVE_QUOTA                                        NTStatus      = 0x00000212
+	STATUS_RING_NEWLY_EMPTY                                                   NTStatus      = 0x00000213
+	STATUS_RING_SIGNAL_OPPOSITE_ENDPOINT                                      NTStatus      = 0x00000214
+	STATUS_OPLOCK_SWITCHED_TO_NEW_HANDLE                                      NTStatus      = 0x00000215
+	STATUS_OPLOCK_HANDLE_CLOSED                                               NTStatus      = 0x00000216
+	STATUS_WAIT_FOR_OPLOCK                                                    NTStatus      = 0x00000367
+	STATUS_REPARSE_GLOBAL                                                     NTStatus      = 0x00000368
+	STATUS_FLT_IO_COMPLETE                                                    NTStatus      = 0x001C0001
+	STATUS_OBJECT_NAME_EXISTS                                                 NTStatus      = 0x40000000
+	STATUS_THREAD_WAS_SUSPENDED                                               NTStatus      = 0x40000001
+	STATUS_WORKING_SET_LIMIT_RANGE                                            NTStatus      = 0x40000002
+	STATUS_IMAGE_NOT_AT_BASE                                                  NTStatus      = 0x40000003
+	STATUS_RXACT_STATE_CREATED                                                NTStatus      = 0x40000004
+	STATUS_SEGMENT_NOTIFICATION                                               NTStatus      = 0x40000005
+	STATUS_LOCAL_USER_SESSION_KEY                                             NTStatus      = 0x40000006
+	STATUS_BAD_CURRENT_DIRECTORY                                              NTStatus      = 0x40000007
+	STATUS_SERIAL_MORE_WRITES                                                 NTStatus      = 0x40000008
+	STATUS_REGISTRY_RECOVERED                                                 NTStatus      = 0x40000009
+	STATUS_FT_READ_RECOVERY_FROM_BACKUP                                       NTStatus      = 0x4000000A
+	STATUS_FT_WRITE_RECOVERY                                                  NTStatus      = 0x4000000B
+	STATUS_SERIAL_COUNTER_TIMEOUT                                             NTStatus      = 0x4000000C
+	STATUS_NULL_LM_PASSWORD                                                   NTStatus      = 0x4000000D
+	STATUS_IMAGE_MACHINE_TYPE_MISMATCH                                        NTStatus      = 0x4000000E
+	STATUS_RECEIVE_PARTIAL                                                    NTStatus      = 0x4000000F
+	STATUS_RECEIVE_EXPEDITED                                                  NTStatus      = 0x40000010
+	STATUS_RECEIVE_PARTIAL_EXPEDITED                                          NTStatus      = 0x40000011
+	STATUS_EVENT_DONE                                                         NTStatus      = 0x40000012
+	STATUS_EVENT_PENDING                                                      NTStatus      = 0x40000013
+	STATUS_CHECKING_FILE_SYSTEM                                               NTStatus      = 0x40000014
+	STATUS_FATAL_APP_EXIT                                                     NTStatus      = 0x40000015
+	STATUS_PREDEFINED_HANDLE                                                  NTStatus      = 0x40000016
+	STATUS_WAS_UNLOCKED                                                       NTStatus      = 0x40000017
+	STATUS_SERVICE_NOTIFICATION                                               NTStatus      = 0x40000018
+	STATUS_WAS_LOCKED                                                         NTStatus      = 0x40000019
+	STATUS_LOG_HARD_ERROR                                                     NTStatus      = 0x4000001A
+	STATUS_ALREADY_WIN32                                                      NTStatus      = 0x4000001B
+	STATUS_WX86_UNSIMULATE                                                    NTStatus      = 0x4000001C
+	STATUS_WX86_CONTINUE                                                      NTStatus      = 0x4000001D
+	STATUS_WX86_SINGLE_STEP                                                   NTStatus      = 0x4000001E
+	STATUS_WX86_BREAKPOINT                                                    NTStatus      = 0x4000001F
+	STATUS_WX86_EXCEPTION_CONTINUE                                            NTStatus      = 0x40000020
+	STATUS_WX86_EXCEPTION_LASTCHANCE                                          NTStatus      = 0x40000021
+	STATUS_WX86_EXCEPTION_CHAIN                                               NTStatus      = 0x40000022
+	STATUS_IMAGE_MACHINE_TYPE_MISMATCH_EXE                                    NTStatus      = 0x40000023
+	STATUS_NO_YIELD_PERFORMED                                                 NTStatus      = 0x40000024
+	STATUS_TIMER_RESUME_IGNORED                                               NTStatus      = 0x40000025
+	STATUS_ARBITRATION_UNHANDLED                                              NTStatus      = 0x40000026
+	STATUS_CARDBUS_NOT_SUPPORTED                                              NTStatus      = 0x40000027
+	STATUS_WX86_CREATEWX86TIB                                                 NTStatus      = 0x40000028
+	STATUS_MP_PROCESSOR_MISMATCH                                              NTStatus      = 0x40000029
+	STATUS_HIBERNATED                                                         NTStatus      = 0x4000002A
+	STATUS_RESUME_HIBERNATION                                                 NTStatus      = 0x4000002B
+	STATUS_FIRMWARE_UPDATED                                                   NTStatus      = 0x4000002C
+	STATUS_DRIVERS_LEAKING_LOCKED_PAGES                                       NTStatus      = 0x4000002D
+	STATUS_MESSAGE_RETRIEVED                                                  NTStatus      = 0x4000002E
+	STATUS_SYSTEM_POWERSTATE_TRANSITION                                       NTStatus      = 0x4000002F
+	STATUS_ALPC_CHECK_COMPLETION_LIST                                         NTStatus      = 0x40000030
+	STATUS_SYSTEM_POWERSTATE_COMPLEX_TRANSITION                               NTStatus      = 0x40000031
+	STATUS_ACCESS_AUDIT_BY_POLICY                                             NTStatus      = 0x40000032
+	STATUS_ABANDON_HIBERFILE                                                  NTStatus      = 0x40000033
+	STATUS_BIZRULES_NOT_ENABLED                                               NTStatus      = 0x40000034
+	STATUS_FT_READ_FROM_COPY                                                  NTStatus      = 0x40000035
+	STATUS_IMAGE_AT_DIFFERENT_BASE                                            NTStatus      = 0x40000036
+	STATUS_PATCH_DEFERRED                                                     NTStatus      = 0x40000037
+	STATUS_HEURISTIC_DAMAGE_POSSIBLE                                          NTStatus      = 0x40190001
+	STATUS_GUARD_PAGE_VIOLATION                                               NTStatus      = 0x80000001
+	STATUS_DATATYPE_MISALIGNMENT                                              NTStatus      = 0x80000002
+	STATUS_BREAKPOINT                                                         NTStatus      = 0x80000003
+	STATUS_SINGLE_STEP                                                        NTStatus      = 0x80000004
+	STATUS_BUFFER_OVERFLOW                                                    NTStatus      = 0x80000005
+	STATUS_NO_MORE_FILES                                                      NTStatus      = 0x80000006
+	STATUS_WAKE_SYSTEM_DEBUGGER                                               NTStatus      = 0x80000007
+	STATUS_HANDLES_CLOSED                                                     NTStatus      = 0x8000000A
+	STATUS_NO_INHERITANCE                                                     NTStatus      = 0x8000000B
+	STATUS_GUID_SUBSTITUTION_MADE                                             NTStatus      = 0x8000000C
+	STATUS_PARTIAL_COPY                                                       NTStatus      = 0x8000000D
+	STATUS_DEVICE_PAPER_EMPTY                                                 NTStatus      = 0x8000000E
+	STATUS_DEVICE_POWERED_OFF                                                 NTStatus      = 0x8000000F
+	STATUS_DEVICE_OFF_LINE                                                    NTStatus      = 0x80000010
+	STATUS_DEVICE_BUSY                                                        NTStatus      = 0x80000011
+	STATUS_NO_MORE_EAS                                                        NTStatus      = 0x80000012
+	STATUS_INVALID_EA_NAME                                                    NTStatus      = 0x80000013
+	STATUS_EA_LIST_INCONSISTENT                                               NTStatus      = 0x80000014
+	STATUS_INVALID_EA_FLAG                                                    NTStatus      = 0x80000015
+	STATUS_VERIFY_REQUIRED                                                    NTStatus      = 0x80000016
+	STATUS_EXTRANEOUS_INFORMATION                                             NTStatus      = 0x80000017
+	STATUS_RXACT_COMMIT_NECESSARY                                             NTStatus      = 0x80000018
+	STATUS_NO_MORE_ENTRIES                                                    NTStatus      = 0x8000001A
+	STATUS_FILEMARK_DETECTED                                                  NTStatus      = 0x8000001B
+	STATUS_MEDIA_CHANGED                                                      NTStatus      = 0x8000001C
+	STATUS_BUS_RESET                                                          NTStatus      = 0x8000001D
+	STATUS_END_OF_MEDIA                                                       NTStatus      = 0x8000001E
+	STATUS_BEGINNING_OF_MEDIA                                                 NTStatus      = 0x8000001F
+	STATUS_MEDIA_CHECK                                                        NTStatus      = 0x80000020
+	STATUS_SETMARK_DETECTED                                                   NTStatus      = 0x80000021
+	STATUS_NO_DATA_DETECTED                                                   NTStatus      = 0x80000022
+	STATUS_REDIRECTOR_HAS_OPEN_HANDLES                                        NTStatus      = 0x80000023
+	STATUS_SERVER_HAS_OPEN_HANDLES                                            NTStatus      = 0x80000024
+	STATUS_ALREADY_DISCONNECTED                                               NTStatus      = 0x80000025
+	STATUS_LONGJUMP                                                           NTStatus      = 0x80000026
+	STATUS_CLEANER_CARTRIDGE_INSTALLED                                        NTStatus      = 0x80000027
+	STATUS_PLUGPLAY_QUERY_VETOED                                              NTStatus      = 0x80000028
+	STATUS_UNWIND_CONSOLIDATE                                                 NTStatus      = 0x80000029
+	STATUS_REGISTRY_HIVE_RECOVERED                                            NTStatus      = 0x8000002A
+	STATUS_DLL_MIGHT_BE_INSECURE                                              NTStatus      = 0x8000002B
+	STATUS_DLL_MIGHT_BE_INCOMPATIBLE                                          NTStatus      = 0x8000002C
+	STATUS_STOPPED_ON_SYMLINK                                                 NTStatus      = 0x8000002D
+	STATUS_CANNOT_GRANT_REQUESTED_OPLOCK                                      NTStatus      = 0x8000002E
+	STATUS_NO_ACE_CONDITION                                                   NTStatus      = 0x8000002F
+	STATUS_DEVICE_SUPPORT_IN_PROGRESS                                         NTStatus      = 0x80000030
+	STATUS_DEVICE_POWER_CYCLE_REQUIRED                                        NTStatus      = 0x80000031
+	STATUS_NO_WORK_DONE                                                       NTStatus      = 0x80000032
+	STATUS_CLUSTER_NODE_ALREADY_UP                                            NTStatus      = 0x80130001
+	STATUS_CLUSTER_NODE_ALREADY_DOWN                                          NTStatus      = 0x80130002
+	STATUS_CLUSTER_NETWORK_ALREADY_ONLINE                                     NTStatus      = 0x80130003
+	STATUS_CLUSTER_NETWORK_ALREADY_OFFLINE                                    NTStatus      = 0x80130004
+	STATUS_CLUSTER_NODE_ALREADY_MEMBER                                        NTStatus      = 0x80130005
+	STATUS_FLT_BUFFER_TOO_SMALL                                               NTStatus      = 0x801C0001
+	STATUS_FVE_PARTIAL_METADATA                                               NTStatus      = 0x80210001
+	STATUS_FVE_TRANSIENT_STATE                                                NTStatus      = 0x80210002
+	STATUS_CLOUD_FILE_PROPERTY_BLOB_CHECKSUM_MISMATCH                         NTStatus      = 0x8000CF00
+	STATUS_UNSUCCESSFUL                                                       NTStatus      = 0xC0000001
+	STATUS_NOT_IMPLEMENTED                                                    NTStatus      = 0xC0000002
+	STATUS_INVALID_INFO_CLASS                                                 NTStatus      = 0xC0000003
+	STATUS_INFO_LENGTH_MISMATCH                                               NTStatus      = 0xC0000004
+	STATUS_ACCESS_VIOLATION                                                   NTStatus      = 0xC0000005
+	STATUS_IN_PAGE_ERROR                                                      NTStatus      = 0xC0000006
+	STATUS_PAGEFILE_QUOTA                                                     NTStatus      = 0xC0000007
+	STATUS_INVALID_HANDLE                                                     NTStatus      = 0xC0000008
+	STATUS_BAD_INITIAL_STACK                                                  NTStatus      = 0xC0000009
+	STATUS_BAD_INITIAL_PC                                                     NTStatus      = 0xC000000A
+	STATUS_INVALID_CID                                                        NTStatus      = 0xC000000B
+	STATUS_TIMER_NOT_CANCELED                                                 NTStatus      = 0xC000000C
+	STATUS_INVALID_PARAMETER                                                  NTStatus      = 0xC000000D
+	STATUS_NO_SUCH_DEVICE                                                     NTStatus      = 0xC000000E
+	STATUS_NO_SUCH_FILE                                                       NTStatus      = 0xC000000F
+	STATUS_INVALID_DEVICE_REQUEST                                             NTStatus      = 0xC0000010
+	STATUS_END_OF_FILE                                                        NTStatus      = 0xC0000011
+	STATUS_WRONG_VOLUME                                                       NTStatus      = 0xC0000012
+	STATUS_NO_MEDIA_IN_DEVICE                                                 NTStatus      = 0xC0000013
+	STATUS_UNRECOGNIZED_MEDIA                                                 NTStatus      = 0xC0000014
+	STATUS_NONEXISTENT_SECTOR                                                 NTStatus      = 0xC0000015
+	STATUS_MORE_PROCESSING_REQUIRED                                           NTStatus      = 0xC0000016
+	STATUS_NO_MEMORY                                                          NTStatus      = 0xC0000017
+	STATUS_CONFLICTING_ADDRESSES                                              NTStatus      = 0xC0000018
+	STATUS_NOT_MAPPED_VIEW                                                    NTStatus      = 0xC0000019
+	STATUS_UNABLE_TO_FREE_VM                                                  NTStatus      = 0xC000001A
+	STATUS_UNABLE_TO_DELETE_SECTION                                           NTStatus      = 0xC000001B
+	STATUS_INVALID_SYSTEM_SERVICE                                             NTStatus      = 0xC000001C
+	STATUS_ILLEGAL_INSTRUCTION                                                NTStatus      = 0xC000001D
+	STATUS_INVALID_LOCK_SEQUENCE                                              NTStatus      = 0xC000001E
+	STATUS_INVALID_VIEW_SIZE                                                  NTStatus      = 0xC000001F
+	STATUS_INVALID_FILE_FOR_SECTION                                           NTStatus      = 0xC0000020
+	STATUS_ALREADY_COMMITTED                                                  NTStatus      = 0xC0000021
+	STATUS_ACCESS_DENIED                                                      NTStatus      = 0xC0000022
+	STATUS_BUFFER_TOO_SMALL                                                   NTStatus      = 0xC0000023
+	STATUS_OBJECT_TYPE_MISMATCH                                               NTStatus      = 0xC0000024
+	STATUS_NONCONTINUABLE_EXCEPTION                                           NTStatus      = 0xC0000025
+	STATUS_INVALID_DISPOSITION                                                NTStatus      = 0xC0000026
+	STATUS_UNWIND                                                             NTStatus      = 0xC0000027
+	STATUS_BAD_STACK                                                          NTStatus      = 0xC0000028
+	STATUS_INVALID_UNWIND_TARGET                                              NTStatus      = 0xC0000029
+	STATUS_NOT_LOCKED                                                         NTStatus      = 0xC000002A
+	STATUS_PARITY_ERROR                                                       NTStatus      = 0xC000002B
+	STATUS_UNABLE_TO_DECOMMIT_VM                                              NTStatus      = 0xC000002C
+	STATUS_NOT_COMMITTED                                                      NTStatus      = 0xC000002D
+	STATUS_INVALID_PORT_ATTRIBUTES                                            NTStatus      = 0xC000002E
+	STATUS_PORT_MESSAGE_TOO_LONG                                              NTStatus      = 0xC000002F
+	STATUS_INVALID_PARAMETER_MIX                                              NTStatus      = 0xC0000030
+	STATUS_INVALID_QUOTA_LOWER                                                NTStatus      = 0xC0000031
+	STATUS_DISK_CORRUPT_ERROR                                                 NTStatus      = 0xC0000032
+	STATUS_OBJECT_NAME_INVALID                                                NTStatus      = 0xC0000033
+	STATUS_OBJECT_NAME_NOT_FOUND                                              NTStatus      = 0xC0000034
+	STATUS_OBJECT_NAME_COLLISION                                              NTStatus      = 0xC0000035
+	STATUS_PORT_DO_NOT_DISTURB                                                NTStatus      = 0xC0000036
+	STATUS_PORT_DISCONNECTED                                                  NTStatus      = 0xC0000037
+	STATUS_DEVICE_ALREADY_ATTACHED                                            NTStatus      = 0xC0000038
+	STATUS_OBJECT_PATH_INVALID                                                NTStatus      = 0xC0000039
+	STATUS_OBJECT_PATH_NOT_FOUND                                              NTStatus      = 0xC000003A
+	STATUS_OBJECT_PATH_SYNTAX_BAD                                             NTStatus      = 0xC000003B
+	STATUS_DATA_OVERRUN                                                       NTStatus      = 0xC000003C
+	STATUS_DATA_LATE_ERROR                                                    NTStatus      = 0xC000003D
+	STATUS_DATA_ERROR                                                         NTStatus      = 0xC000003E
+	STATUS_CRC_ERROR                                                          NTStatus      = 0xC000003F
+	STATUS_SECTION_TOO_BIG                                                    NTStatus      = 0xC0000040
+	STATUS_PORT_CONNECTION_REFUSED                                            NTStatus      = 0xC0000041
+	STATUS_INVALID_PORT_HANDLE                                                NTStatus      = 0xC0000042
+	STATUS_SHARING_VIOLATION                                                  NTStatus      = 0xC0000043
+	STATUS_QUOTA_EXCEEDED                                                     NTStatus      = 0xC0000044
+	STATUS_INVALID_PAGE_PROTECTION                                            NTStatus      = 0xC0000045
+	STATUS_MUTANT_NOT_OWNED                                                   NTStatus      = 0xC0000046
+	STATUS_SEMAPHORE_LIMIT_EXCEEDED                                           NTStatus      = 0xC0000047
+	STATUS_PORT_ALREADY_SET                                                   NTStatus      = 0xC0000048
+	STATUS_SECTION_NOT_IMAGE                                                  NTStatus      = 0xC0000049
+	STATUS_SUSPEND_COUNT_EXCEEDED                                             NTStatus      = 0xC000004A
+	STATUS_THREAD_IS_TERMINATING                                              NTStatus      = 0xC000004B
+	STATUS_BAD_WORKING_SET_LIMIT                                              NTStatus      = 0xC000004C
+	STATUS_INCOMPATIBLE_FILE_MAP                                              NTStatus      = 0xC000004D
+	STATUS_SECTION_PROTECTION                                                 NTStatus      = 0xC000004E
+	STATUS_EAS_NOT_SUPPORTED                                                  NTStatus      = 0xC000004F
+	STATUS_EA_TOO_LARGE                                                       NTStatus      = 0xC0000050
+	STATUS_NONEXISTENT_EA_ENTRY                                               NTStatus      = 0xC0000051
+	STATUS_NO_EAS_ON_FILE                                                     NTStatus      = 0xC0000052
+	STATUS_EA_CORRUPT_ERROR                                                   NTStatus      = 0xC0000053
+	STATUS_FILE_LOCK_CONFLICT                                                 NTStatus      = 0xC0000054
+	STATUS_LOCK_NOT_GRANTED                                                   NTStatus      = 0xC0000055
+	STATUS_DELETE_PENDING                                                     NTStatus      = 0xC0000056
+	STATUS_CTL_FILE_NOT_SUPPORTED                                             NTStatus      = 0xC0000057
+	STATUS_UNKNOWN_REVISION                                                   NTStatus      = 0xC0000058
+	STATUS_REVISION_MISMATCH                                                  NTStatus      = 0xC0000059
+	STATUS_INVALID_OWNER                                                      NTStatus      = 0xC000005A
+	STATUS_INVALID_PRIMARY_GROUP                                              NTStatus      = 0xC000005B
+	STATUS_NO_IMPERSONATION_TOKEN                                             NTStatus      = 0xC000005C
+	STATUS_CANT_DISABLE_MANDATORY                                             NTStatus      = 0xC000005D
+	STATUS_NO_LOGON_SERVERS                                                   NTStatus      = 0xC000005E
+	STATUS_NO_SUCH_LOGON_SESSION                                              NTStatus      = 0xC000005F
+	STATUS_NO_SUCH_PRIVILEGE                                                  NTStatus      = 0xC0000060
+	STATUS_PRIVILEGE_NOT_HELD                                                 NTStatus      = 0xC0000061
+	STATUS_INVALID_ACCOUNT_NAME                                               NTStatus      = 0xC0000062
+	STATUS_USER_EXISTS                                                        NTStatus      = 0xC0000063
+	STATUS_NO_SUCH_USER                                                       NTStatus      = 0xC0000064
+	STATUS_GROUP_EXISTS                                                       NTStatus      = 0xC0000065
+	STATUS_NO_SUCH_GROUP                                                      NTStatus      = 0xC0000066
+	STATUS_MEMBER_IN_GROUP                                                    NTStatus      = 0xC0000067
+	STATUS_MEMBER_NOT_IN_GROUP                                                NTStatus      = 0xC0000068
+	STATUS_LAST_ADMIN                                                         NTStatus      = 0xC0000069
+	STATUS_WRONG_PASSWORD                                                     NTStatus      = 0xC000006A
+	STATUS_ILL_FORMED_PASSWORD                                                NTStatus      = 0xC000006B
+	STATUS_PASSWORD_RESTRICTION                                               NTStatus      = 0xC000006C
+	STATUS_LOGON_FAILURE                                                      NTStatus      = 0xC000006D
+	STATUS_ACCOUNT_RESTRICTION                                                NTStatus      = 0xC000006E
+	STATUS_INVALID_LOGON_HOURS                                                NTStatus      = 0xC000006F
+	STATUS_INVALID_WORKSTATION                                                NTStatus      = 0xC0000070
+	STATUS_PASSWORD_EXPIRED                                                   NTStatus      = 0xC0000071
+	STATUS_ACCOUNT_DISABLED                                                   NTStatus      = 0xC0000072
+	STATUS_NONE_MAPPED                                                        NTStatus      = 0xC0000073
+	STATUS_TOO_MANY_LUIDS_REQUESTED                                           NTStatus      = 0xC0000074
+	STATUS_LUIDS_EXHAUSTED                                                    NTStatus      = 0xC0000075
+	STATUS_INVALID_SUB_AUTHORITY                                              NTStatus      = 0xC0000076
+	STATUS_INVALID_ACL                                                        NTStatus      = 0xC0000077
+	STATUS_INVALID_SID                                                        NTStatus      = 0xC0000078
+	STATUS_INVALID_SECURITY_DESCR                                             NTStatus      = 0xC0000079
+	STATUS_PROCEDURE_NOT_FOUND                                                NTStatus      = 0xC000007A
+	STATUS_INVALID_IMAGE_FORMAT                                               NTStatus      = 0xC000007B
+	STATUS_NO_TOKEN                                                           NTStatus      = 0xC000007C
+	STATUS_BAD_INHERITANCE_ACL                                                NTStatus      = 0xC000007D
+	STATUS_RANGE_NOT_LOCKED                                                   NTStatus      = 0xC000007E
+	STATUS_DISK_FULL                                                          NTStatus      = 0xC000007F
+	STATUS_SERVER_DISABLED                                                    NTStatus      = 0xC0000080
+	STATUS_SERVER_NOT_DISABLED                                                NTStatus      = 0xC0000081
+	STATUS_TOO_MANY_GUIDS_REQUESTED                                           NTStatus      = 0xC0000082
+	STATUS_GUIDS_EXHAUSTED                                                    NTStatus      = 0xC0000083
+	STATUS_INVALID_ID_AUTHORITY                                               NTStatus      = 0xC0000084
+	STATUS_AGENTS_EXHAUSTED                                                   NTStatus      = 0xC0000085
+	STATUS_INVALID_VOLUME_LABEL                                               NTStatus      = 0xC0000086
+	STATUS_SECTION_NOT_EXTENDED                                               NTStatus      = 0xC0000087
+	STATUS_NOT_MAPPED_DATA                                                    NTStatus      = 0xC0000088
+	STATUS_RESOURCE_DATA_NOT_FOUND                                            NTStatus      = 0xC0000089
+	STATUS_RESOURCE_TYPE_NOT_FOUND                                            NTStatus      = 0xC000008A
+	STATUS_RESOURCE_NAME_NOT_FOUND                                            NTStatus      = 0xC000008B
+	STATUS_ARRAY_BOUNDS_EXCEEDED                                              NTStatus      = 0xC000008C
+	STATUS_FLOAT_DENORMAL_OPERAND                                             NTStatus      = 0xC000008D
+	STATUS_FLOAT_DIVIDE_BY_ZERO                                               NTStatus      = 0xC000008E
+	STATUS_FLOAT_INEXACT_RESULT                                               NTStatus      = 0xC000008F
+	STATUS_FLOAT_INVALID_OPERATION                                            NTStatus      = 0xC0000090
+	STATUS_FLOAT_OVERFLOW                                                     NTStatus      = 0xC0000091
+	STATUS_FLOAT_STACK_CHECK                                                  NTStatus      = 0xC0000092
+	STATUS_FLOAT_UNDERFLOW                                                    NTStatus      = 0xC0000093
+	STATUS_INTEGER_DIVIDE_BY_ZERO                                             NTStatus      = 0xC0000094
+	STATUS_INTEGER_OVERFLOW                                                   NTStatus      = 0xC0000095
+	STATUS_PRIVILEGED_INSTRUCTION                                             NTStatus      = 0xC0000096
+	STATUS_TOO_MANY_PAGING_FILES                                              NTStatus      = 0xC0000097
+	STATUS_FILE_INVALID                                                       NTStatus      = 0xC0000098
+	STATUS_ALLOTTED_SPACE_EXCEEDED                                            NTStatus      = 0xC0000099
+	STATUS_INSUFFICIENT_RESOURCES                                             NTStatus      = 0xC000009A
+	STATUS_DFS_EXIT_PATH_FOUND                                                NTStatus      = 0xC000009B
+	STATUS_DEVICE_DATA_ERROR                                                  NTStatus      = 0xC000009C
+	STATUS_DEVICE_NOT_CONNECTED                                               NTStatus      = 0xC000009D
+	STATUS_DEVICE_POWER_FAILURE                                               NTStatus      = 0xC000009E
+	STATUS_FREE_VM_NOT_AT_BASE                                                NTStatus      = 0xC000009F
+	STATUS_MEMORY_NOT_ALLOCATED                                               NTStatus      = 0xC00000A0
+	STATUS_WORKING_SET_QUOTA                                                  NTStatus      = 0xC00000A1
+	STATUS_MEDIA_WRITE_PROTECTED                                              NTStatus      = 0xC00000A2
+	STATUS_DEVICE_NOT_READY                                                   NTStatus      = 0xC00000A3
+	STATUS_INVALID_GROUP_ATTRIBUTES                                           NTStatus      = 0xC00000A4
+	STATUS_BAD_IMPERSONATION_LEVEL                                            NTStatus      = 0xC00000A5
+	STATUS_CANT_OPEN_ANONYMOUS                                                NTStatus      = 0xC00000A6
+	STATUS_BAD_VALIDATION_CLASS                                               NTStatus      = 0xC00000A7
+	STATUS_BAD_TOKEN_TYPE                                                     NTStatus      = 0xC00000A8
+	STATUS_BAD_MASTER_BOOT_RECORD                                             NTStatus      = 0xC00000A9
+	STATUS_INSTRUCTION_MISALIGNMENT                                           NTStatus      = 0xC00000AA
+	STATUS_INSTANCE_NOT_AVAILABLE                                             NTStatus      = 0xC00000AB
+	STATUS_PIPE_NOT_AVAILABLE                                                 NTStatus      = 0xC00000AC
+	STATUS_INVALID_PIPE_STATE                                                 NTStatus      = 0xC00000AD
+	STATUS_PIPE_BUSY                                                          NTStatus      = 0xC00000AE
+	STATUS_ILLEGAL_FUNCTION                                                   NTStatus      = 0xC00000AF
+	STATUS_PIPE_DISCONNECTED                                                  NTStatus      = 0xC00000B0
+	STATUS_PIPE_CLOSING                                                       NTStatus      = 0xC00000B1
+	STATUS_PIPE_CONNECTED                                                     NTStatus      = 0xC00000B2
+	STATUS_PIPE_LISTENING                                                     NTStatus      = 0xC00000B3
+	STATUS_INVALID_READ_MODE                                                  NTStatus      = 0xC00000B4
+	STATUS_IO_TIMEOUT                                                         NTStatus      = 0xC00000B5
+	STATUS_FILE_FORCED_CLOSED                                                 NTStatus      = 0xC00000B6
+	STATUS_PROFILING_NOT_STARTED                                              NTStatus      = 0xC00000B7
+	STATUS_PROFILING_NOT_STOPPED                                              NTStatus      = 0xC00000B8
+	STATUS_COULD_NOT_INTERPRET                                                NTStatus      = 0xC00000B9
+	STATUS_FILE_IS_A_DIRECTORY                                                NTStatus      = 0xC00000BA
+	STATUS_NOT_SUPPORTED                                                      NTStatus      = 0xC00000BB
+	STATUS_REMOTE_NOT_LISTENING                                               NTStatus      = 0xC00000BC
+	STATUS_DUPLICATE_NAME                                                     NTStatus      = 0xC00000BD
+	STATUS_BAD_NETWORK_PATH                                                   NTStatus      = 0xC00000BE
+	STATUS_NETWORK_BUSY                                                       NTStatus      = 0xC00000BF
+	STATUS_DEVICE_DOES_NOT_EXIST                                              NTStatus      = 0xC00000C0
+	STATUS_TOO_MANY_COMMANDS                                                  NTStatus      = 0xC00000C1
+	STATUS_ADAPTER_HARDWARE_ERROR                                             NTStatus      = 0xC00000C2
+	STATUS_INVALID_NETWORK_RESPONSE                                           NTStatus      = 0xC00000C3
+	STATUS_UNEXPECTED_NETWORK_ERROR                                           NTStatus      = 0xC00000C4
+	STATUS_BAD_REMOTE_ADAPTER                                                 NTStatus      = 0xC00000C5
+	STATUS_PRINT_QUEUE_FULL                                                   NTStatus      = 0xC00000C6
+	STATUS_NO_SPOOL_SPACE                                                     NTStatus      = 0xC00000C7
+	STATUS_PRINT_CANCELLED                                                    NTStatus      = 0xC00000C8
+	STATUS_NETWORK_NAME_DELETED                                               NTStatus      = 0xC00000C9
+	STATUS_NETWORK_ACCESS_DENIED                                              NTStatus      = 0xC00000CA
+	STATUS_BAD_DEVICE_TYPE                                                    NTStatus      = 0xC00000CB
+	STATUS_BAD_NETWORK_NAME                                                   NTStatus      = 0xC00000CC
+	STATUS_TOO_MANY_NAMES                                                     NTStatus      = 0xC00000CD
+	STATUS_TOO_MANY_SESSIONS                                                  NTStatus      = 0xC00000CE
+	STATUS_SHARING_PAUSED                                                     NTStatus      = 0xC00000CF
+	STATUS_REQUEST_NOT_ACCEPTED                                               NTStatus      = 0xC00000D0
+	STATUS_REDIRECTOR_PAUSED                                                  NTStatus      = 0xC00000D1
+	STATUS_NET_WRITE_FAULT                                                    NTStatus      = 0xC00000D2
+	STATUS_PROFILING_AT_LIMIT                                                 NTStatus      = 0xC00000D3
+	STATUS_NOT_SAME_DEVICE                                                    NTStatus      = 0xC00000D4
+	STATUS_FILE_RENAMED                                                       NTStatus      = 0xC00000D5
+	STATUS_VIRTUAL_CIRCUIT_CLOSED                                             NTStatus      = 0xC00000D6
+	STATUS_NO_SECURITY_ON_OBJECT                                              NTStatus      = 0xC00000D7
+	STATUS_CANT_WAIT                                                          NTStatus      = 0xC00000D8
+	STATUS_PIPE_EMPTY                                                         NTStatus      = 0xC00000D9
+	STATUS_CANT_ACCESS_DOMAIN_INFO                                            NTStatus      = 0xC00000DA
+	STATUS_CANT_TERMINATE_SELF                                                NTStatus      = 0xC00000DB
+	STATUS_INVALID_SERVER_STATE                                               NTStatus      = 0xC00000DC
+	STATUS_INVALID_DOMAIN_STATE                                               NTStatus      = 0xC00000DD
+	STATUS_INVALID_DOMAIN_ROLE                                                NTStatus      = 0xC00000DE
+	STATUS_NO_SUCH_DOMAIN                                                     NTStatus      = 0xC00000DF
+	STATUS_DOMAIN_EXISTS                                                      NTStatus      = 0xC00000E0
+	STATUS_DOMAIN_LIMIT_EXCEEDED                                              NTStatus      = 0xC00000E1
+	STATUS_OPLOCK_NOT_GRANTED                                                 NTStatus      = 0xC00000E2
+	STATUS_INVALID_OPLOCK_PROTOCOL                                            NTStatus      = 0xC00000E3
+	STATUS_INTERNAL_DB_CORRUPTION                                             NTStatus      = 0xC00000E4
+	STATUS_INTERNAL_ERROR                                                     NTStatus      = 0xC00000E5
+	STATUS_GENERIC_NOT_MAPPED                                                 NTStatus      = 0xC00000E6
+	STATUS_BAD_DESCRIPTOR_FORMAT                                              NTStatus      = 0xC00000E7
+	STATUS_INVALID_USER_BUFFER                                                NTStatus      = 0xC00000E8
+	STATUS_UNEXPECTED_IO_ERROR                                                NTStatus      = 0xC00000E9
+	STATUS_UNEXPECTED_MM_CREATE_ERR                                           NTStatus      = 0xC00000EA
+	STATUS_UNEXPECTED_MM_MAP_ERROR                                            NTStatus      = 0xC00000EB
+	STATUS_UNEXPECTED_MM_EXTEND_ERR                                           NTStatus      = 0xC00000EC
+	STATUS_NOT_LOGON_PROCESS                                                  NTStatus      = 0xC00000ED
+	STATUS_LOGON_SESSION_EXISTS                                               NTStatus      = 0xC00000EE
+	STATUS_INVALID_PARAMETER_1                                                NTStatus      = 0xC00000EF
+	STATUS_INVALID_PARAMETER_2                                                NTStatus      = 0xC00000F0
+	STATUS_INVALID_PARAMETER_3                                                NTStatus      = 0xC00000F1
+	STATUS_INVALID_PARAMETER_4                                                NTStatus      = 0xC00000F2
+	STATUS_INVALID_PARAMETER_5                                                NTStatus      = 0xC00000F3
+	STATUS_INVALID_PARAMETER_6                                                NTStatus      = 0xC00000F4
+	STATUS_INVALID_PARAMETER_7                                                NTStatus      = 0xC00000F5
+	STATUS_INVALID_PARAMETER_8                                                NTStatus      = 0xC00000F6
+	STATUS_INVALID_PARAMETER_9                                                NTStatus      = 0xC00000F7
+	STATUS_INVALID_PARAMETER_10                                               NTStatus      = 0xC00000F8
+	STATUS_INVALID_PARAMETER_11                                               NTStatus      = 0xC00000F9
+	STATUS_INVALID_PARAMETER_12                                               NTStatus      = 0xC00000FA
+	STATUS_REDIRECTOR_NOT_STARTED                                             NTStatus      = 0xC00000FB
+	STATUS_REDIRECTOR_STARTED                                                 NTStatus      = 0xC00000FC
+	STATUS_STACK_OVERFLOW                                                     NTStatus      = 0xC00000FD
+	STATUS_NO_SUCH_PACKAGE                                                    NTStatus      = 0xC00000FE
+	STATUS_BAD_FUNCTION_TABLE                                                 NTStatus      = 0xC00000FF
+	STATUS_VARIABLE_NOT_FOUND                                                 NTStatus      = 0xC0000100
+	STATUS_DIRECTORY_NOT_EMPTY                                                NTStatus      = 0xC0000101
+	STATUS_FILE_CORRUPT_ERROR                                                 NTStatus      = 0xC0000102
+	STATUS_NOT_A_DIRECTORY                                                    NTStatus      = 0xC0000103
+	STATUS_BAD_LOGON_SESSION_STATE                                            NTStatus      = 0xC0000104
+	STATUS_LOGON_SESSION_COLLISION                                            NTStatus      = 0xC0000105
+	STATUS_NAME_TOO_LONG                                                      NTStatus      = 0xC0000106
+	STATUS_FILES_OPEN                                                         NTStatus      = 0xC0000107
+	STATUS_CONNECTION_IN_USE                                                  NTStatus      = 0xC0000108
+	STATUS_MESSAGE_NOT_FOUND                                                  NTStatus      = 0xC0000109
+	STATUS_PROCESS_IS_TERMINATING                                             NTStatus      = 0xC000010A
+	STATUS_INVALID_LOGON_TYPE                                                 NTStatus      = 0xC000010B
+	STATUS_NO_GUID_TRANSLATION                                                NTStatus      = 0xC000010C
+	STATUS_CANNOT_IMPERSONATE                                                 NTStatus      = 0xC000010D
+	STATUS_IMAGE_ALREADY_LOADED                                               NTStatus      = 0xC000010E
+	STATUS_ABIOS_NOT_PRESENT                                                  NTStatus      = 0xC000010F
+	STATUS_ABIOS_LID_NOT_EXIST                                                NTStatus      = 0xC0000110
+	STATUS_ABIOS_LID_ALREADY_OWNED                                            NTStatus      = 0xC0000111
+	STATUS_ABIOS_NOT_LID_OWNER                                                NTStatus      = 0xC0000112
+	STATUS_ABIOS_INVALID_COMMAND                                              NTStatus      = 0xC0000113
+	STATUS_ABIOS_INVALID_LID                                                  NTStatus      = 0xC0000114
+	STATUS_ABIOS_SELECTOR_NOT_AVAILABLE                                       NTStatus      = 0xC0000115
+	STATUS_ABIOS_INVALID_SELECTOR                                             NTStatus      = 0xC0000116
+	STATUS_NO_LDT                                                             NTStatus      = 0xC0000117
+	STATUS_INVALID_LDT_SIZE                                                   NTStatus      = 0xC0000118
+	STATUS_INVALID_LDT_OFFSET                                                 NTStatus      = 0xC0000119
+	STATUS_INVALID_LDT_DESCRIPTOR                                             NTStatus      = 0xC000011A
+	STATUS_INVALID_IMAGE_NE_FORMAT                                            NTStatus      = 0xC000011B
+	STATUS_RXACT_INVALID_STATE                                                NTStatus      = 0xC000011C
+	STATUS_RXACT_COMMIT_FAILURE                                               NTStatus      = 0xC000011D
+	STATUS_MAPPED_FILE_SIZE_ZERO                                              NTStatus      = 0xC000011E
+	STATUS_TOO_MANY_OPENED_FILES                                              NTStatus      = 0xC000011F
+	STATUS_CANCELLED                                                          NTStatus      = 0xC0000120
+	STATUS_CANNOT_DELETE                                                      NTStatus      = 0xC0000121
+	STATUS_INVALID_COMPUTER_NAME                                              NTStatus      = 0xC0000122
+	STATUS_FILE_DELETED                                                       NTStatus      = 0xC0000123
+	STATUS_SPECIAL_ACCOUNT                                                    NTStatus      = 0xC0000124
+	STATUS_SPECIAL_GROUP                                                      NTStatus      = 0xC0000125
+	STATUS_SPECIAL_USER                                                       NTStatus      = 0xC0000126
+	STATUS_MEMBERS_PRIMARY_GROUP                                              NTStatus      = 0xC0000127
+	STATUS_FILE_CLOSED                                                        NTStatus      = 0xC0000128
+	STATUS_TOO_MANY_THREADS                                                   NTStatus      = 0xC0000129
+	STATUS_THREAD_NOT_IN_PROCESS                                              NTStatus      = 0xC000012A
+	STATUS_TOKEN_ALREADY_IN_USE                                               NTStatus      = 0xC000012B
+	STATUS_PAGEFILE_QUOTA_EXCEEDED                                            NTStatus      = 0xC000012C
+	STATUS_COMMITMENT_LIMIT                                                   NTStatus      = 0xC000012D
+	STATUS_INVALID_IMAGE_LE_FORMAT                                            NTStatus      = 0xC000012E
+	STATUS_INVALID_IMAGE_NOT_MZ                                               NTStatus      = 0xC000012F
+	STATUS_INVALID_IMAGE_PROTECT                                              NTStatus      = 0xC0000130
+	STATUS_INVALID_IMAGE_WIN_16                                               NTStatus      = 0xC0000131
+	STATUS_LOGON_SERVER_CONFLICT                                              NTStatus      = 0xC0000132
+	STATUS_TIME_DIFFERENCE_AT_DC                                              NTStatus      = 0xC0000133
+	STATUS_SYNCHRONIZATION_REQUIRED                                           NTStatus      = 0xC0000134
+	STATUS_DLL_NOT_FOUND                                                      NTStatus      = 0xC0000135
+	STATUS_OPEN_FAILED                                                        NTStatus      = 0xC0000136
+	STATUS_IO_PRIVILEGE_FAILED                                                NTStatus      = 0xC0000137
+	STATUS_ORDINAL_NOT_FOUND                                                  NTStatus      = 0xC0000138
+	STATUS_ENTRYPOINT_NOT_FOUND                                               NTStatus      = 0xC0000139
+	STATUS_CONTROL_C_EXIT                                                     NTStatus      = 0xC000013A
+	STATUS_LOCAL_DISCONNECT                                                   NTStatus      = 0xC000013B
+	STATUS_REMOTE_DISCONNECT                                                  NTStatus      = 0xC000013C
+	STATUS_REMOTE_RESOURCES                                                   NTStatus      = 0xC000013D
+	STATUS_LINK_FAILED                                                        NTStatus      = 0xC000013E
+	STATUS_LINK_TIMEOUT                                                       NTStatus      = 0xC000013F
+	STATUS_INVALID_CONNECTION                                                 NTStatus      = 0xC0000140
+	STATUS_INVALID_ADDRESS                                                    NTStatus      = 0xC0000141
+	STATUS_DLL_INIT_FAILED                                                    NTStatus      = 0xC0000142
+	STATUS_MISSING_SYSTEMFILE                                                 NTStatus      = 0xC0000143
+	STATUS_UNHANDLED_EXCEPTION                                                NTStatus      = 0xC0000144
+	STATUS_APP_INIT_FAILURE                                                   NTStatus      = 0xC0000145
+	STATUS_PAGEFILE_CREATE_FAILED                                             NTStatus      = 0xC0000146
+	STATUS_NO_PAGEFILE                                                        NTStatus      = 0xC0000147
+	STATUS_INVALID_LEVEL                                                      NTStatus      = 0xC0000148
+	STATUS_WRONG_PASSWORD_CORE                                                NTStatus      = 0xC0000149
+	STATUS_ILLEGAL_FLOAT_CONTEXT                                              NTStatus      = 0xC000014A
+	STATUS_PIPE_BROKEN                                                        NTStatus      = 0xC000014B
+	STATUS_REGISTRY_CORRUPT                                                   NTStatus      = 0xC000014C
+	STATUS_REGISTRY_IO_FAILED                                                 NTStatus      = 0xC000014D
+	STATUS_NO_EVENT_PAIR                                                      NTStatus      = 0xC000014E
+	STATUS_UNRECOGNIZED_VOLUME                                                NTStatus      = 0xC000014F
+	STATUS_SERIAL_NO_DEVICE_INITED                                            NTStatus      = 0xC0000150
+	STATUS_NO_SUCH_ALIAS                                                      NTStatus      = 0xC0000151
+	STATUS_MEMBER_NOT_IN_ALIAS                                                NTStatus      = 0xC0000152
+	STATUS_MEMBER_IN_ALIAS                                                    NTStatus      = 0xC0000153
+	STATUS_ALIAS_EXISTS                                                       NTStatus      = 0xC0000154
+	STATUS_LOGON_NOT_GRANTED                                                  NTStatus      = 0xC0000155
+	STATUS_TOO_MANY_SECRETS                                                   NTStatus      = 0xC0000156
+	STATUS_SECRET_TOO_LONG                                                    NTStatus      = 0xC0000157
+	STATUS_INTERNAL_DB_ERROR                                                  NTStatus      = 0xC0000158
+	STATUS_FULLSCREEN_MODE                                                    NTStatus      = 0xC0000159
+	STATUS_TOO_MANY_CONTEXT_IDS                                               NTStatus      = 0xC000015A
+	STATUS_LOGON_TYPE_NOT_GRANTED                                             NTStatus      = 0xC000015B
+	STATUS_NOT_REGISTRY_FILE                                                  NTStatus      = 0xC000015C
+	STATUS_NT_CROSS_ENCRYPTION_REQUIRED                                       NTStatus      = 0xC000015D
+	STATUS_DOMAIN_CTRLR_CONFIG_ERROR                                          NTStatus      = 0xC000015E
+	STATUS_FT_MISSING_MEMBER                                                  NTStatus      = 0xC000015F
+	STATUS_ILL_FORMED_SERVICE_ENTRY                                           NTStatus      = 0xC0000160
+	STATUS_ILLEGAL_CHARACTER                                                  NTStatus      = 0xC0000161
+	STATUS_UNMAPPABLE_CHARACTER                                               NTStatus      = 0xC0000162
+	STATUS_UNDEFINED_CHARACTER                                                NTStatus      = 0xC0000163
+	STATUS_FLOPPY_VOLUME                                                      NTStatus      = 0xC0000164
+	STATUS_FLOPPY_ID_MARK_NOT_FOUND                                           NTStatus      = 0xC0000165
+	STATUS_FLOPPY_WRONG_CYLINDER                                              NTStatus      = 0xC0000166
+	STATUS_FLOPPY_UNKNOWN_ERROR                                               NTStatus      = 0xC0000167
+	STATUS_FLOPPY_BAD_REGISTERS                                               NTStatus      = 0xC0000168
+	STATUS_DISK_RECALIBRATE_FAILED                                            NTStatus      = 0xC0000169
+	STATUS_DISK_OPERATION_FAILED                                              NTStatus      = 0xC000016A
+	STATUS_DISK_RESET_FAILED                                                  NTStatus      = 0xC000016B
+	STATUS_SHARED_IRQ_BUSY                                                    NTStatus      = 0xC000016C
+	STATUS_FT_ORPHANING                                                       NTStatus      = 0xC000016D
+	STATUS_BIOS_FAILED_TO_CONNECT_INTERRUPT                                   NTStatus      = 0xC000016E
+	STATUS_PARTITION_FAILURE                                                  NTStatus      = 0xC0000172
+	STATUS_INVALID_BLOCK_LENGTH                                               NTStatus      = 0xC0000173
+	STATUS_DEVICE_NOT_PARTITIONED                                             NTStatus      = 0xC0000174
+	STATUS_UNABLE_TO_LOCK_MEDIA                                               NTStatus      = 0xC0000175
+	STATUS_UNABLE_TO_UNLOAD_MEDIA                                             NTStatus      = 0xC0000176
+	STATUS_EOM_OVERFLOW                                                       NTStatus      = 0xC0000177
+	STATUS_NO_MEDIA                                                           NTStatus      = 0xC0000178
+	STATUS_NO_SUCH_MEMBER                                                     NTStatus      = 0xC000017A
+	STATUS_INVALID_MEMBER                                                     NTStatus      = 0xC000017B
+	STATUS_KEY_DELETED                                                        NTStatus      = 0xC000017C
+	STATUS_NO_LOG_SPACE                                                       NTStatus      = 0xC000017D
+	STATUS_TOO_MANY_SIDS                                                      NTStatus      = 0xC000017E
+	STATUS_LM_CROSS_ENCRYPTION_REQUIRED                                       NTStatus      = 0xC000017F
+	STATUS_KEY_HAS_CHILDREN                                                   NTStatus      = 0xC0000180
+	STATUS_CHILD_MUST_BE_VOLATILE                                             NTStatus      = 0xC0000181
+	STATUS_DEVICE_CONFIGURATION_ERROR                                         NTStatus      = 0xC0000182
+	STATUS_DRIVER_INTERNAL_ERROR                                              NTStatus      = 0xC0000183
+	STATUS_INVALID_DEVICE_STATE                                               NTStatus      = 0xC0000184
+	STATUS_IO_DEVICE_ERROR                                                    NTStatus      = 0xC0000185
+	STATUS_DEVICE_PROTOCOL_ERROR                                              NTStatus      = 0xC0000186
+	STATUS_BACKUP_CONTROLLER                                                  NTStatus      = 0xC0000187
+	STATUS_LOG_FILE_FULL                                                      NTStatus      = 0xC0000188
+	STATUS_TOO_LATE                                                           NTStatus      = 0xC0000189
+	STATUS_NO_TRUST_LSA_SECRET                                                NTStatus      = 0xC000018A
+	STATUS_NO_TRUST_SAM_ACCOUNT                                               NTStatus      = 0xC000018B
+	STATUS_TRUSTED_DOMAIN_FAILURE                                             NTStatus      = 0xC000018C
+	STATUS_TRUSTED_RELATIONSHIP_FAILURE                                       NTStatus      = 0xC000018D
+	STATUS_EVENTLOG_FILE_CORRUPT                                              NTStatus      = 0xC000018E
+	STATUS_EVENTLOG_CANT_START                                                NTStatus      = 0xC000018F
+	STATUS_TRUST_FAILURE                                                      NTStatus      = 0xC0000190
+	STATUS_MUTANT_LIMIT_EXCEEDED                                              NTStatus      = 0xC0000191
+	STATUS_NETLOGON_NOT_STARTED                                               NTStatus      = 0xC0000192
+	STATUS_ACCOUNT_EXPIRED                                                    NTStatus      = 0xC0000193
+	STATUS_POSSIBLE_DEADLOCK                                                  NTStatus      = 0xC0000194
+	STATUS_NETWORK_CREDENTIAL_CONFLICT                                        NTStatus      = 0xC0000195
+	STATUS_REMOTE_SESSION_LIMIT                                               NTStatus      = 0xC0000196
+	STATUS_EVENTLOG_FILE_CHANGED                                              NTStatus      = 0xC0000197
+	STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT                                  NTStatus      = 0xC0000198
+	STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT                                  NTStatus      = 0xC0000199
+	STATUS_NOLOGON_SERVER_TRUST_ACCOUNT                                       NTStatus      = 0xC000019A
+	STATUS_DOMAIN_TRUST_INCONSISTENT                                          NTStatus      = 0xC000019B
+	STATUS_FS_DRIVER_REQUIRED                                                 NTStatus      = 0xC000019C
+	STATUS_IMAGE_ALREADY_LOADED_AS_DLL                                        NTStatus      = 0xC000019D
+	STATUS_INCOMPATIBLE_WITH_GLOBAL_SHORT_NAME_REGISTRY_SETTING               NTStatus      = 0xC000019E
+	STATUS_SHORT_NAMES_NOT_ENABLED_ON_VOLUME                                  NTStatus      = 0xC000019F
+	STATUS_SECURITY_STREAM_IS_INCONSISTENT                                    NTStatus      = 0xC00001A0
+	STATUS_INVALID_LOCK_RANGE                                                 NTStatus      = 0xC00001A1
+	STATUS_INVALID_ACE_CONDITION                                              NTStatus      = 0xC00001A2
+	STATUS_IMAGE_SUBSYSTEM_NOT_PRESENT                                        NTStatus      = 0xC00001A3
+	STATUS_NOTIFICATION_GUID_ALREADY_DEFINED                                  NTStatus      = 0xC00001A4
+	STATUS_INVALID_EXCEPTION_HANDLER                                          NTStatus      = 0xC00001A5
+	STATUS_DUPLICATE_PRIVILEGES                                               NTStatus      = 0xC00001A6
+	STATUS_NOT_ALLOWED_ON_SYSTEM_FILE                                         NTStatus      = 0xC00001A7
+	STATUS_REPAIR_NEEDED                                                      NTStatus      = 0xC00001A8
+	STATUS_QUOTA_NOT_ENABLED                                                  NTStatus      = 0xC00001A9
+	STATUS_NO_APPLICATION_PACKAGE                                             NTStatus      = 0xC00001AA
+	STATUS_FILE_METADATA_OPTIMIZATION_IN_PROGRESS                             NTStatus      = 0xC00001AB
+	STATUS_NOT_SAME_OBJECT                                                    NTStatus      = 0xC00001AC
+	STATUS_FATAL_MEMORY_EXHAUSTION                                            NTStatus      = 0xC00001AD
+	STATUS_ERROR_PROCESS_NOT_IN_JOB                                           NTStatus      = 0xC00001AE
+	STATUS_CPU_SET_INVALID                                                    NTStatus      = 0xC00001AF
+	STATUS_IO_DEVICE_INVALID_DATA                                             NTStatus      = 0xC00001B0
+	STATUS_IO_UNALIGNED_WRITE                                                 NTStatus      = 0xC00001B1
+	STATUS_NETWORK_OPEN_RESTRICTION                                           NTStatus      = 0xC0000201
+	STATUS_NO_USER_SESSION_KEY                                                NTStatus      = 0xC0000202
+	STATUS_USER_SESSION_DELETED                                               NTStatus      = 0xC0000203
+	STATUS_RESOURCE_LANG_NOT_FOUND                                            NTStatus      = 0xC0000204
+	STATUS_INSUFF_SERVER_RESOURCES                                            NTStatus      = 0xC0000205
+	STATUS_INVALID_BUFFER_SIZE                                                NTStatus      = 0xC0000206
+	STATUS_INVALID_ADDRESS_COMPONENT                                          NTStatus      = 0xC0000207
+	STATUS_INVALID_ADDRESS_WILDCARD                                           NTStatus      = 0xC0000208
+	STATUS_TOO_MANY_ADDRESSES                                                 NTStatus      = 0xC0000209
+	STATUS_ADDRESS_ALREADY_EXISTS                                             NTStatus      = 0xC000020A
+	STATUS_ADDRESS_CLOSED                                                     NTStatus      = 0xC000020B
+	STATUS_CONNECTION_DISCONNECTED                                            NTStatus      = 0xC000020C
+	STATUS_CONNECTION_RESET                                                   NTStatus      = 0xC000020D
+	STATUS_TOO_MANY_NODES                                                     NTStatus      = 0xC000020E
+	STATUS_TRANSACTION_ABORTED                                                NTStatus      = 0xC000020F
+	STATUS_TRANSACTION_TIMED_OUT                                              NTStatus      = 0xC0000210
+	STATUS_TRANSACTION_NO_RELEASE                                             NTStatus      = 0xC0000211
+	STATUS_TRANSACTION_NO_MATCH                                               NTStatus      = 0xC0000212
+	STATUS_TRANSACTION_RESPONDED                                              NTStatus      = 0xC0000213
+	STATUS_TRANSACTION_INVALID_ID                                             NTStatus      = 0xC0000214
+	STATUS_TRANSACTION_INVALID_TYPE                                           NTStatus      = 0xC0000215
+	STATUS_NOT_SERVER_SESSION                                                 NTStatus      = 0xC0000216
+	STATUS_NOT_CLIENT_SESSION                                                 NTStatus      = 0xC0000217
+	STATUS_CANNOT_LOAD_REGISTRY_FILE                                          NTStatus      = 0xC0000218
+	STATUS_DEBUG_ATTACH_FAILED                                                NTStatus      = 0xC0000219
+	STATUS_SYSTEM_PROCESS_TERMINATED                                          NTStatus      = 0xC000021A
+	STATUS_DATA_NOT_ACCEPTED                                                  NTStatus      = 0xC000021B
+	STATUS_NO_BROWSER_SERVERS_FOUND                                           NTStatus      = 0xC000021C
+	STATUS_VDM_HARD_ERROR                                                     NTStatus      = 0xC000021D
+	STATUS_DRIVER_CANCEL_TIMEOUT                                              NTStatus      = 0xC000021E
+	STATUS_REPLY_MESSAGE_MISMATCH                                             NTStatus      = 0xC000021F
+	STATUS_MAPPED_ALIGNMENT                                                   NTStatus      = 0xC0000220
+	STATUS_IMAGE_CHECKSUM_MISMATCH                                            NTStatus      = 0xC0000221
+	STATUS_LOST_WRITEBEHIND_DATA                                              NTStatus      = 0xC0000222
+	STATUS_CLIENT_SERVER_PARAMETERS_INVALID                                   NTStatus      = 0xC0000223
+	STATUS_PASSWORD_MUST_CHANGE                                               NTStatus      = 0xC0000224
+	STATUS_NOT_FOUND                                                          NTStatus      = 0xC0000225
+	STATUS_NOT_TINY_STREAM                                                    NTStatus      = 0xC0000226
+	STATUS_RECOVERY_FAILURE                                                   NTStatus      = 0xC0000227
+	STATUS_STACK_OVERFLOW_READ                                                NTStatus      = 0xC0000228
+	STATUS_FAIL_CHECK                                                         NTStatus      = 0xC0000229
+	STATUS_DUPLICATE_OBJECTID                                                 NTStatus      = 0xC000022A
+	STATUS_OBJECTID_EXISTS                                                    NTStatus      = 0xC000022B
+	STATUS_CONVERT_TO_LARGE                                                   NTStatus      = 0xC000022C
+	STATUS_RETRY                                                              NTStatus      = 0xC000022D
+	STATUS_FOUND_OUT_OF_SCOPE                                                 NTStatus      = 0xC000022E
+	STATUS_ALLOCATE_BUCKET                                                    NTStatus      = 0xC000022F
+	STATUS_PROPSET_NOT_FOUND                                                  NTStatus      = 0xC0000230
+	STATUS_MARSHALL_OVERFLOW                                                  NTStatus      = 0xC0000231
+	STATUS_INVALID_VARIANT                                                    NTStatus      = 0xC0000232
+	STATUS_DOMAIN_CONTROLLER_NOT_FOUND                                        NTStatus      = 0xC0000233
+	STATUS_ACCOUNT_LOCKED_OUT                                                 NTStatus      = 0xC0000234
+	STATUS_HANDLE_NOT_CLOSABLE                                                NTStatus      = 0xC0000235
+	STATUS_CONNECTION_REFUSED                                                 NTStatus      = 0xC0000236
+	STATUS_GRACEFUL_DISCONNECT                                                NTStatus      = 0xC0000237
+	STATUS_ADDRESS_ALREADY_ASSOCIATED                                         NTStatus      = 0xC0000238
+	STATUS_ADDRESS_NOT_ASSOCIATED                                             NTStatus      = 0xC0000239
+	STATUS_CONNECTION_INVALID                                                 NTStatus      = 0xC000023A
+	STATUS_CONNECTION_ACTIVE                                                  NTStatus      = 0xC000023B
+	STATUS_NETWORK_UNREACHABLE                                                NTStatus      = 0xC000023C
+	STATUS_HOST_UNREACHABLE                                                   NTStatus      = 0xC000023D
+	STATUS_PROTOCOL_UNREACHABLE                                               NTStatus      = 0xC000023E
+	STATUS_PORT_UNREACHABLE                                                   NTStatus      = 0xC000023F
+	STATUS_REQUEST_ABORTED                                                    NTStatus      = 0xC0000240
+	STATUS_CONNECTION_ABORTED                                                 NTStatus      = 0xC0000241
+	STATUS_BAD_COMPRESSION_BUFFER                                             NTStatus      = 0xC0000242
+	STATUS_USER_MAPPED_FILE                                                   NTStatus      = 0xC0000243
+	STATUS_AUDIT_FAILED                                                       NTStatus      = 0xC0000244
+	STATUS_TIMER_RESOLUTION_NOT_SET                                           NTStatus      = 0xC0000245
+	STATUS_CONNECTION_COUNT_LIMIT                                             NTStatus      = 0xC0000246
+	STATUS_LOGIN_TIME_RESTRICTION                                             NTStatus      = 0xC0000247
+	STATUS_LOGIN_WKSTA_RESTRICTION                                            NTStatus      = 0xC0000248
+	STATUS_IMAGE_MP_UP_MISMATCH                                               NTStatus      = 0xC0000249
+	STATUS_INSUFFICIENT_LOGON_INFO                                            NTStatus      = 0xC0000250
+	STATUS_BAD_DLL_ENTRYPOINT                                                 NTStatus      = 0xC0000251
+	STATUS_BAD_SERVICE_ENTRYPOINT                                             NTStatus      = 0xC0000252
+	STATUS_LPC_REPLY_LOST                                                     NTStatus      = 0xC0000253
+	STATUS_IP_ADDRESS_CONFLICT1                                               NTStatus      = 0xC0000254
+	STATUS_IP_ADDRESS_CONFLICT2                                               NTStatus      = 0xC0000255
+	STATUS_REGISTRY_QUOTA_LIMIT                                               NTStatus      = 0xC0000256
+	STATUS_PATH_NOT_COVERED                                                   NTStatus      = 0xC0000257
+	STATUS_NO_CALLBACK_ACTIVE                                                 NTStatus      = 0xC0000258
+	STATUS_LICENSE_QUOTA_EXCEEDED                                             NTStatus      = 0xC0000259
+	STATUS_PWD_TOO_SHORT                                                      NTStatus      = 0xC000025A
+	STATUS_PWD_TOO_RECENT                                                     NTStatus      = 0xC000025B
+	STATUS_PWD_HISTORY_CONFLICT                                               NTStatus      = 0xC000025C
+	STATUS_PLUGPLAY_NO_DEVICE                                                 NTStatus      = 0xC000025E
+	STATUS_UNSUPPORTED_COMPRESSION                                            NTStatus      = 0xC000025F
+	STATUS_INVALID_HW_PROFILE                                                 NTStatus      = 0xC0000260
+	STATUS_INVALID_PLUGPLAY_DEVICE_PATH                                       NTStatus      = 0xC0000261
+	STATUS_DRIVER_ORDINAL_NOT_FOUND                                           NTStatus      = 0xC0000262
+	STATUS_DRIVER_ENTRYPOINT_NOT_FOUND                                        NTStatus      = 0xC0000263
+	STATUS_RESOURCE_NOT_OWNED                                                 NTStatus      = 0xC0000264
+	STATUS_TOO_MANY_LINKS                                                     NTStatus      = 0xC0000265
+	STATUS_QUOTA_LIST_INCONSISTENT                                            NTStatus      = 0xC0000266
+	STATUS_FILE_IS_OFFLINE                                                    NTStatus      = 0xC0000267
+	STATUS_EVALUATION_EXPIRATION                                              NTStatus      = 0xC0000268
+	STATUS_ILLEGAL_DLL_RELOCATION                                             NTStatus      = 0xC0000269
+	STATUS_LICENSE_VIOLATION                                                  NTStatus      = 0xC000026A
+	STATUS_DLL_INIT_FAILED_LOGOFF                                             NTStatus      = 0xC000026B
+	STATUS_DRIVER_UNABLE_TO_LOAD                                              NTStatus      = 0xC000026C
+	STATUS_DFS_UNAVAILABLE                                                    NTStatus      = 0xC000026D
+	STATUS_VOLUME_DISMOUNTED                                                  NTStatus      = 0xC000026E
+	STATUS_WX86_INTERNAL_ERROR                                                NTStatus      = 0xC000026F
+	STATUS_WX86_FLOAT_STACK_CHECK                                             NTStatus      = 0xC0000270
+	STATUS_VALIDATE_CONTINUE                                                  NTStatus      = 0xC0000271
+	STATUS_NO_MATCH                                                           NTStatus      = 0xC0000272
+	STATUS_NO_MORE_MATCHES                                                    NTStatus      = 0xC0000273
+	STATUS_NOT_A_REPARSE_POINT                                                NTStatus      = 0xC0000275
+	STATUS_IO_REPARSE_TAG_INVALID                                             NTStatus      = 0xC0000276
+	STATUS_IO_REPARSE_TAG_MISMATCH                                            NTStatus      = 0xC0000277
+	STATUS_IO_REPARSE_DATA_INVALID                                            NTStatus      = 0xC0000278
+	STATUS_IO_REPARSE_TAG_NOT_HANDLED                                         NTStatus      = 0xC0000279
+	STATUS_PWD_TOO_LONG                                                       NTStatus      = 0xC000027A
+	STATUS_STOWED_EXCEPTION                                                   NTStatus      = 0xC000027B
+	STATUS_CONTEXT_STOWED_EXCEPTION                                           NTStatus      = 0xC000027C
+	STATUS_REPARSE_POINT_NOT_RESOLVED                                         NTStatus      = 0xC0000280
+	STATUS_DIRECTORY_IS_A_REPARSE_POINT                                       NTStatus      = 0xC0000281
+	STATUS_RANGE_LIST_CONFLICT                                                NTStatus      = 0xC0000282
+	STATUS_SOURCE_ELEMENT_EMPTY                                               NTStatus      = 0xC0000283
+	STATUS_DESTINATION_ELEMENT_FULL                                           NTStatus      = 0xC0000284
+	STATUS_ILLEGAL_ELEMENT_ADDRESS                                            NTStatus      = 0xC0000285
+	STATUS_MAGAZINE_NOT_PRESENT                                               NTStatus      = 0xC0000286
+	STATUS_REINITIALIZATION_NEEDED                                            NTStatus      = 0xC0000287
+	STATUS_DEVICE_REQUIRES_CLEANING                                           NTStatus      = 0x80000288
+	STATUS_DEVICE_DOOR_OPEN                                                   NTStatus      = 0x80000289
+	STATUS_ENCRYPTION_FAILED                                                  NTStatus      = 0xC000028A
+	STATUS_DECRYPTION_FAILED                                                  NTStatus      = 0xC000028B
+	STATUS_RANGE_NOT_FOUND                                                    NTStatus      = 0xC000028C
+	STATUS_NO_RECOVERY_POLICY                                                 NTStatus      = 0xC000028D
+	STATUS_NO_EFS                                                             NTStatus      = 0xC000028E
+	STATUS_WRONG_EFS                                                          NTStatus      = 0xC000028F
+	STATUS_NO_USER_KEYS                                                       NTStatus      = 0xC0000290
+	STATUS_FILE_NOT_ENCRYPTED                                                 NTStatus      = 0xC0000291
+	STATUS_NOT_EXPORT_FORMAT                                                  NTStatus      = 0xC0000292
+	STATUS_FILE_ENCRYPTED                                                     NTStatus      = 0xC0000293
+	STATUS_WAKE_SYSTEM                                                        NTStatus      = 0x40000294
+	STATUS_WMI_GUID_NOT_FOUND                                                 NTStatus      = 0xC0000295
+	STATUS_WMI_INSTANCE_NOT_FOUND                                             NTStatus      = 0xC0000296
+	STATUS_WMI_ITEMID_NOT_FOUND                                               NTStatus      = 0xC0000297
+	STATUS_WMI_TRY_AGAIN                                                      NTStatus      = 0xC0000298
+	STATUS_SHARED_POLICY                                                      NTStatus      = 0xC0000299
+	STATUS_POLICY_OBJECT_NOT_FOUND                                            NTStatus      = 0xC000029A
+	STATUS_POLICY_ONLY_IN_DS                                                  NTStatus      = 0xC000029B
+	STATUS_VOLUME_NOT_UPGRADED                                                NTStatus      = 0xC000029C
+	STATUS_REMOTE_STORAGE_NOT_ACTIVE                                          NTStatus      = 0xC000029D
+	STATUS_REMOTE_STORAGE_MEDIA_ERROR                                         NTStatus      = 0xC000029E
+	STATUS_NO_TRACKING_SERVICE                                                NTStatus      = 0xC000029F
+	STATUS_SERVER_SID_MISMATCH                                                NTStatus      = 0xC00002A0
+	STATUS_DS_NO_ATTRIBUTE_OR_VALUE                                           NTStatus      = 0xC00002A1
+	STATUS_DS_INVALID_ATTRIBUTE_SYNTAX                                        NTStatus      = 0xC00002A2
+	STATUS_DS_ATTRIBUTE_TYPE_UNDEFINED                                        NTStatus      = 0xC00002A3
+	STATUS_DS_ATTRIBUTE_OR_VALUE_EXISTS                                       NTStatus      = 0xC00002A4
+	STATUS_DS_BUSY                                                            NTStatus      = 0xC00002A5
+	STATUS_DS_UNAVAILABLE                                                     NTStatus      = 0xC00002A6
+	STATUS_DS_NO_RIDS_ALLOCATED                                               NTStatus      = 0xC00002A7
+	STATUS_DS_NO_MORE_RIDS                                                    NTStatus      = 0xC00002A8
+	STATUS_DS_INCORRECT_ROLE_OWNER                                            NTStatus      = 0xC00002A9
+	STATUS_DS_RIDMGR_INIT_ERROR                                               NTStatus      = 0xC00002AA
+	STATUS_DS_OBJ_CLASS_VIOLATION                                             NTStatus      = 0xC00002AB
+	STATUS_DS_CANT_ON_NON_LEAF                                                NTStatus      = 0xC00002AC
+	STATUS_DS_CANT_ON_RDN                                                     NTStatus      = 0xC00002AD
+	STATUS_DS_CANT_MOD_OBJ_CLASS                                              NTStatus      = 0xC00002AE
+	STATUS_DS_CROSS_DOM_MOVE_FAILED                                           NTStatus      = 0xC00002AF
+	STATUS_DS_GC_NOT_AVAILABLE                                                NTStatus      = 0xC00002B0
+	STATUS_DIRECTORY_SERVICE_REQUIRED                                         NTStatus      = 0xC00002B1
+	STATUS_REPARSE_ATTRIBUTE_CONFLICT                                         NTStatus      = 0xC00002B2
+	STATUS_CANT_ENABLE_DENY_ONLY                                              NTStatus      = 0xC00002B3
+	STATUS_FLOAT_MULTIPLE_FAULTS                                              NTStatus      = 0xC00002B4
+	STATUS_FLOAT_MULTIPLE_TRAPS                                               NTStatus      = 0xC00002B5
+	STATUS_DEVICE_REMOVED                                                     NTStatus      = 0xC00002B6
+	STATUS_JOURNAL_DELETE_IN_PROGRESS                                         NTStatus      = 0xC00002B7
+	STATUS_JOURNAL_NOT_ACTIVE                                                 NTStatus      = 0xC00002B8
+	STATUS_NOINTERFACE                                                        NTStatus      = 0xC00002B9
+	STATUS_DS_RIDMGR_DISABLED                                                 NTStatus      = 0xC00002BA
+	STATUS_DS_ADMIN_LIMIT_EXCEEDED                                            NTStatus      = 0xC00002C1
+	STATUS_DRIVER_FAILED_SLEEP                                                NTStatus      = 0xC00002C2
+	STATUS_MUTUAL_AUTHENTICATION_FAILED                                       NTStatus      = 0xC00002C3
+	STATUS_CORRUPT_SYSTEM_FILE                                                NTStatus      = 0xC00002C4
+	STATUS_DATATYPE_MISALIGNMENT_ERROR                                        NTStatus      = 0xC00002C5
+	STATUS_WMI_READ_ONLY                                                      NTStatus      = 0xC00002C6
+	STATUS_WMI_SET_FAILURE                                                    NTStatus      = 0xC00002C7
+	STATUS_COMMITMENT_MINIMUM                                                 NTStatus      = 0xC00002C8
+	STATUS_REG_NAT_CONSUMPTION                                                NTStatus      = 0xC00002C9
+	STATUS_TRANSPORT_FULL                                                     NTStatus      = 0xC00002CA
+	STATUS_DS_SAM_INIT_FAILURE                                                NTStatus      = 0xC00002CB
+	STATUS_ONLY_IF_CONNECTED                                                  NTStatus      = 0xC00002CC
+	STATUS_DS_SENSITIVE_GROUP_VIOLATION                                       NTStatus      = 0xC00002CD
+	STATUS_PNP_RESTART_ENUMERATION                                            NTStatus      = 0xC00002CE
+	STATUS_JOURNAL_ENTRY_DELETED                                              NTStatus      = 0xC00002CF
+	STATUS_DS_CANT_MOD_PRIMARYGROUPID                                         NTStatus      = 0xC00002D0
+	STATUS_SYSTEM_IMAGE_BAD_SIGNATURE                                         NTStatus      = 0xC00002D1
+	STATUS_PNP_REBOOT_REQUIRED                                                NTStatus      = 0xC00002D2
+	STATUS_POWER_STATE_INVALID                                                NTStatus      = 0xC00002D3
+	STATUS_DS_INVALID_GROUP_TYPE                                              NTStatus      = 0xC00002D4
+	STATUS_DS_NO_NEST_GLOBALGROUP_IN_MIXEDDOMAIN                              NTStatus      = 0xC00002D5
+	STATUS_DS_NO_NEST_LOCALGROUP_IN_MIXEDDOMAIN                               NTStatus      = 0xC00002D6
+	STATUS_DS_GLOBAL_CANT_HAVE_LOCAL_MEMBER                                   NTStatus      = 0xC00002D7
+	STATUS_DS_GLOBAL_CANT_HAVE_UNIVERSAL_MEMBER                               NTStatus      = 0xC00002D8
+	STATUS_DS_UNIVERSAL_CANT_HAVE_LOCAL_MEMBER                                NTStatus      = 0xC00002D9
+	STATUS_DS_GLOBAL_CANT_HAVE_CROSSDOMAIN_MEMBER                             NTStatus      = 0xC00002DA
+	STATUS_DS_LOCAL_CANT_HAVE_CROSSDOMAIN_LOCAL_MEMBER                        NTStatus      = 0xC00002DB
+	STATUS_DS_HAVE_PRIMARY_MEMBERS                                            NTStatus      = 0xC00002DC
+	STATUS_WMI_NOT_SUPPORTED                                                  NTStatus      = 0xC00002DD
+	STATUS_INSUFFICIENT_POWER                                                 NTStatus      = 0xC00002DE
+	STATUS_SAM_NEED_BOOTKEY_PASSWORD                                          NTStatus      = 0xC00002DF
+	STATUS_SAM_NEED_BOOTKEY_FLOPPY                                            NTStatus      = 0xC00002E0
+	STATUS_DS_CANT_START                                                      NTStatus      = 0xC00002E1
+	STATUS_DS_INIT_FAILURE                                                    NTStatus      = 0xC00002E2
+	STATUS_SAM_INIT_FAILURE                                                   NTStatus      = 0xC00002E3
+	STATUS_DS_GC_REQUIRED                                                     NTStatus      = 0xC00002E4
+	STATUS_DS_LOCAL_MEMBER_OF_LOCAL_ONLY                                      NTStatus      = 0xC00002E5
+	STATUS_DS_NO_FPO_IN_UNIVERSAL_GROUPS                                      NTStatus      = 0xC00002E6
+	STATUS_DS_MACHINE_ACCOUNT_QUOTA_EXCEEDED                                  NTStatus      = 0xC00002E7
+	STATUS_MULTIPLE_FAULT_VIOLATION                                           NTStatus      = 0xC00002E8
+	STATUS_CURRENT_DOMAIN_NOT_ALLOWED                                         NTStatus      = 0xC00002E9
+	STATUS_CANNOT_MAKE                                                        NTStatus      = 0xC00002EA
+	STATUS_SYSTEM_SHUTDOWN                                                    NTStatus      = 0xC00002EB
+	STATUS_DS_INIT_FAILURE_CONSOLE                                            NTStatus      = 0xC00002EC
+	STATUS_DS_SAM_INIT_FAILURE_CONSOLE                                        NTStatus      = 0xC00002ED
+	STATUS_UNFINISHED_CONTEXT_DELETED                                         NTStatus      = 0xC00002EE
+	STATUS_NO_TGT_REPLY                                                       NTStatus      = 0xC00002EF
+	STATUS_OBJECTID_NOT_FOUND                                                 NTStatus      = 0xC00002F0
+	STATUS_NO_IP_ADDRESSES                                                    NTStatus      = 0xC00002F1
+	STATUS_WRONG_CREDENTIAL_HANDLE                                            NTStatus      = 0xC00002F2
+	STATUS_CRYPTO_SYSTEM_INVALID                                              NTStatus      = 0xC00002F3
+	STATUS_MAX_REFERRALS_EXCEEDED                                             NTStatus      = 0xC00002F4
+	STATUS_MUST_BE_KDC                                                        NTStatus      = 0xC00002F5
+	STATUS_STRONG_CRYPTO_NOT_SUPPORTED                                        NTStatus      = 0xC00002F6
+	STATUS_TOO_MANY_PRINCIPALS                                                NTStatus      = 0xC00002F7
+	STATUS_NO_PA_DATA                                                         NTStatus      = 0xC00002F8
+	STATUS_PKINIT_NAME_MISMATCH                                               NTStatus      = 0xC00002F9
+	STATUS_SMARTCARD_LOGON_REQUIRED                                           NTStatus      = 0xC00002FA
+	STATUS_KDC_INVALID_REQUEST                                                NTStatus      = 0xC00002FB
+	STATUS_KDC_UNABLE_TO_REFER                                                NTStatus      = 0xC00002FC
+	STATUS_KDC_UNKNOWN_ETYPE                                                  NTStatus      = 0xC00002FD
+	STATUS_SHUTDOWN_IN_PROGRESS                                               NTStatus      = 0xC00002FE
+	STATUS_SERVER_SHUTDOWN_IN_PROGRESS                                        NTStatus      = 0xC00002FF
+	STATUS_NOT_SUPPORTED_ON_SBS                                               NTStatus      = 0xC0000300
+	STATUS_WMI_GUID_DISCONNECTED                                              NTStatus      = 0xC0000301
+	STATUS_WMI_ALREADY_DISABLED                                               NTStatus      = 0xC0000302
+	STATUS_WMI_ALREADY_ENABLED                                                NTStatus      = 0xC0000303
+	STATUS_MFT_TOO_FRAGMENTED                                                 NTStatus      = 0xC0000304
+	STATUS_COPY_PROTECTION_FAILURE                                            NTStatus      = 0xC0000305
+	STATUS_CSS_AUTHENTICATION_FAILURE                                         NTStatus      = 0xC0000306
+	STATUS_CSS_KEY_NOT_PRESENT                                                NTStatus      = 0xC0000307
+	STATUS_CSS_KEY_NOT_ESTABLISHED                                            NTStatus      = 0xC0000308
+	STATUS_CSS_SCRAMBLED_SECTOR                                               NTStatus      = 0xC0000309
+	STATUS_CSS_REGION_MISMATCH                                                NTStatus      = 0xC000030A
+	STATUS_CSS_RESETS_EXHAUSTED                                               NTStatus      = 0xC000030B
+	STATUS_PASSWORD_CHANGE_REQUIRED                                           NTStatus      = 0xC000030C
+	STATUS_LOST_MODE_LOGON_RESTRICTION                                        NTStatus      = 0xC000030D
+	STATUS_PKINIT_FAILURE                                                     NTStatus      = 0xC0000320
+	STATUS_SMARTCARD_SUBSYSTEM_FAILURE                                        NTStatus      = 0xC0000321
+	STATUS_NO_KERB_KEY                                                        NTStatus      = 0xC0000322
+	STATUS_HOST_DOWN                                                          NTStatus      = 0xC0000350
+	STATUS_UNSUPPORTED_PREAUTH                                                NTStatus      = 0xC0000351
+	STATUS_EFS_ALG_BLOB_TOO_BIG                                               NTStatus      = 0xC0000352
+	STATUS_PORT_NOT_SET                                                       NTStatus      = 0xC0000353
+	STATUS_DEBUGGER_INACTIVE                                                  NTStatus      = 0xC0000354
+	STATUS_DS_VERSION_CHECK_FAILURE                                           NTStatus      = 0xC0000355
+	STATUS_AUDITING_DISABLED                                                  NTStatus      = 0xC0000356
+	STATUS_PRENT4_MACHINE_ACCOUNT                                             NTStatus      = 0xC0000357
+	STATUS_DS_AG_CANT_HAVE_UNIVERSAL_MEMBER                                   NTStatus      = 0xC0000358
+	STATUS_INVALID_IMAGE_WIN_32                                               NTStatus      = 0xC0000359
+	STATUS_INVALID_IMAGE_WIN_64                                               NTStatus      = 0xC000035A
+	STATUS_BAD_BINDINGS                                                       NTStatus      = 0xC000035B
+	STATUS_NETWORK_SESSION_EXPIRED                                            NTStatus      = 0xC000035C
+	STATUS_APPHELP_BLOCK                                                      NTStatus      = 0xC000035D
+	STATUS_ALL_SIDS_FILTERED                                                  NTStatus      = 0xC000035E
+	STATUS_NOT_SAFE_MODE_DRIVER                                               NTStatus      = 0xC000035F
+	STATUS_ACCESS_DISABLED_BY_POLICY_DEFAULT                                  NTStatus      = 0xC0000361
+	STATUS_ACCESS_DISABLED_BY_POLICY_PATH                                     NTStatus      = 0xC0000362
+	STATUS_ACCESS_DISABLED_BY_POLICY_PUBLISHER                                NTStatus      = 0xC0000363
+	STATUS_ACCESS_DISABLED_BY_POLICY_OTHER                                    NTStatus      = 0xC0000364
+	STATUS_FAILED_DRIVER_ENTRY                                                NTStatus      = 0xC0000365
+	STATUS_DEVICE_ENUMERATION_ERROR                                           NTStatus      = 0xC0000366
+	STATUS_MOUNT_POINT_NOT_RESOLVED                                           NTStatus      = 0xC0000368
+	STATUS_INVALID_DEVICE_OBJECT_PARAMETER                                    NTStatus      = 0xC0000369
+	STATUS_MCA_OCCURED                                                        NTStatus      = 0xC000036A
+	STATUS_DRIVER_BLOCKED_CRITICAL                                            NTStatus      = 0xC000036B
+	STATUS_DRIVER_BLOCKED                                                     NTStatus      = 0xC000036C
+	STATUS_DRIVER_DATABASE_ERROR                                              NTStatus      = 0xC000036D
+	STATUS_SYSTEM_HIVE_TOO_LARGE                                              NTStatus      = 0xC000036E
+	STATUS_INVALID_IMPORT_OF_NON_DLL                                          NTStatus      = 0xC000036F
+	STATUS_DS_SHUTTING_DOWN                                                   NTStatus      = 0x40000370
+	STATUS_NO_SECRETS                                                         NTStatus      = 0xC0000371
+	STATUS_ACCESS_DISABLED_NO_SAFER_UI_BY_POLICY                              NTStatus      = 0xC0000372
+	STATUS_FAILED_STACK_SWITCH                                                NTStatus      = 0xC0000373
+	STATUS_HEAP_CORRUPTION                                                    NTStatus      = 0xC0000374
+	STATUS_SMARTCARD_WRONG_PIN                                                NTStatus      = 0xC0000380
+	STATUS_SMARTCARD_CARD_BLOCKED                                             NTStatus      = 0xC0000381
+	STATUS_SMARTCARD_CARD_NOT_AUTHENTICATED                                   NTStatus      = 0xC0000382
+	STATUS_SMARTCARD_NO_CARD                                                  NTStatus      = 0xC0000383
+	STATUS_SMARTCARD_NO_KEY_CONTAINER                                         NTStatus      = 0xC0000384
+	STATUS_SMARTCARD_NO_CERTIFICATE                                           NTStatus      = 0xC0000385
+	STATUS_SMARTCARD_NO_KEYSET                                                NTStatus      = 0xC0000386
+	STATUS_SMARTCARD_IO_ERROR                                                 NTStatus      = 0xC0000387
+	STATUS_DOWNGRADE_DETECTED                                                 NTStatus      = 0xC0000388
+	STATUS_SMARTCARD_CERT_REVOKED                                             NTStatus      = 0xC0000389
+	STATUS_ISSUING_CA_UNTRUSTED                                               NTStatus      = 0xC000038A
+	STATUS_REVOCATION_OFFLINE_C                                               NTStatus      = 0xC000038B
+	STATUS_PKINIT_CLIENT_FAILURE                                              NTStatus      = 0xC000038C
+	STATUS_SMARTCARD_CERT_EXPIRED                                             NTStatus      = 0xC000038D
+	STATUS_DRIVER_FAILED_PRIOR_UNLOAD                                         NTStatus      = 0xC000038E
+	STATUS_SMARTCARD_SILENT_CONTEXT                                           NTStatus      = 0xC000038F
+	STATUS_PER_USER_TRUST_QUOTA_EXCEEDED                                      NTStatus      = 0xC0000401
+	STATUS_ALL_USER_TRUST_QUOTA_EXCEEDED                                      NTStatus      = 0xC0000402
+	STATUS_USER_DELETE_TRUST_QUOTA_EXCEEDED                                   NTStatus      = 0xC0000403
+	STATUS_DS_NAME_NOT_UNIQUE                                                 NTStatus      = 0xC0000404
+	STATUS_DS_DUPLICATE_ID_FOUND                                              NTStatus      = 0xC0000405
+	STATUS_DS_GROUP_CONVERSION_ERROR                                          NTStatus      = 0xC0000406
+	STATUS_VOLSNAP_PREPARE_HIBERNATE                                          NTStatus      = 0xC0000407
+	STATUS_USER2USER_REQUIRED                                                 NTStatus      = 0xC0000408
+	STATUS_STACK_BUFFER_OVERRUN                                               NTStatus      = 0xC0000409
+	STATUS_NO_S4U_PROT_SUPPORT                                                NTStatus      = 0xC000040A
+	STATUS_CROSSREALM_DELEGATION_FAILURE                                      NTStatus      = 0xC000040B
+	STATUS_REVOCATION_OFFLINE_KDC                                             NTStatus      = 0xC000040C
+	STATUS_ISSUING_CA_UNTRUSTED_KDC                                           NTStatus      = 0xC000040D
+	STATUS_KDC_CERT_EXPIRED                                                   NTStatus      = 0xC000040E
+	STATUS_KDC_CERT_REVOKED                                                   NTStatus      = 0xC000040F
+	STATUS_PARAMETER_QUOTA_EXCEEDED                                           NTStatus      = 0xC0000410
+	STATUS_HIBERNATION_FAILURE                                                NTStatus      = 0xC0000411
+	STATUS_DELAY_LOAD_FAILED                                                  NTStatus      = 0xC0000412
+	STATUS_AUTHENTICATION_FIREWALL_FAILED                                     NTStatus      = 0xC0000413
+	STATUS_VDM_DISALLOWED                                                     NTStatus      = 0xC0000414
+	STATUS_HUNG_DISPLAY_DRIVER_THREAD                                         NTStatus      = 0xC0000415
+	STATUS_INSUFFICIENT_RESOURCE_FOR_SPECIFIED_SHARED_SECTION_SIZE            NTStatus      = 0xC0000416
+	STATUS_INVALID_CRUNTIME_PARAMETER                                         NTStatus      = 0xC0000417
+	STATUS_NTLM_BLOCKED                                                       NTStatus      = 0xC0000418
+	STATUS_DS_SRC_SID_EXISTS_IN_FOREST                                        NTStatus      = 0xC0000419
+	STATUS_DS_DOMAIN_NAME_EXISTS_IN_FOREST                                    NTStatus      = 0xC000041A
+	STATUS_DS_FLAT_NAME_EXISTS_IN_FOREST                                      NTStatus      = 0xC000041B
+	STATUS_INVALID_USER_PRINCIPAL_NAME                                        NTStatus      = 0xC000041C
+	STATUS_FATAL_USER_CALLBACK_EXCEPTION                                      NTStatus      = 0xC000041D
+	STATUS_ASSERTION_FAILURE                                                  NTStatus      = 0xC0000420
+	STATUS_VERIFIER_STOP                                                      NTStatus      = 0xC0000421
+	STATUS_CALLBACK_POP_STACK                                                 NTStatus      = 0xC0000423
+	STATUS_INCOMPATIBLE_DRIVER_BLOCKED                                        NTStatus      = 0xC0000424
+	STATUS_HIVE_UNLOADED                                                      NTStatus      = 0xC0000425
+	STATUS_COMPRESSION_DISABLED                                               NTStatus      = 0xC0000426
+	STATUS_FILE_SYSTEM_LIMITATION                                             NTStatus      = 0xC0000427
+	STATUS_INVALID_IMAGE_HASH                                                 NTStatus      = 0xC0000428
+	STATUS_NOT_CAPABLE                                                        NTStatus      = 0xC0000429
+	STATUS_REQUEST_OUT_OF_SEQUENCE                                            NTStatus      = 0xC000042A
+	STATUS_IMPLEMENTATION_LIMIT                                               NTStatus      = 0xC000042B
+	STATUS_ELEVATION_REQUIRED                                                 NTStatus      = 0xC000042C
+	STATUS_NO_SECURITY_CONTEXT                                                NTStatus      = 0xC000042D
+	STATUS_PKU2U_CERT_FAILURE                                                 NTStatus      = 0xC000042F
+	STATUS_BEYOND_VDL                                                         NTStatus      = 0xC0000432
+	STATUS_ENCOUNTERED_WRITE_IN_PROGRESS                                      NTStatus      = 0xC0000433
+	STATUS_PTE_CHANGED                                                        NTStatus      = 0xC0000434
+	STATUS_PURGE_FAILED                                                       NTStatus      = 0xC0000435
+	STATUS_CRED_REQUIRES_CONFIRMATION                                         NTStatus      = 0xC0000440
+	STATUS_CS_ENCRYPTION_INVALID_SERVER_RESPONSE                              NTStatus      = 0xC0000441
+	STATUS_CS_ENCRYPTION_UNSUPPORTED_SERVER                                   NTStatus      = 0xC0000442
+	STATUS_CS_ENCRYPTION_EXISTING_ENCRYPTED_FILE                              NTStatus      = 0xC0000443
+	STATUS_CS_ENCRYPTION_NEW_ENCRYPTED_FILE                                   NTStatus      = 0xC0000444
+	STATUS_CS_ENCRYPTION_FILE_NOT_CSE                                         NTStatus      = 0xC0000445
+	STATUS_INVALID_LABEL                                                      NTStatus      = 0xC0000446
+	STATUS_DRIVER_PROCESS_TERMINATED                                          NTStatus      = 0xC0000450
+	STATUS_AMBIGUOUS_SYSTEM_DEVICE                                            NTStatus      = 0xC0000451
+	STATUS_SYSTEM_DEVICE_NOT_FOUND                                            NTStatus      = 0xC0000452
+	STATUS_RESTART_BOOT_APPLICATION                                           NTStatus      = 0xC0000453
+	STATUS_INSUFFICIENT_NVRAM_RESOURCES                                       NTStatus      = 0xC0000454
+	STATUS_INVALID_SESSION                                                    NTStatus      = 0xC0000455
+	STATUS_THREAD_ALREADY_IN_SESSION                                          NTStatus      = 0xC0000456
+	STATUS_THREAD_NOT_IN_SESSION                                              NTStatus      = 0xC0000457
+	STATUS_INVALID_WEIGHT                                                     NTStatus      = 0xC0000458
+	STATUS_REQUEST_PAUSED                                                     NTStatus      = 0xC0000459
+	STATUS_NO_RANGES_PROCESSED                                                NTStatus      = 0xC0000460
+	STATUS_DISK_RESOURCES_EXHAUSTED                                           NTStatus      = 0xC0000461
+	STATUS_NEEDS_REMEDIATION                                                  NTStatus      = 0xC0000462
+	STATUS_DEVICE_FEATURE_NOT_SUPPORTED                                       NTStatus      = 0xC0000463
+	STATUS_DEVICE_UNREACHABLE                                                 NTStatus      = 0xC0000464
+	STATUS_INVALID_TOKEN                                                      NTStatus      = 0xC0000465
+	STATUS_SERVER_UNAVAILABLE                                                 NTStatus      = 0xC0000466
+	STATUS_FILE_NOT_AVAILABLE                                                 NTStatus      = 0xC0000467
+	STATUS_DEVICE_INSUFFICIENT_RESOURCES                                      NTStatus      = 0xC0000468
+	STATUS_PACKAGE_UPDATING                                                   NTStatus      = 0xC0000469
+	STATUS_NOT_READ_FROM_COPY                                                 NTStatus      = 0xC000046A
+	STATUS_FT_WRITE_FAILURE                                                   NTStatus      = 0xC000046B
+	STATUS_FT_DI_SCAN_REQUIRED                                                NTStatus      = 0xC000046C
+	STATUS_OBJECT_NOT_EXTERNALLY_BACKED                                       NTStatus      = 0xC000046D
+	STATUS_EXTERNAL_BACKING_PROVIDER_UNKNOWN                                  NTStatus      = 0xC000046E
+	STATUS_COMPRESSION_NOT_BENEFICIAL                                         NTStatus      = 0xC000046F
+	STATUS_DATA_CHECKSUM_ERROR                                                NTStatus      = 0xC0000470
+	STATUS_INTERMIXED_KERNEL_EA_OPERATION                                     NTStatus      = 0xC0000471
+	STATUS_TRIM_READ_ZERO_NOT_SUPPORTED                                       NTStatus      = 0xC0000472
+	STATUS_TOO_MANY_SEGMENT_DESCRIPTORS                                       NTStatus      = 0xC0000473
+	STATUS_INVALID_OFFSET_ALIGNMENT                                           NTStatus      = 0xC0000474
+	STATUS_INVALID_FIELD_IN_PARAMETER_LIST                                    NTStatus      = 0xC0000475
+	STATUS_OPERATION_IN_PROGRESS                                              NTStatus      = 0xC0000476
+	STATUS_INVALID_INITIATOR_TARGET_PATH                                      NTStatus      = 0xC0000477
+	STATUS_SCRUB_DATA_DISABLED                                                NTStatus      = 0xC0000478
+	STATUS_NOT_REDUNDANT_STORAGE                                              NTStatus      = 0xC0000479
+	STATUS_RESIDENT_FILE_NOT_SUPPORTED                                        NTStatus      = 0xC000047A
+	STATUS_COMPRESSED_FILE_NOT_SUPPORTED                                      NTStatus      = 0xC000047B
+	STATUS_DIRECTORY_NOT_SUPPORTED                                            NTStatus      = 0xC000047C
+	STATUS_IO_OPERATION_TIMEOUT                                               NTStatus      = 0xC000047D
+	STATUS_SYSTEM_NEEDS_REMEDIATION                                           NTStatus      = 0xC000047E
+	STATUS_APPX_INTEGRITY_FAILURE_CLR_NGEN                                    NTStatus      = 0xC000047F
+	STATUS_SHARE_UNAVAILABLE                                                  NTStatus      = 0xC0000480
+	STATUS_APISET_NOT_HOSTED                                                  NTStatus      = 0xC0000481
+	STATUS_APISET_NOT_PRESENT                                                 NTStatus      = 0xC0000482
+	STATUS_DEVICE_HARDWARE_ERROR                                              NTStatus      = 0xC0000483
+	STATUS_FIRMWARE_SLOT_INVALID                                              NTStatus      = 0xC0000484
+	STATUS_FIRMWARE_IMAGE_INVALID                                             NTStatus      = 0xC0000485
+	STATUS_STORAGE_TOPOLOGY_ID_MISMATCH                                       NTStatus      = 0xC0000486
+	STATUS_WIM_NOT_BOOTABLE                                                   NTStatus      = 0xC0000487
+	STATUS_BLOCKED_BY_PARENTAL_CONTROLS                                       NTStatus      = 0xC0000488
+	STATUS_NEEDS_REGISTRATION                                                 NTStatus      = 0xC0000489
+	STATUS_QUOTA_ACTIVITY                                                     NTStatus      = 0xC000048A
+	STATUS_CALLBACK_INVOKE_INLINE                                             NTStatus      = 0xC000048B
+	STATUS_BLOCK_TOO_MANY_REFERENCES                                          NTStatus      = 0xC000048C
+	STATUS_MARKED_TO_DISALLOW_WRITES                                          NTStatus      = 0xC000048D
+	STATUS_NETWORK_ACCESS_DENIED_EDP                                          NTStatus      = 0xC000048E
+	STATUS_ENCLAVE_FAILURE                                                    NTStatus      = 0xC000048F
+	STATUS_PNP_NO_COMPAT_DRIVERS                                              NTStatus      = 0xC0000490
+	STATUS_PNP_DRIVER_PACKAGE_NOT_FOUND                                       NTStatus      = 0xC0000491
+	STATUS_PNP_DRIVER_CONFIGURATION_NOT_FOUND                                 NTStatus      = 0xC0000492
+	STATUS_PNP_DRIVER_CONFIGURATION_INCOMPLETE                                NTStatus      = 0xC0000493
+	STATUS_PNP_FUNCTION_DRIVER_REQUIRED                                       NTStatus      = 0xC0000494
+	STATUS_PNP_DEVICE_CONFIGURATION_PENDING                                   NTStatus      = 0xC0000495
+	STATUS_DEVICE_HINT_NAME_BUFFER_TOO_SMALL                                  NTStatus      = 0xC0000496
+	STATUS_PACKAGE_NOT_AVAILABLE                                              NTStatus      = 0xC0000497
+	STATUS_DEVICE_IN_MAINTENANCE                                              NTStatus      = 0xC0000499
+	STATUS_NOT_SUPPORTED_ON_DAX                                               NTStatus      = 0xC000049A
+	STATUS_FREE_SPACE_TOO_FRAGMENTED                                          NTStatus      = 0xC000049B
+	STATUS_DAX_MAPPING_EXISTS                                                 NTStatus      = 0xC000049C
+	STATUS_CHILD_PROCESS_BLOCKED                                              NTStatus      = 0xC000049D
+	STATUS_STORAGE_LOST_DATA_PERSISTENCE                                      NTStatus      = 0xC000049E
+	STATUS_VRF_CFG_ENABLED                                                    NTStatus      = 0xC000049F
+	STATUS_PARTITION_TERMINATING                                              NTStatus      = 0xC00004A0
+	STATUS_EXTERNAL_SYSKEY_NOT_SUPPORTED                                      NTStatus      = 0xC00004A1
+	STATUS_ENCLAVE_VIOLATION                                                  NTStatus      = 0xC00004A2
+	STATUS_FILE_PROTECTED_UNDER_DPL                                           NTStatus      = 0xC00004A3
+	STATUS_VOLUME_NOT_CLUSTER_ALIGNED                                         NTStatus      = 0xC00004A4
+	STATUS_NO_PHYSICALLY_ALIGNED_FREE_SPACE_FOUND                             NTStatus      = 0xC00004A5
+	STATUS_APPX_FILE_NOT_ENCRYPTED                                            NTStatus      = 0xC00004A6
+	STATUS_RWRAW_ENCRYPTED_FILE_NOT_ENCRYPTED                                 NTStatus      = 0xC00004A7
+	STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILEOFFSET                       NTStatus      = 0xC00004A8
+	STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_FILERANGE                        NTStatus      = 0xC00004A9
+	STATUS_RWRAW_ENCRYPTED_INVALID_EDATAINFO_PARAMETER                        NTStatus      = 0xC00004AA
+	STATUS_FT_READ_FAILURE                                                    NTStatus      = 0xC00004AB
+	STATUS_PATCH_CONFLICT                                                     NTStatus      = 0xC00004AC
+	STATUS_STORAGE_RESERVE_ID_INVALID                                         NTStatus      = 0xC00004AD
+	STATUS_STORAGE_RESERVE_DOES_NOT_EXIST                                     NTStatus      = 0xC00004AE
+	STATUS_STORAGE_RESERVE_ALREADY_EXISTS                                     NTStatus      = 0xC00004AF
+	STATUS_STORAGE_RESERVE_NOT_EMPTY                                          NTStatus      = 0xC00004B0
+	STATUS_NOT_A_DAX_VOLUME                                                   NTStatus      = 0xC00004B1
+	STATUS_NOT_DAX_MAPPABLE                                                   NTStatus      = 0xC00004B2
+	STATUS_CASE_DIFFERING_NAMES_IN_DIR                                        NTStatus      = 0xC00004B3
+	STATUS_FILE_NOT_SUPPORTED                                                 NTStatus      = 0xC00004B4
+	STATUS_NOT_SUPPORTED_WITH_BTT                                             NTStatus      = 0xC00004B5
+	STATUS_ENCRYPTION_DISABLED                                                NTStatus      = 0xC00004B6
+	STATUS_ENCRYPTING_METADATA_DISALLOWED                                     NTStatus      = 0xC00004B7
+	STATUS_CANT_CLEAR_ENCRYPTION_FLAG                                         NTStatus      = 0xC00004B8
+	STATUS_INVALID_TASK_NAME                                                  NTStatus      = 0xC0000500
+	STATUS_INVALID_TASK_INDEX                                                 NTStatus      = 0xC0000501
+	STATUS_THREAD_ALREADY_IN_TASK                                             NTStatus      = 0xC0000502
+	STATUS_CALLBACK_BYPASS                                                    NTStatus      = 0xC0000503
+	STATUS_UNDEFINED_SCOPE                                                    NTStatus      = 0xC0000504
+	STATUS_INVALID_CAP                                                        NTStatus      = 0xC0000505
+	STATUS_NOT_GUI_PROCESS                                                    NTStatus      = 0xC0000506
+	STATUS_DEVICE_HUNG                                                        NTStatus      = 0xC0000507
+	STATUS_CONTAINER_ASSIGNED                                                 NTStatus      = 0xC0000508
+	STATUS_JOB_NO_CONTAINER                                                   NTStatus      = 0xC0000509
+	STATUS_DEVICE_UNRESPONSIVE                                                NTStatus      = 0xC000050A
+	STATUS_REPARSE_POINT_ENCOUNTERED                                          NTStatus      = 0xC000050B
+	STATUS_ATTRIBUTE_NOT_PRESENT                                              NTStatus      = 0xC000050C
+	STATUS_NOT_A_TIERED_VOLUME                                                NTStatus      = 0xC000050D
+	STATUS_ALREADY_HAS_STREAM_ID                                              NTStatus      = 0xC000050E
+	STATUS_JOB_NOT_EMPTY                                                      NTStatus      = 0xC000050F
+	STATUS_ALREADY_INITIALIZED                                                NTStatus      = 0xC0000510
+	STATUS_ENCLAVE_NOT_TERMINATED                                             NTStatus      = 0xC0000511
+	STATUS_ENCLAVE_IS_TERMINATING                                             NTStatus      = 0xC0000512
+	STATUS_SMB1_NOT_AVAILABLE                                                 NTStatus      = 0xC0000513
+	STATUS_SMR_GARBAGE_COLLECTION_REQUIRED                                    NTStatus      = 0xC0000514
+	STATUS_INTERRUPTED                                                        NTStatus      = 0xC0000515
+	STATUS_THREAD_NOT_RUNNING                                                 NTStatus      = 0xC0000516
+	STATUS_FAIL_FAST_EXCEPTION                                                NTStatus      = 0xC0000602
+	STATUS_IMAGE_CERT_REVOKED                                                 NTStatus      = 0xC0000603
+	STATUS_DYNAMIC_CODE_BLOCKED                                               NTStatus      = 0xC0000604
+	STATUS_IMAGE_CERT_EXPIRED                                                 NTStatus      = 0xC0000605
+	STATUS_STRICT_CFG_VIOLATION                                               NTStatus      = 0xC0000606
+	STATUS_SET_CONTEXT_DENIED                                                 NTStatus      = 0xC000060A
+	STATUS_CROSS_PARTITION_VIOLATION                                          NTStatus      = 0xC000060B
+	STATUS_PORT_CLOSED                                                        NTStatus      = 0xC0000700
+	STATUS_MESSAGE_LOST                                                       NTStatus      = 0xC0000701
+	STATUS_INVALID_MESSAGE                                                    NTStatus      = 0xC0000702
+	STATUS_REQUEST_CANCELED                                                   NTStatus      = 0xC0000703
+	STATUS_RECURSIVE_DISPATCH                                                 NTStatus      = 0xC0000704
+	STATUS_LPC_RECEIVE_BUFFER_EXPECTED                                        NTStatus      = 0xC0000705
+	STATUS_LPC_INVALID_CONNECTION_USAGE                                       NTStatus      = 0xC0000706
+	STATUS_LPC_REQUESTS_NOT_ALLOWED                                           NTStatus      = 0xC0000707
+	STATUS_RESOURCE_IN_USE                                                    NTStatus      = 0xC0000708
+	STATUS_HARDWARE_MEMORY_ERROR                                              NTStatus      = 0xC0000709
+	STATUS_THREADPOOL_HANDLE_EXCEPTION                                        NTStatus      = 0xC000070A
+	STATUS_THREADPOOL_SET_EVENT_ON_COMPLETION_FAILED                          NTStatus      = 0xC000070B
+	STATUS_THREADPOOL_RELEASE_SEMAPHORE_ON_COMPLETION_FAILED                  NTStatus      = 0xC000070C
+	STATUS_THREADPOOL_RELEASE_MUTEX_ON_COMPLETION_FAILED                      NTStatus      = 0xC000070D
+	STATUS_THREADPOOL_FREE_LIBRARY_ON_COMPLETION_FAILED                       NTStatus      = 0xC000070E
+	STATUS_THREADPOOL_RELEASED_DURING_OPERATION                               NTStatus      = 0xC000070F
+	STATUS_CALLBACK_RETURNED_WHILE_IMPERSONATING                              NTStatus      = 0xC0000710
+	STATUS_APC_RETURNED_WHILE_IMPERSONATING                                   NTStatus      = 0xC0000711
+	STATUS_PROCESS_IS_PROTECTED                                               NTStatus      = 0xC0000712
+	STATUS_MCA_EXCEPTION                                                      NTStatus      = 0xC0000713
+	STATUS_CERTIFICATE_MAPPING_NOT_UNIQUE                                     NTStatus      = 0xC0000714
+	STATUS_SYMLINK_CLASS_DISABLED                                             NTStatus      = 0xC0000715
+	STATUS_INVALID_IDN_NORMALIZATION                                          NTStatus      = 0xC0000716
+	STATUS_NO_UNICODE_TRANSLATION                                             NTStatus      = 0xC0000717
+	STATUS_ALREADY_REGISTERED                                                 NTStatus      = 0xC0000718
+	STATUS_CONTEXT_MISMATCH                                                   NTStatus      = 0xC0000719
+	STATUS_PORT_ALREADY_HAS_COMPLETION_LIST                                   NTStatus      = 0xC000071A
+	STATUS_CALLBACK_RETURNED_THREAD_PRIORITY                                  NTStatus      = 0xC000071B
+	STATUS_INVALID_THREAD                                                     NTStatus      = 0xC000071C
+	STATUS_CALLBACK_RETURNED_TRANSACTION                                      NTStatus      = 0xC000071D
+	STATUS_CALLBACK_RETURNED_LDR_LOCK                                         NTStatus      = 0xC000071E
+	STATUS_CALLBACK_RETURNED_LANG                                             NTStatus      = 0xC000071F
+	STATUS_CALLBACK_RETURNED_PRI_BACK                                         NTStatus      = 0xC0000720
+	STATUS_CALLBACK_RETURNED_THREAD_AFFINITY                                  NTStatus      = 0xC0000721
+	STATUS_LPC_HANDLE_COUNT_EXCEEDED                                          NTStatus      = 0xC0000722
+	STATUS_EXECUTABLE_MEMORY_WRITE                                            NTStatus      = 0xC0000723
+	STATUS_KERNEL_EXECUTABLE_MEMORY_WRITE                                     NTStatus      = 0xC0000724
+	STATUS_ATTACHED_EXECUTABLE_MEMORY_WRITE                                   NTStatus      = 0xC0000725
+	STATUS_TRIGGERED_EXECUTABLE_MEMORY_WRITE                                  NTStatus      = 0xC0000726
+	STATUS_DISK_REPAIR_DISABLED                                               NTStatus      = 0xC0000800
+	STATUS_DS_DOMAIN_RENAME_IN_PROGRESS                                       NTStatus      = 0xC0000801
+	STATUS_DISK_QUOTA_EXCEEDED                                                NTStatus      = 0xC0000802
+	STATUS_DATA_LOST_REPAIR                                                   NTStatus      = 0x80000803
+	STATUS_CONTENT_BLOCKED                                                    NTStatus      = 0xC0000804
+	STATUS_BAD_CLUSTERS                                                       NTStatus      = 0xC0000805
+	STATUS_VOLUME_DIRTY                                                       NTStatus      = 0xC0000806
+	STATUS_DISK_REPAIR_REDIRECTED                                             NTStatus      = 0x40000807
+	STATUS_DISK_REPAIR_UNSUCCESSFUL                                           NTStatus      = 0xC0000808
+	STATUS_CORRUPT_LOG_OVERFULL                                               NTStatus      = 0xC0000809
+	STATUS_CORRUPT_LOG_CORRUPTED                                              NTStatus      = 0xC000080A
+	STATUS_CORRUPT_LOG_UNAVAILABLE                                            NTStatus      = 0xC000080B
+	STATUS_CORRUPT_LOG_DELETED_FULL                                           NTStatus      = 0xC000080C
+	STATUS_CORRUPT_LOG_CLEARED                                                NTStatus      = 0xC000080D
+	STATUS_ORPHAN_NAME_EXHAUSTED                                              NTStatus      = 0xC000080E
+	STATUS_PROACTIVE_SCAN_IN_PROGRESS                                         NTStatus      = 0xC000080F
+	STATUS_ENCRYPTED_IO_NOT_POSSIBLE                                          NTStatus      = 0xC0000810
+	STATUS_CORRUPT_LOG_UPLEVEL_RECORDS                                        NTStatus      = 0xC0000811
+	STATUS_FILE_CHECKED_OUT                                                   NTStatus      = 0xC0000901
+	STATUS_CHECKOUT_REQUIRED                                                  NTStatus      = 0xC0000902
+	STATUS_BAD_FILE_TYPE                                                      NTStatus      = 0xC0000903
+	STATUS_FILE_TOO_LARGE                                                     NTStatus      = 0xC0000904
+	STATUS_FORMS_AUTH_REQUIRED                                                NTStatus      = 0xC0000905
+	STATUS_VIRUS_INFECTED                                                     NTStatus      = 0xC0000906
+	STATUS_VIRUS_DELETED                                                      NTStatus      = 0xC0000907
+	STATUS_BAD_MCFG_TABLE                                                     NTStatus      = 0xC0000908
+	STATUS_CANNOT_BREAK_OPLOCK                                                NTStatus      = 0xC0000909
+	STATUS_BAD_KEY                                                            NTStatus      = 0xC000090A
+	STATUS_BAD_DATA                                                           NTStatus      = 0xC000090B
+	STATUS_NO_KEY                                                             NTStatus      = 0xC000090C
+	STATUS_FILE_HANDLE_REVOKED                                                NTStatus      = 0xC0000910
+	STATUS_WOW_ASSERTION                                                      NTStatus      = 0xC0009898
+	STATUS_INVALID_SIGNATURE                                                  NTStatus      = 0xC000A000
+	STATUS_HMAC_NOT_SUPPORTED                                                 NTStatus      = 0xC000A001
+	STATUS_AUTH_TAG_MISMATCH                                                  NTStatus      = 0xC000A002
+	STATUS_INVALID_STATE_TRANSITION                                           NTStatus      = 0xC000A003
+	STATUS_INVALID_KERNEL_INFO_VERSION                                        NTStatus      = 0xC000A004
+	STATUS_INVALID_PEP_INFO_VERSION                                           NTStatus      = 0xC000A005
+	STATUS_HANDLE_REVOKED                                                     NTStatus      = 0xC000A006
+	STATUS_EOF_ON_GHOSTED_RANGE                                               NTStatus      = 0xC000A007
+	STATUS_IPSEC_QUEUE_OVERFLOW                                               NTStatus      = 0xC000A010
+	STATUS_ND_QUEUE_OVERFLOW                                                  NTStatus      = 0xC000A011
+	STATUS_HOPLIMIT_EXCEEDED                                                  NTStatus      = 0xC000A012
+	STATUS_PROTOCOL_NOT_SUPPORTED                                             NTStatus      = 0xC000A013
+	STATUS_FASTPATH_REJECTED                                                  NTStatus      = 0xC000A014
+	STATUS_LOST_WRITEBEHIND_DATA_NETWORK_DISCONNECTED                         NTStatus      = 0xC000A080
+	STATUS_LOST_WRITEBEHIND_DATA_NETWORK_SERVER_ERROR                         NTStatus      = 0xC000A081
+	STATUS_LOST_WRITEBEHIND_DATA_LOCAL_DISK_ERROR                             NTStatus      = 0xC000A082
+	STATUS_XML_PARSE_ERROR                                                    NTStatus      = 0xC000A083
+	STATUS_XMLDSIG_ERROR                                                      NTStatus      = 0xC000A084
+	STATUS_WRONG_COMPARTMENT                                                  NTStatus      = 0xC000A085
+	STATUS_AUTHIP_FAILURE                                                     NTStatus      = 0xC000A086
+	STATUS_DS_OID_MAPPED_GROUP_CANT_HAVE_MEMBERS                              NTStatus      = 0xC000A087
+	STATUS_DS_OID_NOT_FOUND                                                   NTStatus      = 0xC000A088
+	STATUS_INCORRECT_ACCOUNT_TYPE                                             NTStatus      = 0xC000A089
+	STATUS_HASH_NOT_SUPPORTED                                                 NTStatus      = 0xC000A100
+	STATUS_HASH_NOT_PRESENT                                                   NTStatus      = 0xC000A101
+	STATUS_SECONDARY_IC_PROVIDER_NOT_REGISTERED                               NTStatus      = 0xC000A121
+	STATUS_GPIO_CLIENT_INFORMATION_INVALID                                    NTStatus      = 0xC000A122
+	STATUS_GPIO_VERSION_NOT_SUPPORTED                                         NTStatus      = 0xC000A123
+	STATUS_GPIO_INVALID_REGISTRATION_PACKET                                   NTStatus      = 0xC000A124
+	STATUS_GPIO_OPERATION_DENIED                                              NTStatus      = 0xC000A125
+	STATUS_GPIO_INCOMPATIBLE_CONNECT_MODE                                     NTStatus      = 0xC000A126
+	STATUS_GPIO_INTERRUPT_ALREADY_UNMASKED                                    NTStatus      = 0x8000A127
+	STATUS_CANNOT_SWITCH_RUNLEVEL                                             NTStatus      = 0xC000A141
+	STATUS_INVALID_RUNLEVEL_SETTING                                           NTStatus      = 0xC000A142
+	STATUS_RUNLEVEL_SWITCH_TIMEOUT                                            NTStatus      = 0xC000A143
+	STATUS_SERVICES_FAILED_AUTOSTART                                          NTStatus      = 0x4000A144
+	STATUS_RUNLEVEL_SWITCH_AGENT_TIMEOUT                                      NTStatus      = 0xC000A145
+	STATUS_RUNLEVEL_SWITCH_IN_PROGRESS                                        NTStatus      = 0xC000A146
+	STATUS_NOT_APPCONTAINER                                                   NTStatus      = 0xC000A200
+	STATUS_NOT_SUPPORTED_IN_APPCONTAINER                                      NTStatus      = 0xC000A201
+	STATUS_INVALID_PACKAGE_SID_LENGTH                                         NTStatus      = 0xC000A202
+	STATUS_LPAC_ACCESS_DENIED                                                 NTStatus      = 0xC000A203
+	STATUS_ADMINLESS_ACCESS_DENIED                                            NTStatus      = 0xC000A204
+	STATUS_APP_DATA_NOT_FOUND                                                 NTStatus      = 0xC000A281
+	STATUS_APP_DATA_EXPIRED                                                   NTStatus      = 0xC000A282
+	STATUS_APP_DATA_CORRUPT                                                   NTStatus      = 0xC000A283
+	STATUS_APP_DATA_LIMIT_EXCEEDED                                            NTStatus      = 0xC000A284
+	STATUS_APP_DATA_REBOOT_REQUIRED                                           NTStatus      = 0xC000A285
+	STATUS_OFFLOAD_READ_FLT_NOT_SUPPORTED                                     NTStatus      = 0xC000A2A1
+	STATUS_OFFLOAD_WRITE_FLT_NOT_SUPPORTED                                    NTStatus      = 0xC000A2A2
+	STATUS_OFFLOAD_READ_FILE_NOT_SUPPORTED                                    NTStatus      = 0xC000A2A3
+	STATUS_OFFLOAD_WRITE_FILE_NOT_SUPPORTED                                   NTStatus      = 0xC000A2A4
+	STATUS_WOF_WIM_HEADER_CORRUPT                                             NTStatus      = 0xC000A2A5
+	STATUS_WOF_WIM_RESOURCE_TABLE_CORRUPT                                     NTStatus      = 0xC000A2A6
+	STATUS_WOF_FILE_RESOURCE_TABLE_CORRUPT                                    NTStatus      = 0xC000A2A7
+	STATUS_FILE_SYSTEM_VIRTUALIZATION_UNAVAILABLE                             NTStatus      = 0xC000CE01
+	STATUS_FILE_SYSTEM_VIRTUALIZATION_METADATA_CORRUPT                        NTStatus      = 0xC000CE02
+	STATUS_FILE_SYSTEM_VIRTUALIZATION_BUSY                                    NTStatus      = 0xC000CE03
+	STATUS_FILE_SYSTEM_VIRTUALIZATION_PROVIDER_UNKNOWN                        NTStatus      = 0xC000CE04
+	STATUS_FILE_SYSTEM_VIRTUALIZATION_INVALID_OPERATION                       NTStatus      = 0xC000CE05
+	STATUS_CLOUD_FILE_SYNC_ROOT_METADATA_CORRUPT                              NTStatus      = 0xC000CF00
+	STATUS_CLOUD_FILE_PROVIDER_NOT_RUNNING                                    NTStatus      = 0xC000CF01
+	STATUS_CLOUD_FILE_METADATA_CORRUPT                                        NTStatus      = 0xC000CF02
+	STATUS_CLOUD_FILE_METADATA_TOO_LARGE                                      NTStatus      = 0xC000CF03
+	STATUS_CLOUD_FILE_PROPERTY_BLOB_TOO_LARGE                                 NTStatus      = 0x8000CF04
+	STATUS_CLOUD_FILE_TOO_MANY_PROPERTY_BLOBS                                 NTStatus      = 0x8000CF05
+	STATUS_CLOUD_FILE_PROPERTY_VERSION_NOT_SUPPORTED                          NTStatus      = 0xC000CF06
+	STATUS_NOT_A_CLOUD_FILE                                                   NTStatus      = 0xC000CF07
+	STATUS_CLOUD_FILE_NOT_IN_SYNC                                             NTStatus      = 0xC000CF08
+	STATUS_CLOUD_FILE_ALREADY_CONNECTED                                       NTStatus      = 0xC000CF09
+	STATUS_CLOUD_FILE_NOT_SUPPORTED                                           NTStatus      = 0xC000CF0A
+	STATUS_CLOUD_FILE_INVALID_REQUEST                                         NTStatus      = 0xC000CF0B
+	STATUS_CLOUD_FILE_READ_ONLY_VOLUME                                        NTStatus      = 0xC000CF0C
+	STATUS_CLOUD_FILE_CONNECTED_PROVIDER_ONLY                                 NTStatus      = 0xC000CF0D
+	STATUS_CLOUD_FILE_VALIDATION_FAILED                                       NTStatus      = 0xC000CF0E
+	STATUS_CLOUD_FILE_AUTHENTICATION_FAILED                                   NTStatus      = 0xC000CF0F
+	STATUS_CLOUD_FILE_INSUFFICIENT_RESOURCES                                  NTStatus      = 0xC000CF10
+	STATUS_CLOUD_FILE_NETWORK_UNAVAILABLE                                     NTStatus      = 0xC000CF11
+	STATUS_CLOUD_FILE_UNSUCCESSFUL                                            NTStatus      = 0xC000CF12
+	STATUS_CLOUD_FILE_NOT_UNDER_SYNC_ROOT                                     NTStatus      = 0xC000CF13
+	STATUS_CLOUD_FILE_IN_USE                                                  NTStatus      = 0xC000CF14
+	STATUS_CLOUD_FILE_PINNED                                                  NTStatus      = 0xC000CF15
+	STATUS_CLOUD_FILE_REQUEST_ABORTED                                         NTStatus      = 0xC000CF16
+	STATUS_CLOUD_FILE_PROPERTY_CORRUPT                                        NTStatus      = 0xC000CF17
+	STATUS_CLOUD_FILE_ACCESS_DENIED                                           NTStatus      = 0xC000CF18
+	STATUS_CLOUD_FILE_INCOMPATIBLE_HARDLINKS                                  NTStatus      = 0xC000CF19
+	STATUS_CLOUD_FILE_PROPERTY_LOCK_CONFLICT                                  NTStatus      = 0xC000CF1A
+	STATUS_CLOUD_FILE_REQUEST_CANCELED                                        NTStatus      = 0xC000CF1B
+	STATUS_CLOUD_FILE_PROVIDER_TERMINATED                                     NTStatus      = 0xC000CF1D
+	STATUS_NOT_A_CLOUD_SYNC_ROOT                                              NTStatus      = 0xC000CF1E
+	STATUS_CLOUD_FILE_REQUEST_TIMEOUT                                         NTStatus      = 0xC000CF1F
+	STATUS_ACPI_INVALID_OPCODE                                                NTStatus      = 0xC0140001
+	STATUS_ACPI_STACK_OVERFLOW                                                NTStatus      = 0xC0140002
+	STATUS_ACPI_ASSERT_FAILED                                                 NTStatus      = 0xC0140003
+	STATUS_ACPI_INVALID_INDEX                                                 NTStatus      = 0xC0140004
+	STATUS_ACPI_INVALID_ARGUMENT                                              NTStatus      = 0xC0140005
+	STATUS_ACPI_FATAL                                                         NTStatus      = 0xC0140006
+	STATUS_ACPI_INVALID_SUPERNAME                                             NTStatus      = 0xC0140007
+	STATUS_ACPI_INVALID_ARGTYPE                                               NTStatus      = 0xC0140008
+	STATUS_ACPI_INVALID_OBJTYPE                                               NTStatus      = 0xC0140009
+	STATUS_ACPI_INVALID_TARGETTYPE                                            NTStatus      = 0xC014000A
+	STATUS_ACPI_INCORRECT_ARGUMENT_COUNT                                      NTStatus      = 0xC014000B
+	STATUS_ACPI_ADDRESS_NOT_MAPPED                                            NTStatus      = 0xC014000C
+	STATUS_ACPI_INVALID_EVENTTYPE                                             NTStatus      = 0xC014000D
+	STATUS_ACPI_HANDLER_COLLISION                                             NTStatus      = 0xC014000E
+	STATUS_ACPI_INVALID_DATA                                                  NTStatus      = 0xC014000F
+	STATUS_ACPI_INVALID_REGION                                                NTStatus      = 0xC0140010
+	STATUS_ACPI_INVALID_ACCESS_SIZE                                           NTStatus      = 0xC0140011
+	STATUS_ACPI_ACQUIRE_GLOBAL_LOCK                                           NTStatus      = 0xC0140012
+	STATUS_ACPI_ALREADY_INITIALIZED                                           NTStatus      = 0xC0140013
+	STATUS_ACPI_NOT_INITIALIZED                                               NTStatus      = 0xC0140014
+	STATUS_ACPI_INVALID_MUTEX_LEVEL                                           NTStatus      = 0xC0140015
+	STATUS_ACPI_MUTEX_NOT_OWNED                                               NTStatus      = 0xC0140016
+	STATUS_ACPI_MUTEX_NOT_OWNER                                               NTStatus      = 0xC0140017
+	STATUS_ACPI_RS_ACCESS                                                     NTStatus      = 0xC0140018
+	STATUS_ACPI_INVALID_TABLE                                                 NTStatus      = 0xC0140019
+	STATUS_ACPI_REG_HANDLER_FAILED                                            NTStatus      = 0xC0140020
+	STATUS_ACPI_POWER_REQUEST_FAILED                                          NTStatus      = 0xC0140021
+	STATUS_CTX_WINSTATION_NAME_INVALID                                        NTStatus      = 0xC00A0001
+	STATUS_CTX_INVALID_PD                                                     NTStatus      = 0xC00A0002
+	STATUS_CTX_PD_NOT_FOUND                                                   NTStatus      = 0xC00A0003
+	STATUS_CTX_CDM_CONNECT                                                    NTStatus      = 0x400A0004
+	STATUS_CTX_CDM_DISCONNECT                                                 NTStatus      = 0x400A0005
+	STATUS_CTX_CLOSE_PENDING                                                  NTStatus      = 0xC00A0006
+	STATUS_CTX_NO_OUTBUF                                                      NTStatus      = 0xC00A0007
+	STATUS_CTX_MODEM_INF_NOT_FOUND                                            NTStatus      = 0xC00A0008
+	STATUS_CTX_INVALID_MODEMNAME                                              NTStatus      = 0xC00A0009
+	STATUS_CTX_RESPONSE_ERROR                                                 NTStatus      = 0xC00A000A
+	STATUS_CTX_MODEM_RESPONSE_TIMEOUT                                         NTStatus      = 0xC00A000B
+	STATUS_CTX_MODEM_RESPONSE_NO_CARRIER                                      NTStatus      = 0xC00A000C
+	STATUS_CTX_MODEM_RESPONSE_NO_DIALTONE                                     NTStatus      = 0xC00A000D
+	STATUS_CTX_MODEM_RESPONSE_BUSY                                            NTStatus      = 0xC00A000E
+	STATUS_CTX_MODEM_RESPONSE_VOICE                                           NTStatus      = 0xC00A000F
+	STATUS_CTX_TD_ERROR                                                       NTStatus      = 0xC00A0010
+	STATUS_CTX_LICENSE_CLIENT_INVALID                                         NTStatus      = 0xC00A0012
+	STATUS_CTX_LICENSE_NOT_AVAILABLE                                          NTStatus      = 0xC00A0013
+	STATUS_CTX_LICENSE_EXPIRED                                                NTStatus      = 0xC00A0014
+	STATUS_CTX_WINSTATION_NOT_FOUND                                           NTStatus      = 0xC00A0015
+	STATUS_CTX_WINSTATION_NAME_COLLISION                                      NTStatus      = 0xC00A0016
+	STATUS_CTX_WINSTATION_BUSY                                                NTStatus      = 0xC00A0017
+	STATUS_CTX_BAD_VIDEO_MODE                                                 NTStatus      = 0xC00A0018
+	STATUS_CTX_GRAPHICS_INVALID                                               NTStatus      = 0xC00A0022
+	STATUS_CTX_NOT_CONSOLE                                                    NTStatus      = 0xC00A0024
+	STATUS_CTX_CLIENT_QUERY_TIMEOUT                                           NTStatus      = 0xC00A0026
+	STATUS_CTX_CONSOLE_DISCONNECT                                             NTStatus      = 0xC00A0027
+	STATUS_CTX_CONSOLE_CONNECT                                                NTStatus      = 0xC00A0028
+	STATUS_CTX_SHADOW_DENIED                                                  NTStatus      = 0xC00A002A
+	STATUS_CTX_WINSTATION_ACCESS_DENIED                                       NTStatus      = 0xC00A002B
+	STATUS_CTX_INVALID_WD                                                     NTStatus      = 0xC00A002E
+	STATUS_CTX_WD_NOT_FOUND                                                   NTStatus      = 0xC00A002F
+	STATUS_CTX_SHADOW_INVALID                                                 NTStatus      = 0xC00A0030
+	STATUS_CTX_SHADOW_DISABLED                                                NTStatus      = 0xC00A0031
+	STATUS_RDP_PROTOCOL_ERROR                                                 NTStatus      = 0xC00A0032
+	STATUS_CTX_CLIENT_LICENSE_NOT_SET                                         NTStatus      = 0xC00A0033
+	STATUS_CTX_CLIENT_LICENSE_IN_USE                                          NTStatus      = 0xC00A0034
+	STATUS_CTX_SHADOW_ENDED_BY_MODE_CHANGE                                    NTStatus      = 0xC00A0035
+	STATUS_CTX_SHADOW_NOT_RUNNING                                             NTStatus      = 0xC00A0036
+	STATUS_CTX_LOGON_DISABLED                                                 NTStatus      = 0xC00A0037
+	STATUS_CTX_SECURITY_LAYER_ERROR                                           NTStatus      = 0xC00A0038
+	STATUS_TS_INCOMPATIBLE_SESSIONS                                           NTStatus      = 0xC00A0039
+	STATUS_TS_VIDEO_SUBSYSTEM_ERROR                                           NTStatus      = 0xC00A003A
+	STATUS_PNP_BAD_MPS_TABLE                                                  NTStatus      = 0xC0040035
+	STATUS_PNP_TRANSLATION_FAILED                                             NTStatus      = 0xC0040036
+	STATUS_PNP_IRQ_TRANSLATION_FAILED                                         NTStatus      = 0xC0040037
+	STATUS_PNP_INVALID_ID                                                     NTStatus      = 0xC0040038
+	STATUS_IO_REISSUE_AS_CACHED                                               NTStatus      = 0xC0040039
+	STATUS_MUI_FILE_NOT_FOUND                                                 NTStatus      = 0xC00B0001
+	STATUS_MUI_INVALID_FILE                                                   NTStatus      = 0xC00B0002
+	STATUS_MUI_INVALID_RC_CONFIG                                              NTStatus      = 0xC00B0003
+	STATUS_MUI_INVALID_LOCALE_NAME                                            NTStatus      = 0xC00B0004
+	STATUS_MUI_INVALID_ULTIMATEFALLBACK_NAME                                  NTStatus      = 0xC00B0005
+	STATUS_MUI_FILE_NOT_LOADED                                                NTStatus      = 0xC00B0006
+	STATUS_RESOURCE_ENUM_USER_STOP                                            NTStatus      = 0xC00B0007
+	STATUS_FLT_NO_HANDLER_DEFINED                                             NTStatus      = 0xC01C0001
+	STATUS_FLT_CONTEXT_ALREADY_DEFINED                                        NTStatus      = 0xC01C0002
+	STATUS_FLT_INVALID_ASYNCHRONOUS_REQUEST                                   NTStatus      = 0xC01C0003
+	STATUS_FLT_DISALLOW_FAST_IO                                               NTStatus      = 0xC01C0004
+	STATUS_FLT_INVALID_NAME_REQUEST                                           NTStatus      = 0xC01C0005
+	STATUS_FLT_NOT_SAFE_TO_POST_OPERATION                                     NTStatus      = 0xC01C0006
+	STATUS_FLT_NOT_INITIALIZED                                                NTStatus      = 0xC01C0007
+	STATUS_FLT_FILTER_NOT_READY                                               NTStatus      = 0xC01C0008
+	STATUS_FLT_POST_OPERATION_CLEANUP                                         NTStatus      = 0xC01C0009
+	STATUS_FLT_INTERNAL_ERROR                                                 NTStatus      = 0xC01C000A
+	STATUS_FLT_DELETING_OBJECT                                                NTStatus      = 0xC01C000B
+	STATUS_FLT_MUST_BE_NONPAGED_POOL                                          NTStatus      = 0xC01C000C
+	STATUS_FLT_DUPLICATE_ENTRY                                                NTStatus      = 0xC01C000D
+	STATUS_FLT_CBDQ_DISABLED                                                  NTStatus      = 0xC01C000E
+	STATUS_FLT_DO_NOT_ATTACH                                                  NTStatus      = 0xC01C000F
+	STATUS_FLT_DO_NOT_DETACH                                                  NTStatus      = 0xC01C0010
+	STATUS_FLT_INSTANCE_ALTITUDE_COLLISION                                    NTStatus      = 0xC01C0011
+	STATUS_FLT_INSTANCE_NAME_COLLISION                                        NTStatus      = 0xC01C0012
+	STATUS_FLT_FILTER_NOT_FOUND                                               NTStatus      = 0xC01C0013
+	STATUS_FLT_VOLUME_NOT_FOUND                                               NTStatus      = 0xC01C0014
+	STATUS_FLT_INSTANCE_NOT_FOUND                                             NTStatus      = 0xC01C0015
+	STATUS_FLT_CONTEXT_ALLOCATION_NOT_FOUND                                   NTStatus      = 0xC01C0016
+	STATUS_FLT_INVALID_CONTEXT_REGISTRATION                                   NTStatus      = 0xC01C0017
+	STATUS_FLT_NAME_CACHE_MISS                                                NTStatus      = 0xC01C0018
+	STATUS_FLT_NO_DEVICE_OBJECT                                               NTStatus      = 0xC01C0019
+	STATUS_FLT_VOLUME_ALREADY_MOUNTED                                         NTStatus      = 0xC01C001A
+	STATUS_FLT_ALREADY_ENLISTED                                               NTStatus      = 0xC01C001B
+	STATUS_FLT_CONTEXT_ALREADY_LINKED                                         NTStatus      = 0xC01C001C
+	STATUS_FLT_NO_WAITER_FOR_REPLY                                            NTStatus      = 0xC01C0020
+	STATUS_FLT_REGISTRATION_BUSY                                              NTStatus      = 0xC01C0023
+	STATUS_SXS_SECTION_NOT_FOUND                                              NTStatus      = 0xC0150001
+	STATUS_SXS_CANT_GEN_ACTCTX                                                NTStatus      = 0xC0150002
+	STATUS_SXS_INVALID_ACTCTXDATA_FORMAT                                      NTStatus      = 0xC0150003
+	STATUS_SXS_ASSEMBLY_NOT_FOUND                                             NTStatus      = 0xC0150004
+	STATUS_SXS_MANIFEST_FORMAT_ERROR                                          NTStatus      = 0xC0150005
+	STATUS_SXS_MANIFEST_PARSE_ERROR                                           NTStatus      = 0xC0150006
+	STATUS_SXS_ACTIVATION_CONTEXT_DISABLED                                    NTStatus      = 0xC0150007
+	STATUS_SXS_KEY_NOT_FOUND                                                  NTStatus      = 0xC0150008
+	STATUS_SXS_VERSION_CONFLICT                                               NTStatus      = 0xC0150009
+	STATUS_SXS_WRONG_SECTION_TYPE                                             NTStatus      = 0xC015000A
+	STATUS_SXS_THREAD_QUERIES_DISABLED                                        NTStatus      = 0xC015000B
+	STATUS_SXS_ASSEMBLY_MISSING                                               NTStatus      = 0xC015000C
+	STATUS_SXS_RELEASE_ACTIVATION_CONTEXT                                     NTStatus      = 0x4015000D
+	STATUS_SXS_PROCESS_DEFAULT_ALREADY_SET                                    NTStatus      = 0xC015000E
+	STATUS_SXS_EARLY_DEACTIVATION                                             NTStatus      = 0xC015000F
+	STATUS_SXS_INVALID_DEACTIVATION                                           NTStatus      = 0xC0150010
+	STATUS_SXS_MULTIPLE_DEACTIVATION                                          NTStatus      = 0xC0150011
+	STATUS_SXS_SYSTEM_DEFAULT_ACTIVATION_CONTEXT_EMPTY                        NTStatus      = 0xC0150012
+	STATUS_SXS_PROCESS_TERMINATION_REQUESTED                                  NTStatus      = 0xC0150013
+	STATUS_SXS_CORRUPT_ACTIVATION_STACK                                       NTStatus      = 0xC0150014
+	STATUS_SXS_CORRUPTION                                                     NTStatus      = 0xC0150015
+	STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_VALUE                               NTStatus      = 0xC0150016
+	STATUS_SXS_INVALID_IDENTITY_ATTRIBUTE_NAME                                NTStatus      = 0xC0150017
+	STATUS_SXS_IDENTITY_DUPLICATE_ATTRIBUTE                                   NTStatus      = 0xC0150018
+	STATUS_SXS_IDENTITY_PARSE_ERROR                                           NTStatus      = 0xC0150019
+	STATUS_SXS_COMPONENT_STORE_CORRUPT                                        NTStatus      = 0xC015001A
+	STATUS_SXS_FILE_HASH_MISMATCH                                             NTStatus      = 0xC015001B
+	STATUS_SXS_MANIFEST_IDENTITY_SAME_BUT_CONTENTS_DIFFERENT                  NTStatus      = 0xC015001C
+	STATUS_SXS_IDENTITIES_DIFFERENT                                           NTStatus      = 0xC015001D
+	STATUS_SXS_ASSEMBLY_IS_NOT_A_DEPLOYMENT                                   NTStatus      = 0xC015001E
+	STATUS_SXS_FILE_NOT_PART_OF_ASSEMBLY                                      NTStatus      = 0xC015001F
+	STATUS_ADVANCED_INSTALLER_FAILED                                          NTStatus      = 0xC0150020
+	STATUS_XML_ENCODING_MISMATCH                                              NTStatus      = 0xC0150021
+	STATUS_SXS_MANIFEST_TOO_BIG                                               NTStatus      = 0xC0150022
+	STATUS_SXS_SETTING_NOT_REGISTERED                                         NTStatus      = 0xC0150023
+	STATUS_SXS_TRANSACTION_CLOSURE_INCOMPLETE                                 NTStatus      = 0xC0150024
+	STATUS_SMI_PRIMITIVE_INSTALLER_FAILED                                     NTStatus      = 0xC0150025
+	STATUS_GENERIC_COMMAND_FAILED                                             NTStatus      = 0xC0150026
+	STATUS_SXS_FILE_HASH_MISSING                                              NTStatus      = 0xC0150027
+	STATUS_CLUSTER_INVALID_NODE                                               NTStatus      = 0xC0130001
+	STATUS_CLUSTER_NODE_EXISTS                                                NTStatus      = 0xC0130002
+	STATUS_CLUSTER_JOIN_IN_PROGRESS                                           NTStatus      = 0xC0130003
+	STATUS_CLUSTER_NODE_NOT_FOUND                                             NTStatus      = 0xC0130004
+	STATUS_CLUSTER_LOCAL_NODE_NOT_FOUND                                       NTStatus      = 0xC0130005
+	STATUS_CLUSTER_NETWORK_EXISTS                                             NTStatus      = 0xC0130006
+	STATUS_CLUSTER_NETWORK_NOT_FOUND                                          NTStatus      = 0xC0130007
+	STATUS_CLUSTER_NETINTERFACE_EXISTS                                        NTStatus      = 0xC0130008
+	STATUS_CLUSTER_NETINTERFACE_NOT_FOUND                                     NTStatus      = 0xC0130009
+	STATUS_CLUSTER_INVALID_REQUEST                                            NTStatus      = 0xC013000A
+	STATUS_CLUSTER_INVALID_NETWORK_PROVIDER                                   NTStatus      = 0xC013000B
+	STATUS_CLUSTER_NODE_DOWN                                                  NTStatus      = 0xC013000C
+	STATUS_CLUSTER_NODE_UNREACHABLE                                           NTStatus      = 0xC013000D
+	STATUS_CLUSTER_NODE_NOT_MEMBER                                            NTStatus      = 0xC013000E
+	STATUS_CLUSTER_JOIN_NOT_IN_PROGRESS                                       NTStatus      = 0xC013000F
+	STATUS_CLUSTER_INVALID_NETWORK                                            NTStatus      = 0xC0130010
+	STATUS_CLUSTER_NO_NET_ADAPTERS                                            NTStatus      = 0xC0130011
+	STATUS_CLUSTER_NODE_UP                                                    NTStatus      = 0xC0130012
+	STATUS_CLUSTER_NODE_PAUSED                                                NTStatus      = 0xC0130013
+	STATUS_CLUSTER_NODE_NOT_PAUSED                                            NTStatus      = 0xC0130014
+	STATUS_CLUSTER_NO_SECURITY_CONTEXT                                        NTStatus      = 0xC0130015
+	STATUS_CLUSTER_NETWORK_NOT_INTERNAL                                       NTStatus      = 0xC0130016
+	STATUS_CLUSTER_POISONED                                                   NTStatus      = 0xC0130017
+	STATUS_CLUSTER_NON_CSV_PATH                                               NTStatus      = 0xC0130018
+	STATUS_CLUSTER_CSV_VOLUME_NOT_LOCAL                                       NTStatus      = 0xC0130019
+	STATUS_CLUSTER_CSV_READ_OPLOCK_BREAK_IN_PROGRESS                          NTStatus      = 0xC0130020
+	STATUS_CLUSTER_CSV_AUTO_PAUSE_ERROR                                       NTStatus      = 0xC0130021
+	STATUS_CLUSTER_CSV_REDIRECTED                                             NTStatus      = 0xC0130022
+	STATUS_CLUSTER_CSV_NOT_REDIRECTED                                         NTStatus      = 0xC0130023
+	STATUS_CLUSTER_CSV_VOLUME_DRAINING                                        NTStatus      = 0xC0130024
+	STATUS_CLUSTER_CSV_SNAPSHOT_CREATION_IN_PROGRESS                          NTStatus      = 0xC0130025
+	STATUS_CLUSTER_CSV_VOLUME_DRAINING_SUCCEEDED_DOWNLEVEL                    NTStatus      = 0xC0130026
+	STATUS_CLUSTER_CSV_NO_SNAPSHOTS                                           NTStatus      = 0xC0130027
+	STATUS_CSV_IO_PAUSE_TIMEOUT                                               NTStatus      = 0xC0130028
+	STATUS_CLUSTER_CSV_INVALID_HANDLE                                         NTStatus      = 0xC0130029
+	STATUS_CLUSTER_CSV_SUPPORTED_ONLY_ON_COORDINATOR                          NTStatus      = 0xC0130030
+	STATUS_CLUSTER_CAM_TICKET_REPLAY_DETECTED                                 NTStatus      = 0xC0130031
+	STATUS_TRANSACTIONAL_CONFLICT                                             NTStatus      = 0xC0190001
+	STATUS_INVALID_TRANSACTION                                                NTStatus      = 0xC0190002
+	STATUS_TRANSACTION_NOT_ACTIVE                                             NTStatus      = 0xC0190003
+	STATUS_TM_INITIALIZATION_FAILED                                           NTStatus      = 0xC0190004
+	STATUS_RM_NOT_ACTIVE                                                      NTStatus      = 0xC0190005
+	STATUS_RM_METADATA_CORRUPT                                                NTStatus      = 0xC0190006
+	STATUS_TRANSACTION_NOT_JOINED                                             NTStatus      = 0xC0190007
+	STATUS_DIRECTORY_NOT_RM                                                   NTStatus      = 0xC0190008
+	STATUS_COULD_NOT_RESIZE_LOG                                               NTStatus      = 0x80190009
+	STATUS_TRANSACTIONS_UNSUPPORTED_REMOTE                                    NTStatus      = 0xC019000A
+	STATUS_LOG_RESIZE_INVALID_SIZE                                            NTStatus      = 0xC019000B
+	STATUS_REMOTE_FILE_VERSION_MISMATCH                                       NTStatus      = 0xC019000C
+	STATUS_CRM_PROTOCOL_ALREADY_EXISTS                                        NTStatus      = 0xC019000F
+	STATUS_TRANSACTION_PROPAGATION_FAILED                                     NTStatus      = 0xC0190010
+	STATUS_CRM_PROTOCOL_NOT_FOUND                                             NTStatus      = 0xC0190011
+	STATUS_TRANSACTION_SUPERIOR_EXISTS                                        NTStatus      = 0xC0190012
+	STATUS_TRANSACTION_REQUEST_NOT_VALID                                      NTStatus      = 0xC0190013
+	STATUS_TRANSACTION_NOT_REQUESTED                                          NTStatus      = 0xC0190014
+	STATUS_TRANSACTION_ALREADY_ABORTED                                        NTStatus      = 0xC0190015
+	STATUS_TRANSACTION_ALREADY_COMMITTED                                      NTStatus      = 0xC0190016
+	STATUS_TRANSACTION_INVALID_MARSHALL_BUFFER                                NTStatus      = 0xC0190017
+	STATUS_CURRENT_TRANSACTION_NOT_VALID                                      NTStatus      = 0xC0190018
+	STATUS_LOG_GROWTH_FAILED                                                  NTStatus      = 0xC0190019
+	STATUS_OBJECT_NO_LONGER_EXISTS                                            NTStatus      = 0xC0190021
+	STATUS_STREAM_MINIVERSION_NOT_FOUND                                       NTStatus      = 0xC0190022
+	STATUS_STREAM_MINIVERSION_NOT_VALID                                       NTStatus      = 0xC0190023
+	STATUS_MINIVERSION_INACCESSIBLE_FROM_SPECIFIED_TRANSACTION                NTStatus      = 0xC0190024
+	STATUS_CANT_OPEN_MINIVERSION_WITH_MODIFY_INTENT                           NTStatus      = 0xC0190025
+	STATUS_CANT_CREATE_MORE_STREAM_MINIVERSIONS                               NTStatus      = 0xC0190026
+	STATUS_HANDLE_NO_LONGER_VALID                                             NTStatus      = 0xC0190028
+	STATUS_NO_TXF_METADATA                                                    NTStatus      = 0x80190029
+	STATUS_LOG_CORRUPTION_DETECTED                                            NTStatus      = 0xC0190030
+	STATUS_CANT_RECOVER_WITH_HANDLE_OPEN                                      NTStatus      = 0x80190031
+	STATUS_RM_DISCONNECTED                                                    NTStatus      = 0xC0190032
+	STATUS_ENLISTMENT_NOT_SUPERIOR                                            NTStatus      = 0xC0190033
+	STATUS_RECOVERY_NOT_NEEDED                                                NTStatus      = 0x40190034
+	STATUS_RM_ALREADY_STARTED                                                 NTStatus      = 0x40190035
+	STATUS_FILE_IDENTITY_NOT_PERSISTENT                                       NTStatus      = 0xC0190036
+	STATUS_CANT_BREAK_TRANSACTIONAL_DEPENDENCY                                NTStatus      = 0xC0190037
+	STATUS_CANT_CROSS_RM_BOUNDARY                                             NTStatus      = 0xC0190038
+	STATUS_TXF_DIR_NOT_EMPTY                                                  NTStatus      = 0xC0190039
+	STATUS_INDOUBT_TRANSACTIONS_EXIST                                         NTStatus      = 0xC019003A
+	STATUS_TM_VOLATILE                                                        NTStatus      = 0xC019003B
+	STATUS_ROLLBACK_TIMER_EXPIRED                                             NTStatus      = 0xC019003C
+	STATUS_TXF_ATTRIBUTE_CORRUPT                                              NTStatus      = 0xC019003D
+	STATUS_EFS_NOT_ALLOWED_IN_TRANSACTION                                     NTStatus      = 0xC019003E
+	STATUS_TRANSACTIONAL_OPEN_NOT_ALLOWED                                     NTStatus      = 0xC019003F
+	STATUS_TRANSACTED_MAPPING_UNSUPPORTED_REMOTE                              NTStatus      = 0xC0190040
+	STATUS_TXF_METADATA_ALREADY_PRESENT                                       NTStatus      = 0x80190041
+	STATUS_TRANSACTION_SCOPE_CALLBACKS_NOT_SET                                NTStatus      = 0x80190042
+	STATUS_TRANSACTION_REQUIRED_PROMOTION                                     NTStatus      = 0xC0190043
+	STATUS_CANNOT_EXECUTE_FILE_IN_TRANSACTION                                 NTStatus      = 0xC0190044
+	STATUS_TRANSACTIONS_NOT_FROZEN                                            NTStatus      = 0xC0190045
+	STATUS_TRANSACTION_FREEZE_IN_PROGRESS                                     NTStatus      = 0xC0190046
+	STATUS_NOT_SNAPSHOT_VOLUME                                                NTStatus      = 0xC0190047
+	STATUS_NO_SAVEPOINT_WITH_OPEN_FILES                                       NTStatus      = 0xC0190048
+	STATUS_SPARSE_NOT_ALLOWED_IN_TRANSACTION                                  NTStatus      = 0xC0190049
+	STATUS_TM_IDENTITY_MISMATCH                                               NTStatus      = 0xC019004A
+	STATUS_FLOATED_SECTION                                                    NTStatus      = 0xC019004B
+	STATUS_CANNOT_ACCEPT_TRANSACTED_WORK                                      NTStatus      = 0xC019004C
+	STATUS_CANNOT_ABORT_TRANSACTIONS                                          NTStatus      = 0xC019004D
+	STATUS_TRANSACTION_NOT_FOUND                                              NTStatus      = 0xC019004E
+	STATUS_RESOURCEMANAGER_NOT_FOUND                                          NTStatus      = 0xC019004F
+	STATUS_ENLISTMENT_NOT_FOUND                                               NTStatus      = 0xC0190050
+	STATUS_TRANSACTIONMANAGER_NOT_FOUND                                       NTStatus      = 0xC0190051
+	STATUS_TRANSACTIONMANAGER_NOT_ONLINE                                      NTStatus      = 0xC0190052
+	STATUS_TRANSACTIONMANAGER_RECOVERY_NAME_COLLISION                         NTStatus      = 0xC0190053
+	STATUS_TRANSACTION_NOT_ROOT                                               NTStatus      = 0xC0190054
+	STATUS_TRANSACTION_OBJECT_EXPIRED                                         NTStatus      = 0xC0190055
+	STATUS_COMPRESSION_NOT_ALLOWED_IN_TRANSACTION                             NTStatus      = 0xC0190056
+	STATUS_TRANSACTION_RESPONSE_NOT_ENLISTED                                  NTStatus      = 0xC0190057
+	STATUS_TRANSACTION_RECORD_TOO_LONG                                        NTStatus      = 0xC0190058
+	STATUS_NO_LINK_TRACKING_IN_TRANSACTION                                    NTStatus      = 0xC0190059
+	STATUS_OPERATION_NOT_SUPPORTED_IN_TRANSACTION                             NTStatus      = 0xC019005A
+	STATUS_TRANSACTION_INTEGRITY_VIOLATED                                     NTStatus      = 0xC019005B
+	STATUS_TRANSACTIONMANAGER_IDENTITY_MISMATCH                               NTStatus      = 0xC019005C
+	STATUS_RM_CANNOT_BE_FROZEN_FOR_SNAPSHOT                                   NTStatus      = 0xC019005D
+	STATUS_TRANSACTION_MUST_WRITETHROUGH                                      NTStatus      = 0xC019005E
+	STATUS_TRANSACTION_NO_SUPERIOR                                            NTStatus      = 0xC019005F
+	STATUS_EXPIRED_HANDLE                                                     NTStatus      = 0xC0190060
+	STATUS_TRANSACTION_NOT_ENLISTED                                           NTStatus      = 0xC0190061
+	STATUS_LOG_SECTOR_INVALID                                                 NTStatus      = 0xC01A0001
+	STATUS_LOG_SECTOR_PARITY_INVALID                                          NTStatus      = 0xC01A0002
+	STATUS_LOG_SECTOR_REMAPPED                                                NTStatus      = 0xC01A0003
+	STATUS_LOG_BLOCK_INCOMPLETE                                               NTStatus      = 0xC01A0004
+	STATUS_LOG_INVALID_RANGE                                                  NTStatus      = 0xC01A0005
+	STATUS_LOG_BLOCKS_EXHAUSTED                                               NTStatus      = 0xC01A0006
+	STATUS_LOG_READ_CONTEXT_INVALID                                           NTStatus      = 0xC01A0007
+	STATUS_LOG_RESTART_INVALID                                                NTStatus      = 0xC01A0008
+	STATUS_LOG_BLOCK_VERSION                                                  NTStatus      = 0xC01A0009
+	STATUS_LOG_BLOCK_INVALID                                                  NTStatus      = 0xC01A000A
+	STATUS_LOG_READ_MODE_INVALID                                              NTStatus      = 0xC01A000B
+	STATUS_LOG_NO_RESTART                                                     NTStatus      = 0x401A000C
+	STATUS_LOG_METADATA_CORRUPT                                               NTStatus      = 0xC01A000D
+	STATUS_LOG_METADATA_INVALID                                               NTStatus      = 0xC01A000E
+	STATUS_LOG_METADATA_INCONSISTENT                                          NTStatus      = 0xC01A000F
+	STATUS_LOG_RESERVATION_INVALID                                            NTStatus      = 0xC01A0010
+	STATUS_LOG_CANT_DELETE                                                    NTStatus      = 0xC01A0011
+	STATUS_LOG_CONTAINER_LIMIT_EXCEEDED                                       NTStatus      = 0xC01A0012
+	STATUS_LOG_START_OF_LOG                                                   NTStatus      = 0xC01A0013
+	STATUS_LOG_POLICY_ALREADY_INSTALLED                                       NTStatus      = 0xC01A0014
+	STATUS_LOG_POLICY_NOT_INSTALLED                                           NTStatus      = 0xC01A0015
+	STATUS_LOG_POLICY_INVALID                                                 NTStatus      = 0xC01A0016
+	STATUS_LOG_POLICY_CONFLICT                                                NTStatus      = 0xC01A0017
+	STATUS_LOG_PINNED_ARCHIVE_TAIL                                            NTStatus      = 0xC01A0018
+	STATUS_LOG_RECORD_NONEXISTENT                                             NTStatus      = 0xC01A0019
+	STATUS_LOG_RECORDS_RESERVED_INVALID                                       NTStatus      = 0xC01A001A
+	STATUS_LOG_SPACE_RESERVED_INVALID                                         NTStatus      = 0xC01A001B
+	STATUS_LOG_TAIL_INVALID                                                   NTStatus      = 0xC01A001C
+	STATUS_LOG_FULL                                                           NTStatus      = 0xC01A001D
+	STATUS_LOG_MULTIPLEXED                                                    NTStatus      = 0xC01A001E
+	STATUS_LOG_DEDICATED                                                      NTStatus      = 0xC01A001F
+	STATUS_LOG_ARCHIVE_NOT_IN_PROGRESS                                        NTStatus      = 0xC01A0020
+	STATUS_LOG_ARCHIVE_IN_PROGRESS                                            NTStatus      = 0xC01A0021
+	STATUS_LOG_EPHEMERAL                                                      NTStatus      = 0xC01A0022
+	STATUS_LOG_NOT_ENOUGH_CONTAINERS                                          NTStatus      = 0xC01A0023
+	STATUS_LOG_CLIENT_ALREADY_REGISTERED                                      NTStatus      = 0xC01A0024
+	STATUS_LOG_CLIENT_NOT_REGISTERED                                          NTStatus      = 0xC01A0025
+	STATUS_LOG_FULL_HANDLER_IN_PROGRESS                                       NTStatus      = 0xC01A0026
+	STATUS_LOG_CONTAINER_READ_FAILED                                          NTStatus      = 0xC01A0027
+	STATUS_LOG_CONTAINER_WRITE_FAILED                                         NTStatus      = 0xC01A0028
+	STATUS_LOG_CONTAINER_OPEN_FAILED                                          NTStatus      = 0xC01A0029
+	STATUS_LOG_CONTAINER_STATE_INVALID                                        NTStatus      = 0xC01A002A
+	STATUS_LOG_STATE_INVALID                                                  NTStatus      = 0xC01A002B
+	STATUS_LOG_PINNED                                                         NTStatus      = 0xC01A002C
+	STATUS_LOG_METADATA_FLUSH_FAILED                                          NTStatus      = 0xC01A002D
+	STATUS_LOG_INCONSISTENT_SECURITY                                          NTStatus      = 0xC01A002E
+	STATUS_LOG_APPENDED_FLUSH_FAILED                                          NTStatus      = 0xC01A002F
+	STATUS_LOG_PINNED_RESERVATION                                             NTStatus      = 0xC01A0030
+	STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD                                   NTStatus      = 0xC01B00EA
+	STATUS_VIDEO_HUNG_DISPLAY_DRIVER_THREAD_RECOVERED                         NTStatus      = 0x801B00EB
+	STATUS_VIDEO_DRIVER_DEBUG_REPORT_REQUEST                                  NTStatus      = 0x401B00EC
+	STATUS_MONITOR_NO_DESCRIPTOR                                              NTStatus      = 0xC01D0001
+	STATUS_MONITOR_UNKNOWN_DESCRIPTOR_FORMAT                                  NTStatus      = 0xC01D0002
+	STATUS_MONITOR_INVALID_DESCRIPTOR_CHECKSUM                                NTStatus      = 0xC01D0003
+	STATUS_MONITOR_INVALID_STANDARD_TIMING_BLOCK                              NTStatus      = 0xC01D0004
+	STATUS_MONITOR_WMI_DATABLOCK_REGISTRATION_FAILED                          NTStatus      = 0xC01D0005
+	STATUS_MONITOR_INVALID_SERIAL_NUMBER_MONDSC_BLOCK                         NTStatus      = 0xC01D0006
+	STATUS_MONITOR_INVALID_USER_FRIENDLY_MONDSC_BLOCK                         NTStatus      = 0xC01D0007
+	STATUS_MONITOR_NO_MORE_DESCRIPTOR_DATA                                    NTStatus      = 0xC01D0008
+	STATUS_MONITOR_INVALID_DETAILED_TIMING_BLOCK                              NTStatus      = 0xC01D0009
+	STATUS_MONITOR_INVALID_MANUFACTURE_DATE                                   NTStatus      = 0xC01D000A
+	STATUS_GRAPHICS_NOT_EXCLUSIVE_MODE_OWNER                                  NTStatus      = 0xC01E0000
+	STATUS_GRAPHICS_INSUFFICIENT_DMA_BUFFER                                   NTStatus      = 0xC01E0001
+	STATUS_GRAPHICS_INVALID_DISPLAY_ADAPTER                                   NTStatus      = 0xC01E0002
+	STATUS_GRAPHICS_ADAPTER_WAS_RESET                                         NTStatus      = 0xC01E0003
+	STATUS_GRAPHICS_INVALID_DRIVER_MODEL                                      NTStatus      = 0xC01E0004
+	STATUS_GRAPHICS_PRESENT_MODE_CHANGED                                      NTStatus      = 0xC01E0005
+	STATUS_GRAPHICS_PRESENT_OCCLUDED                                          NTStatus      = 0xC01E0006
+	STATUS_GRAPHICS_PRESENT_DENIED                                            NTStatus      = 0xC01E0007
+	STATUS_GRAPHICS_CANNOTCOLORCONVERT                                        NTStatus      = 0xC01E0008
+	STATUS_GRAPHICS_DRIVER_MISMATCH                                           NTStatus      = 0xC01E0009
+	STATUS_GRAPHICS_PARTIAL_DATA_POPULATED                                    NTStatus      = 0x401E000A
+	STATUS_GRAPHICS_PRESENT_REDIRECTION_DISABLED                              NTStatus      = 0xC01E000B
+	STATUS_GRAPHICS_PRESENT_UNOCCLUDED                                        NTStatus      = 0xC01E000C
+	STATUS_GRAPHICS_WINDOWDC_NOT_AVAILABLE                                    NTStatus      = 0xC01E000D
+	STATUS_GRAPHICS_WINDOWLESS_PRESENT_DISABLED                               NTStatus      = 0xC01E000E
+	STATUS_GRAPHICS_PRESENT_INVALID_WINDOW                                    NTStatus      = 0xC01E000F
+	STATUS_GRAPHICS_PRESENT_BUFFER_NOT_BOUND                                  NTStatus      = 0xC01E0010
+	STATUS_GRAPHICS_VAIL_STATE_CHANGED                                        NTStatus      = 0xC01E0011
+	STATUS_GRAPHICS_INDIRECT_DISPLAY_ABANDON_SWAPCHAIN                        NTStatus      = 0xC01E0012
+	STATUS_GRAPHICS_INDIRECT_DISPLAY_DEVICE_STOPPED                           NTStatus      = 0xC01E0013
+	STATUS_GRAPHICS_NO_VIDEO_MEMORY                                           NTStatus      = 0xC01E0100
+	STATUS_GRAPHICS_CANT_LOCK_MEMORY                                          NTStatus      = 0xC01E0101
+	STATUS_GRAPHICS_ALLOCATION_BUSY                                           NTStatus      = 0xC01E0102
+	STATUS_GRAPHICS_TOO_MANY_REFERENCES                                       NTStatus      = 0xC01E0103
+	STATUS_GRAPHICS_TRY_AGAIN_LATER                                           NTStatus      = 0xC01E0104
+	STATUS_GRAPHICS_TRY_AGAIN_NOW                                             NTStatus      = 0xC01E0105
+	STATUS_GRAPHICS_ALLOCATION_INVALID                                        NTStatus      = 0xC01E0106
+	STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNAVAILABLE                          NTStatus      = 0xC01E0107
+	STATUS_GRAPHICS_UNSWIZZLING_APERTURE_UNSUPPORTED                          NTStatus      = 0xC01E0108
+	STATUS_GRAPHICS_CANT_EVICT_PINNED_ALLOCATION                              NTStatus      = 0xC01E0109
+	STATUS_GRAPHICS_INVALID_ALLOCATION_USAGE                                  NTStatus      = 0xC01E0110
+	STATUS_GRAPHICS_CANT_RENDER_LOCKED_ALLOCATION                             NTStatus      = 0xC01E0111
+	STATUS_GRAPHICS_ALLOCATION_CLOSED                                         NTStatus      = 0xC01E0112
+	STATUS_GRAPHICS_INVALID_ALLOCATION_INSTANCE                               NTStatus      = 0xC01E0113
+	STATUS_GRAPHICS_INVALID_ALLOCATION_HANDLE                                 NTStatus      = 0xC01E0114
+	STATUS_GRAPHICS_WRONG_ALLOCATION_DEVICE                                   NTStatus      = 0xC01E0115
+	STATUS_GRAPHICS_ALLOCATION_CONTENT_LOST                                   NTStatus      = 0xC01E0116
+	STATUS_GRAPHICS_GPU_EXCEPTION_ON_DEVICE                                   NTStatus      = 0xC01E0200
+	STATUS_GRAPHICS_SKIP_ALLOCATION_PREPARATION                               NTStatus      = 0x401E0201
+	STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY                                    NTStatus      = 0xC01E0300
+	STATUS_GRAPHICS_VIDPN_TOPOLOGY_NOT_SUPPORTED                              NTStatus      = 0xC01E0301
+	STATUS_GRAPHICS_VIDPN_TOPOLOGY_CURRENTLY_NOT_SUPPORTED                    NTStatus      = 0xC01E0302
+	STATUS_GRAPHICS_INVALID_VIDPN                                             NTStatus      = 0xC01E0303
+	STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE                              NTStatus      = 0xC01E0304
+	STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET                              NTStatus      = 0xC01E0305
+	STATUS_GRAPHICS_VIDPN_MODALITY_NOT_SUPPORTED                              NTStatus      = 0xC01E0306
+	STATUS_GRAPHICS_MODE_NOT_PINNED                                           NTStatus      = 0x401E0307
+	STATUS_GRAPHICS_INVALID_VIDPN_SOURCEMODESET                               NTStatus      = 0xC01E0308
+	STATUS_GRAPHICS_INVALID_VIDPN_TARGETMODESET                               NTStatus      = 0xC01E0309
+	STATUS_GRAPHICS_INVALID_FREQUENCY                                         NTStatus      = 0xC01E030A
+	STATUS_GRAPHICS_INVALID_ACTIVE_REGION                                     NTStatus      = 0xC01E030B
+	STATUS_GRAPHICS_INVALID_TOTAL_REGION                                      NTStatus      = 0xC01E030C
+	STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_SOURCE_MODE                         NTStatus      = 0xC01E0310
+	STATUS_GRAPHICS_INVALID_VIDEO_PRESENT_TARGET_MODE                         NTStatus      = 0xC01E0311
+	STATUS_GRAPHICS_PINNED_MODE_MUST_REMAIN_IN_SET                            NTStatus      = 0xC01E0312
+	STATUS_GRAPHICS_PATH_ALREADY_IN_TOPOLOGY                                  NTStatus      = 0xC01E0313
+	STATUS_GRAPHICS_MODE_ALREADY_IN_MODESET                                   NTStatus      = 0xC01E0314
+	STATUS_GRAPHICS_INVALID_VIDEOPRESENTSOURCESET                             NTStatus      = 0xC01E0315
+	STATUS_GRAPHICS_INVALID_VIDEOPRESENTTARGETSET                             NTStatus      = 0xC01E0316
+	STATUS_GRAPHICS_SOURCE_ALREADY_IN_SET                                     NTStatus      = 0xC01E0317
+	STATUS_GRAPHICS_TARGET_ALREADY_IN_SET                                     NTStatus      = 0xC01E0318
+	STATUS_GRAPHICS_INVALID_VIDPN_PRESENT_PATH                                NTStatus      = 0xC01E0319
+	STATUS_GRAPHICS_NO_RECOMMENDED_VIDPN_TOPOLOGY                             NTStatus      = 0xC01E031A
+	STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGESET                         NTStatus      = 0xC01E031B
+	STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE                            NTStatus      = 0xC01E031C
+	STATUS_GRAPHICS_FREQUENCYRANGE_NOT_IN_SET                                 NTStatus      = 0xC01E031D
+	STATUS_GRAPHICS_NO_PREFERRED_MODE                                         NTStatus      = 0x401E031E
+	STATUS_GRAPHICS_FREQUENCYRANGE_ALREADY_IN_SET                             NTStatus      = 0xC01E031F
+	STATUS_GRAPHICS_STALE_MODESET                                             NTStatus      = 0xC01E0320
+	STATUS_GRAPHICS_INVALID_MONITOR_SOURCEMODESET                             NTStatus      = 0xC01E0321
+	STATUS_GRAPHICS_INVALID_MONITOR_SOURCE_MODE                               NTStatus      = 0xC01E0322
+	STATUS_GRAPHICS_NO_RECOMMENDED_FUNCTIONAL_VIDPN                           NTStatus      = 0xC01E0323
+	STATUS_GRAPHICS_MODE_ID_MUST_BE_UNIQUE                                    NTStatus      = 0xC01E0324
+	STATUS_GRAPHICS_EMPTY_ADAPTER_MONITOR_MODE_SUPPORT_INTERSECTION           NTStatus      = 0xC01E0325
+	STATUS_GRAPHICS_VIDEO_PRESENT_TARGETS_LESS_THAN_SOURCES                   NTStatus      = 0xC01E0326
+	STATUS_GRAPHICS_PATH_NOT_IN_TOPOLOGY                                      NTStatus      = 0xC01E0327
+	STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_SOURCE                     NTStatus      = 0xC01E0328
+	STATUS_GRAPHICS_ADAPTER_MUST_HAVE_AT_LEAST_ONE_TARGET                     NTStatus      = 0xC01E0329
+	STATUS_GRAPHICS_INVALID_MONITORDESCRIPTORSET                              NTStatus      = 0xC01E032A
+	STATUS_GRAPHICS_INVALID_MONITORDESCRIPTOR                                 NTStatus      = 0xC01E032B
+	STATUS_GRAPHICS_MONITORDESCRIPTOR_NOT_IN_SET                              NTStatus      = 0xC01E032C
+	STATUS_GRAPHICS_MONITORDESCRIPTOR_ALREADY_IN_SET                          NTStatus      = 0xC01E032D
+	STATUS_GRAPHICS_MONITORDESCRIPTOR_ID_MUST_BE_UNIQUE                       NTStatus      = 0xC01E032E
+	STATUS_GRAPHICS_INVALID_VIDPN_TARGET_SUBSET_TYPE                          NTStatus      = 0xC01E032F
+	STATUS_GRAPHICS_RESOURCES_NOT_RELATED                                     NTStatus      = 0xC01E0330
+	STATUS_GRAPHICS_SOURCE_ID_MUST_BE_UNIQUE                                  NTStatus      = 0xC01E0331
+	STATUS_GRAPHICS_TARGET_ID_MUST_BE_UNIQUE                                  NTStatus      = 0xC01E0332
+	STATUS_GRAPHICS_NO_AVAILABLE_VIDPN_TARGET                                 NTStatus      = 0xC01E0333
+	STATUS_GRAPHICS_MONITOR_COULD_NOT_BE_ASSOCIATED_WITH_ADAPTER              NTStatus      = 0xC01E0334
+	STATUS_GRAPHICS_NO_VIDPNMGR                                               NTStatus      = 0xC01E0335
+	STATUS_GRAPHICS_NO_ACTIVE_VIDPN                                           NTStatus      = 0xC01E0336
+	STATUS_GRAPHICS_STALE_VIDPN_TOPOLOGY                                      NTStatus      = 0xC01E0337
+	STATUS_GRAPHICS_MONITOR_NOT_CONNECTED                                     NTStatus      = 0xC01E0338
+	STATUS_GRAPHICS_SOURCE_NOT_IN_TOPOLOGY                                    NTStatus      = 0xC01E0339
+	STATUS_GRAPHICS_INVALID_PRIMARYSURFACE_SIZE                               NTStatus      = 0xC01E033A
+	STATUS_GRAPHICS_INVALID_VISIBLEREGION_SIZE                                NTStatus      = 0xC01E033B
+	STATUS_GRAPHICS_INVALID_STRIDE                                            NTStatus      = 0xC01E033C
+	STATUS_GRAPHICS_INVALID_PIXELFORMAT                                       NTStatus      = 0xC01E033D
+	STATUS_GRAPHICS_INVALID_COLORBASIS                                        NTStatus      = 0xC01E033E
+	STATUS_GRAPHICS_INVALID_PIXELVALUEACCESSMODE                              NTStatus      = 0xC01E033F
+	STATUS_GRAPHICS_TARGET_NOT_IN_TOPOLOGY                                    NTStatus      = 0xC01E0340
+	STATUS_GRAPHICS_NO_DISPLAY_MODE_MANAGEMENT_SUPPORT                        NTStatus      = 0xC01E0341
+	STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE                                       NTStatus      = 0xC01E0342
+	STATUS_GRAPHICS_CANT_ACCESS_ACTIVE_VIDPN                                  NTStatus      = 0xC01E0343
+	STATUS_GRAPHICS_INVALID_PATH_IMPORTANCE_ORDINAL                           NTStatus      = 0xC01E0344
+	STATUS_GRAPHICS_INVALID_PATH_CONTENT_GEOMETRY_TRANSFORMATION              NTStatus      = 0xC01E0345
+	STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_SUPPORTED        NTStatus      = 0xC01E0346
+	STATUS_GRAPHICS_INVALID_GAMMA_RAMP                                        NTStatus      = 0xC01E0347
+	STATUS_GRAPHICS_GAMMA_RAMP_NOT_SUPPORTED                                  NTStatus      = 0xC01E0348
+	STATUS_GRAPHICS_MULTISAMPLING_NOT_SUPPORTED                               NTStatus      = 0xC01E0349
+	STATUS_GRAPHICS_MODE_NOT_IN_MODESET                                       NTStatus      = 0xC01E034A
+	STATUS_GRAPHICS_DATASET_IS_EMPTY                                          NTStatus      = 0x401E034B
+	STATUS_GRAPHICS_NO_MORE_ELEMENTS_IN_DATASET                               NTStatus      = 0x401E034C
+	STATUS_GRAPHICS_INVALID_VIDPN_TOPOLOGY_RECOMMENDATION_REASON              NTStatus      = 0xC01E034D
+	STATUS_GRAPHICS_INVALID_PATH_CONTENT_TYPE                                 NTStatus      = 0xC01E034E
+	STATUS_GRAPHICS_INVALID_COPYPROTECTION_TYPE                               NTStatus      = 0xC01E034F
+	STATUS_GRAPHICS_UNASSIGNED_MODESET_ALREADY_EXISTS                         NTStatus      = 0xC01E0350
+	STATUS_GRAPHICS_PATH_CONTENT_GEOMETRY_TRANSFORMATION_NOT_PINNED           NTStatus      = 0x401E0351
+	STATUS_GRAPHICS_INVALID_SCANLINE_ORDERING                                 NTStatus      = 0xC01E0352
+	STATUS_GRAPHICS_TOPOLOGY_CHANGES_NOT_ALLOWED                              NTStatus      = 0xC01E0353
+	STATUS_GRAPHICS_NO_AVAILABLE_IMPORTANCE_ORDINALS                          NTStatus      = 0xC01E0354
+	STATUS_GRAPHICS_INCOMPATIBLE_PRIVATE_FORMAT                               NTStatus      = 0xC01E0355
+	STATUS_GRAPHICS_INVALID_MODE_PRUNING_ALGORITHM                            NTStatus      = 0xC01E0356
+	STATUS_GRAPHICS_INVALID_MONITOR_CAPABILITY_ORIGIN                         NTStatus      = 0xC01E0357
+	STATUS_GRAPHICS_INVALID_MONITOR_FREQUENCYRANGE_CONSTRAINT                 NTStatus      = 0xC01E0358
+	STATUS_GRAPHICS_MAX_NUM_PATHS_REACHED                                     NTStatus      = 0xC01E0359
+	STATUS_GRAPHICS_CANCEL_VIDPN_TOPOLOGY_AUGMENTATION                        NTStatus      = 0xC01E035A
+	STATUS_GRAPHICS_INVALID_CLIENT_TYPE                                       NTStatus      = 0xC01E035B
+	STATUS_GRAPHICS_CLIENTVIDPN_NOT_SET                                       NTStatus      = 0xC01E035C
+	STATUS_GRAPHICS_SPECIFIED_CHILD_ALREADY_CONNECTED                         NTStatus      = 0xC01E0400
+	STATUS_GRAPHICS_CHILD_DESCRIPTOR_NOT_SUPPORTED                            NTStatus      = 0xC01E0401
+	STATUS_GRAPHICS_UNKNOWN_CHILD_STATUS                                      NTStatus      = 0x401E042F
+	STATUS_GRAPHICS_NOT_A_LINKED_ADAPTER                                      NTStatus      = 0xC01E0430
+	STATUS_GRAPHICS_LEADLINK_NOT_ENUMERATED                                   NTStatus      = 0xC01E0431
+	STATUS_GRAPHICS_CHAINLINKS_NOT_ENUMERATED                                 NTStatus      = 0xC01E0432
+	STATUS_GRAPHICS_ADAPTER_CHAIN_NOT_READY                                   NTStatus      = 0xC01E0433
+	STATUS_GRAPHICS_CHAINLINKS_NOT_STARTED                                    NTStatus      = 0xC01E0434
+	STATUS_GRAPHICS_CHAINLINKS_NOT_POWERED_ON                                 NTStatus      = 0xC01E0435
+	STATUS_GRAPHICS_INCONSISTENT_DEVICE_LINK_STATE                            NTStatus      = 0xC01E0436
+	STATUS_GRAPHICS_LEADLINK_START_DEFERRED                                   NTStatus      = 0x401E0437
+	STATUS_GRAPHICS_NOT_POST_DEVICE_DRIVER                                    NTStatus      = 0xC01E0438
+	STATUS_GRAPHICS_POLLING_TOO_FREQUENTLY                                    NTStatus      = 0x401E0439
+	STATUS_GRAPHICS_START_DEFERRED                                            NTStatus      = 0x401E043A
+	STATUS_GRAPHICS_ADAPTER_ACCESS_NOT_EXCLUDED                               NTStatus      = 0xC01E043B
+	STATUS_GRAPHICS_DEPENDABLE_CHILD_STATUS                                   NTStatus      = 0x401E043C
+	STATUS_GRAPHICS_OPM_NOT_SUPPORTED                                         NTStatus      = 0xC01E0500
+	STATUS_GRAPHICS_COPP_NOT_SUPPORTED                                        NTStatus      = 0xC01E0501
+	STATUS_GRAPHICS_UAB_NOT_SUPPORTED                                         NTStatus      = 0xC01E0502
+	STATUS_GRAPHICS_OPM_INVALID_ENCRYPTED_PARAMETERS                          NTStatus      = 0xC01E0503
+	STATUS_GRAPHICS_OPM_NO_PROTECTED_OUTPUTS_EXIST                            NTStatus      = 0xC01E0505
+	STATUS_GRAPHICS_OPM_INTERNAL_ERROR                                        NTStatus      = 0xC01E050B
+	STATUS_GRAPHICS_OPM_INVALID_HANDLE                                        NTStatus      = 0xC01E050C
+	STATUS_GRAPHICS_PVP_INVALID_CERTIFICATE_LENGTH                            NTStatus      = 0xC01E050E
+	STATUS_GRAPHICS_OPM_SPANNING_MODE_ENABLED                                 NTStatus      = 0xC01E050F
+	STATUS_GRAPHICS_OPM_THEATER_MODE_ENABLED                                  NTStatus      = 0xC01E0510
+	STATUS_GRAPHICS_PVP_HFS_FAILED                                            NTStatus      = 0xC01E0511
+	STATUS_GRAPHICS_OPM_INVALID_SRM                                           NTStatus      = 0xC01E0512
+	STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_HDCP                          NTStatus      = 0xC01E0513
+	STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_ACP                           NTStatus      = 0xC01E0514
+	STATUS_GRAPHICS_OPM_OUTPUT_DOES_NOT_SUPPORT_CGMSA                         NTStatus      = 0xC01E0515
+	STATUS_GRAPHICS_OPM_HDCP_SRM_NEVER_SET                                    NTStatus      = 0xC01E0516
+	STATUS_GRAPHICS_OPM_RESOLUTION_TOO_HIGH                                   NTStatus      = 0xC01E0517
+	STATUS_GRAPHICS_OPM_ALL_HDCP_HARDWARE_ALREADY_IN_USE                      NTStatus      = 0xC01E0518
+	STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_NO_LONGER_EXISTS                     NTStatus      = 0xC01E051A
+	STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_COPP_SEMANTICS         NTStatus      = 0xC01E051C
+	STATUS_GRAPHICS_OPM_INVALID_INFORMATION_REQUEST                           NTStatus      = 0xC01E051D
+	STATUS_GRAPHICS_OPM_DRIVER_INTERNAL_ERROR                                 NTStatus      = 0xC01E051E
+	STATUS_GRAPHICS_OPM_PROTECTED_OUTPUT_DOES_NOT_HAVE_OPM_SEMANTICS          NTStatus      = 0xC01E051F
+	STATUS_GRAPHICS_OPM_SIGNALING_NOT_SUPPORTED                               NTStatus      = 0xC01E0520
+	STATUS_GRAPHICS_OPM_INVALID_CONFIGURATION_REQUEST                         NTStatus      = 0xC01E0521
+	STATUS_GRAPHICS_I2C_NOT_SUPPORTED                                         NTStatus      = 0xC01E0580
+	STATUS_GRAPHICS_I2C_DEVICE_DOES_NOT_EXIST                                 NTStatus      = 0xC01E0581
+	STATUS_GRAPHICS_I2C_ERROR_TRANSMITTING_DATA                               NTStatus      = 0xC01E0582
+	STATUS_GRAPHICS_I2C_ERROR_RECEIVING_DATA                                  NTStatus      = 0xC01E0583
+	STATUS_GRAPHICS_DDCCI_VCP_NOT_SUPPORTED                                   NTStatus      = 0xC01E0584
+	STATUS_GRAPHICS_DDCCI_INVALID_DATA                                        NTStatus      = 0xC01E0585
+	STATUS_GRAPHICS_DDCCI_MONITOR_RETURNED_INVALID_TIMING_STATUS_BYTE         NTStatus      = 0xC01E0586
+	STATUS_GRAPHICS_DDCCI_INVALID_CAPABILITIES_STRING                         NTStatus      = 0xC01E0587
+	STATUS_GRAPHICS_MCA_INTERNAL_ERROR                                        NTStatus      = 0xC01E0588
+	STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_COMMAND                             NTStatus      = 0xC01E0589
+	STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_LENGTH                              NTStatus      = 0xC01E058A
+	STATUS_GRAPHICS_DDCCI_INVALID_MESSAGE_CHECKSUM                            NTStatus      = 0xC01E058B
+	STATUS_GRAPHICS_INVALID_PHYSICAL_MONITOR_HANDLE                           NTStatus      = 0xC01E058C
+	STATUS_GRAPHICS_MONITOR_NO_LONGER_EXISTS                                  NTStatus      = 0xC01E058D
+	STATUS_GRAPHICS_ONLY_CONSOLE_SESSION_SUPPORTED                            NTStatus      = 0xC01E05E0
+	STATUS_GRAPHICS_NO_DISPLAY_DEVICE_CORRESPONDS_TO_NAME                     NTStatus      = 0xC01E05E1
+	STATUS_GRAPHICS_DISPLAY_DEVICE_NOT_ATTACHED_TO_DESKTOP                    NTStatus      = 0xC01E05E2
+	STATUS_GRAPHICS_MIRRORING_DEVICES_NOT_SUPPORTED                           NTStatus      = 0xC01E05E3
+	STATUS_GRAPHICS_INVALID_POINTER                                           NTStatus      = 0xC01E05E4
+	STATUS_GRAPHICS_NO_MONITORS_CORRESPOND_TO_DISPLAY_DEVICE                  NTStatus      = 0xC01E05E5
+	STATUS_GRAPHICS_PARAMETER_ARRAY_TOO_SMALL                                 NTStatus      = 0xC01E05E6
+	STATUS_GRAPHICS_INTERNAL_ERROR                                            NTStatus      = 0xC01E05E7
+	STATUS_GRAPHICS_SESSION_TYPE_CHANGE_IN_PROGRESS                           NTStatus      = 0xC01E05E8
+	STATUS_FVE_LOCKED_VOLUME                                                  NTStatus      = 0xC0210000
+	STATUS_FVE_NOT_ENCRYPTED                                                  NTStatus      = 0xC0210001
+	STATUS_FVE_BAD_INFORMATION                                                NTStatus      = 0xC0210002
+	STATUS_FVE_TOO_SMALL                                                      NTStatus      = 0xC0210003
+	STATUS_FVE_FAILED_WRONG_FS                                                NTStatus      = 0xC0210004
+	STATUS_FVE_BAD_PARTITION_SIZE                                             NTStatus      = 0xC0210005
+	STATUS_FVE_FS_NOT_EXTENDED                                                NTStatus      = 0xC0210006
+	STATUS_FVE_FS_MOUNTED                                                     NTStatus      = 0xC0210007
+	STATUS_FVE_NO_LICENSE                                                     NTStatus      = 0xC0210008
+	STATUS_FVE_ACTION_NOT_ALLOWED                                             NTStatus      = 0xC0210009
+	STATUS_FVE_BAD_DATA                                                       NTStatus      = 0xC021000A
+	STATUS_FVE_VOLUME_NOT_BOUND                                               NTStatus      = 0xC021000B
+	STATUS_FVE_NOT_DATA_VOLUME                                                NTStatus      = 0xC021000C
+	STATUS_FVE_CONV_READ_ERROR                                                NTStatus      = 0xC021000D
+	STATUS_FVE_CONV_WRITE_ERROR                                               NTStatus      = 0xC021000E
+	STATUS_FVE_OVERLAPPED_UPDATE                                              NTStatus      = 0xC021000F
+	STATUS_FVE_FAILED_SECTOR_SIZE                                             NTStatus      = 0xC0210010
+	STATUS_FVE_FAILED_AUTHENTICATION                                          NTStatus      = 0xC0210011
+	STATUS_FVE_NOT_OS_VOLUME                                                  NTStatus      = 0xC0210012
+	STATUS_FVE_KEYFILE_NOT_FOUND                                              NTStatus      = 0xC0210013
+	STATUS_FVE_KEYFILE_INVALID                                                NTStatus      = 0xC0210014
+	STATUS_FVE_KEYFILE_NO_VMK                                                 NTStatus      = 0xC0210015
+	STATUS_FVE_TPM_DISABLED                                                   NTStatus      = 0xC0210016
+	STATUS_FVE_TPM_SRK_AUTH_NOT_ZERO                                          NTStatus      = 0xC0210017
+	STATUS_FVE_TPM_INVALID_PCR                                                NTStatus      = 0xC0210018
+	STATUS_FVE_TPM_NO_VMK                                                     NTStatus      = 0xC0210019
+	STATUS_FVE_PIN_INVALID                                                    NTStatus      = 0xC021001A
+	STATUS_FVE_AUTH_INVALID_APPLICATION                                       NTStatus      = 0xC021001B
+	STATUS_FVE_AUTH_INVALID_CONFIG                                            NTStatus      = 0xC021001C
+	STATUS_FVE_DEBUGGER_ENABLED                                               NTStatus      = 0xC021001D
+	STATUS_FVE_DRY_RUN_FAILED                                                 NTStatus      = 0xC021001E
+	STATUS_FVE_BAD_METADATA_POINTER                                           NTStatus      = 0xC021001F
+	STATUS_FVE_OLD_METADATA_COPY                                              NTStatus      = 0xC0210020
+	STATUS_FVE_REBOOT_REQUIRED                                                NTStatus      = 0xC0210021
+	STATUS_FVE_RAW_ACCESS                                                     NTStatus      = 0xC0210022
+	STATUS_FVE_RAW_BLOCKED                                                    NTStatus      = 0xC0210023
+	STATUS_FVE_NO_AUTOUNLOCK_MASTER_KEY                                       NTStatus      = 0xC0210024
+	STATUS_FVE_MOR_FAILED                                                     NTStatus      = 0xC0210025
+	STATUS_FVE_NO_FEATURE_LICENSE                                             NTStatus      = 0xC0210026
+	STATUS_FVE_POLICY_USER_DISABLE_RDV_NOT_ALLOWED                            NTStatus      = 0xC0210027
+	STATUS_FVE_CONV_RECOVERY_FAILED                                           NTStatus      = 0xC0210028
+	STATUS_FVE_VIRTUALIZED_SPACE_TOO_BIG                                      NTStatus      = 0xC0210029
+	STATUS_FVE_INVALID_DATUM_TYPE                                             NTStatus      = 0xC021002A
+	STATUS_FVE_VOLUME_TOO_SMALL                                               NTStatus      = 0xC0210030
+	STATUS_FVE_ENH_PIN_INVALID                                                NTStatus      = 0xC0210031
+	STATUS_FVE_FULL_ENCRYPTION_NOT_ALLOWED_ON_TP_STORAGE                      NTStatus      = 0xC0210032
+	STATUS_FVE_WIPE_NOT_ALLOWED_ON_TP_STORAGE                                 NTStatus      = 0xC0210033
+	STATUS_FVE_NOT_ALLOWED_ON_CSV_STACK                                       NTStatus      = 0xC0210034
+	STATUS_FVE_NOT_ALLOWED_ON_CLUSTER                                         NTStatus      = 0xC0210035
+	STATUS_FVE_NOT_ALLOWED_TO_UPGRADE_WHILE_CONVERTING                        NTStatus      = 0xC0210036
+	STATUS_FVE_WIPE_CANCEL_NOT_APPLICABLE                                     NTStatus      = 0xC0210037
+	STATUS_FVE_EDRIVE_DRY_RUN_FAILED                                          NTStatus      = 0xC0210038
+	STATUS_FVE_SECUREBOOT_DISABLED                                            NTStatus      = 0xC0210039
+	STATUS_FVE_SECUREBOOT_CONFIG_CHANGE                                       NTStatus      = 0xC021003A
+	STATUS_FVE_DEVICE_LOCKEDOUT                                               NTStatus      = 0xC021003B
+	STATUS_FVE_VOLUME_EXTEND_PREVENTS_EOW_DECRYPT                             NTStatus      = 0xC021003C
+	STATUS_FVE_NOT_DE_VOLUME                                                  NTStatus      = 0xC021003D
+	STATUS_FVE_PROTECTION_DISABLED                                            NTStatus      = 0xC021003E
+	STATUS_FVE_PROTECTION_CANNOT_BE_DISABLED                                  NTStatus      = 0xC021003F
+	STATUS_FVE_OSV_KSR_NOT_ALLOWED                                            NTStatus      = 0xC0210040
+	STATUS_FWP_CALLOUT_NOT_FOUND                                              NTStatus      = 0xC0220001
+	STATUS_FWP_CONDITION_NOT_FOUND                                            NTStatus      = 0xC0220002
+	STATUS_FWP_FILTER_NOT_FOUND                                               NTStatus      = 0xC0220003
+	STATUS_FWP_LAYER_NOT_FOUND                                                NTStatus      = 0xC0220004
+	STATUS_FWP_PROVIDER_NOT_FOUND                                             NTStatus      = 0xC0220005
+	STATUS_FWP_PROVIDER_CONTEXT_NOT_FOUND                                     NTStatus      = 0xC0220006
+	STATUS_FWP_SUBLAYER_NOT_FOUND                                             NTStatus      = 0xC0220007
+	STATUS_FWP_NOT_FOUND                                                      NTStatus      = 0xC0220008
+	STATUS_FWP_ALREADY_EXISTS                                                 NTStatus      = 0xC0220009
+	STATUS_FWP_IN_USE                                                         NTStatus      = 0xC022000A
+	STATUS_FWP_DYNAMIC_SESSION_IN_PROGRESS                                    NTStatus      = 0xC022000B
+	STATUS_FWP_WRONG_SESSION                                                  NTStatus      = 0xC022000C
+	STATUS_FWP_NO_TXN_IN_PROGRESS                                             NTStatus      = 0xC022000D
+	STATUS_FWP_TXN_IN_PROGRESS                                                NTStatus      = 0xC022000E
+	STATUS_FWP_TXN_ABORTED                                                    NTStatus      = 0xC022000F
+	STATUS_FWP_SESSION_ABORTED                                                NTStatus      = 0xC0220010
+	STATUS_FWP_INCOMPATIBLE_TXN                                               NTStatus      = 0xC0220011
+	STATUS_FWP_TIMEOUT                                                        NTStatus      = 0xC0220012
+	STATUS_FWP_NET_EVENTS_DISABLED                                            NTStatus      = 0xC0220013
+	STATUS_FWP_INCOMPATIBLE_LAYER                                             NTStatus      = 0xC0220014
+	STATUS_FWP_KM_CLIENTS_ONLY                                                NTStatus      = 0xC0220015
+	STATUS_FWP_LIFETIME_MISMATCH                                              NTStatus      = 0xC0220016
+	STATUS_FWP_BUILTIN_OBJECT                                                 NTStatus      = 0xC0220017
+	STATUS_FWP_TOO_MANY_CALLOUTS                                              NTStatus      = 0xC0220018
+	STATUS_FWP_NOTIFICATION_DROPPED                                           NTStatus      = 0xC0220019
+	STATUS_FWP_TRAFFIC_MISMATCH                                               NTStatus      = 0xC022001A
+	STATUS_FWP_INCOMPATIBLE_SA_STATE                                          NTStatus      = 0xC022001B
+	STATUS_FWP_NULL_POINTER                                                   NTStatus      = 0xC022001C
+	STATUS_FWP_INVALID_ENUMERATOR                                             NTStatus      = 0xC022001D
+	STATUS_FWP_INVALID_FLAGS                                                  NTStatus      = 0xC022001E
+	STATUS_FWP_INVALID_NET_MASK                                               NTStatus      = 0xC022001F
+	STATUS_FWP_INVALID_RANGE                                                  NTStatus      = 0xC0220020
+	STATUS_FWP_INVALID_INTERVAL                                               NTStatus      = 0xC0220021
+	STATUS_FWP_ZERO_LENGTH_ARRAY                                              NTStatus      = 0xC0220022
+	STATUS_FWP_NULL_DISPLAY_NAME                                              NTStatus      = 0xC0220023
+	STATUS_FWP_INVALID_ACTION_TYPE                                            NTStatus      = 0xC0220024
+	STATUS_FWP_INVALID_WEIGHT                                                 NTStatus      = 0xC0220025
+	STATUS_FWP_MATCH_TYPE_MISMATCH                                            NTStatus      = 0xC0220026
+	STATUS_FWP_TYPE_MISMATCH                                                  NTStatus      = 0xC0220027
+	STATUS_FWP_OUT_OF_BOUNDS                                                  NTStatus      = 0xC0220028
+	STATUS_FWP_RESERVED                                                       NTStatus      = 0xC0220029
+	STATUS_FWP_DUPLICATE_CONDITION                                            NTStatus      = 0xC022002A
+	STATUS_FWP_DUPLICATE_KEYMOD                                               NTStatus      = 0xC022002B
+	STATUS_FWP_ACTION_INCOMPATIBLE_WITH_LAYER                                 NTStatus      = 0xC022002C
+	STATUS_FWP_ACTION_INCOMPATIBLE_WITH_SUBLAYER                              NTStatus      = 0xC022002D
+	STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_LAYER                                NTStatus      = 0xC022002E
+	STATUS_FWP_CONTEXT_INCOMPATIBLE_WITH_CALLOUT                              NTStatus      = 0xC022002F
+	STATUS_FWP_INCOMPATIBLE_AUTH_METHOD                                       NTStatus      = 0xC0220030
+	STATUS_FWP_INCOMPATIBLE_DH_GROUP                                          NTStatus      = 0xC0220031
+	STATUS_FWP_EM_NOT_SUPPORTED                                               NTStatus      = 0xC0220032
+	STATUS_FWP_NEVER_MATCH                                                    NTStatus      = 0xC0220033
+	STATUS_FWP_PROVIDER_CONTEXT_MISMATCH                                      NTStatus      = 0xC0220034
+	STATUS_FWP_INVALID_PARAMETER                                              NTStatus      = 0xC0220035
+	STATUS_FWP_TOO_MANY_SUBLAYERS                                             NTStatus      = 0xC0220036
+	STATUS_FWP_CALLOUT_NOTIFICATION_FAILED                                    NTStatus      = 0xC0220037
+	STATUS_FWP_INVALID_AUTH_TRANSFORM                                         NTStatus      = 0xC0220038
+	STATUS_FWP_INVALID_CIPHER_TRANSFORM                                       NTStatus      = 0xC0220039
+	STATUS_FWP_INCOMPATIBLE_CIPHER_TRANSFORM                                  NTStatus      = 0xC022003A
+	STATUS_FWP_INVALID_TRANSFORM_COMBINATION                                  NTStatus      = 0xC022003B
+	STATUS_FWP_DUPLICATE_AUTH_METHOD                                          NTStatus      = 0xC022003C
+	STATUS_FWP_INVALID_TUNNEL_ENDPOINT                                        NTStatus      = 0xC022003D
+	STATUS_FWP_L2_DRIVER_NOT_READY                                            NTStatus      = 0xC022003E
+	STATUS_FWP_KEY_DICTATOR_ALREADY_REGISTERED                                NTStatus      = 0xC022003F
+	STATUS_FWP_KEY_DICTATION_INVALID_KEYING_MATERIAL                          NTStatus      = 0xC0220040
+	STATUS_FWP_CONNECTIONS_DISABLED                                           NTStatus      = 0xC0220041
+	STATUS_FWP_INVALID_DNS_NAME                                               NTStatus      = 0xC0220042
+	STATUS_FWP_STILL_ON                                                       NTStatus      = 0xC0220043
+	STATUS_FWP_IKEEXT_NOT_RUNNING                                             NTStatus      = 0xC0220044
+	STATUS_FWP_TCPIP_NOT_READY                                                NTStatus      = 0xC0220100
+	STATUS_FWP_INJECT_HANDLE_CLOSING                                          NTStatus      = 0xC0220101
+	STATUS_FWP_INJECT_HANDLE_STALE                                            NTStatus      = 0xC0220102
+	STATUS_FWP_CANNOT_PEND                                                    NTStatus      = 0xC0220103
+	STATUS_FWP_DROP_NOICMP                                                    NTStatus      = 0xC0220104
+	STATUS_NDIS_CLOSING                                                       NTStatus      = 0xC0230002
+	STATUS_NDIS_BAD_VERSION                                                   NTStatus      = 0xC0230004
+	STATUS_NDIS_BAD_CHARACTERISTICS                                           NTStatus      = 0xC0230005
+	STATUS_NDIS_ADAPTER_NOT_FOUND                                             NTStatus      = 0xC0230006
+	STATUS_NDIS_OPEN_FAILED                                                   NTStatus      = 0xC0230007
+	STATUS_NDIS_DEVICE_FAILED                                                 NTStatus      = 0xC0230008
+	STATUS_NDIS_MULTICAST_FULL                                                NTStatus      = 0xC0230009
+	STATUS_NDIS_MULTICAST_EXISTS                                              NTStatus      = 0xC023000A
+	STATUS_NDIS_MULTICAST_NOT_FOUND                                           NTStatus      = 0xC023000B
+	STATUS_NDIS_REQUEST_ABORTED                                               NTStatus      = 0xC023000C
+	STATUS_NDIS_RESET_IN_PROGRESS                                             NTStatus      = 0xC023000D
+	STATUS_NDIS_NOT_SUPPORTED                                                 NTStatus      = 0xC02300BB
+	STATUS_NDIS_INVALID_PACKET                                                NTStatus      = 0xC023000F
+	STATUS_NDIS_ADAPTER_NOT_READY                                             NTStatus      = 0xC0230011
+	STATUS_NDIS_INVALID_LENGTH                                                NTStatus      = 0xC0230014
+	STATUS_NDIS_INVALID_DATA                                                  NTStatus      = 0xC0230015
+	STATUS_NDIS_BUFFER_TOO_SHORT                                              NTStatus      = 0xC0230016
+	STATUS_NDIS_INVALID_OID                                                   NTStatus      = 0xC0230017
+	STATUS_NDIS_ADAPTER_REMOVED                                               NTStatus      = 0xC0230018
+	STATUS_NDIS_UNSUPPORTED_MEDIA                                             NTStatus      = 0xC0230019
+	STATUS_NDIS_GROUP_ADDRESS_IN_USE                                          NTStatus      = 0xC023001A
+	STATUS_NDIS_FILE_NOT_FOUND                                                NTStatus      = 0xC023001B
+	STATUS_NDIS_ERROR_READING_FILE                                            NTStatus      = 0xC023001C
+	STATUS_NDIS_ALREADY_MAPPED                                                NTStatus      = 0xC023001D
+	STATUS_NDIS_RESOURCE_CONFLICT                                             NTStatus      = 0xC023001E
+	STATUS_NDIS_MEDIA_DISCONNECTED                                            NTStatus      = 0xC023001F
+	STATUS_NDIS_INVALID_ADDRESS                                               NTStatus      = 0xC0230022
+	STATUS_NDIS_INVALID_DEVICE_REQUEST                                        NTStatus      = 0xC0230010
+	STATUS_NDIS_PAUSED                                                        NTStatus      = 0xC023002A
+	STATUS_NDIS_INTERFACE_NOT_FOUND                                           NTStatus      = 0xC023002B
+	STATUS_NDIS_UNSUPPORTED_REVISION                                          NTStatus      = 0xC023002C
+	STATUS_NDIS_INVALID_PORT                                                  NTStatus      = 0xC023002D
+	STATUS_NDIS_INVALID_PORT_STATE                                            NTStatus      = 0xC023002E
+	STATUS_NDIS_LOW_POWER_STATE                                               NTStatus      = 0xC023002F
+	STATUS_NDIS_REINIT_REQUIRED                                               NTStatus      = 0xC0230030
+	STATUS_NDIS_NO_QUEUES                                                     NTStatus      = 0xC0230031
+	STATUS_NDIS_DOT11_AUTO_CONFIG_ENABLED                                     NTStatus      = 0xC0232000
+	STATUS_NDIS_DOT11_MEDIA_IN_USE                                            NTStatus      = 0xC0232001
+	STATUS_NDIS_DOT11_POWER_STATE_INVALID                                     NTStatus      = 0xC0232002
+	STATUS_NDIS_PM_WOL_PATTERN_LIST_FULL                                      NTStatus      = 0xC0232003
+	STATUS_NDIS_PM_PROTOCOL_OFFLOAD_LIST_FULL                                 NTStatus      = 0xC0232004
+	STATUS_NDIS_DOT11_AP_CHANNEL_CURRENTLY_NOT_AVAILABLE                      NTStatus      = 0xC0232005
+	STATUS_NDIS_DOT11_AP_BAND_CURRENTLY_NOT_AVAILABLE                         NTStatus      = 0xC0232006
+	STATUS_NDIS_DOT11_AP_CHANNEL_NOT_ALLOWED                                  NTStatus      = 0xC0232007
+	STATUS_NDIS_DOT11_AP_BAND_NOT_ALLOWED                                     NTStatus      = 0xC0232008
+	STATUS_NDIS_INDICATION_REQUIRED                                           NTStatus      = 0x40230001
+	STATUS_NDIS_OFFLOAD_POLICY                                                NTStatus      = 0xC023100F
+	STATUS_NDIS_OFFLOAD_CONNECTION_REJECTED                                   NTStatus      = 0xC0231012
+	STATUS_NDIS_OFFLOAD_PATH_REJECTED                                         NTStatus      = 0xC0231013
+	STATUS_TPM_ERROR_MASK                                                     NTStatus      = 0xC0290000
+	STATUS_TPM_AUTHFAIL                                                       NTStatus      = 0xC0290001
+	STATUS_TPM_BADINDEX                                                       NTStatus      = 0xC0290002
+	STATUS_TPM_BAD_PARAMETER                                                  NTStatus      = 0xC0290003
+	STATUS_TPM_AUDITFAILURE                                                   NTStatus      = 0xC0290004
+	STATUS_TPM_CLEAR_DISABLED                                                 NTStatus      = 0xC0290005
+	STATUS_TPM_DEACTIVATED                                                    NTStatus      = 0xC0290006
+	STATUS_TPM_DISABLED                                                       NTStatus      = 0xC0290007
+	STATUS_TPM_DISABLED_CMD                                                   NTStatus      = 0xC0290008
+	STATUS_TPM_FAIL                                                           NTStatus      = 0xC0290009
+	STATUS_TPM_BAD_ORDINAL                                                    NTStatus      = 0xC029000A
+	STATUS_TPM_INSTALL_DISABLED                                               NTStatus      = 0xC029000B
+	STATUS_TPM_INVALID_KEYHANDLE                                              NTStatus      = 0xC029000C
+	STATUS_TPM_KEYNOTFOUND                                                    NTStatus      = 0xC029000D
+	STATUS_TPM_INAPPROPRIATE_ENC                                              NTStatus      = 0xC029000E
+	STATUS_TPM_MIGRATEFAIL                                                    NTStatus      = 0xC029000F
+	STATUS_TPM_INVALID_PCR_INFO                                               NTStatus      = 0xC0290010
+	STATUS_TPM_NOSPACE                                                        NTStatus      = 0xC0290011
+	STATUS_TPM_NOSRK                                                          NTStatus      = 0xC0290012
+	STATUS_TPM_NOTSEALED_BLOB                                                 NTStatus      = 0xC0290013
+	STATUS_TPM_OWNER_SET                                                      NTStatus      = 0xC0290014
+	STATUS_TPM_RESOURCES                                                      NTStatus      = 0xC0290015
+	STATUS_TPM_SHORTRANDOM                                                    NTStatus      = 0xC0290016
+	STATUS_TPM_SIZE                                                           NTStatus      = 0xC0290017
+	STATUS_TPM_WRONGPCRVAL                                                    NTStatus      = 0xC0290018
+	STATUS_TPM_BAD_PARAM_SIZE                                                 NTStatus      = 0xC0290019
+	STATUS_TPM_SHA_THREAD                                                     NTStatus      = 0xC029001A
+	STATUS_TPM_SHA_ERROR                                                      NTStatus      = 0xC029001B
+	STATUS_TPM_FAILEDSELFTEST                                                 NTStatus      = 0xC029001C
+	STATUS_TPM_AUTH2FAIL                                                      NTStatus      = 0xC029001D
+	STATUS_TPM_BADTAG                                                         NTStatus      = 0xC029001E
+	STATUS_TPM_IOERROR                                                        NTStatus      = 0xC029001F
+	STATUS_TPM_ENCRYPT_ERROR                                                  NTStatus      = 0xC0290020
+	STATUS_TPM_DECRYPT_ERROR                                                  NTStatus      = 0xC0290021
+	STATUS_TPM_INVALID_AUTHHANDLE                                             NTStatus      = 0xC0290022
+	STATUS_TPM_NO_ENDORSEMENT                                                 NTStatus      = 0xC0290023
+	STATUS_TPM_INVALID_KEYUSAGE                                               NTStatus      = 0xC0290024
+	STATUS_TPM_WRONG_ENTITYTYPE                                               NTStatus      = 0xC0290025
+	STATUS_TPM_INVALID_POSTINIT                                               NTStatus      = 0xC0290026
+	STATUS_TPM_INAPPROPRIATE_SIG                                              NTStatus      = 0xC0290027
+	STATUS_TPM_BAD_KEY_PROPERTY                                               NTStatus      = 0xC0290028
+	STATUS_TPM_BAD_MIGRATION                                                  NTStatus      = 0xC0290029
+	STATUS_TPM_BAD_SCHEME                                                     NTStatus      = 0xC029002A
+	STATUS_TPM_BAD_DATASIZE                                                   NTStatus      = 0xC029002B
+	STATUS_TPM_BAD_MODE                                                       NTStatus      = 0xC029002C
+	STATUS_TPM_BAD_PRESENCE                                                   NTStatus      = 0xC029002D
+	STATUS_TPM_BAD_VERSION                                                    NTStatus      = 0xC029002E
+	STATUS_TPM_NO_WRAP_TRANSPORT                                              NTStatus      = 0xC029002F
+	STATUS_TPM_AUDITFAIL_UNSUCCESSFUL                                         NTStatus      = 0xC0290030
+	STATUS_TPM_AUDITFAIL_SUCCESSFUL                                           NTStatus      = 0xC0290031
+	STATUS_TPM_NOTRESETABLE                                                   NTStatus      = 0xC0290032
+	STATUS_TPM_NOTLOCAL                                                       NTStatus      = 0xC0290033
+	STATUS_TPM_BAD_TYPE                                                       NTStatus      = 0xC0290034
+	STATUS_TPM_INVALID_RESOURCE                                               NTStatus      = 0xC0290035
+	STATUS_TPM_NOTFIPS                                                        NTStatus      = 0xC0290036
+	STATUS_TPM_INVALID_FAMILY                                                 NTStatus      = 0xC0290037
+	STATUS_TPM_NO_NV_PERMISSION                                               NTStatus      = 0xC0290038
+	STATUS_TPM_REQUIRES_SIGN                                                  NTStatus      = 0xC0290039
+	STATUS_TPM_KEY_NOTSUPPORTED                                               NTStatus      = 0xC029003A
+	STATUS_TPM_AUTH_CONFLICT                                                  NTStatus      = 0xC029003B
+	STATUS_TPM_AREA_LOCKED                                                    NTStatus      = 0xC029003C
+	STATUS_TPM_BAD_LOCALITY                                                   NTStatus      = 0xC029003D
+	STATUS_TPM_READ_ONLY                                                      NTStatus      = 0xC029003E
+	STATUS_TPM_PER_NOWRITE                                                    NTStatus      = 0xC029003F
+	STATUS_TPM_FAMILYCOUNT                                                    NTStatus      = 0xC0290040
+	STATUS_TPM_WRITE_LOCKED                                                   NTStatus      = 0xC0290041
+	STATUS_TPM_BAD_ATTRIBUTES                                                 NTStatus      = 0xC0290042
+	STATUS_TPM_INVALID_STRUCTURE                                              NTStatus      = 0xC0290043
+	STATUS_TPM_KEY_OWNER_CONTROL                                              NTStatus      = 0xC0290044
+	STATUS_TPM_BAD_COUNTER                                                    NTStatus      = 0xC0290045
+	STATUS_TPM_NOT_FULLWRITE                                                  NTStatus      = 0xC0290046
+	STATUS_TPM_CONTEXT_GAP                                                    NTStatus      = 0xC0290047
+	STATUS_TPM_MAXNVWRITES                                                    NTStatus      = 0xC0290048
+	STATUS_TPM_NOOPERATOR                                                     NTStatus      = 0xC0290049
+	STATUS_TPM_RESOURCEMISSING                                                NTStatus      = 0xC029004A
+	STATUS_TPM_DELEGATE_LOCK                                                  NTStatus      = 0xC029004B
+	STATUS_TPM_DELEGATE_FAMILY                                                NTStatus      = 0xC029004C
+	STATUS_TPM_DELEGATE_ADMIN                                                 NTStatus      = 0xC029004D
+	STATUS_TPM_TRANSPORT_NOTEXCLUSIVE                                         NTStatus      = 0xC029004E
+	STATUS_TPM_OWNER_CONTROL                                                  NTStatus      = 0xC029004F
+	STATUS_TPM_DAA_RESOURCES                                                  NTStatus      = 0xC0290050
+	STATUS_TPM_DAA_INPUT_DATA0                                                NTStatus      = 0xC0290051
+	STATUS_TPM_DAA_INPUT_DATA1                                                NTStatus      = 0xC0290052
+	STATUS_TPM_DAA_ISSUER_SETTINGS                                            NTStatus      = 0xC0290053
+	STATUS_TPM_DAA_TPM_SETTINGS                                               NTStatus      = 0xC0290054
+	STATUS_TPM_DAA_STAGE                                                      NTStatus      = 0xC0290055
+	STATUS_TPM_DAA_ISSUER_VALIDITY                                            NTStatus      = 0xC0290056
+	STATUS_TPM_DAA_WRONG_W                                                    NTStatus      = 0xC0290057
+	STATUS_TPM_BAD_HANDLE                                                     NTStatus      = 0xC0290058
+	STATUS_TPM_BAD_DELEGATE                                                   NTStatus      = 0xC0290059
+	STATUS_TPM_BADCONTEXT                                                     NTStatus      = 0xC029005A
+	STATUS_TPM_TOOMANYCONTEXTS                                                NTStatus      = 0xC029005B
+	STATUS_TPM_MA_TICKET_SIGNATURE                                            NTStatus      = 0xC029005C
+	STATUS_TPM_MA_DESTINATION                                                 NTStatus      = 0xC029005D
+	STATUS_TPM_MA_SOURCE                                                      NTStatus      = 0xC029005E
+	STATUS_TPM_MA_AUTHORITY                                                   NTStatus      = 0xC029005F
+	STATUS_TPM_PERMANENTEK                                                    NTStatus      = 0xC0290061
+	STATUS_TPM_BAD_SIGNATURE                                                  NTStatus      = 0xC0290062
+	STATUS_TPM_NOCONTEXTSPACE                                                 NTStatus      = 0xC0290063
+	STATUS_TPM_20_E_ASYMMETRIC                                                NTStatus      = 0xC0290081
+	STATUS_TPM_20_E_ATTRIBUTES                                                NTStatus      = 0xC0290082
+	STATUS_TPM_20_E_HASH                                                      NTStatus      = 0xC0290083
+	STATUS_TPM_20_E_VALUE                                                     NTStatus      = 0xC0290084
+	STATUS_TPM_20_E_HIERARCHY                                                 NTStatus      = 0xC0290085
+	STATUS_TPM_20_E_KEY_SIZE                                                  NTStatus      = 0xC0290087
+	STATUS_TPM_20_E_MGF                                                       NTStatus      = 0xC0290088
+	STATUS_TPM_20_E_MODE                                                      NTStatus      = 0xC0290089
+	STATUS_TPM_20_E_TYPE                                                      NTStatus      = 0xC029008A
+	STATUS_TPM_20_E_HANDLE                                                    NTStatus      = 0xC029008B
+	STATUS_TPM_20_E_KDF                                                       NTStatus      = 0xC029008C
+	STATUS_TPM_20_E_RANGE                                                     NTStatus      = 0xC029008D
+	STATUS_TPM_20_E_AUTH_FAIL                                                 NTStatus      = 0xC029008E
+	STATUS_TPM_20_E_NONCE                                                     NTStatus      = 0xC029008F
+	STATUS_TPM_20_E_PP                                                        NTStatus      = 0xC0290090
+	STATUS_TPM_20_E_SCHEME                                                    NTStatus      = 0xC0290092
+	STATUS_TPM_20_E_SIZE                                                      NTStatus      = 0xC0290095
+	STATUS_TPM_20_E_SYMMETRIC                                                 NTStatus      = 0xC0290096
+	STATUS_TPM_20_E_TAG                                                       NTStatus      = 0xC0290097
+	STATUS_TPM_20_E_SELECTOR                                                  NTStatus      = 0xC0290098
+	STATUS_TPM_20_E_INSUFFICIENT                                              NTStatus      = 0xC029009A
+	STATUS_TPM_20_E_SIGNATURE                                                 NTStatus      = 0xC029009B
+	STATUS_TPM_20_E_KEY                                                       NTStatus      = 0xC029009C
+	STATUS_TPM_20_E_POLICY_FAIL                                               NTStatus      = 0xC029009D
+	STATUS_TPM_20_E_INTEGRITY                                                 NTStatus      = 0xC029009F
+	STATUS_TPM_20_E_TICKET                                                    NTStatus      = 0xC02900A0
+	STATUS_TPM_20_E_RESERVED_BITS                                             NTStatus      = 0xC02900A1
+	STATUS_TPM_20_E_BAD_AUTH                                                  NTStatus      = 0xC02900A2
+	STATUS_TPM_20_E_EXPIRED                                                   NTStatus      = 0xC02900A3
+	STATUS_TPM_20_E_POLICY_CC                                                 NTStatus      = 0xC02900A4
+	STATUS_TPM_20_E_BINDING                                                   NTStatus      = 0xC02900A5
+	STATUS_TPM_20_E_CURVE                                                     NTStatus      = 0xC02900A6
+	STATUS_TPM_20_E_ECC_POINT                                                 NTStatus      = 0xC02900A7
+	STATUS_TPM_20_E_INITIALIZE                                                NTStatus      = 0xC0290100
+	STATUS_TPM_20_E_FAILURE                                                   NTStatus      = 0xC0290101
+	STATUS_TPM_20_E_SEQUENCE                                                  NTStatus      = 0xC0290103
+	STATUS_TPM_20_E_PRIVATE                                                   NTStatus      = 0xC029010B
+	STATUS_TPM_20_E_HMAC                                                      NTStatus      = 0xC0290119
+	STATUS_TPM_20_E_DISABLED                                                  NTStatus      = 0xC0290120
+	STATUS_TPM_20_E_EXCLUSIVE                                                 NTStatus      = 0xC0290121
+	STATUS_TPM_20_E_ECC_CURVE                                                 NTStatus      = 0xC0290123
+	STATUS_TPM_20_E_AUTH_TYPE                                                 NTStatus      = 0xC0290124
+	STATUS_TPM_20_E_AUTH_MISSING                                              NTStatus      = 0xC0290125
+	STATUS_TPM_20_E_POLICY                                                    NTStatus      = 0xC0290126
+	STATUS_TPM_20_E_PCR                                                       NTStatus      = 0xC0290127
+	STATUS_TPM_20_E_PCR_CHANGED                                               NTStatus      = 0xC0290128
+	STATUS_TPM_20_E_UPGRADE                                                   NTStatus      = 0xC029012D
+	STATUS_TPM_20_E_TOO_MANY_CONTEXTS                                         NTStatus      = 0xC029012E
+	STATUS_TPM_20_E_AUTH_UNAVAILABLE                                          NTStatus      = 0xC029012F
+	STATUS_TPM_20_E_REBOOT                                                    NTStatus      = 0xC0290130
+	STATUS_TPM_20_E_UNBALANCED                                                NTStatus      = 0xC0290131
+	STATUS_TPM_20_E_COMMAND_SIZE                                              NTStatus      = 0xC0290142
+	STATUS_TPM_20_E_COMMAND_CODE                                              NTStatus      = 0xC0290143
+	STATUS_TPM_20_E_AUTHSIZE                                                  NTStatus      = 0xC0290144
+	STATUS_TPM_20_E_AUTH_CONTEXT                                              NTStatus      = 0xC0290145
+	STATUS_TPM_20_E_NV_RANGE                                                  NTStatus      = 0xC0290146
+	STATUS_TPM_20_E_NV_SIZE                                                   NTStatus      = 0xC0290147
+	STATUS_TPM_20_E_NV_LOCKED                                                 NTStatus      = 0xC0290148
+	STATUS_TPM_20_E_NV_AUTHORIZATION                                          NTStatus      = 0xC0290149
+	STATUS_TPM_20_E_NV_UNINITIALIZED                                          NTStatus      = 0xC029014A
+	STATUS_TPM_20_E_NV_SPACE                                                  NTStatus      = 0xC029014B
+	STATUS_TPM_20_E_NV_DEFINED                                                NTStatus      = 0xC029014C
+	STATUS_TPM_20_E_BAD_CONTEXT                                               NTStatus      = 0xC0290150
+	STATUS_TPM_20_E_CPHASH                                                    NTStatus      = 0xC0290151
+	STATUS_TPM_20_E_PARENT                                                    NTStatus      = 0xC0290152
+	STATUS_TPM_20_E_NEEDS_TEST                                                NTStatus      = 0xC0290153
+	STATUS_TPM_20_E_NO_RESULT                                                 NTStatus      = 0xC0290154
+	STATUS_TPM_20_E_SENSITIVE                                                 NTStatus      = 0xC0290155
+	STATUS_TPM_COMMAND_BLOCKED                                                NTStatus      = 0xC0290400
+	STATUS_TPM_INVALID_HANDLE                                                 NTStatus      = 0xC0290401
+	STATUS_TPM_DUPLICATE_VHANDLE                                              NTStatus      = 0xC0290402
+	STATUS_TPM_EMBEDDED_COMMAND_BLOCKED                                       NTStatus      = 0xC0290403
+	STATUS_TPM_EMBEDDED_COMMAND_UNSUPPORTED                                   NTStatus      = 0xC0290404
+	STATUS_TPM_RETRY                                                          NTStatus      = 0xC0290800
+	STATUS_TPM_NEEDS_SELFTEST                                                 NTStatus      = 0xC0290801
+	STATUS_TPM_DOING_SELFTEST                                                 NTStatus      = 0xC0290802
+	STATUS_TPM_DEFEND_LOCK_RUNNING                                            NTStatus      = 0xC0290803
+	STATUS_TPM_COMMAND_CANCELED                                               NTStatus      = 0xC0291001
+	STATUS_TPM_TOO_MANY_CONTEXTS                                              NTStatus      = 0xC0291002
+	STATUS_TPM_NOT_FOUND                                                      NTStatus      = 0xC0291003
+	STATUS_TPM_ACCESS_DENIED                                                  NTStatus      = 0xC0291004
+	STATUS_TPM_INSUFFICIENT_BUFFER                                            NTStatus      = 0xC0291005
+	STATUS_TPM_PPI_FUNCTION_UNSUPPORTED                                       NTStatus      = 0xC0291006
+	STATUS_PCP_ERROR_MASK                                                     NTStatus      = 0xC0292000
+	STATUS_PCP_DEVICE_NOT_READY                                               NTStatus      = 0xC0292001
+	STATUS_PCP_INVALID_HANDLE                                                 NTStatus      = 0xC0292002
+	STATUS_PCP_INVALID_PARAMETER                                              NTStatus      = 0xC0292003
+	STATUS_PCP_FLAG_NOT_SUPPORTED                                             NTStatus      = 0xC0292004
+	STATUS_PCP_NOT_SUPPORTED                                                  NTStatus      = 0xC0292005
+	STATUS_PCP_BUFFER_TOO_SMALL                                               NTStatus      = 0xC0292006
+	STATUS_PCP_INTERNAL_ERROR                                                 NTStatus      = 0xC0292007
+	STATUS_PCP_AUTHENTICATION_FAILED                                          NTStatus      = 0xC0292008
+	STATUS_PCP_AUTHENTICATION_IGNORED                                         NTStatus      = 0xC0292009
+	STATUS_PCP_POLICY_NOT_FOUND                                               NTStatus      = 0xC029200A
+	STATUS_PCP_PROFILE_NOT_FOUND                                              NTStatus      = 0xC029200B
+	STATUS_PCP_VALIDATION_FAILED                                              NTStatus      = 0xC029200C
+	STATUS_PCP_DEVICE_NOT_FOUND                                               NTStatus      = 0xC029200D
+	STATUS_PCP_WRONG_PARENT                                                   NTStatus      = 0xC029200E
+	STATUS_PCP_KEY_NOT_LOADED                                                 NTStatus      = 0xC029200F
+	STATUS_PCP_NO_KEY_CERTIFICATION                                           NTStatus      = 0xC0292010
+	STATUS_PCP_KEY_NOT_FINALIZED                                              NTStatus      = 0xC0292011
+	STATUS_PCP_ATTESTATION_CHALLENGE_NOT_SET                                  NTStatus      = 0xC0292012
+	STATUS_PCP_NOT_PCR_BOUND                                                  NTStatus      = 0xC0292013
+	STATUS_PCP_KEY_ALREADY_FINALIZED                                          NTStatus      = 0xC0292014
+	STATUS_PCP_KEY_USAGE_POLICY_NOT_SUPPORTED                                 NTStatus      = 0xC0292015
+	STATUS_PCP_KEY_USAGE_POLICY_INVALID                                       NTStatus      = 0xC0292016
+	STATUS_PCP_SOFT_KEY_ERROR                                                 NTStatus      = 0xC0292017
+	STATUS_PCP_KEY_NOT_AUTHENTICATED                                          NTStatus      = 0xC0292018
+	STATUS_PCP_KEY_NOT_AIK                                                    NTStatus      = 0xC0292019
+	STATUS_PCP_KEY_NOT_SIGNING_KEY                                            NTStatus      = 0xC029201A
+	STATUS_PCP_LOCKED_OUT                                                     NTStatus      = 0xC029201B
+	STATUS_PCP_CLAIM_TYPE_NOT_SUPPORTED                                       NTStatus      = 0xC029201C
+	STATUS_PCP_TPM_VERSION_NOT_SUPPORTED                                      NTStatus      = 0xC029201D
+	STATUS_PCP_BUFFER_LENGTH_MISMATCH                                         NTStatus      = 0xC029201E
+	STATUS_PCP_IFX_RSA_KEY_CREATION_BLOCKED                                   NTStatus      = 0xC029201F
+	STATUS_PCP_TICKET_MISSING                                                 NTStatus      = 0xC0292020
+	STATUS_PCP_RAW_POLICY_NOT_SUPPORTED                                       NTStatus      = 0xC0292021
+	STATUS_PCP_KEY_HANDLE_INVALIDATED                                         NTStatus      = 0xC0292022
+	STATUS_PCP_UNSUPPORTED_PSS_SALT                                           NTStatus      = 0x40292023
+	STATUS_RTPM_CONTEXT_CONTINUE                                              NTStatus      = 0x00293000
+	STATUS_RTPM_CONTEXT_COMPLETE                                              NTStatus      = 0x00293001
+	STATUS_RTPM_NO_RESULT                                                     NTStatus      = 0xC0293002
+	STATUS_RTPM_PCR_READ_INCOMPLETE                                           NTStatus      = 0xC0293003
+	STATUS_RTPM_INVALID_CONTEXT                                               NTStatus      = 0xC0293004
+	STATUS_RTPM_UNSUPPORTED_CMD                                               NTStatus      = 0xC0293005
+	STATUS_TPM_ZERO_EXHAUST_ENABLED                                           NTStatus      = 0xC0294000
+	STATUS_HV_INVALID_HYPERCALL_CODE                                          NTStatus      = 0xC0350002
+	STATUS_HV_INVALID_HYPERCALL_INPUT                                         NTStatus      = 0xC0350003
+	STATUS_HV_INVALID_ALIGNMENT                                               NTStatus      = 0xC0350004
+	STATUS_HV_INVALID_PARAMETER                                               NTStatus      = 0xC0350005
+	STATUS_HV_ACCESS_DENIED                                                   NTStatus      = 0xC0350006
+	STATUS_HV_INVALID_PARTITION_STATE                                         NTStatus      = 0xC0350007
+	STATUS_HV_OPERATION_DENIED                                                NTStatus      = 0xC0350008
+	STATUS_HV_UNKNOWN_PROPERTY                                                NTStatus      = 0xC0350009
+	STATUS_HV_PROPERTY_VALUE_OUT_OF_RANGE                                     NTStatus      = 0xC035000A
+	STATUS_HV_INSUFFICIENT_MEMORY                                             NTStatus      = 0xC035000B
+	STATUS_HV_PARTITION_TOO_DEEP                                              NTStatus      = 0xC035000C
+	STATUS_HV_INVALID_PARTITION_ID                                            NTStatus      = 0xC035000D
+	STATUS_HV_INVALID_VP_INDEX                                                NTStatus      = 0xC035000E
+	STATUS_HV_INVALID_PORT_ID                                                 NTStatus      = 0xC0350011
+	STATUS_HV_INVALID_CONNECTION_ID                                           NTStatus      = 0xC0350012
+	STATUS_HV_INSUFFICIENT_BUFFERS                                            NTStatus      = 0xC0350013
+	STATUS_HV_NOT_ACKNOWLEDGED                                                NTStatus      = 0xC0350014
+	STATUS_HV_INVALID_VP_STATE                                                NTStatus      = 0xC0350015
+	STATUS_HV_ACKNOWLEDGED                                                    NTStatus      = 0xC0350016
+	STATUS_HV_INVALID_SAVE_RESTORE_STATE                                      NTStatus      = 0xC0350017
+	STATUS_HV_INVALID_SYNIC_STATE                                             NTStatus      = 0xC0350018
+	STATUS_HV_OBJECT_IN_USE                                                   NTStatus      = 0xC0350019
+	STATUS_HV_INVALID_PROXIMITY_DOMAIN_INFO                                   NTStatus      = 0xC035001A
+	STATUS_HV_NO_DATA                                                         NTStatus      = 0xC035001B
+	STATUS_HV_INACTIVE                                                        NTStatus      = 0xC035001C
+	STATUS_HV_NO_RESOURCES                                                    NTStatus      = 0xC035001D
+	STATUS_HV_FEATURE_UNAVAILABLE                                             NTStatus      = 0xC035001E
+	STATUS_HV_INSUFFICIENT_BUFFER                                             NTStatus      = 0xC0350033
+	STATUS_HV_INSUFFICIENT_DEVICE_DOMAINS                                     NTStatus      = 0xC0350038
+	STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR                                  NTStatus      = 0xC035003C
+	STATUS_HV_CPUID_XSAVE_FEATURE_VALIDATION_ERROR                            NTStatus      = 0xC035003D
+	STATUS_HV_PROCESSOR_STARTUP_TIMEOUT                                       NTStatus      = 0xC035003E
+	STATUS_HV_SMX_ENABLED                                                     NTStatus      = 0xC035003F
+	STATUS_HV_INVALID_LP_INDEX                                                NTStatus      = 0xC0350041
+	STATUS_HV_INVALID_REGISTER_VALUE                                          NTStatus      = 0xC0350050
+	STATUS_HV_INVALID_VTL_STATE                                               NTStatus      = 0xC0350051
+	STATUS_HV_NX_NOT_DETECTED                                                 NTStatus      = 0xC0350055
+	STATUS_HV_INVALID_DEVICE_ID                                               NTStatus      = 0xC0350057
+	STATUS_HV_INVALID_DEVICE_STATE                                            NTStatus      = 0xC0350058
+	STATUS_HV_PENDING_PAGE_REQUESTS                                           NTStatus      = 0x00350059
+	STATUS_HV_PAGE_REQUEST_INVALID                                            NTStatus      = 0xC0350060
+	STATUS_HV_INVALID_CPU_GROUP_ID                                            NTStatus      = 0xC035006F
+	STATUS_HV_INVALID_CPU_GROUP_STATE                                         NTStatus      = 0xC0350070
+	STATUS_HV_OPERATION_FAILED                                                NTStatus      = 0xC0350071
+	STATUS_HV_NOT_ALLOWED_WITH_NESTED_VIRT_ACTIVE                             NTStatus      = 0xC0350072
+	STATUS_HV_INSUFFICIENT_ROOT_MEMORY                                        NTStatus      = 0xC0350073
+	STATUS_HV_NOT_PRESENT                                                     NTStatus      = 0xC0351000
+	STATUS_VID_DUPLICATE_HANDLER                                              NTStatus      = 0xC0370001
+	STATUS_VID_TOO_MANY_HANDLERS                                              NTStatus      = 0xC0370002
+	STATUS_VID_QUEUE_FULL                                                     NTStatus      = 0xC0370003
+	STATUS_VID_HANDLER_NOT_PRESENT                                            NTStatus      = 0xC0370004
+	STATUS_VID_INVALID_OBJECT_NAME                                            NTStatus      = 0xC0370005
+	STATUS_VID_PARTITION_NAME_TOO_LONG                                        NTStatus      = 0xC0370006
+	STATUS_VID_MESSAGE_QUEUE_NAME_TOO_LONG                                    NTStatus      = 0xC0370007
+	STATUS_VID_PARTITION_ALREADY_EXISTS                                       NTStatus      = 0xC0370008
+	STATUS_VID_PARTITION_DOES_NOT_EXIST                                       NTStatus      = 0xC0370009
+	STATUS_VID_PARTITION_NAME_NOT_FOUND                                       NTStatus      = 0xC037000A
+	STATUS_VID_MESSAGE_QUEUE_ALREADY_EXISTS                                   NTStatus      = 0xC037000B
+	STATUS_VID_EXCEEDED_MBP_ENTRY_MAP_LIMIT                                   NTStatus      = 0xC037000C
+	STATUS_VID_MB_STILL_REFERENCED                                            NTStatus      = 0xC037000D
+	STATUS_VID_CHILD_GPA_PAGE_SET_CORRUPTED                                   NTStatus      = 0xC037000E
+	STATUS_VID_INVALID_NUMA_SETTINGS                                          NTStatus      = 0xC037000F
+	STATUS_VID_INVALID_NUMA_NODE_INDEX                                        NTStatus      = 0xC0370010
+	STATUS_VID_NOTIFICATION_QUEUE_ALREADY_ASSOCIATED                          NTStatus      = 0xC0370011
+	STATUS_VID_INVALID_MEMORY_BLOCK_HANDLE                                    NTStatus      = 0xC0370012
+	STATUS_VID_PAGE_RANGE_OVERFLOW                                            NTStatus      = 0xC0370013
+	STATUS_VID_INVALID_MESSAGE_QUEUE_HANDLE                                   NTStatus      = 0xC0370014
+	STATUS_VID_INVALID_GPA_RANGE_HANDLE                                       NTStatus      = 0xC0370015
+	STATUS_VID_NO_MEMORY_BLOCK_NOTIFICATION_QUEUE                             NTStatus      = 0xC0370016
+	STATUS_VID_MEMORY_BLOCK_LOCK_COUNT_EXCEEDED                               NTStatus      = 0xC0370017
+	STATUS_VID_INVALID_PPM_HANDLE                                             NTStatus      = 0xC0370018
+	STATUS_VID_MBPS_ARE_LOCKED                                                NTStatus      = 0xC0370019
+	STATUS_VID_MESSAGE_QUEUE_CLOSED                                           NTStatus      = 0xC037001A
+	STATUS_VID_VIRTUAL_PROCESSOR_LIMIT_EXCEEDED                               NTStatus      = 0xC037001B
+	STATUS_VID_STOP_PENDING                                                   NTStatus      = 0xC037001C
+	STATUS_VID_INVALID_PROCESSOR_STATE                                        NTStatus      = 0xC037001D
+	STATUS_VID_EXCEEDED_KM_CONTEXT_COUNT_LIMIT                                NTStatus      = 0xC037001E
+	STATUS_VID_KM_INTERFACE_ALREADY_INITIALIZED                               NTStatus      = 0xC037001F
+	STATUS_VID_MB_PROPERTY_ALREADY_SET_RESET                                  NTStatus      = 0xC0370020
+	STATUS_VID_MMIO_RANGE_DESTROYED                                           NTStatus      = 0xC0370021
+	STATUS_VID_INVALID_CHILD_GPA_PAGE_SET                                     NTStatus      = 0xC0370022
+	STATUS_VID_RESERVE_PAGE_SET_IS_BEING_USED                                 NTStatus      = 0xC0370023
+	STATUS_VID_RESERVE_PAGE_SET_TOO_SMALL                                     NTStatus      = 0xC0370024
+	STATUS_VID_MBP_ALREADY_LOCKED_USING_RESERVED_PAGE                         NTStatus      = 0xC0370025
+	STATUS_VID_MBP_COUNT_EXCEEDED_LIMIT                                       NTStatus      = 0xC0370026
+	STATUS_VID_SAVED_STATE_CORRUPT                                            NTStatus      = 0xC0370027
+	STATUS_VID_SAVED_STATE_UNRECOGNIZED_ITEM                                  NTStatus      = 0xC0370028
+	STATUS_VID_SAVED_STATE_INCOMPATIBLE                                       NTStatus      = 0xC0370029
+	STATUS_VID_VTL_ACCESS_DENIED                                              NTStatus      = 0xC037002A
+	STATUS_VID_REMOTE_NODE_PARENT_GPA_PAGES_USED                              NTStatus      = 0x80370001
+	STATUS_IPSEC_BAD_SPI                                                      NTStatus      = 0xC0360001
+	STATUS_IPSEC_SA_LIFETIME_EXPIRED                                          NTStatus      = 0xC0360002
+	STATUS_IPSEC_WRONG_SA                                                     NTStatus      = 0xC0360003
+	STATUS_IPSEC_REPLAY_CHECK_FAILED                                          NTStatus      = 0xC0360004
+	STATUS_IPSEC_INVALID_PACKET                                               NTStatus      = 0xC0360005
+	STATUS_IPSEC_INTEGRITY_CHECK_FAILED                                       NTStatus      = 0xC0360006
+	STATUS_IPSEC_CLEAR_TEXT_DROP                                              NTStatus      = 0xC0360007
+	STATUS_IPSEC_AUTH_FIREWALL_DROP                                           NTStatus      = 0xC0360008
+	STATUS_IPSEC_THROTTLE_DROP                                                NTStatus      = 0xC0360009
+	STATUS_IPSEC_DOSP_BLOCK                                                   NTStatus      = 0xC0368000
+	STATUS_IPSEC_DOSP_RECEIVED_MULTICAST                                      NTStatus      = 0xC0368001
+	STATUS_IPSEC_DOSP_INVALID_PACKET                                          NTStatus      = 0xC0368002
+	STATUS_IPSEC_DOSP_STATE_LOOKUP_FAILED                                     NTStatus      = 0xC0368003
+	STATUS_IPSEC_DOSP_MAX_ENTRIES                                             NTStatus      = 0xC0368004
+	STATUS_IPSEC_DOSP_KEYMOD_NOT_ALLOWED                                      NTStatus      = 0xC0368005
+	STATUS_IPSEC_DOSP_MAX_PER_IP_RATELIMIT_QUEUES                             NTStatus      = 0xC0368006
+	STATUS_VOLMGR_INCOMPLETE_REGENERATION                                     NTStatus      = 0x80380001
+	STATUS_VOLMGR_INCOMPLETE_DISK_MIGRATION                                   NTStatus      = 0x80380002
+	STATUS_VOLMGR_DATABASE_FULL                                               NTStatus      = 0xC0380001
+	STATUS_VOLMGR_DISK_CONFIGURATION_CORRUPTED                                NTStatus      = 0xC0380002
+	STATUS_VOLMGR_DISK_CONFIGURATION_NOT_IN_SYNC                              NTStatus      = 0xC0380003
+	STATUS_VOLMGR_PACK_CONFIG_UPDATE_FAILED                                   NTStatus      = 0xC0380004
+	STATUS_VOLMGR_DISK_CONTAINS_NON_SIMPLE_VOLUME                             NTStatus      = 0xC0380005
+	STATUS_VOLMGR_DISK_DUPLICATE                                              NTStatus      = 0xC0380006
+	STATUS_VOLMGR_DISK_DYNAMIC                                                NTStatus      = 0xC0380007
+	STATUS_VOLMGR_DISK_ID_INVALID                                             NTStatus      = 0xC0380008
+	STATUS_VOLMGR_DISK_INVALID                                                NTStatus      = 0xC0380009
+	STATUS_VOLMGR_DISK_LAST_VOTER                                             NTStatus      = 0xC038000A
+	STATUS_VOLMGR_DISK_LAYOUT_INVALID                                         NTStatus      = 0xC038000B
+	STATUS_VOLMGR_DISK_LAYOUT_NON_BASIC_BETWEEN_BASIC_PARTITIONS              NTStatus      = 0xC038000C
+	STATUS_VOLMGR_DISK_LAYOUT_NOT_CYLINDER_ALIGNED                            NTStatus      = 0xC038000D
+	STATUS_VOLMGR_DISK_LAYOUT_PARTITIONS_TOO_SMALL                            NTStatus      = 0xC038000E
+	STATUS_VOLMGR_DISK_LAYOUT_PRIMARY_BETWEEN_LOGICAL_PARTITIONS              NTStatus      = 0xC038000F
+	STATUS_VOLMGR_DISK_LAYOUT_TOO_MANY_PARTITIONS                             NTStatus      = 0xC0380010
+	STATUS_VOLMGR_DISK_MISSING                                                NTStatus      = 0xC0380011
+	STATUS_VOLMGR_DISK_NOT_EMPTY                                              NTStatus      = 0xC0380012
+	STATUS_VOLMGR_DISK_NOT_ENOUGH_SPACE                                       NTStatus      = 0xC0380013
+	STATUS_VOLMGR_DISK_REVECTORING_FAILED                                     NTStatus      = 0xC0380014
+	STATUS_VOLMGR_DISK_SECTOR_SIZE_INVALID                                    NTStatus      = 0xC0380015
+	STATUS_VOLMGR_DISK_SET_NOT_CONTAINED                                      NTStatus      = 0xC0380016
+	STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_MEMBERS                               NTStatus      = 0xC0380017
+	STATUS_VOLMGR_DISK_USED_BY_MULTIPLE_PLEXES                                NTStatus      = 0xC0380018
+	STATUS_VOLMGR_DYNAMIC_DISK_NOT_SUPPORTED                                  NTStatus      = 0xC0380019
+	STATUS_VOLMGR_EXTENT_ALREADY_USED                                         NTStatus      = 0xC038001A
+	STATUS_VOLMGR_EXTENT_NOT_CONTIGUOUS                                       NTStatus      = 0xC038001B
+	STATUS_VOLMGR_EXTENT_NOT_IN_PUBLIC_REGION                                 NTStatus      = 0xC038001C
+	STATUS_VOLMGR_EXTENT_NOT_SECTOR_ALIGNED                                   NTStatus      = 0xC038001D
+	STATUS_VOLMGR_EXTENT_OVERLAPS_EBR_PARTITION                               NTStatus      = 0xC038001E
+	STATUS_VOLMGR_EXTENT_VOLUME_LENGTHS_DO_NOT_MATCH                          NTStatus      = 0xC038001F
+	STATUS_VOLMGR_FAULT_TOLERANT_NOT_SUPPORTED                                NTStatus      = 0xC0380020
+	STATUS_VOLMGR_INTERLEAVE_LENGTH_INVALID                                   NTStatus      = 0xC0380021
+	STATUS_VOLMGR_MAXIMUM_REGISTERED_USERS                                    NTStatus      = 0xC0380022
+	STATUS_VOLMGR_MEMBER_IN_SYNC                                              NTStatus      = 0xC0380023
+	STATUS_VOLMGR_MEMBER_INDEX_DUPLICATE                                      NTStatus      = 0xC0380024
+	STATUS_VOLMGR_MEMBER_INDEX_INVALID                                        NTStatus      = 0xC0380025
+	STATUS_VOLMGR_MEMBER_MISSING                                              NTStatus      = 0xC0380026
+	STATUS_VOLMGR_MEMBER_NOT_DETACHED                                         NTStatus      = 0xC0380027
+	STATUS_VOLMGR_MEMBER_REGENERATING                                         NTStatus      = 0xC0380028
+	STATUS_VOLMGR_ALL_DISKS_FAILED                                            NTStatus      = 0xC0380029
+	STATUS_VOLMGR_NO_REGISTERED_USERS                                         NTStatus      = 0xC038002A
+	STATUS_VOLMGR_NO_SUCH_USER                                                NTStatus      = 0xC038002B
+	STATUS_VOLMGR_NOTIFICATION_RESET                                          NTStatus      = 0xC038002C
+	STATUS_VOLMGR_NUMBER_OF_MEMBERS_INVALID                                   NTStatus      = 0xC038002D
+	STATUS_VOLMGR_NUMBER_OF_PLEXES_INVALID                                    NTStatus      = 0xC038002E
+	STATUS_VOLMGR_PACK_DUPLICATE                                              NTStatus      = 0xC038002F
+	STATUS_VOLMGR_PACK_ID_INVALID                                             NTStatus      = 0xC0380030
+	STATUS_VOLMGR_PACK_INVALID                                                NTStatus      = 0xC0380031
+	STATUS_VOLMGR_PACK_NAME_INVALID                                           NTStatus      = 0xC0380032
+	STATUS_VOLMGR_PACK_OFFLINE                                                NTStatus      = 0xC0380033
+	STATUS_VOLMGR_PACK_HAS_QUORUM                                             NTStatus      = 0xC0380034
+	STATUS_VOLMGR_PACK_WITHOUT_QUORUM                                         NTStatus      = 0xC0380035
+	STATUS_VOLMGR_PARTITION_STYLE_INVALID                                     NTStatus      = 0xC0380036
+	STATUS_VOLMGR_PARTITION_UPDATE_FAILED                                     NTStatus      = 0xC0380037
+	STATUS_VOLMGR_PLEX_IN_SYNC                                                NTStatus      = 0xC0380038
+	STATUS_VOLMGR_PLEX_INDEX_DUPLICATE                                        NTStatus      = 0xC0380039
+	STATUS_VOLMGR_PLEX_INDEX_INVALID                                          NTStatus      = 0xC038003A
+	STATUS_VOLMGR_PLEX_LAST_ACTIVE                                            NTStatus      = 0xC038003B
+	STATUS_VOLMGR_PLEX_MISSING                                                NTStatus      = 0xC038003C
+	STATUS_VOLMGR_PLEX_REGENERATING                                           NTStatus      = 0xC038003D
+	STATUS_VOLMGR_PLEX_TYPE_INVALID                                           NTStatus      = 0xC038003E
+	STATUS_VOLMGR_PLEX_NOT_RAID5                                              NTStatus      = 0xC038003F
+	STATUS_VOLMGR_PLEX_NOT_SIMPLE                                             NTStatus      = 0xC0380040
+	STATUS_VOLMGR_STRUCTURE_SIZE_INVALID                                      NTStatus      = 0xC0380041
+	STATUS_VOLMGR_TOO_MANY_NOTIFICATION_REQUESTS                              NTStatus      = 0xC0380042
+	STATUS_VOLMGR_TRANSACTION_IN_PROGRESS                                     NTStatus      = 0xC0380043
+	STATUS_VOLMGR_UNEXPECTED_DISK_LAYOUT_CHANGE                               NTStatus      = 0xC0380044
+	STATUS_VOLMGR_VOLUME_CONTAINS_MISSING_DISK                                NTStatus      = 0xC0380045
+	STATUS_VOLMGR_VOLUME_ID_INVALID                                           NTStatus      = 0xC0380046
+	STATUS_VOLMGR_VOLUME_LENGTH_INVALID                                       NTStatus      = 0xC0380047
+	STATUS_VOLMGR_VOLUME_LENGTH_NOT_SECTOR_SIZE_MULTIPLE                      NTStatus      = 0xC0380048
+	STATUS_VOLMGR_VOLUME_NOT_MIRRORED                                         NTStatus      = 0xC0380049
+	STATUS_VOLMGR_VOLUME_NOT_RETAINED                                         NTStatus      = 0xC038004A
+	STATUS_VOLMGR_VOLUME_OFFLINE                                              NTStatus      = 0xC038004B
+	STATUS_VOLMGR_VOLUME_RETAINED                                             NTStatus      = 0xC038004C
+	STATUS_VOLMGR_NUMBER_OF_EXTENTS_INVALID                                   NTStatus      = 0xC038004D
+	STATUS_VOLMGR_DIFFERENT_SECTOR_SIZE                                       NTStatus      = 0xC038004E
+	STATUS_VOLMGR_BAD_BOOT_DISK                                               NTStatus      = 0xC038004F
+	STATUS_VOLMGR_PACK_CONFIG_OFFLINE                                         NTStatus      = 0xC0380050
+	STATUS_VOLMGR_PACK_CONFIG_ONLINE                                          NTStatus      = 0xC0380051
+	STATUS_VOLMGR_NOT_PRIMARY_PACK                                            NTStatus      = 0xC0380052
+	STATUS_VOLMGR_PACK_LOG_UPDATE_FAILED                                      NTStatus      = 0xC0380053
+	STATUS_VOLMGR_NUMBER_OF_DISKS_IN_PLEX_INVALID                             NTStatus      = 0xC0380054
+	STATUS_VOLMGR_NUMBER_OF_DISKS_IN_MEMBER_INVALID                           NTStatus      = 0xC0380055
+	STATUS_VOLMGR_VOLUME_MIRRORED                                             NTStatus      = 0xC0380056
+	STATUS_VOLMGR_PLEX_NOT_SIMPLE_SPANNED                                     NTStatus      = 0xC0380057
+	STATUS_VOLMGR_NO_VALID_LOG_COPIES                                         NTStatus      = 0xC0380058
+	STATUS_VOLMGR_PRIMARY_PACK_PRESENT                                        NTStatus      = 0xC0380059
+	STATUS_VOLMGR_NUMBER_OF_DISKS_INVALID                                     NTStatus      = 0xC038005A
+	STATUS_VOLMGR_MIRROR_NOT_SUPPORTED                                        NTStatus      = 0xC038005B
+	STATUS_VOLMGR_RAID5_NOT_SUPPORTED                                         NTStatus      = 0xC038005C
+	STATUS_BCD_NOT_ALL_ENTRIES_IMPORTED                                       NTStatus      = 0x80390001
+	STATUS_BCD_TOO_MANY_ELEMENTS                                              NTStatus      = 0xC0390002
+	STATUS_BCD_NOT_ALL_ENTRIES_SYNCHRONIZED                                   NTStatus      = 0x80390003
+	STATUS_VHD_DRIVE_FOOTER_MISSING                                           NTStatus      = 0xC03A0001
+	STATUS_VHD_DRIVE_FOOTER_CHECKSUM_MISMATCH                                 NTStatus      = 0xC03A0002
+	STATUS_VHD_DRIVE_FOOTER_CORRUPT                                           NTStatus      = 0xC03A0003
+	STATUS_VHD_FORMAT_UNKNOWN                                                 NTStatus      = 0xC03A0004
+	STATUS_VHD_FORMAT_UNSUPPORTED_VERSION                                     NTStatus      = 0xC03A0005
+	STATUS_VHD_SPARSE_HEADER_CHECKSUM_MISMATCH                                NTStatus      = 0xC03A0006
+	STATUS_VHD_SPARSE_HEADER_UNSUPPORTED_VERSION                              NTStatus      = 0xC03A0007
+	STATUS_VHD_SPARSE_HEADER_CORRUPT                                          NTStatus      = 0xC03A0008
+	STATUS_VHD_BLOCK_ALLOCATION_FAILURE                                       NTStatus      = 0xC03A0009
+	STATUS_VHD_BLOCK_ALLOCATION_TABLE_CORRUPT                                 NTStatus      = 0xC03A000A
+	STATUS_VHD_INVALID_BLOCK_SIZE                                             NTStatus      = 0xC03A000B
+	STATUS_VHD_BITMAP_MISMATCH                                                NTStatus      = 0xC03A000C
+	STATUS_VHD_PARENT_VHD_NOT_FOUND                                           NTStatus      = 0xC03A000D
+	STATUS_VHD_CHILD_PARENT_ID_MISMATCH                                       NTStatus      = 0xC03A000E
+	STATUS_VHD_CHILD_PARENT_TIMESTAMP_MISMATCH                                NTStatus      = 0xC03A000F
+	STATUS_VHD_METADATA_READ_FAILURE                                          NTStatus      = 0xC03A0010
+	STATUS_VHD_METADATA_WRITE_FAILURE                                         NTStatus      = 0xC03A0011
+	STATUS_VHD_INVALID_SIZE                                                   NTStatus      = 0xC03A0012
+	STATUS_VHD_INVALID_FILE_SIZE                                              NTStatus      = 0xC03A0013
+	STATUS_VIRTDISK_PROVIDER_NOT_FOUND                                        NTStatus      = 0xC03A0014
+	STATUS_VIRTDISK_NOT_VIRTUAL_DISK                                          NTStatus      = 0xC03A0015
+	STATUS_VHD_PARENT_VHD_ACCESS_DENIED                                       NTStatus      = 0xC03A0016
+	STATUS_VHD_CHILD_PARENT_SIZE_MISMATCH                                     NTStatus      = 0xC03A0017
+	STATUS_VHD_DIFFERENCING_CHAIN_CYCLE_DETECTED                              NTStatus      = 0xC03A0018
+	STATUS_VHD_DIFFERENCING_CHAIN_ERROR_IN_PARENT                             NTStatus      = 0xC03A0019
+	STATUS_VIRTUAL_DISK_LIMITATION                                            NTStatus      = 0xC03A001A
+	STATUS_VHD_INVALID_TYPE                                                   NTStatus      = 0xC03A001B
+	STATUS_VHD_INVALID_STATE                                                  NTStatus      = 0xC03A001C
+	STATUS_VIRTDISK_UNSUPPORTED_DISK_SECTOR_SIZE                              NTStatus      = 0xC03A001D
+	STATUS_VIRTDISK_DISK_ALREADY_OWNED                                        NTStatus      = 0xC03A001E
+	STATUS_VIRTDISK_DISK_ONLINE_AND_WRITABLE                                  NTStatus      = 0xC03A001F
+	STATUS_CTLOG_TRACKING_NOT_INITIALIZED                                     NTStatus      = 0xC03A0020
+	STATUS_CTLOG_LOGFILE_SIZE_EXCEEDED_MAXSIZE                                NTStatus      = 0xC03A0021
+	STATUS_CTLOG_VHD_CHANGED_OFFLINE                                          NTStatus      = 0xC03A0022
+	STATUS_CTLOG_INVALID_TRACKING_STATE                                       NTStatus      = 0xC03A0023
+	STATUS_CTLOG_INCONSISTENT_TRACKING_FILE                                   NTStatus      = 0xC03A0024
+	STATUS_VHD_METADATA_FULL                                                  NTStatus      = 0xC03A0028
+	STATUS_VHD_INVALID_CHANGE_TRACKING_ID                                     NTStatus      = 0xC03A0029
+	STATUS_VHD_CHANGE_TRACKING_DISABLED                                       NTStatus      = 0xC03A002A
+	STATUS_VHD_MISSING_CHANGE_TRACKING_INFORMATION                            NTStatus      = 0xC03A0030
+	STATUS_VHD_RESIZE_WOULD_TRUNCATE_DATA                                     NTStatus      = 0xC03A0031
+	STATUS_VHD_COULD_NOT_COMPUTE_MINIMUM_VIRTUAL_SIZE                         NTStatus      = 0xC03A0032
+	STATUS_VHD_ALREADY_AT_OR_BELOW_MINIMUM_VIRTUAL_SIZE                       NTStatus      = 0xC03A0033
+	STATUS_QUERY_STORAGE_ERROR                                                NTStatus      = 0x803A0001
+	STATUS_GDI_HANDLE_LEAK                                                    NTStatus      = 0x803F0001
+	STATUS_RKF_KEY_NOT_FOUND                                                  NTStatus      = 0xC0400001
+	STATUS_RKF_DUPLICATE_KEY                                                  NTStatus      = 0xC0400002
+	STATUS_RKF_BLOB_FULL                                                      NTStatus      = 0xC0400003
+	STATUS_RKF_STORE_FULL                                                     NTStatus      = 0xC0400004
+	STATUS_RKF_FILE_BLOCKED                                                   NTStatus      = 0xC0400005
+	STATUS_RKF_ACTIVE_KEY                                                     NTStatus      = 0xC0400006
+	STATUS_RDBSS_RESTART_OPERATION                                            NTStatus      = 0xC0410001
+	STATUS_RDBSS_CONTINUE_OPERATION                                           NTStatus      = 0xC0410002
+	STATUS_RDBSS_POST_OPERATION                                               NTStatus      = 0xC0410003
+	STATUS_RDBSS_RETRY_LOOKUP                                                 NTStatus      = 0xC0410004
+	STATUS_BTH_ATT_INVALID_HANDLE                                             NTStatus      = 0xC0420001
+	STATUS_BTH_ATT_READ_NOT_PERMITTED                                         NTStatus      = 0xC0420002
+	STATUS_BTH_ATT_WRITE_NOT_PERMITTED                                        NTStatus      = 0xC0420003
+	STATUS_BTH_ATT_INVALID_PDU                                                NTStatus      = 0xC0420004
+	STATUS_BTH_ATT_INSUFFICIENT_AUTHENTICATION                                NTStatus      = 0xC0420005
+	STATUS_BTH_ATT_REQUEST_NOT_SUPPORTED                                      NTStatus      = 0xC0420006
+	STATUS_BTH_ATT_INVALID_OFFSET                                             NTStatus      = 0xC0420007
+	STATUS_BTH_ATT_INSUFFICIENT_AUTHORIZATION                                 NTStatus      = 0xC0420008
+	STATUS_BTH_ATT_PREPARE_QUEUE_FULL                                         NTStatus      = 0xC0420009
+	STATUS_BTH_ATT_ATTRIBUTE_NOT_FOUND                                        NTStatus      = 0xC042000A
+	STATUS_BTH_ATT_ATTRIBUTE_NOT_LONG                                         NTStatus      = 0xC042000B
+	STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION_KEY_SIZE                           NTStatus      = 0xC042000C
+	STATUS_BTH_ATT_INVALID_ATTRIBUTE_VALUE_LENGTH                             NTStatus      = 0xC042000D
+	STATUS_BTH_ATT_UNLIKELY                                                   NTStatus      = 0xC042000E
+	STATUS_BTH_ATT_INSUFFICIENT_ENCRYPTION                                    NTStatus      = 0xC042000F
+	STATUS_BTH_ATT_UNSUPPORTED_GROUP_TYPE                                     NTStatus      = 0xC0420010
+	STATUS_BTH_ATT_INSUFFICIENT_RESOURCES                                     NTStatus      = 0xC0420011
+	STATUS_BTH_ATT_UNKNOWN_ERROR                                              NTStatus      = 0xC0421000
+	STATUS_SECUREBOOT_ROLLBACK_DETECTED                                       NTStatus      = 0xC0430001
+	STATUS_SECUREBOOT_POLICY_VIOLATION                                        NTStatus      = 0xC0430002
+	STATUS_SECUREBOOT_INVALID_POLICY                                          NTStatus      = 0xC0430003
+	STATUS_SECUREBOOT_POLICY_PUBLISHER_NOT_FOUND                              NTStatus      = 0xC0430004
+	STATUS_SECUREBOOT_POLICY_NOT_SIGNED                                       NTStatus      = 0xC0430005
+	STATUS_SECUREBOOT_NOT_ENABLED                                             NTStatus      = 0x80430006
+	STATUS_SECUREBOOT_FILE_REPLACED                                           NTStatus      = 0xC0430007
+	STATUS_SECUREBOOT_POLICY_NOT_AUTHORIZED                                   NTStatus      = 0xC0430008
+	STATUS_SECUREBOOT_POLICY_UNKNOWN                                          NTStatus      = 0xC0430009
+	STATUS_SECUREBOOT_POLICY_MISSING_ANTIROLLBACKVERSION                      NTStatus      = 0xC043000A
+	STATUS_SECUREBOOT_PLATFORM_ID_MISMATCH                                    NTStatus      = 0xC043000B
+	STATUS_SECUREBOOT_POLICY_ROLLBACK_DETECTED                                NTStatus      = 0xC043000C
+	STATUS_SECUREBOOT_POLICY_UPGRADE_MISMATCH                                 NTStatus      = 0xC043000D
+	STATUS_SECUREBOOT_REQUIRED_POLICY_FILE_MISSING                            NTStatus      = 0xC043000E
+	STATUS_SECUREBOOT_NOT_BASE_POLICY                                         NTStatus      = 0xC043000F
+	STATUS_SECUREBOOT_NOT_SUPPLEMENTAL_POLICY                                 NTStatus      = 0xC0430010
+	STATUS_PLATFORM_MANIFEST_NOT_AUTHORIZED                                   NTStatus      = 0xC0EB0001
+	STATUS_PLATFORM_MANIFEST_INVALID                                          NTStatus      = 0xC0EB0002
+	STATUS_PLATFORM_MANIFEST_FILE_NOT_AUTHORIZED                              NTStatus      = 0xC0EB0003
+	STATUS_PLATFORM_MANIFEST_CATALOG_NOT_AUTHORIZED                           NTStatus      = 0xC0EB0004
+	STATUS_PLATFORM_MANIFEST_BINARY_ID_NOT_FOUND                              NTStatus      = 0xC0EB0005
+	STATUS_PLATFORM_MANIFEST_NOT_ACTIVE                                       NTStatus      = 0xC0EB0006
+	STATUS_PLATFORM_MANIFEST_NOT_SIGNED                                       NTStatus      = 0xC0EB0007
+	STATUS_SYSTEM_INTEGRITY_ROLLBACK_DETECTED                                 NTStatus      = 0xC0E90001
+	STATUS_SYSTEM_INTEGRITY_POLICY_VIOLATION                                  NTStatus      = 0xC0E90002
+	STATUS_SYSTEM_INTEGRITY_INVALID_POLICY                                    NTStatus      = 0xC0E90003
+	STATUS_SYSTEM_INTEGRITY_POLICY_NOT_SIGNED                                 NTStatus      = 0xC0E90004
+	STATUS_SYSTEM_INTEGRITY_TOO_MANY_POLICIES                                 NTStatus      = 0xC0E90005
+	STATUS_SYSTEM_INTEGRITY_SUPPLEMENTAL_POLICY_NOT_AUTHORIZED                NTStatus      = 0xC0E90006
+	STATUS_NO_APPLICABLE_APP_LICENSES_FOUND                                   NTStatus      = 0xC0EA0001
+	STATUS_CLIP_LICENSE_NOT_FOUND                                             NTStatus      = 0xC0EA0002
+	STATUS_CLIP_DEVICE_LICENSE_MISSING                                        NTStatus      = 0xC0EA0003
+	STATUS_CLIP_LICENSE_INVALID_SIGNATURE                                     NTStatus      = 0xC0EA0004
+	STATUS_CLIP_KEYHOLDER_LICENSE_MISSING_OR_INVALID                          NTStatus      = 0xC0EA0005
+	STATUS_CLIP_LICENSE_EXPIRED                                               NTStatus      = 0xC0EA0006
+	STATUS_CLIP_LICENSE_SIGNED_BY_UNKNOWN_SOURCE                              NTStatus      = 0xC0EA0007
+	STATUS_CLIP_LICENSE_NOT_SIGNED                                            NTStatus      = 0xC0EA0008
+	STATUS_CLIP_LICENSE_HARDWARE_ID_OUT_OF_TOLERANCE                          NTStatus      = 0xC0EA0009
+	STATUS_CLIP_LICENSE_DEVICE_ID_MISMATCH                                    NTStatus      = 0xC0EA000A
+	STATUS_AUDIO_ENGINE_NODE_NOT_FOUND                                        NTStatus      = 0xC0440001
+	STATUS_HDAUDIO_EMPTY_CONNECTION_LIST                                      NTStatus      = 0xC0440002
+	STATUS_HDAUDIO_CONNECTION_LIST_NOT_SUPPORTED                              NTStatus      = 0xC0440003
+	STATUS_HDAUDIO_NO_LOGICAL_DEVICES_CREATED                                 NTStatus      = 0xC0440004
+	STATUS_HDAUDIO_NULL_LINKED_LIST_ENTRY                                     NTStatus      = 0xC0440005
+	STATUS_SPACES_REPAIRED                                                    NTStatus      = 0x00E70000
+	STATUS_SPACES_PAUSE                                                       NTStatus      = 0x00E70001
+	STATUS_SPACES_COMPLETE                                                    NTStatus      = 0x00E70002
+	STATUS_SPACES_REDIRECT                                                    NTStatus      = 0x00E70003
+	STATUS_SPACES_FAULT_DOMAIN_TYPE_INVALID                                   NTStatus      = 0xC0E70001
+	STATUS_SPACES_RESILIENCY_TYPE_INVALID                                     NTStatus      = 0xC0E70003
+	STATUS_SPACES_DRIVE_SECTOR_SIZE_INVALID                                   NTStatus      = 0xC0E70004
+	STATUS_SPACES_DRIVE_REDUNDANCY_INVALID                                    NTStatus      = 0xC0E70006
+	STATUS_SPACES_NUMBER_OF_DATA_COPIES_INVALID                               NTStatus      = 0xC0E70007
+	STATUS_SPACES_INTERLEAVE_LENGTH_INVALID                                   NTStatus      = 0xC0E70009
+	STATUS_SPACES_NUMBER_OF_COLUMNS_INVALID                                   NTStatus      = 0xC0E7000A
+	STATUS_SPACES_NOT_ENOUGH_DRIVES                                           NTStatus      = 0xC0E7000B
+	STATUS_SPACES_EXTENDED_ERROR                                              NTStatus      = 0xC0E7000C
+	STATUS_SPACES_PROVISIONING_TYPE_INVALID                                   NTStatus      = 0xC0E7000D
+	STATUS_SPACES_ALLOCATION_SIZE_INVALID                                     NTStatus      = 0xC0E7000E
+	STATUS_SPACES_ENCLOSURE_AWARE_INVALID                                     NTStatus      = 0xC0E7000F
+	STATUS_SPACES_WRITE_CACHE_SIZE_INVALID                                    NTStatus      = 0xC0E70010
+	STATUS_SPACES_NUMBER_OF_GROUPS_INVALID                                    NTStatus      = 0xC0E70011
+	STATUS_SPACES_DRIVE_OPERATIONAL_STATE_INVALID                             NTStatus      = 0xC0E70012
+	STATUS_SPACES_UPDATE_COLUMN_STATE                                         NTStatus      = 0xC0E70013
+	STATUS_SPACES_MAP_REQUIRED                                                NTStatus      = 0xC0E70014
+	STATUS_SPACES_UNSUPPORTED_VERSION                                         NTStatus      = 0xC0E70015
+	STATUS_SPACES_CORRUPT_METADATA                                            NTStatus      = 0xC0E70016
+	STATUS_SPACES_DRT_FULL                                                    NTStatus      = 0xC0E70017
+	STATUS_SPACES_INCONSISTENCY                                               NTStatus      = 0xC0E70018
+	STATUS_SPACES_LOG_NOT_READY                                               NTStatus      = 0xC0E70019
+	STATUS_SPACES_NO_REDUNDANCY                                               NTStatus      = 0xC0E7001A
+	STATUS_SPACES_DRIVE_NOT_READY                                             NTStatus      = 0xC0E7001B
+	STATUS_SPACES_DRIVE_SPLIT                                                 NTStatus      = 0xC0E7001C
+	STATUS_SPACES_DRIVE_LOST_DATA                                             NTStatus      = 0xC0E7001D
+	STATUS_SPACES_ENTRY_INCOMPLETE                                            NTStatus      = 0xC0E7001E
+	STATUS_SPACES_ENTRY_INVALID                                               NTStatus      = 0xC0E7001F
+	STATUS_SPACES_MARK_DIRTY                                                  NTStatus      = 0xC0E70020
+	STATUS_VOLSNAP_BOOTFILE_NOT_VALID                                         NTStatus      = 0xC0500003
+	STATUS_VOLSNAP_ACTIVATION_TIMEOUT                                         NTStatus      = 0xC0500004
+	STATUS_IO_PREEMPTED                                                       NTStatus      = 0xC0510001
+	STATUS_SVHDX_ERROR_STORED                                                 NTStatus      = 0xC05C0000
+	STATUS_SVHDX_ERROR_NOT_AVAILABLE                                          NTStatus      = 0xC05CFF00
+	STATUS_SVHDX_UNIT_ATTENTION_AVAILABLE                                     NTStatus      = 0xC05CFF01
+	STATUS_SVHDX_UNIT_ATTENTION_CAPACITY_DATA_CHANGED                         NTStatus      = 0xC05CFF02
+	STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_PREEMPTED                        NTStatus      = 0xC05CFF03
+	STATUS_SVHDX_UNIT_ATTENTION_RESERVATIONS_RELEASED                         NTStatus      = 0xC05CFF04
+	STATUS_SVHDX_UNIT_ATTENTION_REGISTRATIONS_PREEMPTED                       NTStatus      = 0xC05CFF05
+	STATUS_SVHDX_UNIT_ATTENTION_OPERATING_DEFINITION_CHANGED                  NTStatus      = 0xC05CFF06
+	STATUS_SVHDX_RESERVATION_CONFLICT                                         NTStatus      = 0xC05CFF07
+	STATUS_SVHDX_WRONG_FILE_TYPE                                              NTStatus      = 0xC05CFF08
+	STATUS_SVHDX_VERSION_MISMATCH                                             NTStatus      = 0xC05CFF09
+	STATUS_VHD_SHARED                                                         NTStatus      = 0xC05CFF0A
+	STATUS_SVHDX_NO_INITIATOR                                                 NTStatus      = 0xC05CFF0B
+	STATUS_VHDSET_BACKING_STORAGE_NOT_FOUND                                   NTStatus      = 0xC05CFF0C
+	STATUS_SMB_NO_PREAUTH_INTEGRITY_HASH_OVERLAP                              NTStatus      = 0xC05D0000
+	STATUS_SMB_BAD_CLUSTER_DIALECT                                            NTStatus      = 0xC05D0001
+	STATUS_SMB_GUEST_LOGON_BLOCKED                                            NTStatus      = 0xC05D0002
+	STATUS_SECCORE_INVALID_COMMAND                                            NTStatus      = 0xC0E80000
+	STATUS_VSM_NOT_INITIALIZED                                                NTStatus      = 0xC0450000
+	STATUS_VSM_DMA_PROTECTION_NOT_IN_USE                                      NTStatus      = 0xC0450001
+	STATUS_APPEXEC_CONDITION_NOT_SATISFIED                                    NTStatus      = 0xC0EC0000
+	STATUS_APPEXEC_HANDLE_INVALIDATED                                         NTStatus      = 0xC0EC0001
+	STATUS_APPEXEC_INVALID_HOST_GENERATION                                    NTStatus      = 0xC0EC0002
+	STATUS_APPEXEC_UNEXPECTED_PROCESS_REGISTRATION                            NTStatus      = 0xC0EC0003
+	STATUS_APPEXEC_INVALID_HOST_STATE                                         NTStatus      = 0xC0EC0004
+	STATUS_APPEXEC_NO_DONOR                                                   NTStatus      = 0xC0EC0005
+	STATUS_APPEXEC_HOST_ID_MISMATCH                                           NTStatus      = 0xC0EC0006
+	STATUS_APPEXEC_UNKNOWN_USER                                               NTStatus      = 0xC0EC0007
+)
diff --git a/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
new file mode 100644
index 0000000..6048ac6
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zknownfolderids_windows.go
@@ -0,0 +1,149 @@
+// Code generated by 'mkknownfolderids.bash'; DO NOT EDIT.
+
+package windows
+
+type KNOWNFOLDERID GUID
+
+var (
+	FOLDERID_NetworkFolder          = &KNOWNFOLDERID{0xd20beec4, 0x5ca8, 0x4905, [8]byte{0xae, 0x3b, 0xbf, 0x25, 0x1e, 0xa0, 0x9b, 0x53}}
+	FOLDERID_ComputerFolder         = &KNOWNFOLDERID{0x0ac0837c, 0xbbf8, 0x452a, [8]byte{0x85, 0x0d, 0x79, 0xd0, 0x8e, 0x66, 0x7c, 0xa7}}
+	FOLDERID_InternetFolder         = &KNOWNFOLDERID{0x4d9f7874, 0x4e0c, 0x4904, [8]byte{0x96, 0x7b, 0x40, 0xb0, 0xd2, 0x0c, 0x3e, 0x4b}}
+	FOLDERID_ControlPanelFolder     = &KNOWNFOLDERID{0x82a74aeb, 0xaeb4, 0x465c, [8]byte{0xa0, 0x14, 0xd0, 0x97, 0xee, 0x34, 0x6d, 0x63}}
+	FOLDERID_PrintersFolder         = &KNOWNFOLDERID{0x76fc4e2d, 0xd6ad, 0x4519, [8]byte{0xa6, 0x63, 0x37, 0xbd, 0x56, 0x06, 0x81, 0x85}}
+	FOLDERID_SyncManagerFolder      = &KNOWNFOLDERID{0x43668bf8, 0xc14e, 0x49b2, [8]byte{0x97, 0xc9, 0x74, 0x77, 0x84, 0xd7, 0x84, 0xb7}}
+	FOLDERID_SyncSetupFolder        = &KNOWNFOLDERID{0x0f214138, 0xb1d3, 0x4a90, [8]byte{0xbb, 0xa9, 0x27, 0xcb, 0xc0, 0xc5, 0x38, 0x9a}}
+	FOLDERID_ConflictFolder         = &KNOWNFOLDERID{0x4bfefb45, 0x347d, 0x4006, [8]byte{0xa5, 0xbe, 0xac, 0x0c, 0xb0, 0x56, 0x71, 0x92}}
+	FOLDERID_SyncResultsFolder      = &KNOWNFOLDERID{0x289a9a43, 0xbe44, 0x4057, [8]byte{0xa4, 0x1b, 0x58, 0x7a, 0x76, 0xd7, 0xe7, 0xf9}}
+	FOLDERID_RecycleBinFolder       = &KNOWNFOLDERID{0xb7534046, 0x3ecb, 0x4c18, [8]byte{0xbe, 0x4e, 0x64, 0xcd, 0x4c, 0xb7, 0xd6, 0xac}}
+	FOLDERID_ConnectionsFolder      = &KNOWNFOLDERID{0x6f0cd92b, 0x2e97, 0x45d1, [8]byte{0x88, 0xff, 0xb0, 0xd1, 0x86, 0xb8, 0xde, 0xdd}}
+	FOLDERID_Fonts                  = &KNOWNFOLDERID{0xfd228cb7, 0xae11, 0x4ae3, [8]byte{0x86, 0x4c, 0x16, 0xf3, 0x91, 0x0a, 0xb8, 0xfe}}
+	FOLDERID_Desktop                = &KNOWNFOLDERID{0xb4bfcc3a, 0xdb2c, 0x424c, [8]byte{0xb0, 0x29, 0x7f, 0xe9, 0x9a, 0x87, 0xc6, 0x41}}
+	FOLDERID_Startup                = &KNOWNFOLDERID{0xb97d20bb, 0xf46a, 0x4c97, [8]byte{0xba, 0x10, 0x5e, 0x36, 0x08, 0x43, 0x08, 0x54}}
+	FOLDERID_Programs               = &KNOWNFOLDERID{0xa77f5d77, 0x2e2b, 0x44c3, [8]byte{0xa6, 0xa2, 0xab, 0xa6, 0x01, 0x05, 0x4a, 0x51}}
+	FOLDERID_StartMenu              = &KNOWNFOLDERID{0x625b53c3, 0xab48, 0x4ec1, [8]byte{0xba, 0x1f, 0xa1, 0xef, 0x41, 0x46, 0xfc, 0x19}}
+	FOLDERID_Recent                 = &KNOWNFOLDERID{0xae50c081, 0xebd2, 0x438a, [8]byte{0x86, 0x55, 0x8a, 0x09, 0x2e, 0x34, 0x98, 0x7a}}
+	FOLDERID_SendTo                 = &KNOWNFOLDERID{0x8983036c, 0x27c0, 0x404b, [8]byte{0x8f, 0x08, 0x10, 0x2d, 0x10, 0xdc, 0xfd, 0x74}}
+	FOLDERID_Documents              = &KNOWNFOLDERID{0xfdd39ad0, 0x238f, 0x46af, [8]byte{0xad, 0xb4, 0x6c, 0x85, 0x48, 0x03, 0x69, 0xc7}}
+	FOLDERID_Favorites              = &KNOWNFOLDERID{0x1777f761, 0x68ad, 0x4d8a, [8]byte{0x87, 0xbd, 0x30, 0xb7, 0x59, 0xfa, 0x33, 0xdd}}
+	FOLDERID_NetHood                = &KNOWNFOLDERID{0xc5abbf53, 0xe17f, 0x4121, [8]byte{0x89, 0x00, 0x86, 0x62, 0x6f, 0xc2, 0xc9, 0x73}}
+	FOLDERID_PrintHood              = &KNOWNFOLDERID{0x9274bd8d, 0xcfd1, 0x41c3, [8]byte{0xb3, 0x5e, 0xb1, 0x3f, 0x55, 0xa7, 0x58, 0xf4}}
+	FOLDERID_Templates              = &KNOWNFOLDERID{0xa63293e8, 0x664e, 0x48db, [8]byte{0xa0, 0x79, 0xdf, 0x75, 0x9e, 0x05, 0x09, 0xf7}}
+	FOLDERID_CommonStartup          = &KNOWNFOLDERID{0x82a5ea35, 0xd9cd, 0x47c5, [8]byte{0x96, 0x29, 0xe1, 0x5d, 0x2f, 0x71, 0x4e, 0x6e}}
+	FOLDERID_CommonPrograms         = &KNOWNFOLDERID{0x0139d44e, 0x6afe, 0x49f2, [8]byte{0x86, 0x90, 0x3d, 0xaf, 0xca, 0xe6, 0xff, 0xb8}}
+	FOLDERID_CommonStartMenu        = &KNOWNFOLDERID{0xa4115719, 0xd62e, 0x491d, [8]byte{0xaa, 0x7c, 0xe7, 0x4b, 0x8b, 0xe3, 0xb0, 0x67}}
+	FOLDERID_PublicDesktop          = &KNOWNFOLDERID{0xc4aa340d, 0xf20f, 0x4863, [8]byte{0xaf, 0xef, 0xf8, 0x7e, 0xf2, 0xe6, 0xba, 0x25}}
+	FOLDERID_ProgramData            = &KNOWNFOLDERID{0x62ab5d82, 0xfdc1, 0x4dc3, [8]byte{0xa9, 0xdd, 0x07, 0x0d, 0x1d, 0x49, 0x5d, 0x97}}
+	FOLDERID_CommonTemplates        = &KNOWNFOLDERID{0xb94237e7, 0x57ac, 0x4347, [8]byte{0x91, 0x51, 0xb0, 0x8c, 0x6c, 0x32, 0xd1, 0xf7}}
+	FOLDERID_PublicDocuments        = &KNOWNFOLDERID{0xed4824af, 0xdce4, 0x45a8, [8]byte{0x81, 0xe2, 0xfc, 0x79, 0x65, 0x08, 0x36, 0x34}}
+	FOLDERID_RoamingAppData         = &KNOWNFOLDERID{0x3eb685db, 0x65f9, 0x4cf6, [8]byte{0xa0, 0x3a, 0xe3, 0xef, 0x65, 0x72, 0x9f, 0x3d}}
+	FOLDERID_LocalAppData           = &KNOWNFOLDERID{0xf1b32785, 0x6fba, 0x4fcf, [8]byte{0x9d, 0x55, 0x7b, 0x8e, 0x7f, 0x15, 0x70, 0x91}}
+	FOLDERID_LocalAppDataLow        = &KNOWNFOLDERID{0xa520a1a4, 0x1780, 0x4ff6, [8]byte{0xbd, 0x18, 0x16, 0x73, 0x43, 0xc5, 0xaf, 0x16}}
+	FOLDERID_InternetCache          = &KNOWNFOLDERID{0x352481e8, 0x33be, 0x4251, [8]byte{0xba, 0x85, 0x60, 0x07, 0xca, 0xed, 0xcf, 0x9d}}
+	FOLDERID_Cookies                = &KNOWNFOLDERID{0x2b0f765d, 0xc0e9, 0x4171, [8]byte{0x90, 0x8e, 0x08, 0xa6, 0x11, 0xb8, 0x4f, 0xf6}}
+	FOLDERID_History                = &KNOWNFOLDERID{0xd9dc8a3b, 0xb784, 0x432e, [8]byte{0xa7, 0x81, 0x5a, 0x11, 0x30, 0xa7, 0x59, 0x63}}
+	FOLDERID_System                 = &KNOWNFOLDERID{0x1ac14e77, 0x02e7, 0x4e5d, [8]byte{0xb7, 0x44, 0x2e, 0xb1, 0xae, 0x51, 0x98, 0xb7}}
+	FOLDERID_SystemX86              = &KNOWNFOLDERID{0xd65231b0, 0xb2f1, 0x4857, [8]byte{0xa4, 0xce, 0xa8, 0xe7, 0xc6, 0xea, 0x7d, 0x27}}
+	FOLDERID_Windows                = &KNOWNFOLDERID{0xf38bf404, 0x1d43, 0x42f2, [8]byte{0x93, 0x05, 0x67, 0xde, 0x0b, 0x28, 0xfc, 0x23}}
+	FOLDERID_Profile                = &KNOWNFOLDERID{0x5e6c858f, 0x0e22, 0x4760, [8]byte{0x9a, 0xfe, 0xea, 0x33, 0x17, 0xb6, 0x71, 0x73}}
+	FOLDERID_Pictures               = &KNOWNFOLDERID{0x33e28130, 0x4e1e, 0x4676, [8]byte{0x83, 0x5a, 0x98, 0x39, 0x5c, 0x3b, 0xc3, 0xbb}}
+	FOLDERID_ProgramFilesX86        = &KNOWNFOLDERID{0x7c5a40ef, 0xa0fb, 0x4bfc, [8]byte{0x87, 0x4a, 0xc0, 0xf2, 0xe0, 0xb9, 0xfa, 0x8e}}
+	FOLDERID_ProgramFilesCommonX86  = &KNOWNFOLDERID{0xde974d24, 0xd9c6, 0x4d3e, [8]byte{0xbf, 0x91, 0xf4, 0x45, 0x51, 0x20, 0xb9, 0x17}}
+	FOLDERID_ProgramFilesX64        = &KNOWNFOLDERID{0x6d809377, 0x6af0, 0x444b, [8]byte{0x89, 0x57, 0xa3, 0x77, 0x3f, 0x02, 0x20, 0x0e}}
+	FOLDERID_ProgramFilesCommonX64  = &KNOWNFOLDERID{0x6365d5a7, 0x0f0d, 0x45e5, [8]byte{0x87, 0xf6, 0x0d, 0xa5, 0x6b, 0x6a, 0x4f, 0x7d}}
+	FOLDERID_ProgramFiles           = &KNOWNFOLDERID{0x905e63b6, 0xc1bf, 0x494e, [8]byte{0xb2, 0x9c, 0x65, 0xb7, 0x32, 0xd3, 0xd2, 0x1a}}
+	FOLDERID_ProgramFilesCommon     = &KNOWNFOLDERID{0xf7f1ed05, 0x9f6d, 0x47a2, [8]byte{0xaa, 0xae, 0x29, 0xd3, 0x17, 0xc6, 0xf0, 0x66}}
+	FOLDERID_UserProgramFiles       = &KNOWNFOLDERID{0x5cd7aee2, 0x2219, 0x4a67, [8]byte{0xb8, 0x5d, 0x6c, 0x9c, 0xe1, 0x56, 0x60, 0xcb}}
+	FOLDERID_UserProgramFilesCommon = &KNOWNFOLDERID{0xbcbd3057, 0xca5c, 0x4622, [8]byte{0xb4, 0x2d, 0xbc, 0x56, 0xdb, 0x0a, 0xe5, 0x16}}
+	FOLDERID_AdminTools             = &KNOWNFOLDERID{0x724ef170, 0xa42d, 0x4fef, [8]byte{0x9f, 0x26, 0xb6, 0x0e, 0x84, 0x6f, 0xba, 0x4f}}
+	FOLDERID_CommonAdminTools       = &KNOWNFOLDERID{0xd0384e7d, 0xbac3, 0x4797, [8]byte{0x8f, 0x14, 0xcb, 0xa2, 0x29, 0xb3, 0x92, 0xb5}}
+	FOLDERID_Music                  = &KNOWNFOLDERID{0x4bd8d571, 0x6d19, 0x48d3, [8]byte{0xbe, 0x97, 0x42, 0x22, 0x20, 0x08, 0x0e, 0x43}}
+	FOLDERID_Videos                 = &KNOWNFOLDERID{0x18989b1d, 0x99b5, 0x455b, [8]byte{0x84, 0x1c, 0xab, 0x7c, 0x74, 0xe4, 0xdd, 0xfc}}
+	FOLDERID_Ringtones              = &KNOWNFOLDERID{0xc870044b, 0xf49e, 0x4126, [8]byte{0xa9, 0xc3, 0xb5, 0x2a, 0x1f, 0xf4, 0x11, 0xe8}}
+	FOLDERID_PublicPictures         = &KNOWNFOLDERID{0xb6ebfb86, 0x6907, 0x413c, [8]byte{0x9a, 0xf7, 0x4f, 0xc2, 0xab, 0xf0, 0x7c, 0xc5}}
+	FOLDERID_PublicMusic            = &KNOWNFOLDERID{0x3214fab5, 0x9757, 0x4298, [8]byte{0xbb, 0x61, 0x92, 0xa9, 0xde, 0xaa, 0x44, 0xff}}
+	FOLDERID_PublicVideos           = &KNOWNFOLDERID{0x2400183a, 0x6185, 0x49fb, [8]byte{0xa2, 0xd8, 0x4a, 0x39, 0x2a, 0x60, 0x2b, 0xa3}}
+	FOLDERID_PublicRingtones        = &KNOWNFOLDERID{0xe555ab60, 0x153b, 0x4d17, [8]byte{0x9f, 0x04, 0xa5, 0xfe, 0x99, 0xfc, 0x15, 0xec}}
+	FOLDERID_ResourceDir            = &KNOWNFOLDERID{0x8ad10c31, 0x2adb, 0x4296, [8]byte{0xa8, 0xf7, 0xe4, 0x70, 0x12, 0x32, 0xc9, 0x72}}
+	FOLDERID_LocalizedResourcesDir  = &KNOWNFOLDERID{0x2a00375e, 0x224c, 0x49de, [8]byte{0xb8, 0xd1, 0x44, 0x0d, 0xf7, 0xef, 0x3d, 0xdc}}
+	FOLDERID_CommonOEMLinks         = &KNOWNFOLDERID{0xc1bae2d0, 0x10df, 0x4334, [8]byte{0xbe, 0xdd, 0x7a, 0xa2, 0x0b, 0x22, 0x7a, 0x9d}}
+	FOLDERID_CDBurning              = &KNOWNFOLDERID{0x9e52ab10, 0xf80d, 0x49df, [8]byte{0xac, 0xb8, 0x43, 0x30, 0xf5, 0x68, 0x78, 0x55}}
+	FOLDERID_UserProfiles           = &KNOWNFOLDERID{0x0762d272, 0xc50a, 0x4bb0, [8]byte{0xa3, 0x82, 0x69, 0x7d, 0xcd, 0x72, 0x9b, 0x80}}
+	FOLDERID_Playlists              = &KNOWNFOLDERID{0xde92c1c7, 0x837f, 0x4f69, [8]byte{0xa3, 0xbb, 0x86, 0xe6, 0x31, 0x20, 0x4a, 0x23}}
+	FOLDERID_SamplePlaylists        = &KNOWNFOLDERID{0x15ca69b3, 0x30ee, 0x49c1, [8]byte{0xac, 0xe1, 0x6b, 0x5e, 0xc3, 0x72, 0xaf, 0xb5}}
+	FOLDERID_SampleMusic            = &KNOWNFOLDERID{0xb250c668, 0xf57d, 0x4ee1, [8]byte{0xa6, 0x3c, 0x29, 0x0e, 0xe7, 0xd1, 0xaa, 0x1f}}
+	FOLDERID_SamplePictures         = &KNOWNFOLDERID{0xc4900540, 0x2379, 0x4c75, [8]byte{0x84, 0x4b, 0x64, 0xe6, 0xfa, 0xf8, 0x71, 0x6b}}
+	FOLDERID_SampleVideos           = &KNOWNFOLDERID{0x859ead94, 0x2e85, 0x48ad, [8]byte{0xa7, 0x1a, 0x09, 0x69, 0xcb, 0x56, 0xa6, 0xcd}}
+	FOLDERID_PhotoAlbums            = &KNOWNFOLDERID{0x69d2cf90, 0xfc33, 0x4fb7, [8]byte{0x9a, 0x0c, 0xeb, 0xb0, 0xf0, 0xfc, 0xb4, 0x3c}}
+	FOLDERID_Public                 = &KNOWNFOLDERID{0xdfdf76a2, 0xc82a, 0x4d63, [8]byte{0x90, 0x6a, 0x56, 0x44, 0xac, 0x45, 0x73, 0x85}}
+	FOLDERID_ChangeRemovePrograms   = &KNOWNFOLDERID{0xdf7266ac, 0x9274, 0x4867, [8]byte{0x8d, 0x55, 0x3b, 0xd6, 0x61, 0xde, 0x87, 0x2d}}
+	FOLDERID_AppUpdates             = &KNOWNFOLDERID{0xa305ce99, 0xf527, 0x492b, [8]byte{0x8b, 0x1a, 0x7e, 0x76, 0xfa, 0x98, 0xd6, 0xe4}}
+	FOLDERID_AddNewPrograms         = &KNOWNFOLDERID{0xde61d971, 0x5ebc, 0x4f02, [8]byte{0xa3, 0xa9, 0x6c, 0x82, 0x89, 0x5e, 0x5c, 0x04}}
+	FOLDERID_Downloads              = &KNOWNFOLDERID{0x374de290, 0x123f, 0x4565, [8]byte{0x91, 0x64, 0x39, 0xc4, 0x92, 0x5e, 0x46, 0x7b}}
+	FOLDERID_PublicDownloads        = &KNOWNFOLDERID{0x3d644c9b, 0x1fb8, 0x4f30, [8]byte{0x9b, 0x45, 0xf6, 0x70, 0x23, 0x5f, 0x79, 0xc0}}
+	FOLDERID_SavedSearches          = &KNOWNFOLDERID{0x7d1d3a04, 0xdebb, 0x4115, [8]byte{0x95, 0xcf, 0x2f, 0x29, 0xda, 0x29, 0x20, 0xda}}
+	FOLDERID_QuickLaunch            = &KNOWNFOLDERID{0x52a4f021, 0x7b75, 0x48a9, [8]byte{0x9f, 0x6b, 0x4b, 0x87, 0xa2, 0x10, 0xbc, 0x8f}}
+	FOLDERID_Contacts               = &KNOWNFOLDERID{0x56784854, 0xc6cb, 0x462b, [8]byte{0x81, 0x69, 0x88, 0xe3, 0x50, 0xac, 0xb8, 0x82}}
+	FOLDERID_SidebarParts           = &KNOWNFOLDERID{0xa75d362e, 0x50fc, 0x4fb7, [8]byte{0xac, 0x2c, 0xa8, 0xbe, 0xaa, 0x31, 0x44, 0x93}}
+	FOLDERID_SidebarDefaultParts    = &KNOWNFOLDERID{0x7b396e54, 0x9ec5, 0x4300, [8]byte{0xbe, 0x0a, 0x24, 0x82, 0xeb, 0xae, 0x1a, 0x26}}
+	FOLDERID_PublicGameTasks        = &KNOWNFOLDERID{0xdebf2536, 0xe1a8, 0x4c59, [8]byte{0xb6, 0xa2, 0x41, 0x45, 0x86, 0x47, 0x6a, 0xea}}
+	FOLDERID_GameTasks              = &KNOWNFOLDERID{0x054fae61, 0x4dd8, 0x4787, [8]byte{0x80, 0xb6, 0x09, 0x02, 0x20, 0xc4, 0xb7, 0x00}}
+	FOLDERID_SavedGames             = &KNOWNFOLDERID{0x4c5c32ff, 0xbb9d, 0x43b0, [8]byte{0xb5, 0xb4, 0x2d, 0x72, 0xe5, 0x4e, 0xaa, 0xa4}}
+	FOLDERID_Games                  = &KNOWNFOLDERID{0xcac52c1a, 0xb53d, 0x4edc, [8]byte{0x92, 0xd7, 0x6b, 0x2e, 0x8a, 0xc1, 0x94, 0x34}}
+	FOLDERID_SEARCH_MAPI            = &KNOWNFOLDERID{0x98ec0e18, 0x2098, 0x4d44, [8]byte{0x86, 0x44, 0x66, 0x97, 0x93, 0x15, 0xa2, 0x81}}
+	FOLDERID_SEARCH_CSC             = &KNOWNFOLDERID{0xee32e446, 0x31ca, 0x4aba, [8]byte{0x81, 0x4f, 0xa5, 0xeb, 0xd2, 0xfd, 0x6d, 0x5e}}
+	FOLDERID_Links                  = &KNOWNFOLDERID{0xbfb9d5e0, 0xc6a9, 0x404c, [8]byte{0xb2, 0xb2, 0xae, 0x6d, 0xb6, 0xaf, 0x49, 0x68}}
+	FOLDERID_UsersFiles             = &KNOWNFOLDERID{0xf3ce0f7c, 0x4901, 0x4acc, [8]byte{0x86, 0x48, 0xd5, 0xd4, 0x4b, 0x04, 0xef, 0x8f}}
+	FOLDERID_UsersLibraries         = &KNOWNFOLDERID{0xa302545d, 0xdeff, 0x464b, [8]byte{0xab, 0xe8, 0x61, 0xc8, 0x64, 0x8d, 0x93, 0x9b}}
+	FOLDERID_SearchHome             = &KNOWNFOLDERID{0x190337d1, 0xb8ca, 0x4121, [8]byte{0xa6, 0x39, 0x6d, 0x47, 0x2d, 0x16, 0x97, 0x2a}}
+	FOLDERID_OriginalImages         = &KNOWNFOLDERID{0x2c36c0aa, 0x5812, 0x4b87, [8]byte{0xbf, 0xd0, 0x4c, 0xd0, 0xdf, 0xb1, 0x9b, 0x39}}
+	FOLDERID_DocumentsLibrary       = &KNOWNFOLDERID{0x7b0db17d, 0x9cd2, 0x4a93, [8]byte{0x97, 0x33, 0x46, 0xcc, 0x89, 0x02, 0x2e, 0x7c}}
+	FOLDERID_MusicLibrary           = &KNOWNFOLDERID{0x2112ab0a, 0xc86a, 0x4ffe, [8]byte{0xa3, 0x68, 0x0d, 0xe9, 0x6e, 0x47, 0x01, 0x2e}}
+	FOLDERID_PicturesLibrary        = &KNOWNFOLDERID{0xa990ae9f, 0xa03b, 0x4e80, [8]byte{0x94, 0xbc, 0x99, 0x12, 0xd7, 0x50, 0x41, 0x04}}
+	FOLDERID_VideosLibrary          = &KNOWNFOLDERID{0x491e922f, 0x5643, 0x4af4, [8]byte{0xa7, 0xeb, 0x4e, 0x7a, 0x13, 0x8d, 0x81, 0x74}}
+	FOLDERID_RecordedTVLibrary      = &KNOWNFOLDERID{0x1a6fdba2, 0xf42d, 0x4358, [8]byte{0xa7, 0x98, 0xb7, 0x4d, 0x74, 0x59, 0x26, 0xc5}}
+	FOLDERID_HomeGroup              = &KNOWNFOLDERID{0x52528a6b, 0xb9e3, 0x4add, [8]byte{0xb6, 0x0d, 0x58, 0x8c, 0x2d, 0xba, 0x84, 0x2d}}
+	FOLDERID_HomeGroupCurrentUser   = &KNOWNFOLDERID{0x9b74b6a3, 0x0dfd, 0x4f11, [8]byte{0x9e, 0x78, 0x5f, 0x78, 0x00, 0xf2, 0xe7, 0x72}}
+	FOLDERID_DeviceMetadataStore    = &KNOWNFOLDERID{0x5ce4a5e9, 0xe4eb, 0x479d, [8]byte{0xb8, 0x9f, 0x13, 0x0c, 0x02, 0x88, 0x61, 0x55}}
+	FOLDERID_Libraries              = &KNOWNFOLDERID{0x1b3ea5dc, 0xb587, 0x4786, [8]byte{0xb4, 0xef, 0xbd, 0x1d, 0xc3, 0x32, 0xae, 0xae}}
+	FOLDERID_PublicLibraries        = &KNOWNFOLDERID{0x48daf80b, 0xe6cf, 0x4f4e, [8]byte{0xb8, 0x00, 0x0e, 0x69, 0xd8, 0x4e, 0xe3, 0x84}}
+	FOLDERID_UserPinned             = &KNOWNFOLDERID{0x9e3995ab, 0x1f9c, 0x4f13, [8]byte{0xb8, 0x27, 0x48, 0xb2, 0x4b, 0x6c, 0x71, 0x74}}
+	FOLDERID_ImplicitAppShortcuts   = &KNOWNFOLDERID{0xbcb5256f, 0x79f6, 0x4cee, [8]byte{0xb7, 0x25, 0xdc, 0x34, 0xe4, 0x02, 0xfd, 0x46}}
+	FOLDERID_AccountPictures        = &KNOWNFOLDERID{0x008ca0b1, 0x55b4, 0x4c56, [8]byte{0xb8, 0xa8, 0x4d, 0xe4, 0xb2, 0x99, 0xd3, 0xbe}}
+	FOLDERID_PublicUserTiles        = &KNOWNFOLDERID{0x0482af6c, 0x08f1, 0x4c34, [8]byte{0x8c, 0x90, 0xe1, 0x7e, 0xc9, 0x8b, 0x1e, 0x17}}
+	FOLDERID_AppsFolder             = &KNOWNFOLDERID{0x1e87508d, 0x89c2, 0x42f0, [8]byte{0x8a, 0x7e, 0x64, 0x5a, 0x0f, 0x50, 0xca, 0x58}}
+	FOLDERID_StartMenuAllPrograms   = &KNOWNFOLDERID{0xf26305ef, 0x6948, 0x40b9, [8]byte{0xb2, 0x55, 0x81, 0x45, 0x3d, 0x09, 0xc7, 0x85}}
+	FOLDERID_CommonStartMenuPlaces  = &KNOWNFOLDERID{0xa440879f, 0x87a0, 0x4f7d, [8]byte{0xb7, 0x00, 0x02, 0x07, 0xb9, 0x66, 0x19, 0x4a}}
+	FOLDERID_ApplicationShortcuts   = &KNOWNFOLDERID{0xa3918781, 0xe5f2, 0x4890, [8]byte{0xb3, 0xd9, 0xa7, 0xe5, 0x43, 0x32, 0x32, 0x8c}}
+	FOLDERID_RoamingTiles           = &KNOWNFOLDERID{0x00bcfc5a, 0xed94, 0x4e48, [8]byte{0x96, 0xa1, 0x3f, 0x62, 0x17, 0xf2, 0x19, 0x90}}
+	FOLDERID_RoamedTileImages       = &KNOWNFOLDERID{0xaaa8d5a5, 0xf1d6, 0x4259, [8]byte{0xba, 0xa8, 0x78, 0xe7, 0xef, 0x60, 0x83, 0x5e}}
+	FOLDERID_Screenshots            = &KNOWNFOLDERID{0xb7bede81, 0xdf94, 0x4682, [8]byte{0xa7, 0xd8, 0x57, 0xa5, 0x26, 0x20, 0xb8, 0x6f}}
+	FOLDERID_CameraRoll             = &KNOWNFOLDERID{0xab5fb87b, 0x7ce2, 0x4f83, [8]byte{0x91, 0x5d, 0x55, 0x08, 0x46, 0xc9, 0x53, 0x7b}}
+	FOLDERID_SkyDrive               = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}}
+	FOLDERID_OneDrive               = &KNOWNFOLDERID{0xa52bba46, 0xe9e1, 0x435f, [8]byte{0xb3, 0xd9, 0x28, 0xda, 0xa6, 0x48, 0xc0, 0xf6}}
+	FOLDERID_SkyDriveDocuments      = &KNOWNFOLDERID{0x24d89e24, 0x2f19, 0x4534, [8]byte{0x9d, 0xde, 0x6a, 0x66, 0x71, 0xfb, 0xb8, 0xfe}}
+	FOLDERID_SkyDrivePictures       = &KNOWNFOLDERID{0x339719b5, 0x8c47, 0x4894, [8]byte{0x94, 0xc2, 0xd8, 0xf7, 0x7a, 0xdd, 0x44, 0xa6}}
+	FOLDERID_SkyDriveMusic          = &KNOWNFOLDERID{0xc3f2459e, 0x80d6, 0x45dc, [8]byte{0xbf, 0xef, 0x1f, 0x76, 0x9f, 0x2b, 0xe7, 0x30}}
+	FOLDERID_SkyDriveCameraRoll     = &KNOWNFOLDERID{0x767e6811, 0x49cb, 0x4273, [8]byte{0x87, 0xc2, 0x20, 0xf3, 0x55, 0xe1, 0x08, 0x5b}}
+	FOLDERID_SearchHistory          = &KNOWNFOLDERID{0x0d4c3db6, 0x03a3, 0x462f, [8]byte{0xa0, 0xe6, 0x08, 0x92, 0x4c, 0x41, 0xb5, 0xd4}}
+	FOLDERID_SearchTemplates        = &KNOWNFOLDERID{0x7e636bfe, 0xdfa9, 0x4d5e, [8]byte{0xb4, 0x56, 0xd7, 0xb3, 0x98, 0x51, 0xd8, 0xa9}}
+	FOLDERID_CameraRollLibrary      = &KNOWNFOLDERID{0x2b20df75, 0x1eda, 0x4039, [8]byte{0x80, 0x97, 0x38, 0x79, 0x82, 0x27, 0xd5, 0xb7}}
+	FOLDERID_SavedPictures          = &KNOWNFOLDERID{0x3b193882, 0xd3ad, 0x4eab, [8]byte{0x96, 0x5a, 0x69, 0x82, 0x9d, 0x1f, 0xb5, 0x9f}}
+	FOLDERID_SavedPicturesLibrary   = &KNOWNFOLDERID{0xe25b5812, 0xbe88, 0x4bd9, [8]byte{0x94, 0xb0, 0x29, 0x23, 0x34, 0x77, 0xb6, 0xc3}}
+	FOLDERID_RetailDemo             = &KNOWNFOLDERID{0x12d4c69e, 0x24ad, 0x4923, [8]byte{0xbe, 0x19, 0x31, 0x32, 0x1c, 0x43, 0xa7, 0x67}}
+	FOLDERID_Device                 = &KNOWNFOLDERID{0x1c2ac1dc, 0x4358, 0x4b6c, [8]byte{0x97, 0x33, 0xaf, 0x21, 0x15, 0x65, 0x76, 0xf0}}
+	FOLDERID_DevelopmentFiles       = &KNOWNFOLDERID{0xdbe8e08e, 0x3053, 0x4bbc, [8]byte{0xb1, 0x83, 0x2a, 0x7b, 0x2b, 0x19, 0x1e, 0x59}}
+	FOLDERID_Objects3D              = &KNOWNFOLDERID{0x31c0dd25, 0x9439, 0x4f12, [8]byte{0xbf, 0x41, 0x7f, 0xf4, 0xed, 0xa3, 0x87, 0x22}}
+	FOLDERID_AppCaptures            = &KNOWNFOLDERID{0xedc0fe71, 0x98d8, 0x4f4a, [8]byte{0xb9, 0x20, 0xc8, 0xdc, 0x13, 0x3c, 0xb1, 0x65}}
+	FOLDERID_LocalDocuments         = &KNOWNFOLDERID{0xf42ee2d3, 0x909f, 0x4907, [8]byte{0x88, 0x71, 0x4c, 0x22, 0xfc, 0x0b, 0xf7, 0x56}}
+	FOLDERID_LocalPictures          = &KNOWNFOLDERID{0x0ddd015d, 0xb06c, 0x45d5, [8]byte{0x8c, 0x4c, 0xf5, 0x97, 0x13, 0x85, 0x46, 0x39}}
+	FOLDERID_LocalVideos            = &KNOWNFOLDERID{0x35286a68, 0x3c57, 0x41a1, [8]byte{0xbb, 0xb1, 0x0e, 0xae, 0x73, 0xd7, 0x6c, 0x95}}
+	FOLDERID_LocalMusic             = &KNOWNFOLDERID{0xa0c69a99, 0x21c8, 0x4671, [8]byte{0x87, 0x03, 0x79, 0x34, 0x16, 0x2f, 0xcf, 0x1d}}
+	FOLDERID_LocalDownloads         = &KNOWNFOLDERID{0x7d83ee9b, 0x2244, 0x4e70, [8]byte{0xb1, 0xf5, 0x53, 0x93, 0x04, 0x2a, 0xf1, 0xe4}}
+	FOLDERID_RecordedCalls          = &KNOWNFOLDERID{0x2f8b40c2, 0x83ed, 0x48ee, [8]byte{0xb3, 0x83, 0xa1, 0xf1, 0x57, 0xec, 0x6f, 0x9a}}
+	FOLDERID_AllAppMods             = &KNOWNFOLDERID{0x7ad67899, 0x66af, 0x43ba, [8]byte{0x91, 0x56, 0x6a, 0xad, 0x42, 0xe6, 0xc5, 0x96}}
+	FOLDERID_CurrentAppMods         = &KNOWNFOLDERID{0x3db40b20, 0x2a30, 0x4dbe, [8]byte{0x91, 0x7e, 0x77, 0x1d, 0xd2, 0x1d, 0xd0, 0x99}}
+	FOLDERID_AppDataDesktop         = &KNOWNFOLDERID{0xb2c5e279, 0x7add, 0x439f, [8]byte{0xb2, 0x8c, 0xc4, 0x1f, 0xe1, 0xbb, 0xf6, 0x72}}
+	FOLDERID_AppDataDocuments       = &KNOWNFOLDERID{0x7be16610, 0x1f7f, 0x44ac, [8]byte{0xbf, 0xf0, 0x83, 0xe1, 0x5f, 0x2f, 0xfc, 0xa1}}
+	FOLDERID_AppDataFavorites       = &KNOWNFOLDERID{0x7cfbefbc, 0xde1f, 0x45aa, [8]byte{0xb8, 0x43, 0xa5, 0x42, 0xac, 0x53, 0x6c, 0xc9}}
+	FOLDERID_AppDataProgramData     = &KNOWNFOLDERID{0x559d40a3, 0xa036, 0x40fa, [8]byte{0xaf, 0x61, 0x84, 0xcb, 0x43, 0x0a, 0x4d, 0x34}}
+)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
new file mode 100644
index 0000000..426151a
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -0,0 +1,4713 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package windows
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+	return e
+}
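+
+// Note: the boxing above matters because converting a syscall.Errno to
+// the error interface normally allocates. A minimal illustrative call
+// site (hypothetical; procFoo is a placeholder, not a proc defined in
+// this file) would look like:
+//
+//	r1, _, e1 := syscall.Syscall(procFoo.Addr(), 0, 0, 0, 0)
+//	if r1 == 0 {
+//		err = errnoErr(e1) // returns a pre-boxed error; no per-call allocation
+//	}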
+
+var (
+	modCfgMgr32 = NewLazySystemDLL("CfgMgr32.dll")
+	modadvapi32 = NewLazySystemDLL("advapi32.dll")
+	modcrypt32  = NewLazySystemDLL("crypt32.dll")
+	moddnsapi   = NewLazySystemDLL("dnsapi.dll")
+	moddwmapi   = NewLazySystemDLL("dwmapi.dll")
+	modiphlpapi = NewLazySystemDLL("iphlpapi.dll")
+	modkernel32 = NewLazySystemDLL("kernel32.dll")
+	modmswsock  = NewLazySystemDLL("mswsock.dll")
+	modnetapi32 = NewLazySystemDLL("netapi32.dll")
+	modntdll    = NewLazySystemDLL("ntdll.dll")
+	modole32    = NewLazySystemDLL("ole32.dll")
+	modpsapi    = NewLazySystemDLL("psapi.dll")
+	modsechost  = NewLazySystemDLL("sechost.dll")
+	modsecur32  = NewLazySystemDLL("secur32.dll")
+	modsetupapi = NewLazySystemDLL("setupapi.dll")
+	modshell32  = NewLazySystemDLL("shell32.dll")
+	moduser32   = NewLazySystemDLL("user32.dll")
+	moduserenv  = NewLazySystemDLL("userenv.dll")
+	modversion  = NewLazySystemDLL("version.dll")
+	modwinmm    = NewLazySystemDLL("winmm.dll")
+	modwintrust = NewLazySystemDLL("wintrust.dll")
+	modws2_32   = NewLazySystemDLL("ws2_32.dll")
+	modwtsapi32 = NewLazySystemDLL("wtsapi32.dll")
+
+	procCM_Get_DevNode_Status                                = modCfgMgr32.NewProc("CM_Get_DevNode_Status")
+	procCM_Get_Device_Interface_ListW                        = modCfgMgr32.NewProc("CM_Get_Device_Interface_ListW")
+	procCM_Get_Device_Interface_List_SizeW                   = modCfgMgr32.NewProc("CM_Get_Device_Interface_List_SizeW")
+	procCM_MapCrToWin32Err                                   = modCfgMgr32.NewProc("CM_MapCrToWin32Err")
+	procAdjustTokenGroups                                    = modadvapi32.NewProc("AdjustTokenGroups")
+	procAdjustTokenPrivileges                                = modadvapi32.NewProc("AdjustTokenPrivileges")
+	procAllocateAndInitializeSid                             = modadvapi32.NewProc("AllocateAndInitializeSid")
+	procBuildSecurityDescriptorW                             = modadvapi32.NewProc("BuildSecurityDescriptorW")
+	procChangeServiceConfig2W                                = modadvapi32.NewProc("ChangeServiceConfig2W")
+	procChangeServiceConfigW                                 = modadvapi32.NewProc("ChangeServiceConfigW")
+	procCheckTokenMembership                                 = modadvapi32.NewProc("CheckTokenMembership")
+	procCloseServiceHandle                                   = modadvapi32.NewProc("CloseServiceHandle")
+	procControlService                                       = modadvapi32.NewProc("ControlService")
+	procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
+	procConvertSidToStringSidW                               = modadvapi32.NewProc("ConvertSidToStringSidW")
+	procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
+	procConvertStringSidToSidW                               = modadvapi32.NewProc("ConvertStringSidToSidW")
+	procCopySid                                              = modadvapi32.NewProc("CopySid")
+	procCreateProcessAsUserW                                 = modadvapi32.NewProc("CreateProcessAsUserW")
+	procCreateServiceW                                       = modadvapi32.NewProc("CreateServiceW")
+	procCreateWellKnownSid                                   = modadvapi32.NewProc("CreateWellKnownSid")
+	procCryptAcquireContextW                                 = modadvapi32.NewProc("CryptAcquireContextW")
+	procCryptGenRandom                                       = modadvapi32.NewProc("CryptGenRandom")
+	procCryptReleaseContext                                  = modadvapi32.NewProc("CryptReleaseContext")
+	procDeleteService                                        = modadvapi32.NewProc("DeleteService")
+	procDeregisterEventSource                                = modadvapi32.NewProc("DeregisterEventSource")
+	procDuplicateTokenEx                                     = modadvapi32.NewProc("DuplicateTokenEx")
+	procEnumDependentServicesW                               = modadvapi32.NewProc("EnumDependentServicesW")
+	procEnumServicesStatusExW                                = modadvapi32.NewProc("EnumServicesStatusExW")
+	procEqualSid                                             = modadvapi32.NewProc("EqualSid")
+	procFreeSid                                              = modadvapi32.NewProc("FreeSid")
+	procGetAce                                               = modadvapi32.NewProc("GetAce")
+	procGetLengthSid                                         = modadvapi32.NewProc("GetLengthSid")
+	procGetNamedSecurityInfoW                                = modadvapi32.NewProc("GetNamedSecurityInfoW")
+	procGetSecurityDescriptorControl                         = modadvapi32.NewProc("GetSecurityDescriptorControl")
+	procGetSecurityDescriptorDacl                            = modadvapi32.NewProc("GetSecurityDescriptorDacl")
+	procGetSecurityDescriptorGroup                           = modadvapi32.NewProc("GetSecurityDescriptorGroup")
+	procGetSecurityDescriptorLength                          = modadvapi32.NewProc("GetSecurityDescriptorLength")
+	procGetSecurityDescriptorOwner                           = modadvapi32.NewProc("GetSecurityDescriptorOwner")
+	procGetSecurityDescriptorRMControl                       = modadvapi32.NewProc("GetSecurityDescriptorRMControl")
+	procGetSecurityDescriptorSacl                            = modadvapi32.NewProc("GetSecurityDescriptorSacl")
+	procGetSecurityInfo                                      = modadvapi32.NewProc("GetSecurityInfo")
+	procGetSidIdentifierAuthority                            = modadvapi32.NewProc("GetSidIdentifierAuthority")
+	procGetSidSubAuthority                                   = modadvapi32.NewProc("GetSidSubAuthority")
+	procGetSidSubAuthorityCount                              = modadvapi32.NewProc("GetSidSubAuthorityCount")
+	procGetTokenInformation                                  = modadvapi32.NewProc("GetTokenInformation")
+	procImpersonateSelf                                      = modadvapi32.NewProc("ImpersonateSelf")
+	procInitializeSecurityDescriptor                         = modadvapi32.NewProc("InitializeSecurityDescriptor")
+	procInitiateSystemShutdownExW                            = modadvapi32.NewProc("InitiateSystemShutdownExW")
+	procIsTokenRestricted                                    = modadvapi32.NewProc("IsTokenRestricted")
+	procIsValidSecurityDescriptor                            = modadvapi32.NewProc("IsValidSecurityDescriptor")
+	procIsValidSid                                           = modadvapi32.NewProc("IsValidSid")
+	procIsWellKnownSid                                       = modadvapi32.NewProc("IsWellKnownSid")
+	procLookupAccountNameW                                   = modadvapi32.NewProc("LookupAccountNameW")
+	procLookupAccountSidW                                    = modadvapi32.NewProc("LookupAccountSidW")
+	procLookupPrivilegeValueW                                = modadvapi32.NewProc("LookupPrivilegeValueW")
+	procMakeAbsoluteSD                                       = modadvapi32.NewProc("MakeAbsoluteSD")
+	procMakeSelfRelativeSD                                   = modadvapi32.NewProc("MakeSelfRelativeSD")
+	procNotifyServiceStatusChangeW                           = modadvapi32.NewProc("NotifyServiceStatusChangeW")
+	procOpenProcessToken                                     = modadvapi32.NewProc("OpenProcessToken")
+	procOpenSCManagerW                                       = modadvapi32.NewProc("OpenSCManagerW")
+	procOpenServiceW                                         = modadvapi32.NewProc("OpenServiceW")
+	procOpenThreadToken                                      = modadvapi32.NewProc("OpenThreadToken")
+	procQueryServiceConfig2W                                 = modadvapi32.NewProc("QueryServiceConfig2W")
+	procQueryServiceConfigW                                  = modadvapi32.NewProc("QueryServiceConfigW")
+	procQueryServiceDynamicInformation                       = modadvapi32.NewProc("QueryServiceDynamicInformation")
+	procQueryServiceLockStatusW                              = modadvapi32.NewProc("QueryServiceLockStatusW")
+	procQueryServiceStatus                                   = modadvapi32.NewProc("QueryServiceStatus")
+	procQueryServiceStatusEx                                 = modadvapi32.NewProc("QueryServiceStatusEx")
+	procRegCloseKey                                          = modadvapi32.NewProc("RegCloseKey")
+	procRegEnumKeyExW                                        = modadvapi32.NewProc("RegEnumKeyExW")
+	procRegNotifyChangeKeyValue                              = modadvapi32.NewProc("RegNotifyChangeKeyValue")
+	procRegOpenKeyExW                                        = modadvapi32.NewProc("RegOpenKeyExW")
+	procRegQueryInfoKeyW                                     = modadvapi32.NewProc("RegQueryInfoKeyW")
+	procRegQueryValueExW                                     = modadvapi32.NewProc("RegQueryValueExW")
+	procRegisterEventSourceW                                 = modadvapi32.NewProc("RegisterEventSourceW")
+	procRegisterServiceCtrlHandlerExW                        = modadvapi32.NewProc("RegisterServiceCtrlHandlerExW")
+	procReportEventW                                         = modadvapi32.NewProc("ReportEventW")
+	procRevertToSelf                                         = modadvapi32.NewProc("RevertToSelf")
+	procSetEntriesInAclW                                     = modadvapi32.NewProc("SetEntriesInAclW")
+	procSetKernelObjectSecurity                              = modadvapi32.NewProc("SetKernelObjectSecurity")
+	procSetNamedSecurityInfoW                                = modadvapi32.NewProc("SetNamedSecurityInfoW")
+	procSetSecurityDescriptorControl                         = modadvapi32.NewProc("SetSecurityDescriptorControl")
+	procSetSecurityDescriptorDacl                            = modadvapi32.NewProc("SetSecurityDescriptorDacl")
+	procSetSecurityDescriptorGroup                           = modadvapi32.NewProc("SetSecurityDescriptorGroup")
+	procSetSecurityDescriptorOwner                           = modadvapi32.NewProc("SetSecurityDescriptorOwner")
+	procSetSecurityDescriptorRMControl                       = modadvapi32.NewProc("SetSecurityDescriptorRMControl")
+	procSetSecurityDescriptorSacl                            = modadvapi32.NewProc("SetSecurityDescriptorSacl")
+	procSetSecurityInfo                                      = modadvapi32.NewProc("SetSecurityInfo")
+	procSetServiceStatus                                     = modadvapi32.NewProc("SetServiceStatus")
+	procSetThreadToken                                       = modadvapi32.NewProc("SetThreadToken")
+	procSetTokenInformation                                  = modadvapi32.NewProc("SetTokenInformation")
+	procStartServiceCtrlDispatcherW                          = modadvapi32.NewProc("StartServiceCtrlDispatcherW")
+	procStartServiceW                                        = modadvapi32.NewProc("StartServiceW")
+	procCertAddCertificateContextToStore                     = modcrypt32.NewProc("CertAddCertificateContextToStore")
+	procCertCloseStore                                       = modcrypt32.NewProc("CertCloseStore")
+	procCertCreateCertificateContext                         = modcrypt32.NewProc("CertCreateCertificateContext")
+	procCertDeleteCertificateFromStore                       = modcrypt32.NewProc("CertDeleteCertificateFromStore")
+	procCertDuplicateCertificateContext                      = modcrypt32.NewProc("CertDuplicateCertificateContext")
+	procCertEnumCertificatesInStore                          = modcrypt32.NewProc("CertEnumCertificatesInStore")
+	procCertFindCertificateInStore                           = modcrypt32.NewProc("CertFindCertificateInStore")
+	procCertFindChainInStore                                 = modcrypt32.NewProc("CertFindChainInStore")
+	procCertFindExtension                                    = modcrypt32.NewProc("CertFindExtension")
+	procCertFreeCertificateChain                             = modcrypt32.NewProc("CertFreeCertificateChain")
+	procCertFreeCertificateContext                           = modcrypt32.NewProc("CertFreeCertificateContext")
+	procCertGetCertificateChain                              = modcrypt32.NewProc("CertGetCertificateChain")
+	procCertGetNameStringW                                   = modcrypt32.NewProc("CertGetNameStringW")
+	procCertOpenStore                                        = modcrypt32.NewProc("CertOpenStore")
+	procCertOpenSystemStoreW                                 = modcrypt32.NewProc("CertOpenSystemStoreW")
+	procCertVerifyCertificateChainPolicy                     = modcrypt32.NewProc("CertVerifyCertificateChainPolicy")
+	procCryptAcquireCertificatePrivateKey                    = modcrypt32.NewProc("CryptAcquireCertificatePrivateKey")
+	procCryptDecodeObject                                    = modcrypt32.NewProc("CryptDecodeObject")
+	procCryptProtectData                                     = modcrypt32.NewProc("CryptProtectData")
+	procCryptQueryObject                                     = modcrypt32.NewProc("CryptQueryObject")
+	procCryptUnprotectData                                   = modcrypt32.NewProc("CryptUnprotectData")
+	procPFXImportCertStore                                   = modcrypt32.NewProc("PFXImportCertStore")
+	procDnsNameCompare_W                                     = moddnsapi.NewProc("DnsNameCompare_W")
+	procDnsQuery_W                                           = moddnsapi.NewProc("DnsQuery_W")
+	procDnsRecordListFree                                    = moddnsapi.NewProc("DnsRecordListFree")
+	procDwmGetWindowAttribute                                = moddwmapi.NewProc("DwmGetWindowAttribute")
+	procDwmSetWindowAttribute                                = moddwmapi.NewProc("DwmSetWindowAttribute")
+	procCancelMibChangeNotify2                               = modiphlpapi.NewProc("CancelMibChangeNotify2")
+	procGetAdaptersAddresses                                 = modiphlpapi.NewProc("GetAdaptersAddresses")
+	procGetAdaptersInfo                                      = modiphlpapi.NewProc("GetAdaptersInfo")
+	procGetBestInterfaceEx                                   = modiphlpapi.NewProc("GetBestInterfaceEx")
+	procGetIfEntry                                           = modiphlpapi.NewProc("GetIfEntry")
+	procGetIfEntry2Ex                                        = modiphlpapi.NewProc("GetIfEntry2Ex")
+	procGetUnicastIpAddressEntry                             = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
+	procNotifyIpInterfaceChange                              = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+	procNotifyUnicastIpAddressChange                         = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
+	procAddDllDirectory                                      = modkernel32.NewProc("AddDllDirectory")
+	procAssignProcessToJobObject                             = modkernel32.NewProc("AssignProcessToJobObject")
+	procCancelIo                                             = modkernel32.NewProc("CancelIo")
+	procCancelIoEx                                           = modkernel32.NewProc("CancelIoEx")
+	procClearCommBreak                                       = modkernel32.NewProc("ClearCommBreak")
+	procClearCommError                                       = modkernel32.NewProc("ClearCommError")
+	procCloseHandle                                          = modkernel32.NewProc("CloseHandle")
+	procClosePseudoConsole                                   = modkernel32.NewProc("ClosePseudoConsole")
+	procConnectNamedPipe                                     = modkernel32.NewProc("ConnectNamedPipe")
+	procCreateDirectoryW                                     = modkernel32.NewProc("CreateDirectoryW")
+	procCreateEventExW                                       = modkernel32.NewProc("CreateEventExW")
+	procCreateEventW                                         = modkernel32.NewProc("CreateEventW")
+	procCreateFileMappingW                                   = modkernel32.NewProc("CreateFileMappingW")
+	procCreateFileW                                          = modkernel32.NewProc("CreateFileW")
+	procCreateHardLinkW                                      = modkernel32.NewProc("CreateHardLinkW")
+	procCreateIoCompletionPort                               = modkernel32.NewProc("CreateIoCompletionPort")
+	procCreateJobObjectW                                     = modkernel32.NewProc("CreateJobObjectW")
+	procCreateMutexExW                                       = modkernel32.NewProc("CreateMutexExW")
+	procCreateMutexW                                         = modkernel32.NewProc("CreateMutexW")
+	procCreateNamedPipeW                                     = modkernel32.NewProc("CreateNamedPipeW")
+	procCreatePipe                                           = modkernel32.NewProc("CreatePipe")
+	procCreateProcessW                                       = modkernel32.NewProc("CreateProcessW")
+	procCreatePseudoConsole                                  = modkernel32.NewProc("CreatePseudoConsole")
+	procCreateSymbolicLinkW                                  = modkernel32.NewProc("CreateSymbolicLinkW")
+	procCreateToolhelp32Snapshot                             = modkernel32.NewProc("CreateToolhelp32Snapshot")
+	procDefineDosDeviceW                                     = modkernel32.NewProc("DefineDosDeviceW")
+	procDeleteFileW                                          = modkernel32.NewProc("DeleteFileW")
+	procDeleteProcThreadAttributeList                        = modkernel32.NewProc("DeleteProcThreadAttributeList")
+	procDeleteVolumeMountPointW                              = modkernel32.NewProc("DeleteVolumeMountPointW")
+	procDeviceIoControl                                      = modkernel32.NewProc("DeviceIoControl")
+	procDisconnectNamedPipe                                  = modkernel32.NewProc("DisconnectNamedPipe")
+	procDuplicateHandle                                      = modkernel32.NewProc("DuplicateHandle")
+	procEscapeCommFunction                                   = modkernel32.NewProc("EscapeCommFunction")
+	procExitProcess                                          = modkernel32.NewProc("ExitProcess")
+	procExpandEnvironmentStringsW                            = modkernel32.NewProc("ExpandEnvironmentStringsW")
+	procFindClose                                            = modkernel32.NewProc("FindClose")
+	procFindCloseChangeNotification                          = modkernel32.NewProc("FindCloseChangeNotification")
+	procFindFirstChangeNotificationW                         = modkernel32.NewProc("FindFirstChangeNotificationW")
+	procFindFirstFileW                                       = modkernel32.NewProc("FindFirstFileW")
+	procFindFirstVolumeMountPointW                           = modkernel32.NewProc("FindFirstVolumeMountPointW")
+	procFindFirstVolumeW                                     = modkernel32.NewProc("FindFirstVolumeW")
+	procFindNextChangeNotification                           = modkernel32.NewProc("FindNextChangeNotification")
+	procFindNextFileW                                        = modkernel32.NewProc("FindNextFileW")
+	procFindNextVolumeMountPointW                            = modkernel32.NewProc("FindNextVolumeMountPointW")
+	procFindNextVolumeW                                      = modkernel32.NewProc("FindNextVolumeW")
+	procFindResourceW                                        = modkernel32.NewProc("FindResourceW")
+	procFindVolumeClose                                      = modkernel32.NewProc("FindVolumeClose")
+	procFindVolumeMountPointClose                            = modkernel32.NewProc("FindVolumeMountPointClose")
+	procFlushConsoleInputBuffer                              = modkernel32.NewProc("FlushConsoleInputBuffer")
+	procFlushFileBuffers                                     = modkernel32.NewProc("FlushFileBuffers")
+	procFlushViewOfFile                                      = modkernel32.NewProc("FlushViewOfFile")
+	procFormatMessageW                                       = modkernel32.NewProc("FormatMessageW")
+	procFreeEnvironmentStringsW                              = modkernel32.NewProc("FreeEnvironmentStringsW")
+	procFreeLibrary                                          = modkernel32.NewProc("FreeLibrary")
+	procGenerateConsoleCtrlEvent                             = modkernel32.NewProc("GenerateConsoleCtrlEvent")
+	procGetACP                                               = modkernel32.NewProc("GetACP")
+	procGetActiveProcessorCount                              = modkernel32.NewProc("GetActiveProcessorCount")
+	procGetCommModemStatus                                   = modkernel32.NewProc("GetCommModemStatus")
+	procGetCommState                                         = modkernel32.NewProc("GetCommState")
+	procGetCommTimeouts                                      = modkernel32.NewProc("GetCommTimeouts")
+	procGetCommandLineW                                      = modkernel32.NewProc("GetCommandLineW")
+	procGetComputerNameExW                                   = modkernel32.NewProc("GetComputerNameExW")
+	procGetComputerNameW                                     = modkernel32.NewProc("GetComputerNameW")
+	procGetConsoleCP                                         = modkernel32.NewProc("GetConsoleCP")
+	procGetConsoleMode                                       = modkernel32.NewProc("GetConsoleMode")
+	procGetConsoleOutputCP                                   = modkernel32.NewProc("GetConsoleOutputCP")
+	procGetConsoleScreenBufferInfo                           = modkernel32.NewProc("GetConsoleScreenBufferInfo")
+	procGetCurrentDirectoryW                                 = modkernel32.NewProc("GetCurrentDirectoryW")
+	procGetCurrentProcessId                                  = modkernel32.NewProc("GetCurrentProcessId")
+	procGetCurrentThreadId                                   = modkernel32.NewProc("GetCurrentThreadId")
+	procGetDiskFreeSpaceExW                                  = modkernel32.NewProc("GetDiskFreeSpaceExW")
+	procGetDriveTypeW                                        = modkernel32.NewProc("GetDriveTypeW")
+	procGetEnvironmentStringsW                               = modkernel32.NewProc("GetEnvironmentStringsW")
+	procGetEnvironmentVariableW                              = modkernel32.NewProc("GetEnvironmentVariableW")
+	procGetExitCodeProcess                                   = modkernel32.NewProc("GetExitCodeProcess")
+	procGetFileAttributesExW                                 = modkernel32.NewProc("GetFileAttributesExW")
+	procGetFileAttributesW                                   = modkernel32.NewProc("GetFileAttributesW")
+	procGetFileInformationByHandle                           = modkernel32.NewProc("GetFileInformationByHandle")
+	procGetFileInformationByHandleEx                         = modkernel32.NewProc("GetFileInformationByHandleEx")
+	procGetFileTime                                          = modkernel32.NewProc("GetFileTime")
+	procGetFileType                                          = modkernel32.NewProc("GetFileType")
+	procGetFinalPathNameByHandleW                            = modkernel32.NewProc("GetFinalPathNameByHandleW")
+	procGetFullPathNameW                                     = modkernel32.NewProc("GetFullPathNameW")
+	procGetLargePageMinimum                                  = modkernel32.NewProc("GetLargePageMinimum")
+	procGetLastError                                         = modkernel32.NewProc("GetLastError")
+	procGetLogicalDriveStringsW                              = modkernel32.NewProc("GetLogicalDriveStringsW")
+	procGetLogicalDrives                                     = modkernel32.NewProc("GetLogicalDrives")
+	procGetLongPathNameW                                     = modkernel32.NewProc("GetLongPathNameW")
+	procGetMaximumProcessorCount                             = modkernel32.NewProc("GetMaximumProcessorCount")
+	procGetModuleFileNameW                                   = modkernel32.NewProc("GetModuleFileNameW")
+	procGetModuleHandleExW                                   = modkernel32.NewProc("GetModuleHandleExW")
+	procGetNamedPipeClientProcessId                          = modkernel32.NewProc("GetNamedPipeClientProcessId")
+	procGetNamedPipeHandleStateW                             = modkernel32.NewProc("GetNamedPipeHandleStateW")
+	procGetNamedPipeInfo                                     = modkernel32.NewProc("GetNamedPipeInfo")
+	procGetNamedPipeServerProcessId                          = modkernel32.NewProc("GetNamedPipeServerProcessId")
+	procGetNumberOfConsoleInputEvents                        = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
+	procGetOverlappedResult                                  = modkernel32.NewProc("GetOverlappedResult")
+	procGetPriorityClass                                     = modkernel32.NewProc("GetPriorityClass")
+	procGetProcAddress                                       = modkernel32.NewProc("GetProcAddress")
+	procGetProcessId                                         = modkernel32.NewProc("GetProcessId")
+	procGetProcessPreferredUILanguages                       = modkernel32.NewProc("GetProcessPreferredUILanguages")
+	procGetProcessShutdownParameters                         = modkernel32.NewProc("GetProcessShutdownParameters")
+	procGetProcessTimes                                      = modkernel32.NewProc("GetProcessTimes")
+	procGetProcessWorkingSetSizeEx                           = modkernel32.NewProc("GetProcessWorkingSetSizeEx")
+	procGetQueuedCompletionStatus                            = modkernel32.NewProc("GetQueuedCompletionStatus")
+	procGetShortPathNameW                                    = modkernel32.NewProc("GetShortPathNameW")
+	procGetStartupInfoW                                      = modkernel32.NewProc("GetStartupInfoW")
+	procGetStdHandle                                         = modkernel32.NewProc("GetStdHandle")
+	procGetSystemDirectoryW                                  = modkernel32.NewProc("GetSystemDirectoryW")
+	procGetSystemPreferredUILanguages                        = modkernel32.NewProc("GetSystemPreferredUILanguages")
+	procGetSystemTimeAsFileTime                              = modkernel32.NewProc("GetSystemTimeAsFileTime")
+	procGetSystemTimePreciseAsFileTime                       = modkernel32.NewProc("GetSystemTimePreciseAsFileTime")
+	procGetSystemWindowsDirectoryW                           = modkernel32.NewProc("GetSystemWindowsDirectoryW")
+	procGetTempPathW                                         = modkernel32.NewProc("GetTempPathW")
+	procGetThreadPreferredUILanguages                        = modkernel32.NewProc("GetThreadPreferredUILanguages")
+	procGetTickCount64                                       = modkernel32.NewProc("GetTickCount64")
+	procGetTimeZoneInformation                               = modkernel32.NewProc("GetTimeZoneInformation")
+	procGetUserPreferredUILanguages                          = modkernel32.NewProc("GetUserPreferredUILanguages")
+	procGetVersion                                           = modkernel32.NewProc("GetVersion")
+	procGetVolumeInformationByHandleW                        = modkernel32.NewProc("GetVolumeInformationByHandleW")
+	procGetVolumeInformationW                                = modkernel32.NewProc("GetVolumeInformationW")
+	procGetVolumeNameForVolumeMountPointW                    = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW")
+	procGetVolumePathNameW                                   = modkernel32.NewProc("GetVolumePathNameW")
+	procGetVolumePathNamesForVolumeNameW                     = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW")
+	procGetWindowsDirectoryW                                 = modkernel32.NewProc("GetWindowsDirectoryW")
+	procInitializeProcThreadAttributeList                    = modkernel32.NewProc("InitializeProcThreadAttributeList")
+	procIsWow64Process                                       = modkernel32.NewProc("IsWow64Process")
+	procIsWow64Process2                                      = modkernel32.NewProc("IsWow64Process2")
+	procLoadLibraryExW                                       = modkernel32.NewProc("LoadLibraryExW")
+	procLoadLibraryW                                         = modkernel32.NewProc("LoadLibraryW")
+	procLoadResource                                         = modkernel32.NewProc("LoadResource")
+	procLocalAlloc                                           = modkernel32.NewProc("LocalAlloc")
+	procLocalFree                                            = modkernel32.NewProc("LocalFree")
+	procLockFileEx                                           = modkernel32.NewProc("LockFileEx")
+	procLockResource                                         = modkernel32.NewProc("LockResource")
+	procMapViewOfFile                                        = modkernel32.NewProc("MapViewOfFile")
+	procModule32FirstW                                       = modkernel32.NewProc("Module32FirstW")
+	procModule32NextW                                        = modkernel32.NewProc("Module32NextW")
+	procMoveFileExW                                          = modkernel32.NewProc("MoveFileExW")
+	procMoveFileW                                            = modkernel32.NewProc("MoveFileW")
+	procMultiByteToWideChar                                  = modkernel32.NewProc("MultiByteToWideChar")
+	procOpenEventW                                           = modkernel32.NewProc("OpenEventW")
+	procOpenMutexW                                           = modkernel32.NewProc("OpenMutexW")
+	procOpenProcess                                          = modkernel32.NewProc("OpenProcess")
+	procOpenThread                                           = modkernel32.NewProc("OpenThread")
+	procPostQueuedCompletionStatus                           = modkernel32.NewProc("PostQueuedCompletionStatus")
+	procProcess32FirstW                                      = modkernel32.NewProc("Process32FirstW")
+	procProcess32NextW                                       = modkernel32.NewProc("Process32NextW")
+	procProcessIdToSessionId                                 = modkernel32.NewProc("ProcessIdToSessionId")
+	procPulseEvent                                           = modkernel32.NewProc("PulseEvent")
+	procPurgeComm                                            = modkernel32.NewProc("PurgeComm")
+	procQueryDosDeviceW                                      = modkernel32.NewProc("QueryDosDeviceW")
+	procQueryFullProcessImageNameW                           = modkernel32.NewProc("QueryFullProcessImageNameW")
+	procQueryInformationJobObject                            = modkernel32.NewProc("QueryInformationJobObject")
+	procReadConsoleW                                         = modkernel32.NewProc("ReadConsoleW")
+	procReadDirectoryChangesW                                = modkernel32.NewProc("ReadDirectoryChangesW")
+	procReadFile                                             = modkernel32.NewProc("ReadFile")
+	procReadProcessMemory                                    = modkernel32.NewProc("ReadProcessMemory")
+	procReleaseMutex                                         = modkernel32.NewProc("ReleaseMutex")
+	procRemoveDirectoryW                                     = modkernel32.NewProc("RemoveDirectoryW")
+	procRemoveDllDirectory                                   = modkernel32.NewProc("RemoveDllDirectory")
+	procResetEvent                                           = modkernel32.NewProc("ResetEvent")
+	procResizePseudoConsole                                  = modkernel32.NewProc("ResizePseudoConsole")
+	procResumeThread                                         = modkernel32.NewProc("ResumeThread")
+	procSetCommBreak                                         = modkernel32.NewProc("SetCommBreak")
+	procSetCommMask                                          = modkernel32.NewProc("SetCommMask")
+	procSetCommState                                         = modkernel32.NewProc("SetCommState")
+	procSetCommTimeouts                                      = modkernel32.NewProc("SetCommTimeouts")
+	procSetConsoleCP                                         = modkernel32.NewProc("SetConsoleCP")
+	procSetConsoleCursorPosition                             = modkernel32.NewProc("SetConsoleCursorPosition")
+	procSetConsoleMode                                       = modkernel32.NewProc("SetConsoleMode")
+	procSetConsoleOutputCP                                   = modkernel32.NewProc("SetConsoleOutputCP")
+	procSetCurrentDirectoryW                                 = modkernel32.NewProc("SetCurrentDirectoryW")
+	procSetDefaultDllDirectories                             = modkernel32.NewProc("SetDefaultDllDirectories")
+	procSetDllDirectoryW                                     = modkernel32.NewProc("SetDllDirectoryW")
+	procSetEndOfFile                                         = modkernel32.NewProc("SetEndOfFile")
+	procSetEnvironmentVariableW                              = modkernel32.NewProc("SetEnvironmentVariableW")
+	procSetErrorMode                                         = modkernel32.NewProc("SetErrorMode")
+	procSetEvent                                             = modkernel32.NewProc("SetEvent")
+	procSetFileAttributesW                                   = modkernel32.NewProc("SetFileAttributesW")
+	procSetFileCompletionNotificationModes                   = modkernel32.NewProc("SetFileCompletionNotificationModes")
+	procSetFileInformationByHandle                           = modkernel32.NewProc("SetFileInformationByHandle")
+	procSetFilePointer                                       = modkernel32.NewProc("SetFilePointer")
+	procSetFileTime                                          = modkernel32.NewProc("SetFileTime")
+	procSetFileValidData                                     = modkernel32.NewProc("SetFileValidData")
+	procSetHandleInformation                                 = modkernel32.NewProc("SetHandleInformation")
+	procSetInformationJobObject                              = modkernel32.NewProc("SetInformationJobObject")
+	procSetNamedPipeHandleState                              = modkernel32.NewProc("SetNamedPipeHandleState")
+	procSetPriorityClass                                     = modkernel32.NewProc("SetPriorityClass")
+	procSetProcessPriorityBoost                              = modkernel32.NewProc("SetProcessPriorityBoost")
+	procSetProcessShutdownParameters                         = modkernel32.NewProc("SetProcessShutdownParameters")
+	procSetProcessWorkingSetSizeEx                           = modkernel32.NewProc("SetProcessWorkingSetSizeEx")
+	procSetStdHandle                                         = modkernel32.NewProc("SetStdHandle")
+	procSetVolumeLabelW                                      = modkernel32.NewProc("SetVolumeLabelW")
+	procSetVolumeMountPointW                                 = modkernel32.NewProc("SetVolumeMountPointW")
+	procSetupComm                                            = modkernel32.NewProc("SetupComm")
+	procSizeofResource                                       = modkernel32.NewProc("SizeofResource")
+	procSleepEx                                              = modkernel32.NewProc("SleepEx")
+	procTerminateJobObject                                   = modkernel32.NewProc("TerminateJobObject")
+	procTerminateProcess                                     = modkernel32.NewProc("TerminateProcess")
+	procThread32First                                        = modkernel32.NewProc("Thread32First")
+	procThread32Next                                         = modkernel32.NewProc("Thread32Next")
+	procUnlockFileEx                                         = modkernel32.NewProc("UnlockFileEx")
+	procUnmapViewOfFile                                      = modkernel32.NewProc("UnmapViewOfFile")
+	procUpdateProcThreadAttribute                            = modkernel32.NewProc("UpdateProcThreadAttribute")
+	procVirtualAlloc                                         = modkernel32.NewProc("VirtualAlloc")
+	procVirtualFree                                          = modkernel32.NewProc("VirtualFree")
+	procVirtualLock                                          = modkernel32.NewProc("VirtualLock")
+	procVirtualProtect                                       = modkernel32.NewProc("VirtualProtect")
+	procVirtualProtectEx                                     = modkernel32.NewProc("VirtualProtectEx")
+	procVirtualQuery                                         = modkernel32.NewProc("VirtualQuery")
+	procVirtualQueryEx                                       = modkernel32.NewProc("VirtualQueryEx")
+	procVirtualUnlock                                        = modkernel32.NewProc("VirtualUnlock")
+	procWTSGetActiveConsoleSessionId                         = modkernel32.NewProc("WTSGetActiveConsoleSessionId")
+	procWaitCommEvent                                        = modkernel32.NewProc("WaitCommEvent")
+	procWaitForMultipleObjects                               = modkernel32.NewProc("WaitForMultipleObjects")
+	procWaitForSingleObject                                  = modkernel32.NewProc("WaitForSingleObject")
+	procWriteConsoleW                                        = modkernel32.NewProc("WriteConsoleW")
+	procWriteFile                                            = modkernel32.NewProc("WriteFile")
+	procWriteProcessMemory                                   = modkernel32.NewProc("WriteProcessMemory")
+	procAcceptEx                                             = modmswsock.NewProc("AcceptEx")
+	procGetAcceptExSockaddrs                                 = modmswsock.NewProc("GetAcceptExSockaddrs")
+	procTransmitFile                                         = modmswsock.NewProc("TransmitFile")
+	procNetApiBufferFree                                     = modnetapi32.NewProc("NetApiBufferFree")
+	procNetGetJoinInformation                                = modnetapi32.NewProc("NetGetJoinInformation")
+	procNetUserEnum                                          = modnetapi32.NewProc("NetUserEnum")
+	procNetUserGetInfo                                       = modnetapi32.NewProc("NetUserGetInfo")
+	procNtCreateFile                                         = modntdll.NewProc("NtCreateFile")
+	procNtCreateNamedPipeFile                                = modntdll.NewProc("NtCreateNamedPipeFile")
+	procNtQueryInformationProcess                            = modntdll.NewProc("NtQueryInformationProcess")
+	procNtQuerySystemInformation                             = modntdll.NewProc("NtQuerySystemInformation")
+	procNtSetInformationFile                                 = modntdll.NewProc("NtSetInformationFile")
+	procNtSetInformationProcess                              = modntdll.NewProc("NtSetInformationProcess")
+	procNtSetSystemInformation                               = modntdll.NewProc("NtSetSystemInformation")
+	procRtlAddFunctionTable                                  = modntdll.NewProc("RtlAddFunctionTable")
+	procRtlDefaultNpAcl                                      = modntdll.NewProc("RtlDefaultNpAcl")
+	procRtlDeleteFunctionTable                               = modntdll.NewProc("RtlDeleteFunctionTable")
+	procRtlDosPathNameToNtPathName_U_WithStatus              = modntdll.NewProc("RtlDosPathNameToNtPathName_U_WithStatus")
+	procRtlDosPathNameToRelativeNtPathName_U_WithStatus      = modntdll.NewProc("RtlDosPathNameToRelativeNtPathName_U_WithStatus")
+	procRtlGetCurrentPeb                                     = modntdll.NewProc("RtlGetCurrentPeb")
+	procRtlGetNtVersionNumbers                               = modntdll.NewProc("RtlGetNtVersionNumbers")
+	procRtlGetVersion                                        = modntdll.NewProc("RtlGetVersion")
+	procRtlInitString                                        = modntdll.NewProc("RtlInitString")
+	procRtlInitUnicodeString                                 = modntdll.NewProc("RtlInitUnicodeString")
+	procRtlNtStatusToDosErrorNoTeb                           = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb")
+	procCLSIDFromString                                      = modole32.NewProc("CLSIDFromString")
+	procCoCreateGuid                                         = modole32.NewProc("CoCreateGuid")
+	procCoGetObject                                          = modole32.NewProc("CoGetObject")
+	procCoInitializeEx                                       = modole32.NewProc("CoInitializeEx")
+	procCoTaskMemFree                                        = modole32.NewProc("CoTaskMemFree")
+	procCoUninitialize                                       = modole32.NewProc("CoUninitialize")
+	procStringFromGUID2                                      = modole32.NewProc("StringFromGUID2")
+	procEnumProcessModules                                   = modpsapi.NewProc("EnumProcessModules")
+	procEnumProcessModulesEx                                 = modpsapi.NewProc("EnumProcessModulesEx")
+	procEnumProcesses                                        = modpsapi.NewProc("EnumProcesses")
+	procGetModuleBaseNameW                                   = modpsapi.NewProc("GetModuleBaseNameW")
+	procGetModuleFileNameExW                                 = modpsapi.NewProc("GetModuleFileNameExW")
+	procGetModuleInformation                                 = modpsapi.NewProc("GetModuleInformation")
+	procQueryWorkingSetEx                                    = modpsapi.NewProc("QueryWorkingSetEx")
+	procSubscribeServiceChangeNotifications                  = modsechost.NewProc("SubscribeServiceChangeNotifications")
+	procUnsubscribeServiceChangeNotifications                = modsechost.NewProc("UnsubscribeServiceChangeNotifications")
+	procGetUserNameExW                                       = modsecur32.NewProc("GetUserNameExW")
+	procTranslateNameW                                       = modsecur32.NewProc("TranslateNameW")
+	procSetupDiBuildDriverInfoList                           = modsetupapi.NewProc("SetupDiBuildDriverInfoList")
+	procSetupDiCallClassInstaller                            = modsetupapi.NewProc("SetupDiCallClassInstaller")
+	procSetupDiCancelDriverInfoSearch                        = modsetupapi.NewProc("SetupDiCancelDriverInfoSearch")
+	procSetupDiClassGuidsFromNameExW                         = modsetupapi.NewProc("SetupDiClassGuidsFromNameExW")
+	procSetupDiClassNameFromGuidExW                          = modsetupapi.NewProc("SetupDiClassNameFromGuidExW")
+	procSetupDiCreateDeviceInfoListExW                       = modsetupapi.NewProc("SetupDiCreateDeviceInfoListExW")
+	procSetupDiCreateDeviceInfoW                             = modsetupapi.NewProc("SetupDiCreateDeviceInfoW")
+	procSetupDiDestroyDeviceInfoList                         = modsetupapi.NewProc("SetupDiDestroyDeviceInfoList")
+	procSetupDiDestroyDriverInfoList                         = modsetupapi.NewProc("SetupDiDestroyDriverInfoList")
+	procSetupDiEnumDeviceInfo                                = modsetupapi.NewProc("SetupDiEnumDeviceInfo")
+	procSetupDiEnumDriverInfoW                               = modsetupapi.NewProc("SetupDiEnumDriverInfoW")
+	procSetupDiGetClassDevsExW                               = modsetupapi.NewProc("SetupDiGetClassDevsExW")
+	procSetupDiGetClassInstallParamsW                        = modsetupapi.NewProc("SetupDiGetClassInstallParamsW")
+	procSetupDiGetDeviceInfoListDetailW                      = modsetupapi.NewProc("SetupDiGetDeviceInfoListDetailW")
+	procSetupDiGetDeviceInstallParamsW                       = modsetupapi.NewProc("SetupDiGetDeviceInstallParamsW")
+	procSetupDiGetDeviceInstanceIdW                          = modsetupapi.NewProc("SetupDiGetDeviceInstanceIdW")
+	procSetupDiGetDevicePropertyW                            = modsetupapi.NewProc("SetupDiGetDevicePropertyW")
+	procSetupDiGetDeviceRegistryPropertyW                    = modsetupapi.NewProc("SetupDiGetDeviceRegistryPropertyW")
+	procSetupDiGetDriverInfoDetailW                          = modsetupapi.NewProc("SetupDiGetDriverInfoDetailW")
+	procSetupDiGetSelectedDevice                             = modsetupapi.NewProc("SetupDiGetSelectedDevice")
+	procSetupDiGetSelectedDriverW                            = modsetupapi.NewProc("SetupDiGetSelectedDriverW")
+	procSetupDiOpenDevRegKey                                 = modsetupapi.NewProc("SetupDiOpenDevRegKey")
+	procSetupDiSetClassInstallParamsW                        = modsetupapi.NewProc("SetupDiSetClassInstallParamsW")
+	procSetupDiSetDeviceInstallParamsW                       = modsetupapi.NewProc("SetupDiSetDeviceInstallParamsW")
+	procSetupDiSetDeviceRegistryPropertyW                    = modsetupapi.NewProc("SetupDiSetDeviceRegistryPropertyW")
+	procSetupDiSetSelectedDevice                             = modsetupapi.NewProc("SetupDiSetSelectedDevice")
+	procSetupDiSetSelectedDriverW                            = modsetupapi.NewProc("SetupDiSetSelectedDriverW")
+	procSetupUninstallOEMInfW                                = modsetupapi.NewProc("SetupUninstallOEMInfW")
+	procCommandLineToArgvW                                   = modshell32.NewProc("CommandLineToArgvW")
+	procSHGetKnownFolderPath                                 = modshell32.NewProc("SHGetKnownFolderPath")
+	procShellExecuteW                                        = modshell32.NewProc("ShellExecuteW")
+	procEnumChildWindows                                     = moduser32.NewProc("EnumChildWindows")
+	procEnumWindows                                          = moduser32.NewProc("EnumWindows")
+	procExitWindowsEx                                        = moduser32.NewProc("ExitWindowsEx")
+	procGetClassNameW                                        = moduser32.NewProc("GetClassNameW")
+	procGetDesktopWindow                                     = moduser32.NewProc("GetDesktopWindow")
+	procGetForegroundWindow                                  = moduser32.NewProc("GetForegroundWindow")
+	procGetGUIThreadInfo                                     = moduser32.NewProc("GetGUIThreadInfo")
+	procGetKeyboardLayout                                    = moduser32.NewProc("GetKeyboardLayout")
+	procGetShellWindow                                       = moduser32.NewProc("GetShellWindow")
+	procGetWindowThreadProcessId                             = moduser32.NewProc("GetWindowThreadProcessId")
+	procIsWindow                                             = moduser32.NewProc("IsWindow")
+	procIsWindowUnicode                                      = moduser32.NewProc("IsWindowUnicode")
+	procIsWindowVisible                                      = moduser32.NewProc("IsWindowVisible")
+	procLoadKeyboardLayoutW                                  = moduser32.NewProc("LoadKeyboardLayoutW")
+	procMessageBoxW                                          = moduser32.NewProc("MessageBoxW")
+	procToUnicodeEx                                          = moduser32.NewProc("ToUnicodeEx")
+	procUnloadKeyboardLayout                                 = moduser32.NewProc("UnloadKeyboardLayout")
+	procCreateEnvironmentBlock                               = moduserenv.NewProc("CreateEnvironmentBlock")
+	procDestroyEnvironmentBlock                              = moduserenv.NewProc("DestroyEnvironmentBlock")
+	procGetUserProfileDirectoryW                             = moduserenv.NewProc("GetUserProfileDirectoryW")
+	procGetFileVersionInfoSizeW                              = modversion.NewProc("GetFileVersionInfoSizeW")
+	procGetFileVersionInfoW                                  = modversion.NewProc("GetFileVersionInfoW")
+	procVerQueryValueW                                       = modversion.NewProc("VerQueryValueW")
+	proctimeBeginPeriod                                      = modwinmm.NewProc("timeBeginPeriod")
+	proctimeEndPeriod                                        = modwinmm.NewProc("timeEndPeriod")
+	procWinVerifyTrustEx                                     = modwintrust.NewProc("WinVerifyTrustEx")
+	procFreeAddrInfoW                                        = modws2_32.NewProc("FreeAddrInfoW")
+	procGetAddrInfoW                                         = modws2_32.NewProc("GetAddrInfoW")
+	procWSACleanup                                           = modws2_32.NewProc("WSACleanup")
+	procWSADuplicateSocketW                                  = modws2_32.NewProc("WSADuplicateSocketW")
+	procWSAEnumProtocolsW                                    = modws2_32.NewProc("WSAEnumProtocolsW")
+	procWSAGetOverlappedResult                               = modws2_32.NewProc("WSAGetOverlappedResult")
+	procWSAIoctl                                             = modws2_32.NewProc("WSAIoctl")
+	procWSALookupServiceBeginW                               = modws2_32.NewProc("WSALookupServiceBeginW")
+	procWSALookupServiceEnd                                  = modws2_32.NewProc("WSALookupServiceEnd")
+	procWSALookupServiceNextW                                = modws2_32.NewProc("WSALookupServiceNextW")
+	procWSARecv                                              = modws2_32.NewProc("WSARecv")
+	procWSARecvFrom                                          = modws2_32.NewProc("WSARecvFrom")
+	procWSASend                                              = modws2_32.NewProc("WSASend")
+	procWSASendTo                                            = modws2_32.NewProc("WSASendTo")
+	procWSASocketW                                           = modws2_32.NewProc("WSASocketW")
+	procWSAStartup                                           = modws2_32.NewProc("WSAStartup")
+	procbind                                                 = modws2_32.NewProc("bind")
+	procclosesocket                                          = modws2_32.NewProc("closesocket")
+	procconnect                                              = modws2_32.NewProc("connect")
+	procgethostbyname                                        = modws2_32.NewProc("gethostbyname")
+	procgetpeername                                          = modws2_32.NewProc("getpeername")
+	procgetprotobyname                                       = modws2_32.NewProc("getprotobyname")
+	procgetservbyname                                        = modws2_32.NewProc("getservbyname")
+	procgetsockname                                          = modws2_32.NewProc("getsockname")
+	procgetsockopt                                           = modws2_32.NewProc("getsockopt")
+	proclisten                                               = modws2_32.NewProc("listen")
+	procntohs                                                = modws2_32.NewProc("ntohs")
+	procrecvfrom                                             = modws2_32.NewProc("recvfrom")
+	procsendto                                               = modws2_32.NewProc("sendto")
+	procsetsockopt                                           = modws2_32.NewProc("setsockopt")
+	procshutdown                                             = modws2_32.NewProc("shutdown")
+	procsocket                                               = modws2_32.NewProc("socket")
+	procWTSEnumerateSessionsW                                = modwtsapi32.NewProc("WTSEnumerateSessionsW")
+	procWTSFreeMemory                                        = modwtsapi32.NewProc("WTSFreeMemory")
+	procWTSQueryUserToken                                    = modwtsapi32.NewProc("WTSQueryUserToken")
+)
+
+func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) {
+	r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags))
+	ret = CONFIGRET(r0)
+	return
+}
+
+func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) {
+	r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags))
+	ret = CONFIGRET(r0)
+	return
+}
+
+func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) {
+	r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags))
+	ret = CONFIGRET(r0)
+	return
+}
+
+func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) {
+	r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error))
+	ret = Errno(r0)
+	return
+}
+
+func AdjustTokenGroups(token Token, resetToDefault bool, newstate *Tokengroups, buflen uint32, prevstate *Tokengroups, returnlen *uint32) (err error) {
+	var _p0 uint32
+	if resetToDefault {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func AdjustTokenPrivileges(token Token, disableAllPrivileges bool, newstate *Tokenprivileges, buflen uint32, prevstate *Tokenprivileges, returnlen *uint32) (err error) {
+	var _p0 uint32
+	if disableAllPrivileges {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
+	r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) {
+	r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CloseServiceHandle(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(str)
+	if err != nil {
+		return
+	}
+	return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size)
+}
+
+func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateProcessAsUser(token Token, appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) {
+	var _p0 uint32
+	if inheritHandles {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DeleteService(service Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DeregisterEventSource(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
+	r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)))
+	isEqual = r0 != 0
+	return
+}
+
+func FreeSid(sid *SID) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid)))
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetLengthSid(sid *SID) (len uint32) {
+	r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid)))
+	len = uint32(r0)
+	return
+}
+
+func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
+	var _p0 *uint16
+	_p0, ret = syscall.UTF16PtrFromString(objectName)
+	if ret != nil {
+		return
+	}
+	return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd)
+}
+
+func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
+	r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) {
+	var _p0 uint32
+	if *daclPresent {
+		_p0 = 1
+	}
+	var _p1 uint32
+	if *daclDefaulted {
+		_p1 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)))
+	*daclPresent = _p0 != 0
+	*daclDefaulted = _p1 != 0
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) {
+	var _p0 uint32
+	if *groupDefaulted {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
+	*groupDefaulted = _p0 != 0
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) {
+	r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd)))
+	len = uint32(r0)
+	return
+}
+
+func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) {
+	var _p0 uint32
+	if *ownerDefaulted {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
+	*ownerDefaulted = _p0 != 0
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) {
+	r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) {
+	var _p0 uint32
+	if *saclPresent {
+		_p0 = 1
+	}
+	var _p1 uint32
+	if *saclDefaulted {
+		_p1 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)))
+	*saclPresent = _p0 != 0
+	*saclDefaulted = _p1 != 0
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
+	r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
+	r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid)))
+	authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
+	return
+}
+
+func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
+	r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index))
+	subAuthority = (*uint32)(unsafe.Pointer(r0))
+	return
+}
+
+func getSidSubAuthorityCount(sid *SID) (count *uint8) {
+	r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid)))
+	count = (*uint8)(unsafe.Pointer(r0))
+	return
+}
+
+func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ImpersonateSelf(impersonationlevel uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) {
+	var _p0 uint32
+	if forceAppsClosed {
+		_p0 = 1
+	}
+	var _p1 uint32
+	if rebootAfterShutdown {
+		_p1 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
+	r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle))
+	ret = r0 != 0
+	if !ret {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) {
+	r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd)))
+	isValid = r0 != 0
+	return
+}
+
+func isValidSid(sid *SID) (isValid bool) {
+	r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid)))
+	isValid = r0 != 0
+	return
+}
+
+func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
+	r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType))
+	isWellKnown = r0 != 0
+	return
+}
+
+func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) {
+	r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
+	r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) {
+	var _p0 uint32
+	if openAsSelf {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
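+// QueryServiceDynamicInformation is not available on older Windows releases,
+// so the wrapper first probes for the procedure with Find() and returns that
+// error rather than letting Addr() fail at call time.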
+func QueryServiceDynamicInformation(service Handle, infoLevel uint32, dynamicInfo unsafe.Pointer) (err error) {
+	err = procQueryServiceDynamicInformation.Find()
+	if err != nil {
+		return
+	}
+	r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func RegCloseKey(key Handle) (regerrno error) {
+	r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
+	r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) {
+	var _p0 uint32
+	if watchSubtree {
+		_p0 = 1
+	}
+	var _p1 uint32
+	if asynchronous {
+		_p1 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) {
+	r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) {
+	r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+	r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+	if r0 != 0 {
+		regerrno = syscall.Errno(r0)
+	}
+	return
+}
+
+func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func RevertToSelf() (err error) {
+	r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
+	r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
+	var _p0 *uint16
+	_p0, ret = syscall.UTF16PtrFromString(objectName)
+	if ret != nil {
+		return
+	}
+	return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl)
+}
+
+func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
+	r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) {
+	var _p0 uint32
+	if daclPresent {
+		_p0 = 1
+	}
+	var _p1 uint32
+	if daclDefaulted {
+		_p1 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) {
+	var _p0 uint32
+	if groupDefaulted {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) {
+	var _p0 uint32
+	if ownerDefaulted {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) {
+	syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
+	return
+}
+
+func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) {
+	var _p0 uint32
+	if saclPresent {
+		_p0 = 1
+	}
+	var _p1 uint32
+	if saclDefaulted {
+		_p1 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
+	r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetThreadToken(thread *Handle, token Token) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
+	r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
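+// The crypt32 certificate wrappers below report failure either through a zero
+// BOOL (r1 == 0) or, for the calls that return a context or store handle,
+// through a nil result.
+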
+func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertCloseStore(store Handle, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) {
+	r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
+	context = (*CertContext)(unsafe.Pointer(r0))
+	if context == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) {
+	r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext)))
+	dupContext = (*CertContext)(unsafe.Pointer(r0))
+	return
+}
+
+func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
+	r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext)))
+	context = (*CertContext)(unsafe.Pointer(r0))
+	if context == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) {
+	r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
+	cert = (*CertContext)(unsafe.Pointer(r0))
+	if cert == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) {
+	r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
+	certchain = (*CertChainContext)(unsafe.Pointer(r0))
+	if certchain == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) {
+	r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
+	ret = (*CertExtension)(unsafe.Pointer(r0))
+	return
+}
+
+func CertFreeCertificateChain(ctx *CertChainContext) {
+	syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx)))
+	return
+}
+
+func CertFreeCertificateContext(ctx *CertContext) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) {
+	r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
+	chars = uint32(r0)
+	return
+}
+
+func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name)))
+	store = Handle(r0)
+	if store == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptAcquireCertificatePrivateKey(cert *CertContext, flags uint32, parameters unsafe.Pointer, cryptProvOrNCryptKey *Handle, keySpec *uint32, callerFreeProvOrNCryptKey *bool) (err error) {
+	var _p0 uint32
+	if *callerFreeProvOrNCryptKey {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
+	*callerFreeProvOrNCryptKey = _p0 != 0
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
+	store = Handle(r0)
+	if store == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
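+// The dnsapi wrappers reuse the same patterns: DnsQuery converts its Go string
+// argument to UTF-16 before delegating to _DnsQuery, and a nonzero status from
+// the call is returned as a syscall.Errno.
+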
+func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) {
+	r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)))
+	same = r0 != 0
+	return
+}
+
+func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
+	var _p0 *uint16
+	_p0, status = syscall.UTF16PtrFromString(name)
+	if status != nil {
+		return
+	}
+	return _DnsQuery(_p0, qtype, options, extra, qrs, pr)
+}
+
+func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
+	r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
+	if r0 != 0 {
+		status = syscall.Errno(r0)
+	}
+	return
+}
+
+func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
+	syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype))
+	return
+}
+
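+// The dwmapi wrappers return HRESULT-style results: a nonzero r0 is surfaced
+// directly as a syscall.Errno.
+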
+func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
+	r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
+	r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
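+// The iphlpapi wrappers likewise return Windows error codes (NO_ERROR on
+// success), so any nonzero r0 is converted straight to a syscall.Errno.
+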
+func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
+	r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path)))
+	cookie = uintptr(r0)
+	if cookie == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func AssignProcessToJobObject(job Handle, process Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CancelIo(s Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CancelIoEx(s Handle, o *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ClearCommBreak(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) {
+	r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CloseHandle(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ClosePseudoConsole(console Handle) {
+	syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console))
+	return
+}
+
+func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
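+// For the named-object constructors below (events, file mappings, mutexes),
+// ERROR_ALREADY_EXISTS is reported as an error even when a valid handle is
+// returned, letting callers detect that they opened an existing object.
+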
+func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
+	handle = Handle(r0)
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
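+// CreateHardLink and CreateSymbolicLink are treated as returning a one-byte
+// result, so only the low byte of r1 is tested; r1&0xff == 0 signals failure.
+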
+func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+	if r1&0xff == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
+	handle = Handle(r0)
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) {
+	var _p0 uint32
+	if initialOwner {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) {
+	var _p0 uint32
+	if inheritHandles {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
+	r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)))
+	if r0 != 0 {
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+	if r1&0xff == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DeleteFile(path *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) {
+	syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)))
+	return
+}
+
+func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DisconnectNamedPipe(pipe Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) {
+	var _p0 uint32
+	if bInheritHandle {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ExitProcess(exitcode uint32) {
+	syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode))
+	return
+}
+
+func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindClose(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindCloseChangeNotification(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindFirstChangeNotification(path string, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return
+	}
+	return _FindFirstChangeNotification(_p0, watchSubtree, notifyFilter)
+}
+
+func _FindFirstChangeNotification(path *uint16, watchSubtree bool, notifyFilter uint32) (handle Handle, err error) {
+	var _p1 uint32
+	if watchSubtree {
+		_p1 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindNextChangeNotification(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func findNextFile1(handle Handle, data *win32finddata1) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType))
+	resInfo = Handle(r0)
+	if resInfo == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindVolumeClose(findVolume Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FlushConsoleInputBuffer(console Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FlushFileBuffers(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) {
+	var _p0 *uint16
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FreeEnvironmentStrings(envs *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func FreeLibrary(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetACP() (acp uint32) {
+	r0, _, _ := syscall.SyscallN(procGetACP.Addr())
+	acp = uint32(r0)
+	return
+}
+
+func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
+	r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
+	ret = uint32(r0)
+	return
+}
+
+func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetCommState(handle Handle, lpDCB *DCB) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetCommandLine() (cmd *uint16) {
+	r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr())
+	cmd = (*uint16)(unsafe.Pointer(r0))
+	return
+}
+
+func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetComputerName(buf *uint16, n *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetConsoleCP() (cp uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr())
+	cp = uint32(r0)
+	if cp == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetConsoleMode(console Handle, mode *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetConsoleOutputCP() (cp uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr())
+	cp = uint32(r0)
+	if cp == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetCurrentProcessId() (pid uint32) {
+	r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
+	pid = uint32(r0)
+	return
+}
+
+func GetCurrentThreadId() (id uint32) {
+	r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr())
+	id = uint32(r0)
+	return
+}
+
+func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetDriveType(rootPathName *uint16) (driveType uint32) {
+	r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName)))
+	driveType = uint32(r0)
+	return
+}
+
+func GetEnvironmentStrings() (envs *uint16, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr())
+	envs = (*uint16)(unsafe.Pointer(r0))
+	if envs == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileAttributes(name *uint16) (attrs uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)))
+	attrs = uint32(r0)
+	if attrs == INVALID_FILE_ATTRIBUTES {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileType(filehandle Handle) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetLargePageMinimum() (size uintptr) {
+	r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr())
+	size = uintptr(r0)
+	return
+}
+
+func GetLastError() (lasterr error) {
+	r0, _, _ := syscall.SyscallN(procGetLastError.Addr())
+	if r0 != 0 {
+		lasterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetLogicalDrives() (drivesBitMask uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr())
+	drivesBitMask = uint32(r0)
+	if drivesBitMask == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
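+// Editorial sketch (illustrative, not generated code): decoding the
+// GetLogicalDrives bitmask above, where bit 0 is drive A:, bit 1 is B:, and
+// so on up through Z:.
+func exampleDriveLetters() ([]string, error) {
+	mask, err := GetLogicalDrives()
+	if err != nil {
+		return nil, err
+	}
+	var drives []string
+	for i := 0; i < 26; i++ {
+		if mask&(1<<uint(i)) != 0 {
+			drives = append(drives, string(rune('A'+i))+":\\")
+		}
+	}
+	return drives, nil
+}
+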
+func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
+	r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber))
+	ret = uint32(r0)
+	return
+}
+
+func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
+	var _p0 uint32
+	if wait {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
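+// Editorial sketch (illustrative, not generated code): the wait parameter
+// above is widened to a uint32 because the Win32 BOOL is a 32-bit integer.
+// A typical use is blocking until an overlapped operation started elsewhere
+// has finished.
+func exampleWaitOverlapped(h Handle, o *Overlapped) (uint32, error) {
+	var done uint32
+	// wait=true maps to 1, so this call blocks until completion.
+	err := GetOverlappedResult(h, o, &done, true)
+	return done, err
+}
+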
+func GetPriorityClass(process Handle) (ret uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process))
+	ret = uint32(r0)
+	if ret == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetProcAddress(module Handle, procname string) (proc uintptr, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(procname)
+	if err != nil {
+		return
+	}
+	return _GetProcAddress(module, _p0)
+}
+
+func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname)))
+	proc = uintptr(r0)
+	if proc == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
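+// Editorial sketch (illustrative, not generated code): resolving an export by
+// hand with the two wrappers above. FreeLibrary and the constant
+// LOAD_LIBRARY_SEARCH_SYSTEM32 (restricting the search to System32) are
+// assumed from elsewhere in this package.
+func exampleResolveProc(dll, proc string) (uintptr, error) {
+	h, err := LoadLibraryEx(dll, 0, LOAD_LIBRARY_SEARCH_SYSTEM32)
+	if err != nil {
+		return 0, err
+	}
+	addr, err := GetProcAddress(h, proc)
+	if err != nil {
+		FreeLibrary(h)
+		return 0, err
+	}
+	return addr, nil
+}
+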
+func GetProcessId(process Handle) (id uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process))
+	id = uint32(r0)
+	if id == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
+	syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)))
+	return
+}
+
+func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
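+// Editorial sketch (illustrative, not generated code): a minimal
+// completion-port drain loop over the wrapper above. INFINITE is assumed from
+// this package's constants; the handler callback is purely illustrative.
+func exampleDrainIOCP(port Handle, handle func(key uintptr, n uint32, o *Overlapped)) error {
+	for {
+		var n uint32
+		var key uintptr
+		var o *Overlapped
+		if err := GetQueuedCompletionStatus(port, &n, &key, &o, INFINITE); err != nil {
+			return err
+		}
+		handle(key, n, o)
+	}
+}
+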
+func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getStartupInfo(startupInfo *StartupInfo) {
+	syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo)))
+	return
+}
+
+func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
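+// Editorial sketch (illustrative, not generated code): enabling VT escape
+// processing on stdout via the GetStdHandle wrapper above and the
+// SetConsoleMode wrapper later in this file. GetConsoleMode and the constants
+// STD_OUTPUT_HANDLE and ENABLE_VIRTUAL_TERMINAL_PROCESSING are assumed from
+// elsewhere in this package.
+func exampleEnableVT() error {
+	h, err := GetStdHandle(STD_OUTPUT_HANDLE)
+	if err != nil {
+		return err
+	}
+	var mode uint32
+	if err := GetConsoleMode(h, &mode); err != nil {
+		return err
+	}
+	return SetConsoleMode(h, mode|ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+}
+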
+func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
+	len = uint32(r0)
+	if len == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetSystemTimeAsFileTime(time *Filetime) {
+	syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
+	return
+}
+
+func GetSystemTimePreciseAsFileTime(time *Filetime) {
+	syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
+	return
+}
+
+func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
+	len = uint32(r0)
+	if len == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getTickCount64() (ms uint64) {
+	r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
+	ms = uint64(r0)
+	return
+}
+
+func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi)))
+	rc = uint32(r0)
+	if rc == 0xffffffff {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVersion() (ver uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetVersion.Addr())
+	ver = uint32(r0)
+	if ver == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
+	len = uint32(r0)
+	if len == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func IsWow64Process(handle Handle, isWow64 *bool) (err error) {
+	var _p0 uint32
+	if *isWow64 {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0)))
+	*isWow64 = _p0 != 0
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func IsWow64Process2(handle Handle, processMachine *uint16, nativeMachine *uint16) (err error) {
+	err = procIsWow64Process2.Find()
+	if err != nil {
+		return
+	}
+	r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
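+// Editorial sketch (illustrative, not generated code): detecting WOW64 with
+// the wrapper above, which probes for the API first because IsWow64Process2
+// only exists on newer Windows 10 releases. IMAGE_FILE_MACHINE_UNKNOWN is
+// assumed from this package's constants.
+func exampleIsWow64(process Handle) (bool, error) {
+	var processMachine, nativeMachine uint16
+	if err := IsWow64Process2(process, &processMachine, &nativeMachine); err != nil {
+		return false, err
+	}
+	// UNKNOWN means the process is not running under WOW64.
+	return processMachine != IMAGE_FILE_MACHINE_UNKNOWN, nil
+}
+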
+func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(libname)
+	if err != nil {
+		return
+	}
+	return _LoadLibraryEx(_p0, zero, flags)
+}
+
+func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LoadLibrary(libname string) (handle Handle, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(libname)
+	if err != nil {
+		return
+	}
+	return _LoadLibrary(_p0)
+}
+
+func _LoadLibrary(libname *uint16) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname)))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo))
+	resData = Handle(r0)
+	if resData == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
+	r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length))
+	ptr = uintptr(r0)
+	if ptr == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LocalFree(hmem Handle) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem))
+	handle = Handle(r0)
+	if handle != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func LockResource(resData Handle) (addr uintptr, err error) {
+	r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData))
+	addr = uintptr(r0)
+	if addr == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
+	r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length))
+	addr = uintptr(r0)
+	if addr == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
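+// Editorial sketch (illustrative, not generated code): mapping a file
+// read-only with the wrapper above. CreateFileMapping, CloseHandle,
+// PAGE_READONLY and FILE_MAP_READ are assumed from elsewhere in this package;
+// UnmapViewOfFile appears later in this file.
+func exampleMapFile(f Handle) (uintptr, error) {
+	m, err := CreateFileMapping(f, nil, PAGE_READONLY, 0, 0, nil)
+	if err != nil {
+		return 0, err
+	}
+	defer CloseHandle(m)
+	// A zero length maps the whole file; the view remains valid after the
+	// mapping handle is closed and must be released with UnmapViewOfFile.
+	return MapViewOfFile(m, FILE_MAP_READ, 0, 0, 0)
+}
+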
+func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func MoveFile(from *uint16, to *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
+	r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+	nwrite = int32(r0)
+	if nwrite == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
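+// Editorial sketch (illustrative, not generated code): the usual two-call
+// pattern with the wrapper above — first ask for the required length with a
+// nil output buffer, then convert. CP_UTF8 is assumed from this package's
+// constants.
+func exampleUTF8ToUTF16(s []byte) ([]uint16, error) {
+	if len(s) == 0 {
+		return nil, nil
+	}
+	n, err := MultiByteToWideChar(CP_UTF8, 0, &s[0], int32(len(s)), nil, 0)
+	if err != nil {
+		return nil, err
+	}
+	buf := make([]uint16, n)
+	if _, err := MultiByteToWideChar(CP_UTF8, 0, &s[0], int32(len(s)), &buf[0], n); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+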
+func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) {
+	var _p0 uint32
+	if inheritHandle {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+	handle = Handle(r0)
+	if handle == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
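+// Editorial sketch (illustrative, not generated code): walking a toolhelp
+// snapshot with the two wrappers above. CreateToolhelp32Snapshot,
+// TH32CS_SNAPPROCESS, UTF16ToString, CloseHandle and ERROR_NO_MORE_FILES are
+// assumed from elsewhere in this package.
+func exampleListProcesses() (map[uint32]string, error) {
+	snap, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
+	if err != nil {
+		return nil, err
+	}
+	defer CloseHandle(snap)
+	var pe ProcessEntry32
+	pe.Size = uint32(unsafe.Sizeof(pe)) // must be set before the first call
+	procs := make(map[uint32]string)
+	for err = Process32First(snap, &pe); err == nil; err = Process32Next(snap, &pe) {
+		procs[pe.ProcessID] = UTF16ToString(pe.ExeFile[:])
+	}
+	if err == ERROR_NO_MORE_FILES {
+		err = nil // the expected end-of-snapshot condition
+	}
+	return procs, err
+}
+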
+func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func PulseEvent(event Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func PurgeComm(handle Handle, dwFlags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+	n = uint32(r0)
+	if n == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
+	var _p0 uint32
+	if watchSubTree {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func readFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ReleaseMutex(mutex Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func RemoveDirectory(path *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func RemoveDllDirectory(cookie uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ResetEvent(event Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
+	r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size))
+	if r0 != 0 {
+		hr = syscall.Errno(r0)
+	}
+	return
+}
+
+func ResumeThread(thread Handle) (ret uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread))
+	ret = uint32(r0)
+	if ret == 0xffffffff {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetCommBreak(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetCommState(handle Handle, lpDCB *DCB) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetConsoleCP(cp uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setConsoleCursorPosition(console Handle, position uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetConsoleMode(console Handle, mode uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetConsoleOutputCP(cp uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetCurrentDirectory(path *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetDefaultDllDirectories(directoryFlags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetDllDirectory(path string) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return
+	}
+	return _SetDllDirectory(_p0)
+}
+
+func _SetDllDirectory(path *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetEndOfFile(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetErrorMode(mode uint32) (ret uint32) {
+	r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode))
+	ret = uint32(r0)
+	return
+}
+
+func SetEvent(event Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetFileAttributes(name *uint16, attrs uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence))
+	newlowoffset = uint32(r0)
+	if newlowoffset == 0xffffffff {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetFileValidData(handle Handle, validDataLength int64) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) {
+	r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength))
+	ret = int(r0)
+	if ret == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetProcessPriorityBoost(process Handle, disable bool) (err error) {
+	var _p0 uint32
+	if disable {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo))
+	size = uint32(r0)
+	if size == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SleepEx(milliseconds uint32, alertable bool) (ret uint32) {
+	var _p0 uint32
+	if alertable {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0))
+	ret = uint32(r0)
+	return
+}
+
+func TerminateJobObject(job Handle, exitCode uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func TerminateProcess(handle Handle, exitcode uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
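+// Editorial sketch (illustrative, not generated code): taking and releasing a
+// whole-file exclusive lock with LockFileEx above and UnlockFileEx.
+// LOCKFILE_EXCLUSIVE_LOCK is assumed from this package's constants; the
+// Overlapped offset selects where the locked byte range starts.
+func exampleWithFileLock(f Handle, fn func() error) error {
+	var o Overlapped
+	if err := LockFileEx(f, LOCKFILE_EXCLUSIVE_LOCK, 0, ^uint32(0), ^uint32(0), &o); err != nil {
+		return err
+	}
+	defer UnlockFileEx(f, 0, ^uint32(0), ^uint32(0), &o)
+	return fn()
+}
+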
+func UnmapViewOfFile(addr uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
+	r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect))
+	value = uintptr(r0)
+	if value == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
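+// Editorial sketch (illustrative, not generated code): reserving and
+// committing a scratch region in one step with the wrappers above.
+// MEM_COMMIT, MEM_RESERVE, MEM_RELEASE and PAGE_READWRITE are assumed from
+// this package's constants.
+func exampleScratchPage(size uintptr) (addr uintptr, free func() error, err error) {
+	addr, err = VirtualAlloc(0, size, MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE)
+	if err != nil {
+		return 0, nil, err
+	}
+	// MEM_RELEASE requires size 0 and the exact base address returned above.
+	return addr, func() error { return VirtualFree(addr, 0, MEM_RELEASE) }, nil
+}
+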
+func VirtualLock(addr uintptr, length uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VirtualUnlock(addr uintptr, length uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WTSGetActiveConsoleSessionId() (sessionID uint32) {
+	r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr())
+	sessionID = uint32(r0)
+	return
+}
+
+func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) {
+	var _p0 uint32
+	if waitAll {
+		_p0 = 1
+	}
+	r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
+	event = uint32(r0)
+	if event == 0xffffffff {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
+	event = uint32(r0)
+	if event == 0xffffffff {
+		err = errnoErr(e1)
+	}
+	return
+}
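+// Editorial sketch (illustrative, not generated code): waiting on a process
+// (or any waitable handle) with the wrapper above. WAIT_OBJECT_0 and
+// WAIT_TIMEOUT are assumed from this package's constants.
+func exampleAwait(h Handle, ms uint32) (finished bool, err error) {
+	ev, err := WaitForSingleObject(h, ms)
+	if err != nil {
+		return false, err
+	}
+	switch ev {
+	case WAIT_OBJECT_0:
+		return true, nil
+	case WAIT_TIMEOUT:
+		return false, nil
+	}
+	return false, syscall.EINVAL // e.g. WAIT_ABANDONED for a mutex
+}
+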
+
+func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func writeFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
+	r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
+	syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
+	return
+}
+
+func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytesPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytesPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func NetApiBufferFree(buf *byte) (neterr error) {
+	r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
+	r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
+	r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
+	r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
+	if r0 != 0 {
+		neterr = syscall.Errno(r0)
+	}
+	return
+}
+
+func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
+	r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+	ret = r0 != 0
+	return
+}
+
+func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
+	r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
+	ret = r0 != 0
+	return
+}
+
+func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlGetCurrentPeb() (peb *PEB) {
+	r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
+	peb = (*PEB)(unsafe.Pointer(r0))
+	return
+}
+
+func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
+	syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+	return
+}
+
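+// Editorial sketch (illustrative, not generated code): rtlGetNtVersionNumbers
+// above reports the true kernel version even when compatibility shims lie to
+// GetVersion. The build number's high bits carry build-type flags, so mask to
+// the low 16 bits — an assumption based on how this package consumes it.
+func exampleNTVersion() (major, minor, build uint32) {
+	rtlGetNtVersionNumbers(&major, &minor, &build)
+	build &= 0xffff
+	return
+}
+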
+func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
+	r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
+	if r0 != 0 {
+		ntstatus = NTStatus(r0)
+	}
+	return
+}
+
+func RtlInitString(destinationString *NTString, sourceString *byte) {
+	syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
+	return
+}
+
+func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
+	syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
+	return
+}
+
+func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
+	r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
+	ret = syscall.Errno(r0)
+	return
+}
+
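+// Editorial sketch (illustrative, not generated code): folding an NTSTATUS
+// returned by the Nt* wrappers earlier in this file into a conventional
+// Errno via rtlNtStatusToDosErrorNoTeb above.
+func exampleNtErr(ntstatus error) error {
+	if st, ok := ntstatus.(NTStatus); ok {
+		return rtlNtStatusToDosErrorNoTeb(st)
+	}
+	return ntstatus
+}
+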
+func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
+	r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func coCreateGuid(pguid *GUID) (ret error) {
+	r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
+	r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
+	r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func CoTaskMemFree(address unsafe.Pointer) {
+	syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
+	return
+}
+
+func CoUninitialize() {
+	syscall.SyscallN(procCoUninitialize.Addr())
+	return
+}
+
+func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
+	r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+	chars = int32(r0)
+	return
+}
+
+func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
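+// Editorial sketch (illustrative, not generated code): the grow-and-retry
+// pattern for enumProcesses above. The API gives no direct "buffer too small"
+// signal, so the buffer is doubled until the result fits with room to spare.
+// Note nSize and bytesReturned are byte counts, not element counts.
+func examplePIDs() ([]uint32, error) {
+	pids := make([]uint32, 256)
+	for {
+		var ret uint32
+		if err := enumProcesses(&pids[0], uint32(len(pids))*4, &ret); err != nil {
+			return nil, err
+		}
+		n := int(ret / 4)
+		if n < len(pids) {
+			return pids[:n], nil
+		}
+		pids = make([]uint32, len(pids)*2)
+	}
+}
+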
+func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) {
+	ret = procSubscribeServiceChangeNotifications.Find()
+	if ret != nil {
+		return
+	}
+	r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) {
+	err = procUnsubscribeServiceChangeNotifications.Find()
+	if err != nil {
+		return
+	}
+	syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
+	return
+}
+
+func GetUserNameEx(nameFormat uint32, nameBuffer *uint16, nSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffer)), uintptr(unsafe.Pointer(nSize)))
+	if r1&0xff == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
+	if r1&0xff == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
+	r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	handle = DevInfo(r0)
+	if handle == DevInfo(InvalidHandle) {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
+	r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+	handle = DevInfo(r0)
+	if handle == DevInfo(InvalidHandle) {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+	key = Handle(r0)
+	if key == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
+	r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
+	argv = (**uint16)(unsafe.Pointer(r0))
+	if argv == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
+	r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+	if r1 <= 32 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
+	syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+	return
+}
+
+func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
+	r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
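+// Editorial note: a hedged sketch of driving EnumWindows with a callback
+// created via syscall.NewCallback. The signature follows the Win32
+// EnumWindowsProc contract: return nonzero to continue enumerating, zero to
+// stop.
+//
+//	cb := syscall.NewCallback(func(hwnd HWND, lparam uintptr) uintptr {
+//		// inspect hwnd here
+//		return 1
+//	})
+//	_ = EnumWindows(cb, nil)
+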
+func ExitWindowsEx(flags uint32, reason uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+	copied = int32(r0)
+	if copied == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetDesktopWindow() (hwnd HWND) {
+	r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
+	hwnd = HWND(r0)
+	return
+}
+
+func GetForegroundWindow() (hwnd HWND) {
+	r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
+	hwnd = HWND(r0)
+	return
+}
+
+func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetKeyboardLayout(tid uint32) (hkl Handle) {
+	r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
+	hkl = Handle(r0)
+	return
+}
+
+func GetShellWindow() (shellWindow HWND) {
+	r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
+	shellWindow = HWND(r0)
+	return
+}
+
+func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
+	tid = uint32(r0)
+	if tid == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func IsWindow(hwnd HWND) (isWindow bool) {
+	r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
+	isWindow = r0 != 0
+	return
+}
+
+func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
+	r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
+	isUnicode = r0 != 0
+	return
+}
+
+func IsWindowVisible(hwnd HWND) (isVisible bool) {
+	r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
+	isVisible = r0 != 0
+	return
+}
+
+func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
+	hkl = Handle(r0)
+	if hkl == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
+	r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
+	ret = int32(r0)
+	if ret == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
+	r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
+	ret = int32(r0)
+	return
+}
+
+func UnloadKeyboardLayout(hkl Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func CreateEnvironmentBlock(block **uint16, token Token, inheritExisting bool) (err error) {
+	var _p0 uint32
+	if inheritExisting {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func DestroyEnvironmentBlock(block *uint16) (err error) {
+	r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
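+// Editorial note: a hedged sketch of the usual two-call pattern for
+// GetUserProfileDirectory; token is assumed to be a valid Token, and the
+// first call is expected to fail while writing the required length to n.
+//
+//	var n uint32
+//	_ = GetUserProfileDirectory(token, nil, &n) // sets n on failure
+//	buf := make([]uint16, n)
+//	if err := GetUserProfileDirectory(token, &buf[0], &n); err == nil {
+//		dir := syscall.UTF16ToString(buf)
+//		_ = dir
+//	}
+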
+func GetFileVersionInfoSize(filename string, zeroHandle *Handle) (bufSize uint32, err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(filename)
+	if err != nil {
+		return
+	}
+	return _GetFileVersionInfoSize(_p0, zeroHandle)
+}
+
+func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
+	r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
+	bufSize = uint32(r0)
+	if bufSize == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetFileVersionInfo(filename string, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(filename)
+	if err != nil {
+		return
+	}
+	return _GetFileVersionInfo(_p0, handle, bufSize, buffer)
+}
+
+func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
+	r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func VerQueryValue(block unsafe.Pointer, subBlock string, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
+	var _p0 *uint16
+	_p0, err = syscall.UTF16PtrFromString(subBlock)
+	if err != nil {
+		return
+	}
+	return _VerQueryValue(block, _p0, pointerToBufferPointer, bufSize)
+}
+
+func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
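+// Editorial note: the three version-info wrappers above are normally used
+// together. A hedged sketch, assuming VS_FIXEDFILEINFO is the fixed-info
+// struct defined elsewhere in this package (error handling elided):
+//
+//	path := `C:\Windows\System32\kernel32.dll`
+//	size, _ := GetFileVersionInfoSize(path, nil)
+//	buf := make([]byte, size)
+//	_ = GetFileVersionInfo(path, 0, size, unsafe.Pointer(&buf[0]))
+//	var info *VS_FIXEDFILEINFO
+//	var infoLen uint32
+//	_ = VerQueryValue(unsafe.Pointer(&buf[0]), `\`, unsafe.Pointer(&info), &infoLen)
+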
+func TimeBeginPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func TimeEndPeriod(period uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
+	r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+	if r0 != 0 {
+		ret = syscall.Errno(r0)
+	}
+	return
+}
+
+func FreeAddrInfoW(addrinfo *AddrinfoW) {
+	syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
+	return
+}
+
+func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
+	r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
+	if r0 != 0 {
+		sockerr = syscall.Errno(r0)
+	}
+	return
+}
+
+func WSACleanup() (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+	if r1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
+	r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+	n = int32(r0)
+	if n == -1 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSAGetOverlappedResult(h Handle, o *Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) {
+	var _p0 uint32
+	if wait {
+		_p0 = 1
+	}
+	r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceEnd(handle Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
+	r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data)))
+	if r0 != 0 {
+		sockerr = syscall.Errno(r0)
+	}
+	return
+}
+
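+// Editorial note: a hedged sketch pairing WSAStartup with WSACleanup;
+// 0x202 requests Winsock 2.2 (the MAKEWORD(2, 2) encoding).
+//
+//	var d WSAData
+//	if err := WSAStartup(uint32(0x202), &d); err != nil {
+//		return // or handle the error
+//	}
+//	defer WSACleanup()
+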
+func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Closesocket(s Handle) (err error) {
+	r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetHostByName(name string) (h *Hostent, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _GetHostByName(_p0)
+}
+
+func _GetHostByName(name *byte) (h *Hostent, err error) {
+	r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name)))
+	h = (*Hostent)(unsafe.Pointer(r0))
+	if h == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetProtoByName(name string) (p *Protoent, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	return _GetProtoByName(_p0)
+}
+
+func _GetProtoByName(name *byte) (p *Protoent, err error) {
+	r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name)))
+	p = (*Protoent)(unsafe.Pointer(r0))
+	if p == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func GetServByName(name string, proto string) (s *Servent, err error) {
+	var _p0 *byte
+	_p0, err = syscall.BytePtrFromString(name)
+	if err != nil {
+		return
+	}
+	var _p1 *byte
+	_p1, err = syscall.BytePtrFromString(proto)
+	if err != nil {
+		return
+	}
+	return _GetServByName(_p0, _p1)
+}
+
+func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
+	r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)))
+	s = (*Servent)(unsafe.Pointer(r0))
+	if s == nil {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func listen(s Handle, backlog int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Ntohs(netshort uint16) (u uint16) {
+	r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort))
+	u = uint16(r0)
+	return
+}
+
+func recvfrom(s Handle, buf []byte, flags int32, from *RawSockaddrAny, fromlen *int32) (n int32, err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+	n = int32(r0)
+	if n == -1 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func sendto(s Handle, buf []byte, flags int32, to unsafe.Pointer, tolen int32) (err error) {
+	var _p0 *byte
+	if len(buf) > 0 {
+		_p0 = &buf[0]
+	}
+	r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func shutdown(s Handle, how int32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how))
+	if r1 == socket_error {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
+	r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol))
+	handle = Handle(r0)
+	if handle == InvalidHandle {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+func WTSFreeMemory(ptr uintptr) {
+	syscall.SyscallN(procWTSFreeMemory.Addr(), ptr)
+	return
+}
+
+func WTSQueryUserToken(session uint32, token *Token) (err error) {
+	r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token)))
+	if r1 == 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 9d2ae54..fb82732 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -427,13 +427,6 @@
 
 func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
 
-func maxLevel(a, b level) level {
-	if a > b {
-		return a
-	}
-	return b
-}
-
 // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
 // either L or R, for each isolating run sequence.
 func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@
 		indexes: indexes,
 		types:   types,
 		level:   level,
-		sos:     typeForLevel(maxLevel(prevLevel, level)),
-		eos:     typeForLevel(maxLevel(succLevel, level)),
+		sos:     typeForLevel(max(prevLevel, level)),
+		eos:     typeForLevel(max(succLevel, level)),
 	}
 }
 
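Editorial note: the hunk above drops the hand-rolled maxLevel helper in favor
of Go 1.21's built-in generic max, which accepts any ordered type. A minimal
equivalence sketch, assuming the package's level type:

	var a, b level = 2, 1
	_ = max(a, b) // same result as the deleted maxLevel(a, b)
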
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/google.golang.org/genproto/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability. In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "[]"
-      replaced with your own identifying information. (Don't include
-      the brackets!)  The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
index 191bea4..0b789e2 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.12.2
+// 	protoc        v4.24.4
 // source: google/api/annotations.proto
 
 package annotations
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index 83774fb..f840481 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.12
+// 	protoc        v4.24.4
 // source: google/api/client.proto
 
 package annotations
@@ -180,6 +180,8 @@
 	ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
 	// The destination where API teams want this client library to be published.
 	Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
+	// Configuration for which RPCs should be generated in the GAPIC client.
+	SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
 }
 
 func (x *CommonLanguageSettings) Reset() {
@@ -229,6 +231,13 @@
 	return nil
 }
 
+func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
+	if x != nil {
+		return x.SelectiveGapicGeneration
+	}
+	return nil
+}
+
 // Details about how and where to publish client libraries.
 type ClientLibrarySettings struct {
 	state         protoimpl.MessageState
@@ -409,6 +418,9 @@
 	// Optional link to proto reference documentation.  Example:
 	// https://cloud.google.com/pubsub/lite/docs/reference/rpc
 	ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"`
+	// Optional link to REST reference documentation.  Example:
+	// https://cloud.google.com/pubsub/lite/docs/reference/rest
+	RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"`
 }
 
 func (x *Publishing) Reset() {
@@ -513,6 +525,13 @@
 	return ""
 }
 
+func (x *Publishing) GetRestReferenceDocumentationUri() string {
+	if x != nil {
+		return x.RestReferenceDocumentationUri
+	}
+	return ""
+}
+
 // Settings for Java client libraries.
 type JavaSettings struct {
 	state         protoimpl.MessageState
@@ -709,6 +728,8 @@
 
 	// Some settings.
 	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+	// Experimental features to be included during client library generation.
+	ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
 }
 
 func (x *PythonSettings) Reset() {
@@ -750,6 +771,13 @@
 	return nil
 }
 
+func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
+	if x != nil {
+		return x.ExperimentalFeatures
+	}
+	return nil
+}
+
 // Settings for Node client libraries.
 type NodeSettings struct {
 	state         protoimpl.MessageState
@@ -965,6 +993,16 @@
 
 	// Some settings.
 	Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+	// Map of service names to renamed services. Keys are the package relative
+	// service names and values are the name to be used for the service client
+	// and call options.
+	//
+	// publishing:
+	//
+	//	go_settings:
+	//	  renamed_services:
+	//	    Publisher: TopicAdmin
+	RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
 }
 
 func (x *GoSettings) Reset() {
@@ -1006,6 +1044,13 @@
 	return nil
 }
 
+func (x *GoSettings) GetRenamedServices() map[string]string {
+	if x != nil {
+		return x.RenamedServices
+	}
+	return nil
+}
+
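+// Editorial note: a hedged sketch of how a generator might consume the
+// renaming map above, reusing the Publisher -> TopicAdmin example from the
+// field comment (goSettings is a hypothetical *GoSettings value):
+//
+//	if newName, ok := goSettings.GetRenamedServices()["Publisher"]; ok {
+//		_ = newName // "TopicAdmin"
+//	}
+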
 // Describes the generator configuration for a method.
 type MethodSettings struct {
 	state         protoimpl.MessageState
@@ -1014,6 +1059,13 @@
 
 	// The fully qualified name of the method, for which the options below apply.
 	// This is used to find the method to apply the options.
+	//
+	// Example:
+	//
+	//	publishing:
+	//	  method_settings:
+	//	  - selector: google.storage.control.v2.StorageControl.CreateFolder
+	//	    # method settings for CreateFolder...
 	Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
 	// Describes settings to use for long-running operations when generating
 	// API methods for RPCs. Complements RPCs that use the annotations in
@@ -1023,16 +1075,25 @@
 	//
 	//	publishing:
 	//	  method_settings:
-	//	    - selector: google.cloud.speech.v2.Speech.BatchRecognize
-	//	      long_running:
-	//	        initial_poll_delay:
-	//	          seconds: 60 # 1 minute
-	//	        poll_delay_multiplier: 1.5
-	//	        max_poll_delay:
-	//	          seconds: 360 # 6 minutes
-	//	        total_poll_timeout:
-	//	           seconds: 54000 # 90 minutes
+	//	  - selector: google.cloud.speech.v2.Speech.BatchRecognize
+	//	    long_running:
+	//	      initial_poll_delay: 60s # 1 minute
+	//	      poll_delay_multiplier: 1.5
+	//	      max_poll_delay: 360s # 6 minutes
+	//	      total_poll_timeout: 54000s # 90 minutes
 	LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"`
+	// List of top-level fields of the request message that should be
+	// automatically populated by the client libraries based on their
+	// (google.api.field_info).format. Currently supported format: UUID4.
+	//
+	// Example of a YAML configuration:
+	//
+	//	publishing:
+	//	  method_settings:
+	//	  - selector: google.example.v1.ExampleService.CreateExample
+	//	    auto_populated_fields:
+	//	    - request_id
+	AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"`
 }
 
 func (x *MethodSettings) Reset() {
@@ -1081,6 +1142,156 @@
 	return nil
 }
 
+func (x *MethodSettings) GetAutoPopulatedFields() []string {
+	if x != nil {
+		return x.AutoPopulatedFields
+	}
+	return nil
+}
+
+// This message is used to configure the generation of a subset of the RPCs in
+// a service for client libraries.
+type SelectiveGapicGeneration struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// An allowlist of the fully qualified names of RPCs that should be included
+	// on public client surfaces.
+	Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"`
+	// Setting this to true indicates to the client generators that methods
+	// that would be excluded from the generation should instead be generated
+	// in a way that indicates these methods should not be consumed by
+	// end users. How this is expressed is up to individual language
+	// implementations to decide. Some examples may be: added annotations,
+	// obfuscated identifiers, or other language idiomatic patterns.
+	GenerateOmittedAsInternal bool `protobuf:"varint,2,opt,name=generate_omitted_as_internal,json=generateOmittedAsInternal,proto3" json:"generate_omitted_as_internal,omitempty"`
+}
+
+func (x *SelectiveGapicGeneration) Reset() {
+	*x = SelectiveGapicGeneration{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_client_proto_msgTypes[12]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *SelectiveGapicGeneration) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SelectiveGapicGeneration) ProtoMessage() {}
+
+func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_client_proto_msgTypes[12]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead.
+func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) {
+	return file_google_api_client_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SelectiveGapicGeneration) GetMethods() []string {
+	if x != nil {
+		return x.Methods
+	}
+	return nil
+}
+
+func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool {
+	if x != nil {
+		return x.GenerateOmittedAsInternal
+	}
+	return false
+}
+
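+// Editorial note: a hedged sketch of populating this message; the method
+// name reuses the google.example.v1 selector shown in the MethodSettings
+// comments above.
+//
+//	sel := &SelectiveGapicGeneration{
+//		Methods:                   []string{"google.example.v1.ExampleService.CreateExample"},
+//		GenerateOmittedAsInternal: true,
+//	}
+//	_ = sel
+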
+// Experimental features to be included during client library generation.
+// These fields will be deprecated once the feature graduates and is enabled
+// by default.
+type PythonSettings_ExperimentalFeatures struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Enables generation of asynchronous REST clients if `rest` transport is
+	// enabled. By default, asynchronous REST clients will not be generated.
+	// This feature will be enabled by default 1 month after launching the
+	// feature in preview packages.
+	RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
+	// Enables generation of protobuf code using new types that are more
+	// Pythonic which are included in `protobuf>=5.29.x`. This feature will be
+	// enabled by default 1 month after launching the feature in preview
+	// packages.
+	ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
+	// Disables generation of an unversioned Python package for this client
+	// library. This means that the module names will need to be versioned in
+	// import statements. For example `import google.cloud.library_v2` instead
+	// of `import google.cloud.library`.
+	UnversionedPackageDisabled bool `protobuf:"varint,3,opt,name=unversioned_package_disabled,json=unversionedPackageDisabled,proto3" json:"unversioned_package_disabled,omitempty"`
+}
+
+func (x *PythonSettings_ExperimentalFeatures) Reset() {
+	*x = PythonSettings_ExperimentalFeatures{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_client_proto_msgTypes[14]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
+
+func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_client_proto_msgTypes[14]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
+	return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
+	if x != nil {
+		return x.RestAsyncIoEnabled
+	}
+	return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
+	if x != nil {
+		return x.ProtobufPythonicTypesEnabled
+	}
+	return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetUnversionedPackageDisabled() bool {
+	if x != nil {
+		return x.UnversionedPackageDisabled
+	}
+	return false
+}
+
 // Describes settings to use when generating API methods that use the
 // long-running operation pattern.
 // All default values below are from those used in the client library
@@ -1109,7 +1320,7 @@
 func (x *MethodSettings_LongRunning) Reset() {
 	*x = MethodSettings_LongRunning{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_google_api_client_proto_msgTypes[15]
+		mi := &file_google_api_client_proto_msgTypes[18]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1122,7 +1333,7 @@
 func (*MethodSettings_LongRunning) ProtoMessage() {}
 
 func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
-	mi := &file_google_api_client_proto_msgTypes[15]
+	mi := &file_google_api_client_proto_msgTypes[18]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1191,6 +1402,14 @@
 		Tag:           "bytes,1050,opt,name=oauth_scopes",
 		Filename:      "google/api/client.proto",
 	},
+	{
+		ExtendedType:  (*descriptorpb.ServiceOptions)(nil),
+		ExtensionType: (*string)(nil),
+		Field:         525000001,
+		Name:          "google.api.api_version",
+		Tag:           "bytes,525000001,opt,name=api_version",
+		Filename:      "google/api/client.proto",
+	},
 }
 
 // Extension fields to descriptorpb.MethodOptions.
@@ -1272,6 +1491,23 @@
 	//
 	// optional string oauth_scopes = 1050;
 	E_OauthScopes = &file_google_api_client_proto_extTypes[2]
+	// The API version of this service, which should be sent by version-aware
+	// clients to the service. This allows services to abide by the schema and
+	// behavior of the service at the time this API version was deployed.
+	// The format of the API version must be treated as opaque by clients.
+	// Services may use a format with an apparent structure, but clients must
+	// not rely on this to determine components within an API version, or attempt
+	// to construct other valid API versions. Note that this is for upcoming
+	// functionality and may not be implemented for all services.
+	//
+	// Example:
+	//
+	//	service Foo {
+	//	  option (google.api.api_version) = "v1_20230821_preview";
+	//	}
+	//
+	// optional string api_version = 525000001;
+	E_ApiVersion = &file_google_api_client_proto_extTypes[3]
 )
 
 var File_google_api_client_proto protoreflect.FileDescriptor
@@ -1285,7 +1521,7 @@
 	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
 	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
 	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
 	0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
 	0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
 	0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
@@ -1294,227 +1530,283 @@
 	0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
 	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
 	0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-	0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05,
-	0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53,
-	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
-	0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65,
-	0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a,
-	0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e,
-	0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e,
-	0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a,
-	0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01,
+	0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
+	0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
+	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53,
+	0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e,
+	0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
+	0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72,
+	0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76,
+	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
+	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
+	0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
+	0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
+	0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69,
+	0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72,
+	0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12,
+	0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+	0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a,
+	0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63,
+	0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68,
+	0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68,
+	0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65,
+	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e,
+	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74,
+	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e,
+	0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01,
 	0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
-	0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61,
-	0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70,
-	0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b,
-	0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70,
-	0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
-	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
-	0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74,
-	0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
-	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
-	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
-	0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
-	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74,
-	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74,
-	0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74,
-	0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72,
-	0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
-	0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75,
-	0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f,
-	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53,
-	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
-	0x6e, 0x67, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69,
-	0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74,
-	0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
-	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
-	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69,
-	0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
-	0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64,
-	0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69,
-	0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f,
-	0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21,
-	0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65,
-	0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67,
-	0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28,
-	0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68,
-	0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74,
-	0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a,
-	0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20,
-	0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
-	0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72,
-	0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61,
-	0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72,
-	0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
-	0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74,
-	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f,
-	0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
-	0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
-	0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72,
-	0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
-	0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61,
-	0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62,
-	0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73,
-	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d,
-	0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
-	0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e,
-	0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06,
-	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
-	0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
-	0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76,
-	0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74,
-	0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49,
-	0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a,
-	0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
-	0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
-	0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70,
+	0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
+	0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f,
+	0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+	0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+	0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+	0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+	0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37,
+	0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53,
+	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c,
+	0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+	0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+	0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
+	0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+	0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e,
+	0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01,
+	0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12,
+	0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75,
+	0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e,
+	0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61,
+	0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62,
+	0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+	0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e,
+	0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18,
+	0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+	0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64,
+	0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20,
+	0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69,
+	0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+	0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
+	0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+	0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10,
+	0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+	0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+	0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+	0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61,
+	0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f,
+	0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18,
+	0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65,
+	0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
+	0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f,
+	0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a,
+	0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+	0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+	0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72,
+	0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76,
+	0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
+	0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
+	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
+	0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
+	0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
+	0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
+	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43,
+	0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+	0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
+	0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+	0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
+	0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+	0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
+	0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+	0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
+	0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+	0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+	0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65,
+	0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+	0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65,
+	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72,
+	0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+	0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f,
+	0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+	0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c,
+	0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70,
+	0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e,
+	0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70,
+	0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76,
+	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+	0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+	0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b,
+	0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
+	0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
+	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
+	0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+	0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
+	0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+	0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+	0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+	0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+	0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
+	0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
+	0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+	0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
+	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
+	0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+	0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+	0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+	0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
+	0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
+	0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
+	0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+	0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
+	0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
+	0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
+	0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+	0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
+	0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
+	0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+	0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+	0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+	0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
+	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+	0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+	0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
 	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
 	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
 	0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
 	0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
-	0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
-	0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
-	0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
-	0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
-	0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae,
-	0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
-	0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
-	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
-	0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a,
-	0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69,
-	0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
-	0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e,
-	0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
-	0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
-	0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
-	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52,
-	0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f,
-	0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20,
-	0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f,
-	0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65,
-	0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12,
-	0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73,
-	0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52,
-	0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
-	0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
-	0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
-	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65,
-	0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e,
-	0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
-	0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
-	0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
-	0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
-	0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47,
-	0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
-	0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
-	0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
-	0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
-	0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65,
-	0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65,
-	0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e,
-	0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65,
-	0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69,
-	0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a,
-	0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12,
-	0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
-	0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50,
-	0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c,
-	0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65,
-	0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
-	0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e,
-	0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-	0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a,
-	0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65,
-	0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54,
-	0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e,
-	0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c,
-	0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54,
-	0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
-	0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03,
-	0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10,
-	0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57,
-	0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05,
-	0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e,
-	0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18,
-	0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73,
-	0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45,
-	0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49,
-	0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
-	0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a,
-	0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41,
-	0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f,
-	0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68,
-	0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09,
-	0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73,
-	0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75,
-	0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f,
-	0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
-	0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63,
-	0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43,
-	0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
-	0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
-	0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2,
-	0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
+	0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53,
+	0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12,
+	0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+	0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+	0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53,
+	0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d,
+	0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e,
+	0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a,
+	0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f,
+	0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+	0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65,
+	0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e,
+	0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75,
+	0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f,
+	0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03,
+	0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61,
+	0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f,
+	0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69,
+	0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18,
+	0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
+	0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79,
+	0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74,
+	0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f,
+	0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f,
+	0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+	0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20,
+	0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
+	0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+	0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70,
+	0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07,
+	0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d,
+	0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+	0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e,
+	0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65,
+	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49,
+	0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65,
+	0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
+	0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41,
+	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+	0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a,
+	0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53,
+	0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45,
+	0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10,
+	0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45,
+	0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a,
+	0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65,
+	0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49,
+	0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54,
+	0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+	0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10,
+	0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e,
+	0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+	0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
+	0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28,
+	0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+	0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f,
+	0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61,
+	0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68,
+	0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+	0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b,
+	0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f,
+	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
+	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab,
+	0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69,
+	0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+	0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
@@ -1530,68 +1822,75 @@
 }
 
 var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
 var file_google_api_client_proto_goTypes = []interface{}{
-	(ClientLibraryOrganization)(0),      // 0: google.api.ClientLibraryOrganization
-	(ClientLibraryDestination)(0),       // 1: google.api.ClientLibraryDestination
-	(*CommonLanguageSettings)(nil),      // 2: google.api.CommonLanguageSettings
-	(*ClientLibrarySettings)(nil),       // 3: google.api.ClientLibrarySettings
-	(*Publishing)(nil),                  // 4: google.api.Publishing
-	(*JavaSettings)(nil),                // 5: google.api.JavaSettings
-	(*CppSettings)(nil),                 // 6: google.api.CppSettings
-	(*PhpSettings)(nil),                 // 7: google.api.PhpSettings
-	(*PythonSettings)(nil),              // 8: google.api.PythonSettings
-	(*NodeSettings)(nil),                // 9: google.api.NodeSettings
-	(*DotnetSettings)(nil),              // 10: google.api.DotnetSettings
-	(*RubySettings)(nil),                // 11: google.api.RubySettings
-	(*GoSettings)(nil),                  // 12: google.api.GoSettings
-	(*MethodSettings)(nil),              // 13: google.api.MethodSettings
-	nil,                                 // 14: google.api.JavaSettings.ServiceClassNamesEntry
-	nil,                                 // 15: google.api.DotnetSettings.RenamedServicesEntry
-	nil,                                 // 16: google.api.DotnetSettings.RenamedResourcesEntry
-	(*MethodSettings_LongRunning)(nil),  // 17: google.api.MethodSettings.LongRunning
-	(api.LaunchStage)(0),                // 18: google.api.LaunchStage
-	(*durationpb.Duration)(nil),         // 19: google.protobuf.Duration
-	(*descriptorpb.MethodOptions)(nil),  // 20: google.protobuf.MethodOptions
-	(*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
+	(ClientLibraryOrganization)(0),              // 0: google.api.ClientLibraryOrganization
+	(ClientLibraryDestination)(0),               // 1: google.api.ClientLibraryDestination
+	(*CommonLanguageSettings)(nil),              // 2: google.api.CommonLanguageSettings
+	(*ClientLibrarySettings)(nil),               // 3: google.api.ClientLibrarySettings
+	(*Publishing)(nil),                          // 4: google.api.Publishing
+	(*JavaSettings)(nil),                        // 5: google.api.JavaSettings
+	(*CppSettings)(nil),                         // 6: google.api.CppSettings
+	(*PhpSettings)(nil),                         // 7: google.api.PhpSettings
+	(*PythonSettings)(nil),                      // 8: google.api.PythonSettings
+	(*NodeSettings)(nil),                        // 9: google.api.NodeSettings
+	(*DotnetSettings)(nil),                      // 10: google.api.DotnetSettings
+	(*RubySettings)(nil),                        // 11: google.api.RubySettings
+	(*GoSettings)(nil),                          // 12: google.api.GoSettings
+	(*MethodSettings)(nil),                      // 13: google.api.MethodSettings
+	(*SelectiveGapicGeneration)(nil),            // 14: google.api.SelectiveGapicGeneration
+	nil,                                         // 15: google.api.JavaSettings.ServiceClassNamesEntry
+	(*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures
+	nil,                                 // 17: google.api.DotnetSettings.RenamedServicesEntry
+	nil,                                 // 18: google.api.DotnetSettings.RenamedResourcesEntry
+	nil,                                 // 19: google.api.GoSettings.RenamedServicesEntry
+	(*MethodSettings_LongRunning)(nil),  // 20: google.api.MethodSettings.LongRunning
+	(api.LaunchStage)(0),                // 21: google.api.LaunchStage
+	(*durationpb.Duration)(nil),         // 22: google.protobuf.Duration
+	(*descriptorpb.MethodOptions)(nil),  // 23: google.protobuf.MethodOptions
+	(*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions
 }
 var file_google_api_client_proto_depIdxs = []int32{
 	1,  // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
-	18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
-	5,  // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
-	6,  // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
-	7,  // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
-	8,  // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
-	9,  // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
-	10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
-	11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
-	12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
-	13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
-	0,  // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
-	3,  // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
-	14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
-	2,  // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
-	2,  // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
-	2,  // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
-	2,  // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
-	2,  // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
-	2,  // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
-	15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
-	16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
-	2,  // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
-	2,  // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
-	17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
-	19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
-	19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
-	19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
-	20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
-	21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
-	21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
-	31, // [31:31] is the sub-list for method output_type
-	31, // [31:31] is the sub-list for method input_type
-	31, // [31:31] is the sub-list for extension type_name
-	28, // [28:31] is the sub-list for extension extendee
-	0,  // [0:28] is the sub-list for field type_name
+	14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration
+	21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
+	5,  // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
+	6,  // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
+	7,  // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
+	8,  // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
+	9,  // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
+	10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
+	11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
+	12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
+	13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
+	0,  // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
+	3,  // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
+	15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
+	2,  // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
+	2,  // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
+	2,  // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
+	2,  // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
+	16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
+	2,  // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
+	2,  // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
+	17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
+	18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
+	2,  // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
+	2,  // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
+	19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry
+	20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
+	22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
+	22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
+	22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
+	23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+	24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+	24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+	24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions
+	35, // [35:35] is the sub-list for method output_type
+	35, // [35:35] is the sub-list for method input_type
+	35, // [35:35] is the sub-list for extension type_name
+	31, // [31:35] is the sub-list for extension extendee
+	0,  // [0:31] is the sub-list for field type_name
 }
 
 func init() { file_google_api_client_proto_init() }
@@ -1744,7 +2043,31 @@
 				return nil
 			}
 		}
-		file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+		file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SelectiveGapicGeneration); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*PythonSettings_ExperimentalFeatures); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
 			switch v := v.(*MethodSettings_LongRunning); i {
 			case 0:
 				return &v.state
@@ -1763,8 +2086,8 @@
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: file_google_api_client_proto_rawDesc,
 			NumEnums:      2,
-			NumMessages:   16,
-			NumExtensions: 3,
+			NumMessages:   19,
+			NumExtensions: 4,
 			NumServices:   0,
 		},
 		GoTypes:           file_google_api_client_proto_goTypes,
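The bracketed sub-list comments above describe how the protobuf runtime partitions the generated dependency-index slice: the trailing five entries are boundary markers read against the references that precede them. A minimal, self-contained sketch of that layout, using made-up indexes rather than the real 31-entry table from this file:

```go
package main

import "fmt"

func main() {
	// Hypothetical miniature of a generated depIdxs slice: two field
	// type_name references, one extension extendee reference, then the
	// five boundary markers (method output_type, method input_type,
	// extension type_name, extension extendee, field type_name).
	depIdxs := []int32{
		7, // 0: some field's type_name reference
		9, // 1: another field's type_name reference
		4, // 2: an extension's extendee reference
		3, 3, 3, 2, 0, // sub-list start markers, as in the comments above
	}
	bounds := depIdxs[len(depIdxs)-5:]
	fmt.Println("field type_name refs:", depIdxs[bounds[4]:bounds[3]]) // [7 9]
	fmt.Println("extension extendees: ", depIdxs[bounds[3]:bounds[2]]) // [4]
}
```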
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
index dbe2e2d..5d583b8 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/api/field_behavior.proto
 
 package annotations
@@ -78,6 +78,19 @@
 	// a non-empty value will be returned. The user will not be aware of what
 	// non-empty value to expect.
 	FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
+	// Denotes that the field in a resource (a message annotated with
+	// google.api.resource) is used in the resource name to uniquely identify the
+	// resource. For AIP-compliant APIs, this should only be applied to the
+	// `name` field on the resource.
+	//
+	// This behavior should not be applied to references to other resources within
+	// the message.
+	//
+	// The identifier field of resources often has different field behavior
+	// depending on the request it is embedded in (e.g., for Create methods the
+	// name is optional and unused, while for Update methods it is required).
+	// Instead of method-specific annotations, only `IDENTIFIER` is required.
+	FieldBehavior_IDENTIFIER FieldBehavior = 8
 )
 
 // Enum value maps for FieldBehavior.
@@ -91,6 +104,7 @@
 		5: "IMMUTABLE",
 		6: "UNORDERED_LIST",
 		7: "NON_EMPTY_DEFAULT",
+		8: "IDENTIFIER",
 	}
 	FieldBehavior_value = map[string]int32{
 		"FIELD_BEHAVIOR_UNSPECIFIED": 0,
@@ -101,6 +115,7 @@
 		"IMMUTABLE":                  5,
 		"UNORDERED_LIST":             6,
 		"NON_EMPTY_DEFAULT":          7,
+		"IDENTIFIER":                 8,
 	}
 )
 
@@ -169,7 +184,7 @@
 	0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
 	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
 	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
-	0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
+	0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
 	0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
 	0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
 	0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
@@ -179,21 +194,22 @@
 	0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
 	0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
 	0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
-	0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+	0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
+	0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
 	0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
 	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
 	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
 	0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65,
-	0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f,
-	0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
-	0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
-	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
+	0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52,
+	0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70,
+	0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+	0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50,
+	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
+	0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
+	0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
+	0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
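The new `IDENTIFIER` value can be read back at runtime through the `google.api.field_behavior` extension on `FieldOptions`. A minimal sketch using the packages vendored in this diff; the helper name is ours, not part of the generated API:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// fieldBehaviors returns the google.api.field_behavior values declared on a
// field descriptor, e.g. to detect IDENTIFIER on a resource's name field.
func fieldBehaviors(fd protoreflect.FieldDescriptor) []annotations.FieldBehavior {
	opts, ok := fd.Options().(*descriptorpb.FieldOptions)
	if !ok || opts == nil {
		return nil
	}
	return proto.GetExtension(opts, annotations.E_FieldBehavior).([]annotations.FieldBehavior)
}

func main() {
	fmt.Println(annotations.FieldBehavior_IDENTIFIER) // prints: IDENTIFIER
}
```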
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
new file mode 100644
index 0000000..53e9dd1
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
@@ -0,0 +1,392 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.26.0
+// 	protoc        v4.24.4
+// source: google/api/field_info.proto
+
+package annotations
+
+import (
+	reflect "reflect"
+	sync "sync"
+
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The standard format of a field value. The supported formats are all backed
+// by either an RFC defined by the IETF or a Google-defined AIP.
+type FieldInfo_Format int32
+
+const (
+	// Default, unspecified value.
+	FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0
+	// Universally Unique Identifier, version 4, value as defined by
+	// https://datatracker.ietf.org/doc/html/rfc4122. The value may be
+	// normalized to entirely lowercase letters. For example, the value
+	// `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to
+	// `f47ac10b-58cc-0372-8567-0e02b2c3d479`.
+	FieldInfo_UUID4 FieldInfo_Format = 1
+	// Internet Protocol v4 value as defined by [RFC
+	// 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be
+	// condensed, with leading zeros in each octet stripped. For example,
+	// `001.022.233.040` would be condensed to `1.22.233.40`.
+	FieldInfo_IPV4 FieldInfo_Format = 2
+	// Internet Protocol v6 value as defined by [RFC
+	// 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be
+	// normalized to entirely lowercase letters with zeros compressed, following
+	// [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example,
+	// the value `2001:0DB8:0::0` would be normalized to `2001:db8::`.
+	FieldInfo_IPV6 FieldInfo_Format = 3
+	// An IP address in either v4 or v6 format as described by the individual
+	// values defined herein. See the comments on the IPV4 and IPV6 types for
+	// allowed normalizations of each.
+	FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4
+)
+
+// Enum value maps for FieldInfo_Format.
+var (
+	FieldInfo_Format_name = map[int32]string{
+		0: "FORMAT_UNSPECIFIED",
+		1: "UUID4",
+		2: "IPV4",
+		3: "IPV6",
+		4: "IPV4_OR_IPV6",
+	}
+	FieldInfo_Format_value = map[string]int32{
+		"FORMAT_UNSPECIFIED": 0,
+		"UUID4":              1,
+		"IPV4":               2,
+		"IPV6":               3,
+		"IPV4_OR_IPV6":       4,
+	}
+)
+
+func (x FieldInfo_Format) Enum() *FieldInfo_Format {
+	p := new(FieldInfo_Format)
+	*p = x
+	return p
+}
+
+func (x FieldInfo_Format) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_api_field_info_proto_enumTypes[0].Descriptor()
+}
+
+func (FieldInfo_Format) Type() protoreflect.EnumType {
+	return &file_google_api_field_info_proto_enumTypes[0]
+}
+
+func (x FieldInfo_Format) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FieldInfo_Format.Descriptor instead.
+func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) {
+	return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Rich semantic information of an API field beyond basic typing.
+type FieldInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The standard format of a field value. This does not explicitly configure
+	// any API consumer, just documents the API's format for the field it is
+	// applied to.
+	Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"`
+	// The type(s) that the annotated, generic field may represent.
+	//
+	// Currently, this must only be used on fields of type `google.protobuf.Any`.
+	// Supporting other generic types may be considered in the future.
+	ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"`
+}
+
+func (x *FieldInfo) Reset() {
+	*x = FieldInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_field_info_proto_msgTypes[0]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *FieldInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldInfo) ProtoMessage() {}
+
+func (x *FieldInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_field_info_proto_msgTypes[0]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead.
+func (*FieldInfo) Descriptor() ([]byte, []int) {
+	return file_google_api_field_info_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldInfo) GetFormat() FieldInfo_Format {
+	if x != nil {
+		return x.Format
+	}
+	return FieldInfo_FORMAT_UNSPECIFIED
+}
+
+func (x *FieldInfo) GetReferencedTypes() []*TypeReference {
+	if x != nil {
+		return x.ReferencedTypes
+	}
+	return nil
+}
+
+// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo].
+type TypeReference struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// The name of the type that the annotated, generic field may represent.
+	// If the type is in the same protobuf package, the value can be the simple
+	// message name e.g., `"MyMessage"`. Otherwise, the value must be the
+	// fully-qualified message name e.g., `"google.library.v1.Book"`.
+	//
+	// If the type(s) are unknown to the service (e.g. the field accepts generic
+	// user input), use the wildcard `"*"` to denote this behavior.
+	//
+	// See [AIP-202](https://google.aip.dev/202#type-references) for more details.
+	TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
+}
+
+func (x *TypeReference) Reset() {
+	*x = TypeReference{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_google_api_field_info_proto_msgTypes[1]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *TypeReference) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypeReference) ProtoMessage() {}
+
+func (x *TypeReference) ProtoReflect() protoreflect.Message {
+	mi := &file_google_api_field_info_proto_msgTypes[1]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead.
+func (*TypeReference) Descriptor() ([]byte, []int) {
+	return file_google_api_field_info_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TypeReference) GetTypeName() string {
+	if x != nil {
+		return x.TypeName
+	}
+	return ""
+}
+
+var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{
+	{
+		ExtendedType:  (*descriptorpb.FieldOptions)(nil),
+		ExtensionType: (*FieldInfo)(nil),
+		Field:         291403980,
+		Name:          "google.api.field_info",
+		Tag:           "bytes,291403980,opt,name=field_info",
+		Filename:      "google/api/field_info.proto",
+	},
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+	// Rich semantic descriptor of an API field beyond the basic typing.
+	//
+	// Examples:
+	//
+	//	string request_id = 1 [(google.api.field_info).format = UUID4];
+	//	string old_ip_address = 2 [(google.api.field_info).format = IPV4];
+	//	string new_ip_address = 3 [(google.api.field_info).format = IPV6];
+	//	string actual_ip_address = 4 [
+	//	  (google.api.field_info).format = IPV4_OR_IPV6
+	//	];
+	//	google.protobuf.Any generic_field = 5 [
+	//	  (google.api.field_info).referenced_types = {type_name: "ActualType"},
+	//	  (google.api.field_info).referenced_types = {type_name: "OtherType"},
+	//	];
+	//	google.protobuf.Any generic_user_input = 5 [
+	//	  (google.api.field_info).referenced_types = {type_name: "*"},
+	//	];
+	//
+	// optional google.api.FieldInfo field_info = 291403980;
+	E_FieldInfo = &file_google_api_field_info_proto_extTypes[0]
+)
+
+var File_google_api_field_info_proto protoreflect.FileDescriptor
+
+var file_google_api_field_info_proto_rawDesc = []byte{
+	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
+	0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+	0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09,
+	0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+	0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f,
+	0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+	0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72,
+	0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64,
+	0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+	0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+	0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34,
+	0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04,
+	0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f,
+	0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65,
+	0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70,
+	0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79,
+	0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+	0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+	0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+	0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42,
+	0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+	0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74,
+	0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
+	0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+	file_google_api_field_info_proto_rawDescOnce sync.Once
+	file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc
+)
+
+func file_google_api_field_info_proto_rawDescGZIP() []byte {
+	file_google_api_field_info_proto_rawDescOnce.Do(func() {
+		file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData)
+	})
+	return file_google_api_field_info_proto_rawDescData
+}
+
+var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_field_info_proto_goTypes = []interface{}{
+	(FieldInfo_Format)(0),             // 0: google.api.FieldInfo.Format
+	(*FieldInfo)(nil),                 // 1: google.api.FieldInfo
+	(*TypeReference)(nil),             // 2: google.api.TypeReference
+	(*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
+}
+var file_google_api_field_info_proto_depIdxs = []int32{
+	0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format
+	2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference
+	3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions
+	1, // 3: google.api.field_info:type_name -> google.api.FieldInfo
+	4, // [4:4] is the sub-list for method output_type
+	4, // [4:4] is the sub-list for method input_type
+	3, // [3:4] is the sub-list for extension type_name
+	2, // [2:3] is the sub-list for extension extendee
+	0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_api_field_info_proto_init() }
+func file_google_api_field_info_proto_init() {
+	if File_google_api_field_info_proto != nil {
+		return
+	}
+	if !protoimpl.UnsafeEnabled {
+		file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*FieldInfo); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*TypeReference); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_google_api_field_info_proto_rawDesc,
+			NumEnums:      1,
+			NumMessages:   2,
+			NumExtensions: 1,
+			NumServices:   0,
+		},
+		GoTypes:           file_google_api_field_info_proto_goTypes,
+		DependencyIndexes: file_google_api_field_info_proto_depIdxs,
+		EnumInfos:         file_google_api_field_info_proto_enumTypes,
+		MessageInfos:      file_google_api_field_info_proto_msgTypes,
+		ExtensionInfos:    file_google_api_field_info_proto_extTypes,
+	}.Build()
+	File_google_api_field_info_proto = out.File
+	file_google_api_field_info_proto_rawDesc = nil
+	file_google_api_field_info_proto_goTypes = nil
+	file_google_api_field_info_proto_depIdxs = nil
+}
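As a usage sketch for the new `field_info` extension, the declared format of a field can be recovered from its descriptor. The helper below is hypothetical, assuming only the `proto` and `descriptorpb` packages already vendored here:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// fieldFormat reports the google.api.field_info format declared on a field,
// e.g. UUID4 for a request_id field annotated as in the examples above.
func fieldFormat(fd protoreflect.FieldDescriptor) annotations.FieldInfo_Format {
	opts, ok := fd.Options().(*descriptorpb.FieldOptions)
	if !ok || opts == nil || !proto.HasExtension(opts, annotations.E_FieldInfo) {
		return annotations.FieldInfo_FORMAT_UNSPECIFIED
	}
	info := proto.GetExtension(opts, annotations.E_FieldInfo).(*annotations.FieldInfo)
	return info.GetFormat()
}

func main() {
	fmt.Println(annotations.FieldInfo_UUID4) // prints: UUID4
}
```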
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
index 8a0e1c3..d30fcee 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/api/http.proto
 
 package annotations
@@ -102,7 +102,7 @@
 	return false
 }
 
-// # gRPC Transcoding
+// gRPC Transcoding
 //
 // gRPC Transcoding is a feature for mapping between a gRPC method and one or
 // more HTTP REST endpoints. It allows developers to build a single API service
@@ -143,9 +143,8 @@
 //
 // This enables an HTTP REST to gRPC mapping as below:
 //
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456`  | `GetMessage(name: "messages/123456")`
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(name: "messages/123456")`
 //
 // Any fields in the request message which are not bound by the path template
 // automatically become HTTP query parameters if there is no HTTP request body.
@@ -169,11 +168,9 @@
 //
 // This enables a HTTP JSON to RPC mapping as below:
 //
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
-// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
-// "foo"))`
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+// SubMessage(subfield: "foo"))`
 //
 // Note that fields which are mapped to URL query parameters must have a
 // primitive type or a repeated primitive type or a non-repeated message type.
@@ -203,10 +200,8 @@
 // representation of the JSON in the request body is determined by
 // protos JSON encoding:
 //
-// HTTP | gRPC
-// -----|-----
-// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-// "123456" message { text: "Hi!" })`
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
 //
 // The special name `*` can be used in the body mapping to define that
 // every field not bound by the path template should be mapped to the
@@ -228,10 +223,8 @@
 //
 // The following HTTP JSON to RPC mapping is enabled:
 //
-// HTTP | gRPC
-// -----|-----
-// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-// "123456" text: "Hi!")`
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
 //
 // Note that when using `*` in the body mapping, it is not possible to
 // have HTTP parameters, as all fields not bound by the path end in
@@ -259,13 +252,13 @@
 //
 // This enables the following two alternative HTTP JSON to RPC mappings:
 //
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
-// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
-// "123456")`
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(message_id: "123456")`
 //
-// ## Rules for HTTP mapping
+// - HTTP: `GET /v1/users/me/messages/123456`
+// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
 //
 //  1. Leaf request fields (recursive expansion nested messages in the request
 //     message) are classified into three categories:
@@ -284,7 +277,7 @@
 //     request body, all
 //     fields are passed via URL path and URL query parameters.
 //
-// ### Path template syntax
+// Path template syntax
 //
 //	Template = "/" Segments [ Verb ] ;
 //	Segments = Segment { "/" Segment } ;
@@ -323,7 +316,7 @@
 // Document](https://developers.google.com/discovery/v1/reference/apis) as
 // `{+var}`.
 //
-// ## Using gRPC API Service Configuration
+// # Using gRPC API Service Configuration
 //
 // gRPC API Service Configuration (service config) is a configuration language
 // for configuring a gRPC service to become a user-facing product. The
@@ -338,15 +331,14 @@
 // specified in the service config will override any matching transcoding
 // configuration in the proto.
 //
-// Example:
+// The following example selects a gRPC method and applies an `HttpRule` to it:
 //
 //	http:
 //	  rules:
-//	    # Selects a gRPC method and applies HttpRule to it.
 //	    - selector: example.v1.Messaging.GetMessage
 //	      get: /v1/messages/{message_id}/{sub.subfield}
 //
-// ## Special notes
+// # Special notes
 //
 // When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
 // proto to JSON conversion must follow the [proto3
@@ -671,14 +663,14 @@
 	0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
 	0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
 	0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+	0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x67, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
 	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70,
 	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
 	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
 	0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61,
 	0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61,
-	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04,
-	0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50,
+	0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
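The transcoding behavior documented above is carried by the `google.api.http` method option. A minimal sketch of reading the rule back from a method descriptor (helper name ours):

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// httpRule returns the google.api.http transcoding rule attached to an RPC
// method, or nil if the method is not annotated.
func httpRule(md protoreflect.MethodDescriptor) *annotations.HttpRule {
	opts, ok := md.Options().(*descriptorpb.MethodOptions)
	if !ok || opts == nil || !proto.HasExtension(opts, annotations.E_Http) {
		return nil
	}
	return proto.GetExtension(opts, annotations.E_Http).(*annotations.HttpRule)
}

func main() {
	fmt.Println(annotations.E_Http.TypeDescriptor().FullName()) // google.api.http
}
```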
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
index bbcc12d..175974a 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/api/resource.proto
 
 package annotations
@@ -253,8 +253,13 @@
 	History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
 	// The plural name used in the resource name and permission names, such as
 	// 'projects' for the resource name of 'projects/{project}' and the permission
-	// name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
-	// concept of the `plural` field in k8s CRD spec
+	// name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
+	// to this is for Nested Collections that have stuttering names, as defined
+	// in [AIP-122](https://google.aip.dev/122#nested-collections), where the
+	// collection ID in the resource name pattern does not necessarily directly
+	// match the `plural` value.
+	//
+	// It is the same concept as the `plural` field in k8s CRD spec
 	// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
 	//
 	// Note: The plural form is required even for singleton resources. See
@@ -551,15 +556,14 @@
 	0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b,
 	0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65,
 	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f,
+	0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6b, 0x0a, 0x0e, 0x63, 0x6f,
 	0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65,
 	0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67,
 	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
 	0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
 	0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
 	0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x33,
+	0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
index 9a9ae04..b8c4aa7 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/api/routing.proto
 
 package annotations
@@ -69,7 +69,7 @@
 // The routing header consists of one or multiple key-value pairs. Every key
 // and value must be percent-encoded, and joined together in the format of
 // `key1=value1&key2=value2`.
-// In the examples below I am skipping the percent-encoding for readablity.
+// The examples below skip the percent-encoding for readability.
 //
 // # Example 1
 //
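The percent-encoded `key1=value1&key2=value2` format described above can be produced with the standard library. A small sketch; the parameter names and values are made up for illustration, and `url.Values.Encode` sorts keys alphabetically:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Build a routing header value in the key1=value1&key2=value2 format,
	// with each key and value percent-encoded.
	params := url.Values{}
	params.Set("table_name", "projects/proj_foo/instances/instance_bar")
	params.Set("routing_id", "prof_qux")
	fmt.Println(params.Encode())
	// Output: routing_id=prof_qux&table_name=projects%2Fproj_foo%2Finstances%2Finstance_bar
}
```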
diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
index 4549486..a69c1d4 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/api/launch_stage.proto
 
 package api
diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go
deleted file mode 100644
index 1d3f1b5..0000000
--- a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file, and the {{.RootMod}} import, won't actually become part of
-// the resultant binary.
-//go:build modhack
-// +build modhack
-
-package api
-
-// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "google.golang.org/genproto/internal"
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index a6b5081..06a3f71 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2025 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
 // Code generated by protoc-gen-go. DO NOT EDIT.
 // versions:
 // 	protoc-gen-go v1.26.0
-// 	protoc        v3.21.9
+// 	protoc        v4.24.4
 // source: google/rpc/status.proto
 
 package status
diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/google.golang.org/genproto/internal/doc.go
deleted file mode 100644
index 90e89b4..0000000
--- a/vendor/google.golang.org/genproto/internal/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//      http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file makes internal an importable go package
-// for use with backreferences from submodules.
-package internal
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
deleted file mode 100644
index 87f40b8..0000000
--- a/vendor/google.golang.org/grpc/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-language: go
-
-matrix:
-  include:
-  - go: 1.13.x
-    env: VET=1 GO111MODULE=on
-  - go: 1.13.x
-    env: RACE=1 GO111MODULE=on
-  - go: 1.13.x
-    env: RUN386=1
-  - go: 1.13.x
-    env: GRPC_GO_RETRY=on
-  - go: 1.13.x
-    env: TESTEXAMPLES=1
-  - go: 1.12.x
-    env: GO111MODULE=on
-  - go: 1.11.x
-    env: GO111MODULE=on
-  - go: 1.9.x
-    env: GAE=1
-
-go_import_path: google.golang.org/grpc
-
-before_install:
-  - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
-  - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
-  - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
-
-install:
-  - try3() { eval "$*" || eval "$*" || eval "$*"; }
-  - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
-  - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
-  - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
-
-script:
-  - set -e
-  - if [[ -n "${TESTEXAMPLES}" ]]; then examples/examples_test.sh; exit 0; fi
-  - if [[ -n "${VET}" ]]; then ./vet.sh; fi
-  - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
-  - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
-  - make test
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 4f1567e..2079de7 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -1,62 +1,159 @@
 # How to contribute
 
-We definitely welcome your patches and contributions to gRPC! Please read the gRPC
-organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
-and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+We welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance
+rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before
+proceeding.
 
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
+If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
 
 ## Legal requirements
 
 In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+[Contributor License
+Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create
+your first PR, a link will be added as a comment that contains the steps needed
+to complete this process.
+
+## Getting Started
+
+A great way to start is by searching through our open issues. [Unassigned issues
+labeled as "help
+wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee)
+are especially nice for first-time contributors, as they should be well-defined
+problems that already have agreed-upon solutions.
+
+## Code Style
+
+We follow [Google's published Go style
+guide](https://google.github.io/styleguide/go/). Note that there are three
+primary documents that make up this style guide; please follow them as closely
+as possible. If a reviewer recommends something that contradicts those
+guidelines, there may be valid reasons to do so, but it should be rare.
 
 ## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly.
+
+Please read the following carefully to ensure your contributions can be merged
+smoothly and quickly.
+
+### PR Contents
 
 - Create **small PRs** that are narrowly focused on **addressing a single
-  concern**. We often times receive PRs that are trying to fix several things at
-  a time, but only one fix is considered acceptable, nothing gets merged and
-  both author's & review's time is wasted. Create more PRs to address different
-  concerns and everyone will be happy.
+  concern**. We often receive PRs that attempt to fix several things at the same
+  time, and if one part of the PR has a problem, that will hold up the entire
+  PR.
 
-- The grpc package should only depend on standard Go packages and a small number
-  of exceptions. If your contribution introduces new dependencies which are NOT
-  in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
-  discussion with gRPC-Go authors and consultants.
+- If your change does not address an **open issue** with an **agreed
+  resolution**, consider opening an issue and discussing it first. If you are
+  suggesting a behavioral or API change, consider starting with a [gRFC
+  proposal](https://github.com/grpc/proposal). Many new features that are not
+  bug fixes will require cross-language agreement.
 
-- For speculative changes, consider opening an issue and discussing it first. If
-  you are suggesting a behavioral or API change, consider starting with a [gRFC
-  proposal](https://github.com/grpc/proposal).
+- If you want to fix **formatting or style**, consider whether your changes are
+  an obvious improvement or might be considered a personal preference. If a
+  style change is based on preference, it likely will not be accepted. If it
+  corrects widely agreed-upon anti-patterns, then please do create a PR and
+  explain the benefits of the change.
 
-- Provide a good **PR description** as a record of **what** change is being made
-  and **why** it was made. Link to a github issue if it exists.
-
-- Don't fix code style and formatting unless you are already changing that line
-  to address an issue. PRs with irrelevant changes won't be merged. If you do
-  want to fix formatting or style, do that in a separate PR.
-
-- Unless your PR is trivial, you should expect there will be reviewer comments
-  that you'll need to address before merging. We expect you to be reasonably
-  responsive to those comments, otherwise the PR will be closed after 2-3 weeks
-  of inactivity.
-
-- Maintain **clean commit history** and use **meaningful commit messages**. PRs
-  with messy commit history are difficult to review and won't be merged. Use
-  `rebase -i upstream/master` to curate your commit history and/or to bring in
-  latest changes from master (but avoid rebasing in the middle of a code
-  review).
-
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we
-  can't really merge your change).
+- For correcting **misspellings**, please be aware that we use some terms that
+  are sometimes flagged by spell checkers. As an example, "if and only if" is
+  often written as "iff". Please do not make spelling correction changes unless
+  you are certain they are misspellings.
 
 - **All tests need to be passing** before your change can be merged. We
-  recommend you **run tests locally** before creating your PR to catch breakages
-  early on.
-  - `make all` to test everything, OR
-  - `make vet` to catch vet errors
-  - `make test` to run the tests
-  - `make testrace` to run tests in race mode
-  - optional `make testappengine` to run tests with appengine
+  recommend you run tests locally before creating your PR to catch breakages
+  early on:
 
-- Exceptions to the rules can be made if there's a compelling reason for doing so.
+  - `./scripts/vet.sh` to catch vet errors.
+  - `go test -cpu 1,4 -timeout 7m ./...` to run the tests.
+  - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode.
+
+  Note that we have a multi-module repo, so `go test` commands may need to be
+  run from the root of each module in order to run all tests.
+
+  *Alternatively*, you may find it easier to push your changes to your fork on
+  GitHub, which will trigger a GitHub Actions run that you can use to verify
+  everything is passing.
+
+- Note that there are two GitHub Actions checks that need not be green:
+
+  1. We test the freshness of the generated proto code we maintain via the
+     `vet-proto` check. If the source proto files are updated, but our repo is
+     not updated, an optional checker will fail. This will be fixed by our team
+     in a separate PR and will not prevent the merge of your PR.
+
+  2. We run a checker that will fail if there is any change in dependencies of
+     an exported package via the `dependencies` check. If new dependencies are
+     added that are not appropriate, we may not accept your PR (see below).
+
+- If you are adding a **new file**, make sure it has the **copyright message**
+  template at the top as a comment. You can copy the message from an existing
+  file and update the year.
+
+- The grpc package should only depend on standard Go packages and a small number
+  of exceptions. **If your contribution introduces new dependencies**, you will
+  need a discussion with gRPC-Go maintainers.
+
+### PR Descriptions
+
+- **PR titles** should start with the name of the component being addressed, or
+  the type of change. Examples: transport, client, server, round_robin, xds,
+  cleanup, deps.
+
+- Read and follow the **guidelines for PR titles and descriptions** here:
+  https://google.github.io/eng-practices/review/developer/cl-descriptions.html
+
+  *particularly* the sections "First Line" and "Body is Informative".
+
+  Note: your PR description will be used as the git commit message in a
+  squash-and-merge if your PR is approved. We may make changes to this as
+  necessary.
+
+- **Does this PR relate to an open issue?** On the first line, please use the
+  tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
+  use `Updates #<issue>` if the PR is related to an open issue, but does not fix
+  it. Consider filing an issue if one does not already exist.
+
+- PR descriptions *must* conclude with **release notes** as follows:
+
+  ```
+  RELEASE NOTES:
+  * <component>: <summary>
+  ```
+
+  This need not match the PR title.
+
+  The summary must:
+
+  * be something that gRPC users will understand.
+
+  * clearly explain the feature being added, the issue being fixed, or the
+    behavior being changed, etc. If fixing a bug, be clear about how the bug
+    can be triggered by an end-user.
+
+  * begin with a capital letter and use complete sentences.
+
+  * be as short as possible to describe the change being made.
+
+  If a PR is *not* end-user visible (e.g., a cleanup, testing change, or
+  GitHub-related change), use `RELEASE NOTES: n/a`.
+
+### PR Process
+
+- Please **self-review** your code changes before sending your PR. This will
+  prevent simple, obvious errors from causing delays.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**.
+  PRs with messy commit histories are difficult to review and won't be merged.
+  Before sending your PR, ensure your changes are based on top of the latest
+  `upstream/master` commits, and avoid rebasing in the middle of a code review.
+  You should **never use `git push -f`** unless absolutely necessary during a
+  review, as it can interfere with GitHub's tracking of comments.
+
+- Unless your PR is trivial, you should **expect reviewer comments** that you
+  will need to address before merging. We'll label the PR as `Status: Requires
+  Reporter Clarification` if we expect you to respond to these comments in a
+  timely manner. If the PR remains inactive for 6 days, it will be marked as
+  `stale`, and we will automatically close it after 7 days if we don't hear back
+  from you. Please feel free to ping issues or bugs if you do not get a response
+  within a week.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 093c82b..df35bb9 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -8,20 +8,29 @@
 for general contribution guidelines.
 
 ## Maintainers (in alphabetical order)
-- [canguler](https://github.com/canguler), Google LLC
-- [cesarghali](https://github.com/cesarghali), Google LLC
+
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
+- [atollena](https://github.com/atollena), Datadog, Inc.
 - [dfawley](https://github.com/dfawley), Google LLC
 - [easwars](https://github.com/easwars), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
 
 ## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [aranjans](https://github.com/aranjans)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [erm-g](https://github.com/erm-g)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [purnesh42h](https://github.com/purnesh42h)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
+- [zasweq](https://github.com/zasweq)
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index db982aa..be38384 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -1,13 +1,13 @@
 all: vet test testrace
 
-build: deps
+build:
 	go build google.golang.org/grpc/...
 
 clean:
 	go clean -i google.golang.org/grpc/...
 
 deps:
-	go get -d -v google.golang.org/grpc/...
+	GO111MODULE=on go get -d -v google.golang.org/grpc/...
 
 proto:
 	@ if ! which protoc > /dev/null; then \
@@ -16,32 +16,24 @@
 	fi
 	go generate google.golang.org/grpc/...
 
-test: testdeps
+test:
 	go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
 
-testappengine: testappenginedeps
-	goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
+testsubmodule:
+	cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+	cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
 
-testappenginedeps:
-	goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
-
-testdeps:
-	go get -d -v -t google.golang.org/grpc/...
-
-testrace: testdeps
+testrace:
 	go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
 
-updatedeps:
-	go get -d -v -u -f google.golang.org/grpc/...
-
-updatetestdeps:
-	go get -d -v -t -u -f google.golang.org/grpc/...
+testdeps:
+	GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
 
 vet: vetdeps
-	./vet.sh
+	./scripts/vet.sh
 
 vetdeps:
-	./vet.sh -install
+	./scripts/vet.sh -install
 
 .PHONY: \
 	all \
@@ -50,11 +42,8 @@
 	deps \
 	proto \
 	test \
-	testappengine \
-	testappenginedeps \
-	testdeps \
+	testsubmodule \
 	testrace \
-	updatedeps \
-	updatetestdeps \
+	testdeps \
 	vet \
 	vetdeps
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 0000000..5301977
--- /dev/null
+++ b/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index afbc43d..f9a88d5 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,64 +1,47 @@
 # gRPC-Go
 
-[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go)
-[![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+[![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API]
 [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go)
 
-The Go implementation of [gRPC](https://grpc.io/): A high performance, open
-source, general RPC framework that puts mobile and HTTP/2 first. For more
-information see the [gRPC Quick Start:
-Go](https://grpc.io/docs/quickstart/go.html) guide.
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
 
-Installation
-------------
+## Prerequisites
 
-To install this package, you need to install Go and setup your Go workspace on
-your computer. The simplest way to install the library is to run:
+- **[Go][]**: any one of the **two latest major** [releases][go-releases].
 
-```
-$ go get -u google.golang.org/grpc
+## Installation
+
+Simply add the following import to your code, and then `go [build|run|test]`
+will automatically fetch the necessary dependencies:
+
+```go
+import "google.golang.org/grpc"
 ```
 
-With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
-your source code and `go [build|run|test]` will automatically download the
-necessary dependencies ([Go modules
-ref](https://github.com/golang/go/wiki/Modules)).
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
 
-If you are trying to access grpc-go from within China, please see the
-[FAQ](#FAQ) below.
+## Learn more
 
-Prerequisites
--------------
-gRPC-Go requires Go 1.9 or later.
+- [Go gRPC docs][], which include a [quick start][] and [API
+  reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
+- [Contribution guidelines](CONTRIBUTING.md)
 
-Documentation
--------------
-- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
-  descriptions.
-- Documentation on specific topics can be found in the [Documentation
-  directory](Documentation/).
-- Examples can be found in the [examples directory](examples/).
+## FAQ
 
-Performance
------------
-Performance benchmark data for grpc-go and other languages is maintained in
-[this
-dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+### I/O Timeout Errors
 
-Status
-------
-General Availability [Google Cloud Platform Launch
-Stages](https://cloud.google.com/terms/launch-stages).
-
-FAQ
----
-
-#### I/O Timeout Errors
-
-The `golang.org` domain may be blocked from some countries.  `go get` usually
+The `golang.org` domain may be blocked from some countries. `go get` usually
 produces an error like the following when this happens:
 
-```
+```console
 $ go get -u google.golang.org/grpc
 package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
 ```
@@ -67,19 +50,10 @@
 
 - Set up a VPN and access google.golang.org through that.
 
-- Without Go module support: `git clone` the repo manually:
-
-  ```
-  git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
-  ```
-
-  You will need to do the same for all of grpc's dependencies in `golang.org`,
-  e.g. `golang.org/x/net`.
-
 - With Go module support: it is possible to use the `replace` feature of `go
   mod` to create aliases for golang.org packages.  In your project's directory:
 
-  ```
+  ```sh
   go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
   go mod tidy
   go mod vendor
@@ -87,35 +61,48 @@
   ```
 
   Again, this will need to be done for all transitive dependencies hosted on
-  golang.org as well.  Please refer to [this
-  issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
-  this concern.
+  golang.org as well. For details, refer to [golang/go issue
+  #28652](https://github.com/golang/go/issues/28652).
 
-#### Compiling error, undefined: grpc.SupportPackageIsVersion
+### Compiling error, undefined: grpc.SupportPackageIsVersion
 
-Please update proto package, gRPC package and rebuild the proto files:
- - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- - `go get -u google.golang.org/grpc`
- - `protoc --go_out=plugins=grpc:. *.proto`
+Please update to the latest version of gRPC-Go using
+`go get google.golang.org/grpc`.
 
-#### How to turn on logging
+### How to turn on logging
 
-The default logger is controlled by the environment variables. Turn everything
-on by setting:
+The default logger is controlled by environment variables. Turn everything on
+like this:
 
-```
-GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
 ```
 
-#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
 
 This error means the connection the RPC is using was closed, and there are many
 possible reasons, including:
  1. mis-configured transport credentials, connection failed on handshaking
  1. bytes disrupted, possibly by a proxy in between
  1. server shutdown
+ 1. keepalive parameters caused connection shutdown, for example if you have
+    configured your server to terminate connections regularly to [trigger DNS
+    lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
+    If this is the case, you may want to increase your
+    [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters)
+    to allow longer RPC calls to finish (a configuration sketch follows below).
 
 It can be tricky to debug this because the error happens on the client side but
 the root cause of the connection being closed is on the server side. Turn on
 logging on __both client and server__, and see if there are any transport
 errors.
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
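
The keepalive FAQ entry above points at `MaxConnectionAgeGrace` as the knob to
turn when regular connection recycling cuts off long RPCs. A minimal sketch of
a server configured that way, using the standard `grpc.KeepaliveParams` server
option (the address and durations are illustrative, not recommendations):

```go
package main

import (
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// Recycle connections periodically, e.g. to force clients to
		// re-resolve DNS (the scenario from the FAQ entry above).
		MaxConnectionAge: 5 * time.Minute,
		// Grace period so in-flight RPCs can finish before the
		// connection is forcibly closed.
		MaxConnectionAgeGrace: time.Minute,
	}))

	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	log.Fatal(srv.Serve(lis))
}
```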
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 0000000..abab279
--- /dev/null
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 0000000..52d530d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package attributes
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs.  Keys must be hashable, and users should define their own
+// types for keys.  Values should not be modified after they are added to an
+// Attributes or if they were received from one.  If values implement 'Equal(o
+// any) bool', it will be called by (*Attributes).Equal to determine whether
+// two values with the same key should be considered equal.
+type Attributes struct {
+	m map[any]any
+}
+
+// New returns a new Attributes containing the key/value pair.
+func New(key, value any) *Attributes {
+	return &Attributes{m: map[any]any{key: value}}
+}
+
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair.  If the same key appears multiple times, the
+// last value overwrites all previous values for that key.  To remove an
+// existing key, use a nil value.  value should not be modified later.
+func (a *Attributes) WithValue(key, value any) *Attributes {
+	if a == nil {
+		return New(key, value)
+	}
+	n := &Attributes{m: make(map[any]any, len(a.m)+1)}
+	for k, v := range a.m {
+		n.m[k] = v
+	}
+	n.m[key] = value
+	return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key.  The returned value should not be modified.
+func (a *Attributes) Value(key any) any {
+	if a == nil {
+		return nil
+	}
+	return a.m[key]
+}
+
+// Equal returns whether a and o are equivalent.  If 'Equal(o any) bool' is
+// implemented for a value in the attributes, it is called to determine if the
+// value matches the one stored in the other attributes.  If Equal is not
+// implemented, standard equality is used to determine if the two values are
+// equal. Note that some types (e.g. maps) aren't comparable by default, so
+// they must be wrapped in a struct, or in an alias type, with Equal defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+	if a == nil && o == nil {
+		return true
+	}
+	if a == nil || o == nil {
+		return false
+	}
+	if len(a.m) != len(o.m) {
+		return false
+	}
+	for k, v := range a.m {
+		ov, ok := o.m[k]
+		if !ok {
+			// o missing element of a
+			return false
+		}
+		if eq, ok := v.(interface{ Equal(o any) bool }); ok {
+			if !eq.Equal(ov) {
+				return false
+			}
+		} else if v != ov {
+			// Fall back to a standard equality check if Equal is unimplemented.
+			return false
+		}
+	}
+	return true
+}
+
+// String prints the attribute map. If any keys or values throughout the map
+// implement fmt.Stringer, that method is called and its result appended.
+func (a *Attributes) String() string {
+	var sb strings.Builder
+	sb.WriteString("{")
+	first := true
+	for k, v := range a.m {
+		if !first {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+		first = false
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
+
+func str(x any) (s string) {
+	if v, ok := x.(fmt.Stringer); ok {
+		return fmt.Sprint(v)
+	} else if v, ok := x.(string); ok {
+		return v
+	}
+	return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation, and
+// this method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+	return []byte(a.String()), nil
+}
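
The attributes package added above is small, but its contracts (hashable keys,
optional `Equal` on values, immutability via `WithValue`) are easy to get
wrong. A short usage sketch; the key and value types here are illustrative and
not part of the patch:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// regionKey is a private key type, per the package docs' advice that
// users define their own types for keys.
type regionKey struct{}

// region implements 'Equal(o any) bool' so that (*Attributes).Equal
// can compare values stored under the same key.
type region struct{ name string }

func (r region) Equal(o any) bool {
	other, ok := o.(region)
	return ok && other.name == r.name
}

func main() {
	a := attributes.New(regionKey{}, region{name: "us-east"})
	b := a.WithValue(regionKey{}, region{name: "us-west"}) // a is unchanged

	fmt.Println(a.Value(regionKey{})) // {us-east}
	fmt.Println(a.Equal(b))           // false: same key, different values
}
```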
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index ff7c3ee..29475e3 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -48,7 +48,10 @@
 // here for more details:
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ConnectParams struct {
 	// Backoff specifies the configuration options for connection backoff.
 	Backoff backoff.Config
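
ConnectParams from the hunk above is consumed on the dial path via
`grpc.WithConnectParams`. A minimal sketch that starts from the package's
`DefaultConfig` and raises only the retry cap (the target and durations are
illustrative):

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Start from the documented defaults and tweak only the cap.
	cfg := backoff.DefaultConfig
	cfg.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           cfg,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```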
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b..d7b40b7 100644
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@
 	MaxDelay time.Duration
 }
 
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
 // at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
 //
 // This should be useful for callers who want to configure backoff with
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
deleted file mode 100644
index a8eb0f4..0000000
--- a/vendor/google.golang.org/grpc/balancer.go
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"net"
-	"sync"
-
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/naming"
-	"google.golang.org/grpc/status"
-)
-
-// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
-type Address struct {
-	// Addr is the server address on which a connection will be established.
-	Addr string
-	// Metadata is the information associated with Addr, which may be used
-	// to make load balancing decision.
-	Metadata interface{}
-}
-
-// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer.  May be removed in a future 1.x release.
-type BalancerConfig struct {
-	// DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementations
-	// can ignore this if it does not need to talk to another party securely.
-	DialCreds credentials.TransportCredentials
-	// Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementations
-	// can ignore this if it doesn't need to talk to remote balancer.
-	Dialer func(context.Context, string) (net.Conn, error)
-}
-
-// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer.  May be removed in a future 1.x release.
-type BalancerGetOptions struct {
-	// BlockingWait specifies whether Get should block when there is no
-	// connected address.
-	BlockingWait bool
-}
-
-// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer.  May be removed in a future 1.x release.
-type Balancer interface {
-	// Start does the initialization work to bootstrap a Balancer. For example,
-	// this function may start the name resolution and watch the updates. It will
-	// be called when dialing.
-	Start(target string, config BalancerConfig) error
-	// Up informs the Balancer that gRPC has a connection to the server at
-	// addr. It returns down which is called once the connection to addr gets
-	// lost or closed.
-	// TODO: It is not clear how to construct and take advantage of the meaningful error
-	// parameter for down. Need realistic demands to guide.
-	Up(addr Address) (down func(error))
-	// Get gets the address of a server for the RPC corresponding to ctx.
-	// i) If it returns a connected address, gRPC internals issues the RPC on the
-	// connection to this address;
-	// ii) If it returns an address on which the connection is under construction
-	// (initiated by Notify(...)) but not connected, gRPC internals
-	//  * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
-	//  Shutdown state;
-	//  or
-	//  * issues RPC on the connection otherwise.
-	// iii) If it returns an address on which the connection does not exist, gRPC
-	// internals treats it as an error and will fail the corresponding RPC.
-	//
-	// Therefore, the following is the recommended rule when writing a custom Balancer.
-	// If opts.BlockingWait is true, it should return a connected address or
-	// block if there is no connected address. It should respect the timeout or
-	// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
-	// RPCs), it should return an address it has notified via Notify(...) immediately
-	// instead of blocking.
-	//
-	// The function returns put which is called once the rpc has completed or failed.
-	// put can collect and report RPC stats to a remote load balancer.
-	//
-	// This function should only return the errors Balancer cannot recover by itself.
-	// gRPC internals will fail the RPC if an error is returned.
-	Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
-	// Notify returns a channel that is used by gRPC internals to watch the addresses
-	// gRPC needs to connect. The addresses might be from a name resolver or remote
-	// load balancer. gRPC internals will compare it with the existing connected
-	// addresses. If the address Balancer notified is not in the existing connected
-	// addresses, gRPC starts to connect the address. If an address in the existing
-	// connected addresses is not in the notification list, the corresponding connection
-	// is shutdown gracefully. Otherwise, there are no operations to take. Note that
-	// the Address slice must be the full list of the Addresses which should be connected.
-	// It is NOT delta.
-	Notify() <-chan []Address
-	// Close shuts down the balancer.
-	Close() error
-}
-
-// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
-// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
-func RoundRobin(r naming.Resolver) Balancer {
-	return &roundRobin{r: r}
-}
-
-type addrInfo struct {
-	addr      Address
-	connected bool
-}
-
-type roundRobin struct {
-	r      naming.Resolver
-	w      naming.Watcher
-	addrs  []*addrInfo // all the addresses the client should potentially connect
-	mu     sync.Mutex
-	addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
-	next   int            // index of the next address to return for Get()
-	waitCh chan struct{}  // the channel to block when there is no connected address available
-	done   bool           // The Balancer is closed.
-}
-
-func (rr *roundRobin) watchAddrUpdates() error {
-	updates, err := rr.w.Next()
-	if err != nil {
-		grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
-		return err
-	}
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	for _, update := range updates {
-		addr := Address{
-			Addr:     update.Addr,
-			Metadata: update.Metadata,
-		}
-		switch update.Op {
-		case naming.Add:
-			var exist bool
-			for _, v := range rr.addrs {
-				if addr == v.addr {
-					exist = true
-					grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
-					break
-				}
-			}
-			if exist {
-				continue
-			}
-			rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
-		case naming.Delete:
-			for i, v := range rr.addrs {
-				if addr == v.addr {
-					copy(rr.addrs[i:], rr.addrs[i+1:])
-					rr.addrs = rr.addrs[:len(rr.addrs)-1]
-					break
-				}
-			}
-		default:
-			grpclog.Errorln("Unknown update.Op ", update.Op)
-		}
-	}
-	// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
-	open := make([]Address, len(rr.addrs))
-	for i, v := range rr.addrs {
-		open[i] = v.addr
-	}
-	if rr.done {
-		return ErrClientConnClosing
-	}
-	select {
-	case <-rr.addrCh:
-	default:
-	}
-	rr.addrCh <- open
-	return nil
-}
-
-func (rr *roundRobin) Start(target string, config BalancerConfig) error {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	if rr.done {
-		return ErrClientConnClosing
-	}
-	if rr.r == nil {
-		// If there is no name resolver installed, it is not needed to
-		// do name resolution. In this case, target is added into rr.addrs
-		// as the only address available and rr.addrCh stays nil.
-		rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
-		return nil
-	}
-	w, err := rr.r.Resolve(target)
-	if err != nil {
-		return err
-	}
-	rr.w = w
-	rr.addrCh = make(chan []Address, 1)
-	go func() {
-		for {
-			if err := rr.watchAddrUpdates(); err != nil {
-				return
-			}
-		}
-	}()
-	return nil
-}
-
-// Up sets the connected state of addr and sends notification if there are pending
-// Get() calls.
-func (rr *roundRobin) Up(addr Address) func(error) {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	var cnt int
-	for _, a := range rr.addrs {
-		if a.addr == addr {
-			if a.connected {
-				return nil
-			}
-			a.connected = true
-		}
-		if a.connected {
-			cnt++
-		}
-	}
-	// addr is only one which is connected. Notify the Get() callers who are blocking.
-	if cnt == 1 && rr.waitCh != nil {
-		close(rr.waitCh)
-		rr.waitCh = nil
-	}
-	return func(err error) {
-		rr.down(addr, err)
-	}
-}
-
-// down unsets the connected state of addr.
-func (rr *roundRobin) down(addr Address, err error) {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	for _, a := range rr.addrs {
-		if addr == a.addr {
-			a.connected = false
-			break
-		}
-	}
-}
-
-// Get returns the next addr in the rotation.
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-	var ch chan struct{}
-	rr.mu.Lock()
-	if rr.done {
-		rr.mu.Unlock()
-		err = ErrClientConnClosing
-		return
-	}
-
-	if len(rr.addrs) > 0 {
-		if rr.next >= len(rr.addrs) {
-			rr.next = 0
-		}
-		next := rr.next
-		for {
-			a := rr.addrs[next]
-			next = (next + 1) % len(rr.addrs)
-			if a.connected {
-				addr = a.addr
-				rr.next = next
-				rr.mu.Unlock()
-				return
-			}
-			if next == rr.next {
-				// Has iterated all the possible address but none is connected.
-				break
-			}
-		}
-	}
-	if !opts.BlockingWait {
-		if len(rr.addrs) == 0 {
-			rr.mu.Unlock()
-			err = status.Errorf(codes.Unavailable, "there is no address available")
-			return
-		}
-		// Returns the next addr on rr.addrs for failfast RPCs.
-		addr = rr.addrs[rr.next].addr
-		rr.next++
-		rr.mu.Unlock()
-		return
-	}
-	// Wait on rr.waitCh for non-failfast RPCs.
-	if rr.waitCh == nil {
-		ch = make(chan struct{})
-		rr.waitCh = ch
-	} else {
-		ch = rr.waitCh
-	}
-	rr.mu.Unlock()
-	for {
-		select {
-		case <-ctx.Done():
-			err = ctx.Err()
-			return
-		case <-ch:
-			rr.mu.Lock()
-			if rr.done {
-				rr.mu.Unlock()
-				err = ErrClientConnClosing
-				return
-			}
-
-			if len(rr.addrs) > 0 {
-				if rr.next >= len(rr.addrs) {
-					rr.next = 0
-				}
-				next := rr.next
-				for {
-					a := rr.addrs[next]
-					next = (next + 1) % len(rr.addrs)
-					if a.connected {
-						addr = a.addr
-						rr.next = next
-						rr.mu.Unlock()
-						return
-					}
-					if next == rr.next {
-						// Has iterated all the possible address but none is connected.
-						break
-					}
-				}
-			}
-			// The newly added addr got removed by Down() again.
-			if rr.waitCh == nil {
-				ch = make(chan struct{})
-				rr.waitCh = ch
-			} else {
-				ch = rr.waitCh
-			}
-			rr.mu.Unlock()
-		}
-	}
-}
-
-func (rr *roundRobin) Notify() <-chan []Address {
-	return rr.addrCh
-}
-
-func (rr *roundRobin) Close() error {
-	rr.mu.Lock()
-	defer rr.mu.Unlock()
-	if rr.done {
-		return errBalancerClosed
-	}
-	rr.done = true
-	if rr.w != nil {
-		rr.w.Close()
-	}
-	if rr.waitCh != nil {
-		close(rr.waitCh)
-		rr.waitCh = nil
-	}
-	if rr.addrCh != nil {
-		close(rr.addrCh)
-	}
-	return nil
-}
-
-// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
-// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
-// returns the only address Up by resetTransport().
-type pickFirst struct {
-	*roundRobin
-}
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 917c242..b126401 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -27,8 +27,11 @@
 	"net"
 	"strings"
 
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/connectivity"
 	"google.golang.org/grpc/credentials"
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/resolver"
@@ -38,6 +41,8 @@
 var (
 	// m is a map from name to balancer builder.
 	m = make(map[string]Builder)
+
+	logger = grpclog.Component("balancer")
 )
 
 // Register registers the balancer builder to the balancer map. b.Name
@@ -50,7 +55,14 @@
 // an init() function), and is not thread-safe. If multiple Balancers are
 // registered with the same name, the one registered last will take effect.
 func Register(b Builder) {
-	m[strings.ToLower(b.Name())] = b
+	name := strings.ToLower(b.Name())
+	if name != b.Name() {
+		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
+		// is released to switch to case sensitive balancer registry. Also,
+		// remove this warning and update the docstrings for Register and Get.
+		logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+	}
+	m[name] = b
 }
 
 // unregisterForTesting deletes the balancer with the given name from the
@@ -63,58 +75,52 @@
 
 func init() {
 	internal.BalancerUnregister = unregisterForTesting
+	internal.ConnectedAddress = connectedAddress
+	internal.SetConnectedAddress = setConnectedAddress
 }
 
 // Get returns the balancer builder registered with the given name.
 // Note that the comparison is done in a case-insensitive fashion.
 // If no builder is registered with the name, nil will be returned.
 func Get(name string) Builder {
+	if strings.ToLower(name) != name {
+		// TODO: Skip the use of strings.ToLower() to index the map after v1.59
+		// is released to switch to case sensitive balancer registry. Also,
+		// remove this warning and update the docstrings for Register and Get.
+		logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+	}
 	if b, ok := m[strings.ToLower(name)]; ok {
 		return b
 	}
 	return nil
 }
 
-// SubConn represents a gRPC sub connection.
-// Each sub connection contains a list of addresses. gRPC will
-// try to connect to them (in sequence), and stop trying the
-// remainder once one connection is successful.
-//
-// The reconnect backoff will be applied on the list, not a single address.
-// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
-//
-// All SubConns start in IDLE, and will not try to connect. To trigger
-// the connecting, Balancers must call Connect.
-// When the connection encounters an error, it will reconnect immediately.
-// When the connection becomes IDLE, it will not reconnect unless Connect is
-// called.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-type SubConn interface {
-	// UpdateAddresses updates the addresses used in this SubConn.
-	// gRPC checks if currently-connected address is still in the new list.
-	// If it's in the list, the connection will be kept.
-	// If it's not in the list, the connection will gracefully closed, and
-	// a new connection will be created.
-	//
-	// This will trigger a state transition for the SubConn.
-	UpdateAddresses([]resolver.Address)
-	// Connect starts the connecting for this SubConn.
-	Connect()
-}
-
 // NewSubConnOptions contains options to create new SubConn.
 type NewSubConnOptions struct {
 	// CredsBundle is the credentials bundle that will be used in the created
 	// SubConn. If it's nil, the original creds from grpc DialOptions will be
 	// used.
+	//
+	// Deprecated: Use the Attributes field in resolver.Address to pass
+	// arbitrary data to the credential handshaker.
 	CredsBundle credentials.Bundle
 	// HealthCheckEnabled indicates whether health check service should be
 	// enabled on this SubConn
 	HealthCheckEnabled bool
+	// StateListener is called when the state of the subconn changes.  If nil,
+	// Balancer.UpdateSubConnState will be called instead.  Will never be
+	// invoked until after Connect() is called on the SubConn created with
+	// these options.
+	StateListener func(SubConnState)
+}
+
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+	// State contains the connectivity state of the balancer, which is used to
+	// determine the state of the ClientConn.
+	ConnectivityState connectivity.State
+	// Picker is used to choose connections (SubConns) for RPCs.
+	Picker Picker
 }
 
 // ClientConn represents a gRPC ClientConn.
@@ -123,48 +129,92 @@
 // brand new implementation of this interface. For the situations like
 // testing, the new implementation should embed this interface. This allows
 // gRPC to add new methods to this interface.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies.  Users should not need their own complete
+// implementation of this interface -- they should always delegate to a
+// ClientConn passed to Builder.Build() by embedding it in their
+// implementations. An embedded ClientConn must never be nil, or runtime panics
+// will occur.
 type ClientConn interface {
 	// NewSubConn is called by balancer to create a new SubConn.
 	// It doesn't block and wait for the connections to be established.
 	// Behaviors of the SubConn can be controlled by options.
+	//
+	// Deprecated: please be aware that in a future version, SubConns will only
+	// support one address per SubConn.
 	NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
 	// RemoveSubConn removes the SubConn from ClientConn.
 	// The SubConn will be shutdown.
-	RemoveSubConn(SubConn)
-
-	// UpdateBalancerState is called by balancer to notify gRPC that some internal
-	// state in balancer has changed.
 	//
-	// gRPC will update the connectivity state of the ClientConn, and will call pick
-	// on the new picker to pick new SubConn.
-	UpdateBalancerState(s connectivity.State, p Picker)
+	// Deprecated: use SubConn.Shutdown instead.
+	RemoveSubConn(SubConn)
+	// UpdateAddresses updates the addresses used in the passed in SubConn.
+	// gRPC checks if the currently connected address is still in the new list.
+	// If so, the connection will be kept. Else, the connection will be
+	// gracefully closed, and a new connection will be created.
+	//
+	// This may trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed.  Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses(SubConn, []resolver.Address)
+
+	// UpdateState notifies gRPC that the balancer's internal state has
+	// changed.
+	//
+	// gRPC will update the connectivity state of the ClientConn, and will call
+	// Pick on the new Picker to pick new SubConns.
+	UpdateState(State)
 
 	// ResolveNow is called by balancer to notify gRPC to do a name resolving.
-	ResolveNow(resolver.ResolveNowOption)
+	ResolveNow(resolver.ResolveNowOptions)
 
 	// Target returns the dial target for this ClientConn.
 	//
 	// Deprecated: Use the Target field in the BuildOptions instead.
 	Target() string
+
+	// MetricsRecorder provides the metrics recorder that balancers can use to
+	// record metrics. Balancer implementations which do not register metrics on
+	// metrics registry and record on them can ignore this method. The returned
+	// MetricsRecorder is guaranteed to never be nil.
+	MetricsRecorder() estats.MetricsRecorder
+
+	// EnforceClientConnEmbedding is included to force implementers to embed
+	// another implementation of this interface, allowing gRPC to add methods
+	// without breaking users.
+	internal.EnforceClientConnEmbedding
 }
 
 // BuildOptions contains additional information for Build.
 type BuildOptions struct {
-	// DialCreds is the transport credential the Balancer implementation can
-	// use to dial to a remote load balancer server. The Balancer implementations
-	// can ignore this if it does not need to talk to another party securely.
+	// DialCreds is the transport credentials to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
 	DialCreds credentials.TransportCredentials
-	// CredsBundle is the credentials bundle that the Balancer can use.
+	// CredsBundle is the credentials bundle to use when communicating with a
+	// remote load balancer server. Balancer implementations which do not
+	// communicate with a remote load balancer server can ignore this field.
 	CredsBundle credentials.Bundle
-	// Dialer is the custom dialer the Balancer implementation can use to dial
-	// to a remote load balancer server. The Balancer implementations
-	// can ignore this if it doesn't need to talk to remote balancer.
+	// Dialer is the custom dialer to use when communicating with a remote load
+	// balancer server. Balancer implementations which do not communicate with a
+	// remote load balancer server can ignore this field.
 	Dialer func(context.Context, string) (net.Conn, error)
-	// ChannelzParentID is the entity parent's channelz unique identification number.
-	ChannelzParentID int64
-	// Target contains the parsed address info of the dial target. It is the same resolver.Target as
-	// passed to the resolver.
-	// See the documentation for the resolver.Target type for details about what it contains.
+	// Authority is the server name to use as part of the authentication
+	// handshake when communicating with a remote load balancer server. Balancer
+	// implementations which do not communicate with a remote load balancer
+	// server can ignore this field.
+	Authority string
+	// ChannelzParent is the parent ClientConn's channelz channel.
+	ChannelzParent channelz.Identifier
+	// CustomUserAgent is the custom user agent set on the parent ClientConn.
+	// The balancer should set the same custom user agent if it creates a
+	// ClientConn.
+	CustomUserAgent string
+	// Target contains the parsed address info of the dial target. It is the
+	// same resolver.Target as passed to the resolver. See the documentation for
+	// the resolver.Target type for details about what it contains.
 	Target resolver.Target
 }
 
@@ -185,11 +235,14 @@
 	ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
 }
 
-// PickOptions contains addition information for the Pick operation.
-type PickOptions struct {
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
 	// FullMethodName is the method name that NewClientStream() is called
 	// with. The canonical format is /service/Method.
 	FullMethodName string
+	// Ctx is the RPC's context, and may contain relevant RPC-level information
+	// like the outgoing header metadata.
+	Ctx context.Context
 }
 
 // DoneInfo contains additional information for done.
@@ -205,56 +258,79 @@
 	// ServerLoad is the load received from server. It's usually sent as part of
 	// trailing metadata.
 	//
-	// The only supported type now is *orca_v1.LoadReport.
-	ServerLoad interface{}
+	// The only supported type now is *orca_v3.LoadReport.
+	ServerLoad any
 }
 
 var (
 	// ErrNoSubConnAvailable indicates no SubConn is available for pick().
-	// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+	// gRPC will block the RPC until a new picker is available via UpdateState().
 	ErrNoSubConnAvailable = errors.New("no SubConn is available")
 	// ErrTransientFailure indicates all SubConns are in TransientFailure.
 	// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+	//
+	// Deprecated: return an appropriate error based on the last resolution or
+	// connection attempt instead.  The behavior is the same for any non-gRPC
+	// status error.
 	ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
 )
 
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+	// SubConn is the connection to use for this pick, if its state is Ready.
+	// If the state is not Ready, gRPC will block the RPC until a new Picker is
+	// provided by the balancer (using ClientConn.UpdateState).  The SubConn
+	// must be one returned by ClientConn.NewSubConn.
+	SubConn SubConn
+
+	// Done is called when the RPC is completed.  If the SubConn is not ready,
+	// this will be called with a nil parameter.  If the SubConn is not a valid
+	// type, Done may not be called.  May be nil if the balancer does not wish
+	// to be notified when the RPC completes.
+	Done func(DoneInfo)
+
+	// Metadata provides a way for LB policies to inject arbitrary per-call
+	// metadata. Any metadata returned here will be merged with existing
+	// metadata added by the client application.
+	//
+	// LB policies with child policies are responsible for propagating metadata
+	// injected by their children to the ClientConn, as part of Pick().
+	Metadata metadata.MD
+}
+
+// TransientFailureError returns e.  It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }
+
 // Picker is used by gRPC to pick a SubConn to send an RPC.
 // Balancer is expected to generate a new picker from its snapshot every time its
 // internal state has changed.
 //
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
 type Picker interface {
-	// Pick returns the SubConn to be used to send the RPC.
-	// The returned SubConn must be one returned by NewSubConn().
+	// Pick returns the connection to use for this RPC and related information.
 	//
-	// This functions is expected to return:
-	// - a SubConn that is known to be READY;
-	// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
-	//   made (for example, some SubConn is in CONNECTING mode);
-	// - other errors if no active connecting is happening (for example, all SubConn
-	//   are in TRANSIENT_FAILURE mode).
+	// Pick should not block.  If the balancer needs to do I/O or any blocking
+	// or time-consuming work to service this call, it should return
+	// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+	// the Picker is updated (using ClientConn.UpdateState).
 	//
-	// If a SubConn is returned:
-	// - If it is READY, gRPC will send the RPC on it;
-	// - If it is not ready, or becomes not ready after it's returned, gRPC will
-	//   block until UpdateBalancerState() is called and will call pick on the
-	//   new picker. The done function returned from Pick(), if not nil, will be
-	//   called with nil error, no bytes sent and no bytes received.
+	// If an error is returned:
 	//
-	// If the returned error is not nil:
-	// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
-	// - If the error is ErrTransientFailure:
-	//   - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
-	//     is called to pick again;
-	//   - Otherwise, RPC will fail with unavailable error.
-	// - Else (error is other non-nil error):
-	//   - The RPC will fail with unavailable error.
+	// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+	//   Picker is provided by the balancer (using ClientConn.UpdateState).
 	//
-	// The returned done() function will be called once the rpc has finished,
-	// with the final status of that RPC.  If the SubConn returned is not a
-	// valid SubConn type, done may not be called.  done may be nil if balancer
-	// doesn't care about the RPC status.
-	Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+	// - If the error is a status error (implemented by the grpc/status
+	//   package), gRPC will terminate the RPC with the code and message
+	//   provided.
+	//
+	// - For all other errors, wait for ready RPCs will wait, but non-wait for
+	//   ready RPCs will be terminated with this error's Error() string and
+	//   status code Unavailable.
+	Pick(info PickInfo) (PickResult, error)
 }
 
 // Balancer takes input from gRPC, manages SubConns, and collects and aggregates
@@ -262,38 +338,46 @@
 //
 // It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
 //
-// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
-// to be called synchronously from the same goroutine.
-// There's no guarantee on picker.Pick, it may be called anytime.
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine.  There's no
+// guarantee on picker.Pick, it may be called anytime.
 type Balancer interface {
-	// HandleSubConnStateChange is called by gRPC when the connectivity state
-	// of sc has changed.
-	// Balancer is expected to aggregate all the state of SubConn and report
-	// that back to gRPC.
-	// Balancer should also generate and update Pickers when its internal state has
-	// been changed by the new state.
+	// UpdateClientConnState is called by gRPC when the state of the ClientConn
+	// changes.  If the error returned is ErrBadResolverState, the ClientConn
+	// will begin calling ResolveNow on the active name resolver with
+	// exponential backoff until a subsequent call to UpdateClientConnState
+	// returns a nil error.  Any other errors are currently ignored.
+	UpdateClientConnState(ClientConnState) error
+	// ResolverError is called by gRPC when the name resolver reports an error.
+	ResolverError(error)
+	// UpdateSubConnState is called by gRPC when the state of a SubConn
+	// changes.
 	//
-	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateSubConnState will be called instead.
-	HandleSubConnStateChange(sc SubConn, state connectivity.State)
-	// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
-	// balancers.
-	// Balancer can create new SubConn or remove SubConn with the addresses.
-	// An empty address slice and a non-nil error will be passed if the resolver returns
-	// non-nil error to gRPC.
-	//
-	// Deprecated: if V2Balancer is implemented by the Balancer,
-	// UpdateClientConnState will be called instead.
-	HandleResolvedAddrs([]resolver.Address, error)
-	// Close closes the balancer. The balancer is not required to call
-	// ClientConn.RemoveSubConn for its existing SubConns.
+	// Deprecated: Use NewSubConnOptions.StateListener when creating the
+	// SubConn instead.
+	UpdateSubConnState(SubConn, SubConnState)
+	// Close closes the balancer. The balancer is not currently required to
+	// call SubConn.Shutdown for its existing SubConns; however, this will be
+	// required in a future release, so it is recommended.
 	Close()
+	// ExitIdle instructs the LB policy to reconnect to backends / exit the
+	// IDLE state, if appropriate and possible.  Note that SubConns that enter
+	// the IDLE state will not reconnect until SubConn.Connect is called.
+	ExitIdle()
 }
 
-// SubConnState describes the state of a SubConn.
-type SubConnState struct {
-	ConnectivityState connectivity.State
-	// TODO: add last connection error
+// ExitIdler is an optional interface for balancers to implement.  If
+// implemented, ExitIdle will be called when ClientConn.Connect is called, if
+// the ClientConn is idle.  If unimplemented, ClientConn.Connect will cause
+// all SubConns to connect.
+//
+// Deprecated: All balancers must implement this interface. This interface will
+// be removed in a future release.
+type ExitIdler interface {
+	// ExitIdle instructs the LB policy to reconnect to backends / exit the
+	// IDLE state, if appropriate and possible.  Note that SubConns that enter
+	// the IDLE state will not reconnect until SubConn.Connect is called.
+	ExitIdle()
 }
 
 // ClientConnState describes the state of a ClientConn relevant to the
@@ -308,66 +392,3 @@
 // ErrBadResolverState may be returned by UpdateClientConnState to indicate a
 // problem with the provided name resolver data.
 var ErrBadResolverState = errors.New("bad resolver state")
-
-// V2Balancer is defined for documentation purposes.  If a Balancer also
-// implements V2Balancer, its UpdateClientConnState method will be called
-// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
-// instead of HandleSubConnStateChange.
-type V2Balancer interface {
-	// UpdateClientConnState is called by gRPC when the state of the ClientConn
-	// changes.  If the error returned is ErrBadResolverState, the ClientConn
-	// will begin calling ResolveNow on the active name resolver with
-	// exponential backoff until a subsequent call to UpdateClientConnState
-	// returns a nil error.  Any other errors are currently ignored.
-	UpdateClientConnState(ClientConnState) error
-	// ResolverError is called by gRPC when the name resolver reports an error.
-	ResolverError(error)
-	// UpdateSubConnState is called by gRPC when the state of a SubConn
-	// changes.
-	UpdateSubConnState(SubConn, SubConnState)
-	// Close closes the balancer. The balancer is not required to call
-	// ClientConn.RemoveSubConn for its existing SubConns.
-	Close()
-}
-
-// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-//
-// It's not thread safe.
-type ConnectivityStateEvaluator struct {
-	numReady            uint64 // Number of addrConns in ready state.
-	numConnecting       uint64 // Number of addrConns in connecting state.
-	numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// RecordTransition records state change happening in subConn and based on that
-// it evaluates what aggregated state should be.
-//
-//  - If at least one SubConn in Ready, the aggregated state is Ready;
-//  - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-//  - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
-	// Update counters.
-	for idx, state := range []connectivity.State{oldState, newState} {
-		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
-		switch state {
-		case connectivity.Ready:
-			cse.numReady += updateVal
-		case connectivity.Connecting:
-			cse.numConnecting += updateVal
-		case connectivity.TransientFailure:
-			cse.numTransientFailure += updateVal
-		}
-	}
-
-	// Evaluate.
-	if cse.numReady > 0 {
-		return connectivity.Ready
-	}
-	if cse.numConnecting > 0 {
-		return connectivity.Connecting
-	}
-	return connectivity.TransientFailure
-}
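
The balancer.go rewrite above replaces the old `Pick(ctx, opts)` triple-return
with `Pick(PickInfo) (PickResult, error)` and moves state reporting from
`UpdateBalancerState` to `ClientConn.UpdateState`. A minimal sketch of a picker
written against the new surface, round-robining over a fixed set of ready
SubConns (all names are illustrative):

```go
package rr

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
)

// picker round-robins over the SubConns that were READY when it was
// built. Pick must not block; the owning balancer regenerates the
// picker (and pushes it via ClientConn.UpdateState) whenever the set
// of ready SubConns changes.
type picker struct {
	subConns []balancer.SubConn
	next     uint64
}

func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if len(p.subConns) == 0 {
		// Tells gRPC to queue the RPC until a new picker is provided.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	n := atomic.AddUint64(&p.next, 1)
	return balancer.PickResult{
		SubConn: p.subConns[(n-1)%uint64(len(p.subConns))],
		Done:    nil, // nil: no interest in per-RPC completion info
	}, nil
}
```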
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 1a5c1aa..4d57687 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -19,7 +19,8 @@
 package base
 
 import (
-	"context"
+	"errors"
+	"fmt"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/connectivity"
@@ -27,34 +28,36 @@
 	"google.golang.org/grpc/resolver"
 )
 
+var logger = grpclog.Component("balancer")
+
 type baseBuilder struct {
 	name          string
 	pickerBuilder PickerBuilder
 	config        Config
 }
 
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	return &baseBalancer{
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+	bal := &baseBalancer{
 		cc:            cc,
 		pickerBuilder: bb.pickerBuilder,
 
-		subConns: make(map[resolver.Address]balancer.SubConn),
+		subConns: resolver.NewAddressMapV2[balancer.SubConn](),
 		scStates: make(map[balancer.SubConn]connectivity.State),
 		csEvltr:  &balancer.ConnectivityStateEvaluator{},
-		// Initialize picker to a picker that always return
-		// ErrNoSubConnAvailable, because when state of a SubConn changes, we
-		// may call UpdateBalancerState with this picker.
-		picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
-		config: bb.config,
+		config:   bb.config,
+		state:    connectivity.Connecting,
 	}
+	// Initialize picker to a picker that always returns
+	// ErrNoSubConnAvailable, because when state of a SubConn changes, we
+	// may call UpdateState with this picker.
+	bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+	return bal
 }
 
 func (bb *baseBuilder) Name() string {
 	return bb.name
 }
 
-var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
-
 type baseBalancer struct {
 	cc            balancer.ClientConn
 	pickerBuilder PickerBuilder
@@ -62,87 +65,145 @@
 	csEvltr *balancer.ConnectivityStateEvaluator
 	state   connectivity.State
 
-	subConns map[resolver.Address]balancer.SubConn
+	subConns *resolver.AddressMapV2[balancer.SubConn]
 	scStates map[balancer.SubConn]connectivity.State
 	picker   balancer.Picker
 	config   Config
+
+	resolverErr error // the last error reported by the resolver; cleared on successful resolution
+	connErr     error // the last connection error; cleared upon leaving TransientFailure
 }
 
-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	panic("not implemented")
-}
+func (b *baseBalancer) ResolverError(err error) {
+	b.resolverErr = err
+	if b.subConns.Len() == 0 {
+		b.state = connectivity.TransientFailure
+	}
 
-func (b *baseBalancer) ResolverError(error) {
-	// Ignore
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: b.state,
+		Picker:            b.picker,
+	})
 }
 
 func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
-	// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
 	// TODO: handle s.ResolverState.ServiceConfig?
-	if grpclog.V(2) {
-		grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+	if logger.V(2) {
+		logger.Info("base.baseBalancer: got new ClientConn state: ", s)
 	}
+	// Successful resolution; clear resolver error and ensure we return nil.
+	b.resolverErr = nil
 	// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
-	addrsSet := make(map[resolver.Address]struct{})
+	addrsSet := resolver.NewAddressMapV2[any]()
 	for _, a := range s.ResolverState.Addresses {
-		addrsSet[a] = struct{}{}
-		if _, ok := b.subConns[a]; !ok {
+		addrsSet.Set(a, nil)
+		if _, ok := b.subConns.Get(a); !ok {
 			// a is a new address (not existing in b.subConns).
-			sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
+			var sc balancer.SubConn
+			opts := balancer.NewSubConnOptions{
+				HealthCheckEnabled: b.config.HealthCheck,
+				StateListener:      func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
+			}
+			sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
 			if err != nil {
-				grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+				logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
 				continue
 			}
-			b.subConns[a] = sc
+			b.subConns.Set(a, sc)
 			b.scStates[sc] = connectivity.Idle
+			b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
 			sc.Connect()
 		}
 	}
-	for a, sc := range b.subConns {
+	for _, a := range b.subConns.Keys() {
+		sc, _ := b.subConns.Get(a)
 		// a was removed by resolver.
-		if _, ok := addrsSet[a]; !ok {
-			b.cc.RemoveSubConn(sc)
-			delete(b.subConns, a)
+		if _, ok := addrsSet.Get(a); !ok {
+			sc.Shutdown()
+			b.subConns.Delete(a)
 			// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
-			// The entry will be deleted in HandleSubConnStateChange.
+			// The entry will be deleted in updateSubConnState.
 		}
 	}
+	// If resolver state contains no addresses, return an error so ClientConn
+	// will trigger re-resolve. Also records this as a resolver error, so when
+	// the overall state turns transient failure, the error message will have
+	// the zero address information.
+	if len(s.ResolverState.Addresses) == 0 {
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+
+	b.regeneratePicker()
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
 	return nil
 }
 
+// mergeErrors builds an error from the last connection error and the last
+// resolver error.  Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+	// connErr must always be non-nil unless there are no SubConns, in which
+	// case resolverErr must be non-nil.
+	if b.connErr == nil {
+		return fmt.Errorf("last resolver error: %v", b.resolverErr)
+	}
+	if b.resolverErr == nil {
+		return fmt.Errorf("last connection error: %v", b.connErr)
+	}
+	return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
 // regeneratePicker takes a snapshot of the balancer, and generates a picker
 // from it. The picker is
-//  - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
-//  - built by the pickerBuilder with all READY SubConns otherwise.
+//   - errPicker if the balancer is in TransientFailure,
+//   - built by the pickerBuilder with all READY SubConns otherwise.
 func (b *baseBalancer) regeneratePicker() {
 	if b.state == connectivity.TransientFailure {
-		b.picker = NewErrPicker(balancer.ErrTransientFailure)
+		b.picker = NewErrPicker(b.mergeErrors())
 		return
 	}
-	readySCs := make(map[resolver.Address]balancer.SubConn)
+	readySCs := make(map[balancer.SubConn]SubConnInfo)
 
 	// Filter out all ready SCs from full subConn map.
-	for addr, sc := range b.subConns {
+	for _, addr := range b.subConns.Keys() {
+		sc, _ := b.subConns.Get(addr)
 		if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
-			readySCs[addr] = sc
+			readySCs[sc] = SubConnInfo{Address: addr}
 		}
 	}
-	b.picker = b.pickerBuilder.Build(readySCs)
+	b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
 }
 
-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	panic("not implemented")
-}
-
+// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
 func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
+}
+
+func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
 	s := state.ConnectivityState
-	if grpclog.V(2) {
-		grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+	if logger.V(2) {
+		logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
 	}
 	oldS, ok := b.scStates[sc]
 	if !ok {
-		if grpclog.V(2) {
-			grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		if logger.V(2) {
+			logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+		}
+		return
+	}
+	if oldS == connectivity.TransientFailure &&
+		(s == connectivity.Connecting || s == connectivity.Idle) {
+		// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
+		// CONNECTING transitions to prevent the aggregated state from being
+		// always CONNECTING when many backends exist but are all down.
+		if s == connectivity.Idle {
+			sc.Connect()
 		}
 		return
 	}
@@ -151,41 +212,51 @@
 	case connectivity.Idle:
 		sc.Connect()
 	case connectivity.Shutdown:
-		// When an address was removed by resolver, b called RemoveSubConn but
-		// kept the sc's state in scStates. Remove state for this sc here.
+		// When an address was removed by resolver, b called Shutdown but kept
+		// the sc's state in scStates. Remove state for this sc here.
 		delete(b.scStates, sc)
+	case connectivity.TransientFailure:
+		// Save error to be reported via picker.
+		b.connErr = state.ConnectionError
 	}
 
-	oldAggrState := b.state
 	b.state = b.csEvltr.RecordTransition(oldS, s)
 
 	// Regenerate picker when one of the following happens:
-	//  - this sc became ready from not-ready
-	//  - this sc became not-ready from ready
-	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
-	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	//  - this sc entered or left ready
+	//  - the aggregated state of balancer is TransientFailure
+	//    (may need to update error message)
 	if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
-		(b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+		b.state == connectivity.TransientFailure {
 		b.regeneratePicker()
 	}
-
-	b.cc.UpdateBalancerState(b.state, b.picker)
+	b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
 }
 
 // Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
+// and it doesn't need to call Shutdown for the SubConns.
 func (b *baseBalancer) Close() {
 }
 
-// NewErrPicker returns a picker that always returns err on Pick().
+// ExitIdle is a nop because the base balancer attempts to stay connected to
+// all SubConns at all times.
+func (b *baseBalancer) ExitIdle() {
+}
+
+// NewErrPicker returns a Picker that always returns err on Pick().
 func NewErrPicker(err error) balancer.Picker {
 	return &errPicker{err: err}
 }
 
+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
 type errPicker struct {
 	err error // Pick() always returns this err.
 }
 
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	return nil, nil, p.err
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return balancer.PickResult{}, p.err
 }
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 34b1f29..e31d76e 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -37,15 +37,22 @@
 
 // PickerBuilder creates balancer.Picker.
 type PickerBuilder interface {
-	// Build takes a slice of ready SubConns, and returns a picker that will be
-	// used by gRPC to pick a SubConn.
-	Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
+	// Build returns a picker that will be used by gRPC to pick a SubConn.
+	Build(info PickerBuildInfo) balancer.Picker
 }
 
-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
-	return NewBalancerBuilderWithConfig(name, pb, Config{})
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+	// ReadySCs is a map from all ready SubConns to the Addresses used to
+	// create them.
+	ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+	Address resolver.Address // the address used to create this SubConn
 }
 
 // Config contains the config info about the base balancer builder.
@@ -54,8 +61,8 @@
 	HealthCheck bool
 }
 
-// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
 	return &baseBuilder{
 		name:          name,
 		pickerBuilder: pb,
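
As a companion to the renamed constructor above, here is a hedged sketch of a
PickerBuilder written against the new PickerBuildInfo-based Build signature
and registered via the three-argument NewBalancerBuilder. It is not part of
this patch; the "example_first_ready" name and the picker types are
illustrative only.

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type firstReadyPicker struct{ sc balancer.SubConn }

func (p *firstReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

// firstReadyBuilder picks an arbitrary ready SubConn; a real builder would
// typically round-robin over info.ReadySCs.
type firstReadyBuilder struct{}

func (firstReadyBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	for sc := range info.ReadySCs {
		return &firstReadyPicker{sc: sc}
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

func init() {
	// NewBalancerBuilder now takes the Config directly; the separate
	// NewBalancerBuilderWithConfig constructor is gone.
	balancer.Register(base.NewBalancerBuilder("example_first_ready", firstReadyBuilder{}, base.Config{HealthCheck: true}))
}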
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
new file mode 100644
index 0000000..c334135
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transient failure state.
+	numIdle             uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+//   - If at least one SubConn in Ready, the aggregated state is Ready;
+//   - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+//   - Else if at least one SubConn is Idle, the aggregated state is Idle;
+//   - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is TransientFailure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+	// Update counters.
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
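+		// Note: for idx == 0 (the old state) this wraps around to the maximum
+		// uint64 value, so the additions below are effectively subtractions
+		// modulo 2^64.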
+		switch state {
+		case connectivity.Ready:
+			cse.numReady += updateVal
+		case connectivity.Connecting:
+			cse.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			cse.numTransientFailure += updateVal
+		case connectivity.Idle:
+			cse.numIdle += updateVal
+		}
+	}
+	return cse.CurrentState()
+}
+
+// CurrentState returns the current aggregated connectivity state by evaluating the counters.
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
+	// Evaluate.
+	if cse.numReady > 0 {
+		return connectivity.Ready
+	}
+	if cse.numConnecting > 0 {
+		return connectivity.Connecting
+	}
+	if cse.numIdle > 0 {
+		return connectivity.Idle
+	}
+	return connectivity.TransientFailure
+}
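
A hedged usage sketch of the evaluator above; the function name and the
transition sequence are illustrative only.

package example

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func aggregateExample() {
	cse := &balancer.ConnectivityStateEvaluator{}
	// Two SubConns are created (Shutdown -> Idle counts them in).
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	// One starts connecting: the aggregate becomes CONNECTING.
	fmt.Println(cse.RecordTransition(connectivity.Idle, connectivity.Connecting))
	// It becomes ready: the aggregate is READY regardless of the other SubConn.
	fmt.Println(cse.RecordTransition(connectivity.Connecting, connectivity.Ready))
}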
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
new file mode 100644
index 0000000..360db08
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
@@ -0,0 +1,389 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package endpointsharding implements a load balancing policy that manages
+// homogeneous child policies each owning a single endpoint.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package endpointsharding
+
+import (
+	"errors"
+	rand "math/rand/v2"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+)
+
+var randIntN = rand.IntN
+
+// ChildState is the balancer state of a child along with the endpoint which
+// identifies the child balancer.
+type ChildState struct {
+	Endpoint resolver.Endpoint
+	State    balancer.State
+
+	// Balancer exposes only the ExitIdler interface of the child LB policy.
+	// Other methods of the child policy are called only by endpointsharding.
+	Balancer ExitIdler
+}
+
+// ExitIdler provides access to only the ExitIdle method of the child balancer.
+type ExitIdler interface {
+	// ExitIdle instructs the LB policy to reconnect to backends / exit the
+	// IDLE state, if appropriate and possible.  Note that SubConns that enter
+	// the IDLE state will not reconnect until SubConn.Connect is called.
+	ExitIdle()
+}
+
+// Options are the options to configure the behaviour of the
+// endpointsharding balancer.
+type Options struct {
+	// DisableAutoReconnect allows the balancer to keep child balancers in the
+	// IDLE state until they are explicitly triggered to exit it using the
+	// ChildState obtained from the endpointsharding picker. When set to false,
+	// the endpointsharding balancer will automatically call ExitIdle on child
+	// balancers that report IDLE.
+	DisableAutoReconnect bool
+}
+
+// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same
+// type as the balancer.Builder.Build method.
+type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer
+
+// NewBalancer returns a load balancing policy that manages homogeneous child
+// policies each owning a single endpoint. The endpointsharding balancer
+// forwards the LoadBalancingConfig in ClientConn state updates to its children.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer {
+	es := &endpointSharding{
+		cc:           cc,
+		bOpts:        opts,
+		esOpts:       esOpts,
+		childBuilder: childBuilder,
+	}
+	es.children.Store(resolver.NewEndpointMap[*balancerWrapper]())
+	return es
+}
+
+// endpointSharding is a balancer that wraps child balancers. It creates a child
+// balancer with child config for every unique Endpoint received. It updates the
+// child states on any update from parent or child.
+type endpointSharding struct {
+	cc           balancer.ClientConn
+	bOpts        balancer.BuildOptions
+	esOpts       Options
+	childBuilder ChildBuilderFunc
+
+	// childMu synchronizes calls to any single child. It must be held for all
+	// calls into a child. To avoid deadlocks, do not acquire childMu while
+	// holding mu.
+	childMu  sync.Mutex
+	children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]]
+
+	// inhibitChildUpdates is set during UpdateClientConnState/ResolverError
+	// calls (each call into a child produces an update, but only one
+	// aggregated update should be sent).
+	inhibitChildUpdates atomic.Bool
+
+	// mu synchronizes access to the state stored in balancerWrappers in the
+	// children field. mu must not be held during calls into a child since
+	// synchronous calls back from the child may require taking mu, causing a
+	// deadlock. To avoid deadlocks, do not acquire childMu while holding mu.
+	mu sync.Mutex
+}
+
+// rotateEndpoints returns a slice of all the input endpoints rotated a random
+// amount.
+func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint {
+	les := len(es)
+	if les == 0 {
+		return es
+	}
+	r := randIntN(les)
+	// Make a copy to avoid mutating data beyond the end of es.
+	ret := make([]resolver.Endpoint, les)
+	copy(ret, es[r:])
+	copy(ret[les-r:], es[:r])
+	return ret
+}
+
+// UpdateClientConnState creates a child for new endpoints and deletes children
+// for endpoints that are no longer present. It also updates all the children,
+// and sends a single synchronous update of the children's aggregated state at
+// the end of the UpdateClientConnState operation. Endpoints with no addresses
+// are ignored. It returns the first error found from a child, but fully
+// processes the new update regardless.
+func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+
+	es.inhibitChildUpdates.Store(true)
+	defer func() {
+		es.inhibitChildUpdates.Store(false)
+		es.updateState()
+	}()
+	var ret error
+
+	children := es.children.Load()
+	newChildren := resolver.NewEndpointMap[*balancerWrapper]()
+
+	// Update/Create new children.
+	for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) {
+		if _, ok := newChildren.Get(endpoint); ok {
+			// Endpoint child was already created, continue to avoid duplicate
+			// update.
+			continue
+		}
+		childBalancer, ok := children.Get(endpoint)
+		if ok {
+			// Endpoint attributes may have changed, update the stored endpoint.
+			es.mu.Lock()
+			childBalancer.childState.Endpoint = endpoint
+			es.mu.Unlock()
+		} else {
+			childBalancer = &balancerWrapper{
+				childState: ChildState{Endpoint: endpoint},
+				ClientConn: es.cc,
+				es:         es,
+			}
+			childBalancer.childState.Balancer = childBalancer
+			childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
+		}
+		newChildren.Set(endpoint, childBalancer)
+		if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
+			BalancerConfig: state.BalancerConfig,
+			ResolverState: resolver.State{
+				Endpoints:  []resolver.Endpoint{endpoint},
+				Attributes: state.ResolverState.Attributes,
+			},
+		}); err != nil && ret == nil {
+			// Return the first error found, and always commit full processing
+			// of updating children. If more specific errors across all
+			// endpoints are needed, the caller should perform those
+			// validations itself; this is a current limitation kept for
+			// simplicity's sake.
+			ret = err
+		}
+	}
+	// Delete old children that are no longer present.
+	for _, e := range children.Keys() {
+		child, _ := children.Get(e)
+		if _, ok := newChildren.Get(e); !ok {
+			child.closeLocked()
+		}
+	}
+	es.children.Store(newChildren)
+	if newChildren.Len() == 0 {
+		return balancer.ErrBadResolverState
+	}
+	return ret
+}
+
+// ResolverError forwards the resolver error to all of the endpointSharding's
+// children and sends a single synchronous update of the childStates at the end
+// of the ResolverError operation.
+func (es *endpointSharding) ResolverError(err error) {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+	es.inhibitChildUpdates.Store(true)
+	defer func() {
+		es.inhibitChildUpdates.Store(false)
+		es.updateState()
+	}()
+	children := es.children.Load()
+	for _, child := range children.Values() {
+		child.resolverErrorLocked(err)
+	}
+}
+
+func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
+	// UpdateSubConnState is deprecated.
+}
+
+func (es *endpointSharding) Close() {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+	children := es.children.Load()
+	for _, child := range children.Values() {
+		child.closeLocked()
+	}
+}
+
+func (es *endpointSharding) ExitIdle() {
+	es.childMu.Lock()
+	defer es.childMu.Unlock()
+	for _, bw := range es.children.Load().Values() {
+		if !bw.isClosed {
+			bw.child.ExitIdle()
+		}
+	}
+}
+
+// updateState updates this component's state. It sends the aggregated state,
+// and, if needed, a picker with round robin behavior that carries all the
+// child states.
+func (es *endpointSharding) updateState() {
+	if es.inhibitChildUpdates.Load() {
+		return
+	}
+	var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
+
+	es.mu.Lock()
+	defer es.mu.Unlock()
+
+	children := es.children.Load()
+	childStates := make([]ChildState, 0, children.Len())
+
+	for _, child := range children.Values() {
+		childState := child.childState
+		childStates = append(childStates, childState)
+		childPicker := childState.State.Picker
+		switch childState.State.ConnectivityState {
+		case connectivity.Ready:
+			readyPickers = append(readyPickers, childPicker)
+		case connectivity.Connecting:
+			connectingPickers = append(connectingPickers, childPicker)
+		case connectivity.Idle:
+			idlePickers = append(idlePickers, childPicker)
+		case connectivity.TransientFailure:
+			transientFailurePickers = append(transientFailurePickers, childPicker)
+			// connectivity.Shutdown shouldn't appear.
+		}
+	}
+
+	// Construct the round robin picker based on the aggregated state. Whatever
+	// the aggregated state, use only the pickers of children currently in that
+	// state.
+	var aggState connectivity.State
+	var pickers []balancer.Picker
+	if len(readyPickers) >= 1 {
+		aggState = connectivity.Ready
+		pickers = readyPickers
+	} else if len(connectingPickers) >= 1 {
+		aggState = connectivity.Connecting
+		pickers = connectingPickers
+	} else if len(idlePickers) >= 1 {
+		aggState = connectivity.Idle
+		pickers = idlePickers
+	} else if len(transientFailurePickers) >= 1 {
+		aggState = connectivity.TransientFailure
+		pickers = transientFailurePickers
+	} else {
+		aggState = connectivity.TransientFailure
+		pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
+	} // No children (resolver error before valid update).
+	p := &pickerWithChildStates{
+		pickers:     pickers,
+		childStates: childStates,
+		next:        uint32(randIntN(len(pickers))),
+	}
+	es.cc.UpdateState(balancer.State{
+		ConnectivityState: aggState,
+		Picker:            p,
+	})
+}
+
+// pickerWithChildStates delegates to the pickers it holds in a round robin
+// fashion. It also contains the childStates of all the endpointSharding's
+// children.
+type pickerWithChildStates struct {
+	pickers     []balancer.Picker
+	childStates []ChildState
+	next        uint32
+}
+
+func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+	nextIndex := atomic.AddUint32(&p.next, 1)
+	picker := p.pickers[nextIndex%uint32(len(p.pickers))]
+	return picker.Pick(info)
+}
+
+// ChildStatesFromPicker returns the state of all the children managed by the
+// endpoint sharding balancer that created this picker.
+func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
+	p, ok := picker.(*pickerWithChildStates)
+	if !ok {
+		return nil
+	}
+	return p.childStates
+}
+
+// balancerWrapper is a wrapper of a balancer. It identifies a child balancer
+// by its endpoint, and persists the most recent child balancer state.
+type balancerWrapper struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+
+	// child contains the wrapped balancer. Access its methods only through
+	// methods on balancerWrapper to ensure proper synchronization.
+	child               balancer.Balancer
+	balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns
+
+	es *endpointSharding
+
+	// Access to the following fields is guarded by es.mu.
+
+	childState ChildState
+	isClosed   bool
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	bw.es.mu.Lock()
+	bw.childState.State = state
+	bw.es.mu.Unlock()
+	if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
+		bw.ExitIdle()
+	}
+	bw.es.updateState()
+}
+
+// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
+// avoid deadlocks due to synchronous balancer state updates.
+func (bw *balancerWrapper) ExitIdle() {
+	go func() {
+		bw.es.childMu.Lock()
+		if !bw.isClosed {
+			bw.child.ExitIdle()
+		}
+		bw.es.childMu.Unlock()
+	}()
+}
+
+// updateClientConnStateLocked delivers the ClientConnState to the child
+// balancer. Callers must hold the child mutex of the parent endpointsharding
+// balancer.
+func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
+	return bw.child.UpdateClientConnState(ccs)
+}
+
+// closeLocked closes the child balancer. Callers must hold the child mutex of
+// the parent endpointsharding balancer.
+func (bw *balancerWrapper) closeLocked() {
+	bw.child.Close()
+	bw.isClosed = true
+}
+
+func (bw *balancerWrapper) resolverErrorLocked(err error) {
+	bw.child.ResolverError(err)
+}
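
A hedged sketch of how a parent LB policy might consume the picker that
endpointsharding publishes; countReady is illustrative and not part of this
patch.

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/endpointsharding"
	"google.golang.org/grpc/connectivity"
)

// countReady inspects per-endpoint child states carried by the picker that
// the endpointsharding balancer sends in its State updates.
func countReady(state balancer.State) int {
	ready := 0
	for _, cs := range endpointsharding.ChildStatesFromPicker(state.Picker) {
		if cs.State.ConnectivityState == connectivity.Ready {
			ready++
		}
	}
	return ready
}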
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 0000000..4ecfa1c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+	// BalancerAddresses contains the remote load balancer address(es).  If
+	// set, overrides any resolver-provided addresses with Type of GRPCLB.
+	BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s.  s's
+// data should not be mutated after calling Set.
+func Set(state resolver.State, s *State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(key, s)
+	return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
+func Get(state resolver.State) *State {
+	s, _ := state.Attributes.Value(key).(*State)
+	return s
+}
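
A hedged sketch of how a resolver implementation might use this package;
buildState is illustrative only.

package example

import (
	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
	"google.golang.org/grpc/resolver"
)

// buildState attaches remote load balancer addresses for grpclb to a
// resolver.State via its Attributes, as described above.
func buildState(backends, balancers []resolver.Address) resolver.State {
	s := resolver.State{Addresses: backends}
	return grpclbstate.Set(s, &grpclbstate.State{BalancerAddresses: balancers})
}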
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
new file mode 100644
index 0000000..7d66cb4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the pickfirst package.
+package internal
+
+import (
+	rand "math/rand/v2"
+	"time"
+)
+
+var (
+	// RandShuffle pseudo-randomizes the order of addresses.
+	RandShuffle = rand.Shuffle
+	// TimeAfterFunc allows mocking the timer for testing connection delay
+	// related functionality.
+	TimeAfterFunc = func(d time.Duration, f func()) func() {
+		timer := time.AfterFunc(d, f)
+		return func() { timer.Stop() }
+	}
+)
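
Since these hooks exist specifically for mocking, here is a hedged sketch of
how a test might stub TimeAfterFunc; the helper and variable names are
illustrative, and because the package is internal this is only possible from
within the grpc module itself.

package internal_test

import (
	"time"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

// scheduled collects captured callbacks so a test can fire timer
// expirations manually, at a point of its choosing.
var scheduled []func()

// stubTimer replaces internal.TimeAfterFunc with a fake that records the
// callback instead of scheduling it, and returns a restore function.
func stubTimer() (restore func()) {
	orig := internal.TimeAfterFunc
	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
		scheduled = append(scheduled, f)
		return func() {} // stopping the fake timer is a no-op
	}
	return func() { internal.TimeAfterFunc = orig }
}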
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
new file mode 100644
index 0000000..b15c10e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -0,0 +1,291 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirst contains the pick_first load balancing policy.
+package pickfirst
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	rand "math/rand/v2"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst/internal"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/envconfig"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+
+	_ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
+)
+
+func init() {
+	if envconfig.NewPickFirstEnabled {
+		return
+	}
+	balancer.Register(pickfirstBuilder{})
+}
+
+var logger = grpclog.Component("pick-first-lb")
+
+const (
+	// Name is the name of the pick_first balancer.
+	Name      = "pick_first"
+	logPrefix = "[pick-first-lb %p] "
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+	b := &pickfirstBalancer{cc: cc}
+	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+	return b
+}
+
+func (pickfirstBuilder) Name() string {
+	return Name
+}
+
+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of endpoints received from the name resolver before attempting to
+	// connect to them.
+	ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var cfg pfConfig
+	if err := json.Unmarshal(js, &cfg); err != nil {
+		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+	}
+	return cfg, nil
+}
+
+type pickfirstBalancer struct {
+	logger  *internalgrpclog.PrefixLogger
+	state   connectivity.State
+	cc      balancer.ClientConn
+	subConn balancer.SubConn
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+	if b.subConn == nil {
+		b.state = connectivity.TransientFailure
+	}
+
+	if b.state != connectivity.TransientFailure {
+		// The picker will not change since the balancer does not currently
+		// report an error.
+		return
+	}
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+// Shuffler is an interface for shuffling an address list.
+type Shuffler interface {
+	ShuffleAddressListForTesting(n int, swap func(i, j int))
+}
+
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses.  n
+// is the number of elements.  swap swaps the elements with indexes i and j.
+func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// The resolver reported an empty address list. Treat it like an error by
+		// calling b.ResolverError.
+		if b.subConn != nil {
+			// Shut down the old subConn. All addresses were removed, so it is
+			// no longer valid.
+			b.subConn.Shutdown()
+			b.subConn = nil
+		}
+		b.ResolverError(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	// We don't have to guard this block with the env var because ParseConfig
+	// already does so.
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var addrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling will
+		// change the order of endpoints but not touch the order of the addresses
+		// within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for each
+		// of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			// "In the flattened list, interleave addresses from the two address
+			// families, as per RFC-8305 section 4." - A61
+			// TODO: support the above language.
+			addrs = append(addrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints are not set, so process addresses until resolver
+		// emissions are fully migrated to Endpoints. The top channel does
+		// wrap emitted addresses with endpoints; however, some balancers,
+		// such as weighted target, do not forward the correct corresponding
+		// endpoints down or split endpoints properly. Once all balancers
+		// correctly forward endpoints down, this else conditional can be
+		// deleted.
+		addrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			addrs = append([]resolver.Address{}, addrs...)
+			internal.RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+		}
+	}
+
+	if b.subConn != nil {
+		b.cc.UpdateAddresses(b.subConn, addrs)
+		return nil
+	}
+
+	var subConn balancer.SubConn
+	subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(subConn, state)
+		},
+	})
+	if err != nil {
+		if b.logger.V(2) {
+			b.logger.Infof("Failed to create new SubConn: %v", err)
+		}
+		b.state = connectivity.TransientFailure
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("error creating connection: %v", err)},
+		})
+		return balancer.ErrBadResolverState
+	}
+	b.subConn = subConn
+	b.state = connectivity.Idle
+	b.cc.UpdateState(balancer.State{
+		ConnectivityState: connectivity.Connecting,
+		Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+	})
+	b.subConn.Connect()
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
+	}
+	if b.subConn != subConn {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignored state change because subConn is not recognized")
+		}
+		return
+	}
+	if state.ConnectivityState == connectivity.Shutdown {
+		b.subConn = nil
+		return
+	}
+
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{result: balancer.PickResult{SubConn: subConn}},
+		})
+	case connectivity.Connecting:
+		if b.state == connectivity.TransientFailure {
+			// We stay in TransientFailure until we are Ready. See A62.
+			return
+		}
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	case connectivity.Idle:
+		if b.state == connectivity.TransientFailure {
+			// We stay in TransientFailure until we are Ready. Also kick the
+			// subConn out of Idle into Connecting. See A62.
+			b.subConn.Connect()
+			return
+		}
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &idlePicker{subConn: subConn},
+		})
+	case connectivity.TransientFailure:
+		b.cc.UpdateState(balancer.State{
+			ConnectivityState: state.ConnectivityState,
+			Picker:            &picker{err: state.ConnectionError},
+		})
+	}
+	b.state = state.ConnectivityState
+}
+
+func (b *pickfirstBalancer) Close() {
+}
+
+func (b *pickfirstBalancer) ExitIdle() {
+	if b.subConn != nil && b.state == connectivity.Idle {
+		b.subConn.Connect()
+	}
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	subConn balancer.SubConn
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.subConn.Connect()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
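
For reference, a hedged sketch of the service config JSON that sets
pfConfig.ShuffleAddressList; the field name comes from the struct tag above,
while the dial-option helper is illustrative only.

package example

import "google.golang.org/grpc"

// serviceConfigJSON enables the optional gRFC A62 address shuffling in
// pick_first via the shuffleAddressList field parsed by ParseConfig above.
const serviceConfigJSON = `{
  "loadBalancingConfig": [
    { "pick_first": { "shuffleAddressList": true } }
  ]
}`

// dialOption returns a DialOption applying this config as the default.
func dialOption() grpc.DialOption {
	return grpc.WithDefaultServiceConfig(serviceConfigJSON)
}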
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
new file mode 100644
index 0000000..9ffdd28
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -0,0 +1,913 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/netip"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst/internal"
+	"google.golang.org/grpc/connectivity"
+	expstats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/envconfig"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+	if envconfig.NewPickFirstEnabled {
+		// Register as the default pick_first balancer.
+		Name = "pick_first"
+	}
+	balancer.Register(pickfirstBuilder{})
+}
+
+// enableHealthListenerKeyType is a unique key type used in resolver
+// attributes to indicate whether the health listener usage is enabled.
+type enableHealthListenerKeyType struct{}
+
+var (
+	logger = grpclog.Component("pick-first-leaf-lb")
+	// Name is the name of the pick_first_leaf balancer.
+	// It is changed to "pick_first" in init() if this balancer is to be
+	// registered as the default pickfirst.
+	Name                 = "pick_first_leaf"
+	disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.disconnections",
+		Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+		Unit:        "{disconnection}",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+	connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.connection_attempts_succeeded",
+		Description: "EXPERIMENTAL. Number of successful connection attempts.",
+		Unit:        "{attempt}",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+	connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+		Name:        "grpc.lb.pick_first.connection_attempts_failed",
+		Description: "EXPERIMENTAL. Number of failed connection attempts.",
+		Unit:        "{attempt}",
+		Labels:      []string{"grpc.target"},
+		Default:     false,
+	})
+)
+
+const (
+	// TODO: change to pick-first when this becomes the default pick_first policy.
+	logPrefix = "[pick-first-leaf-lb %p] "
+	// connectionDelayInterval is the time to wait for during the happy eyeballs
+	// pass before starting the next connection attempt.
+	connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+	// ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+	// address.
+	ipAddrFamilyUnknown ipAddrFamily = iota
+	ipAddrFamilyV4
+	ipAddrFamilyV6
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+	b := &pickfirstBalancer{
+		cc:              cc,
+		target:          bo.Target.String(),
+		metricsRecorder: cc.MetricsRecorder(),
+
+		subConns:              resolver.NewAddressMapV2[*scData](),
+		state:                 connectivity.Connecting,
+		cancelConnectionTimer: func() {},
+	}
+	b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+	return b
+}
+
+func (b pickfirstBuilder) Name() string {
+	return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var cfg pfConfig
+	if err := json.Unmarshal(js, &cfg); err != nil {
+		return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+	}
+	return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+func EnableHealthListener(state resolver.State) resolver.State {
+	state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+	return state
+}
+
+type pfConfig struct {
+	serviceconfig.LoadBalancingConfig `json:"-"`
+
+	// If set to true, instructs the LB policy to shuffle the order of the list
+	// of endpoints received from the name resolver before attempting to
+	// connect to them.
+	ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+	// The following fields are initialized at build time and read-only after
+	// that.
+	subConn balancer.SubConn
+	addr    resolver.Address
+
+	rawConnectivityState connectivity.State
+	// The effective connectivity state, derived from the raw connectivity and
+	// health states after applying the sticky TransientFailure behaviour
+	// defined in A62.
+	effectiveState              connectivity.State
+	lastErr                     error
+	connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+	sd := &scData{
+		rawConnectivityState: connectivity.Idle,
+		effectiveState:       connectivity.Idle,
+		addr:                 addr,
+	}
+	sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+		StateListener: func(state balancer.SubConnState) {
+			b.updateSubConnState(sd, state)
+		},
+	})
+	if err != nil {
+		return nil, err
+	}
+	sd.subConn = sc
+	return sd, nil
+}
+
+type pickfirstBalancer struct {
+	// The following fields are initialized at build time and read-only after
+	// that and therefore do not need to be guarded by a mutex.
+	logger          *internalgrpclog.PrefixLogger
+	cc              balancer.ClientConn
+	target          string
+	metricsRecorder expstats.MetricsRecorder // guaranteed to be non-nil
+
+	// The mutex is used to synchronize updates triggered from the idle
+	// picker with the already serialized resolver and SubConn state
+	// updates.
+	mu sync.Mutex
+	// State reported to the channel based on SubConn states and resolver
+	// updates.
+	state connectivity.State
+	// scData for active subconns mapped by address.
+	subConns              *resolver.AddressMapV2[*scData]
+	addressList           addressList
+	firstPass             bool
+	numTF                 int
+	cancelConnectionTimer func()
+	healthCheckingEnabled bool
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determined the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+	if b.logger.V(2) {
+		b.logger.Infof("Received error from the name resolver: %v", err)
+	}
+
+	// The picker will not change since the balancer does not currently
+	// report an error. If the balancer hasn't received a single good resolver
+	// update yet, transition to TRANSIENT_FAILURE.
+	if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+		if b.logger.V(2) {
+			b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+		}
+		return
+	}
+
+	b.updateBalancerState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: fmt.Errorf("name resolver error: %v", err)},
+	})
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.cancelConnectionTimer()
+	if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+		// Cleanup state pertaining to the previous resolver state.
+		// Treat an empty address list like an error by calling b.ResolverError.
+		b.closeSubConnsLocked()
+		b.addressList.updateAddrs(nil)
+		b.resolverErrorLocked(errors.New("produced zero addresses"))
+		return balancer.ErrBadResolverState
+	}
+	b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
+	cfg, ok := state.BalancerConfig.(pfConfig)
+	if state.BalancerConfig != nil && !ok {
+		return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+	}
+
+	if b.logger.V(2) {
+		b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+	}
+
+	var newAddrs []resolver.Address
+	if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+		// Perform the optional shuffling described in gRFC A62. The shuffling
+		// will change the order of endpoints but not touch the order of the
+		// addresses within each endpoint. - A61
+		if cfg.ShuffleAddressList {
+			endpoints = append([]resolver.Endpoint{}, endpoints...)
+			internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+		}
+
+		// "Flatten the list by concatenating the ordered list of addresses for
+		// each of the endpoints, in order." - A61
+		for _, endpoint := range endpoints {
+			newAddrs = append(newAddrs, endpoint.Addresses...)
+		}
+	} else {
+		// Endpoints are not set, so process addresses until resolver
+		// emissions are fully migrated to Endpoints. The top channel does
+		// wrap emitted addresses with endpoints; however, some balancers,
+		// such as weighted target, do not forward the correct corresponding
+		// endpoints down or split endpoints properly. Once all balancers
+		// correctly forward endpoints down, this else conditional can be
+		// deleted.
+		newAddrs = state.ResolverState.Addresses
+		if cfg.ShuffleAddressList {
+			newAddrs = append([]resolver.Address{}, newAddrs...)
+			internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+		}
+	}
+
+	// If an address appears in multiple endpoints or in the same endpoint
+	// multiple times, we keep it only once. We will create only one SubConn
+	// for the address because an AddressMap is used to store SubConns.
+	// Not de-duplicating would result in attempting to connect to the same
+	// SubConn multiple times in the same pass. We don't want this.
+	newAddrs = deDupAddresses(newAddrs)
+	newAddrs = interleaveAddresses(newAddrs)
+
+	prevAddr := b.addressList.currentAddress()
+	prevSCData, found := b.subConns.Get(prevAddr)
+	prevAddrsCount := b.addressList.size()
+	isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
+	b.addressList.updateAddrs(newAddrs)
+
+	// If the previous ready SubConn exists in new address list,
+	// keep this connection and don't create new SubConns.
+	if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
+		return nil
+	}
+
+	b.reconcileSubConnsLocked(newAddrs)
+	// If it's the first resolver update or the balancer was already READY
+	// (but the new address list does not contain the ready SubConn) or
+	// CONNECTING, enter CONNECTING.
+	// We may be in TRANSIENT_FAILURE due to a previous empty address list;
+	// we should still enter CONNECTING because the sticky TF behaviour
+	// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+	// due to connectivity failures.
+	if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+		// Start connection attempt at first address.
+		b.forceUpdateConcludedStateLocked(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.startFirstPassLocked()
+	} else if b.state == connectivity.TransientFailure {
+		// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+		// we're READY. See A62.
+		b.startFirstPassLocked()
+	}
+	return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+	b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	b.closeSubConnsLocked()
+	b.cancelConnectionTimer()
+	b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn, so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.state == connectivity.Idle {
+		// Move the balancer into CONNECTING state immediately. This is done to
+		// avoid staying in IDLE if a resolver update arrives before the first
+		// SubConn reports CONNECTING.
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		b.startFirstPassLocked()
+	}
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+	b.firstPass = true
+	b.numTF = 0
+	// Reset the connection attempt record for existing SubConns.
+	for _, sd := range b.subConns.Values() {
+		sd.connectionFailedInFirstPass = false
+	}
+	b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+	for _, sd := range b.subConns.Values() {
+		sd.subConn.Shutdown()
+	}
+	b.subConns = resolver.NewAddressMapV2[*scData]()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+	seenAddrs := resolver.NewAddressMapV2[bool]()
+	retAddrs := []resolver.Address{}
+
+	for _, addr := range addrs {
+		if _, ok := seenAddrs.Get(addr); ok {
+			continue
+		}
+		// Record the address so later duplicates are skipped.
+		seenAddrs.Set(addr, true)
+		retAddrs = append(retAddrs, addr)
+	}
+	return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+	familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+	interleavingOrder := []ipAddrFamily{}
+	for _, addr := range addrs {
+		family := addressFamily(addr.Addr)
+		if _, found := familyAddrsMap[family]; !found {
+			interleavingOrder = append(interleavingOrder, family)
+		}
+		familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+	}
+
+	interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+	for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+		// Some IP types may have fewer addresses than others, so we look for
+		// the next type that has a remaining member to add to the interleaved
+		// list.
+		family := interleavingOrder[curFamilyIdx]
+		remainingMembers := familyAddrsMap[family]
+		if len(remainingMembers) > 0 {
+			interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+			familyAddrsMap[family] = remainingMembers[1:]
+		}
+	}
+
+	return interleavedAddrs
+}
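+
+// For illustration (not part of the upstream code): given input ordered
+// [v6a, v6b, v4a, v4b, v6c], the discovered interleaving order is
+// [IPv6, IPv4], so the result is [v6a, v4a, v6b, v4b, v6c]; once the IPv4
+// family runs out, the remaining IPv6 addresses are appended in order.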
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+	// Parse the IP after removing the port.
+	host, _, err := net.SplitHostPort(address)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	ip, err := netip.ParseAddr(host)
+	if err != nil {
+		return ipAddrFamilyUnknown
+	}
+	switch {
+	case ip.Is4() || ip.Is4In6():
+		return ipAddrFamilyV4
+	case ip.Is6():
+		return ipAddrFamilyV6
+	default:
+		return ipAddrFamilyUnknown
+	}
+}
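+
+// For illustration (not part of the upstream code):
+//
+//	addressFamily("10.0.0.1:50051")    == ipAddrFamilyV4
+//	addressFamily("[2001:db8::1]:443") == ipAddrFamilyV6
+//	addressFamily("example.com:443")   == ipAddrFamilyUnknown (hostname)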
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+//   - closing subchannels: any existing subchannels associated with addresses
+//     that are no longer in the updated list are shut down.
+//   - removing subchannels: entries for these closed subchannels are removed
+//     from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+	newAddrsMap := resolver.NewAddressMapV2[bool]()
+	for _, addr := range newAddrs {
+		newAddrsMap.Set(addr, true)
+	}
+
+	for _, oldAddr := range b.subConns.Keys() {
+		if _, ok := newAddrsMap.Get(oldAddr); ok {
+			continue
+		}
+		val, _ := b.subConns.Get(oldAddr)
+		val.subConn.Shutdown()
+		b.subConns.Delete(oldAddr)
+	}
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+	b.cancelConnectionTimer()
+	for _, sd := range b.subConns.Values() {
+		if sd.subConn != selected.subConn {
+			sd.subConn.Shutdown()
+		}
+	}
+	b.subConns = resolver.NewAddressMapV2[*scData]()
+	b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+	if !b.addressList.isValid() {
+		return
+	}
+	var lastErr error
+	for valid := true; valid; valid = b.addressList.increment() {
+		curAddr := b.addressList.currentAddress()
+		sd, ok := b.subConns.Get(curAddr)
+		if !ok {
+			var err error
+			// We want to assign the new scData to sd from the outer scope,
+			// hence we can't use := below.
+			sd, err = b.newSCData(curAddr)
+			if err != nil {
+				// This should never happen, unless the clientConn is being shut
+				// down.
+				if b.logger.V(2) {
+					b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+				}
+				// Do nothing, the LB policy will be closed soon.
+				return
+			}
+			b.subConns.Set(curAddr, sd)
+		}
+
+		switch sd.rawConnectivityState {
+		case connectivity.Idle:
+			sd.subConn.Connect()
+			b.scheduleNextConnectionLocked()
+			return
+		case connectivity.TransientFailure:
+			// The SubConn is being re-used and failed during a previous pass
+			// over the addressList. It has not completed backoff yet.
+			// Mark it as having failed and try the next address.
+			sd.connectionFailedInFirstPass = true
+			lastErr = sd.lastErr
+			continue
+		case connectivity.Connecting:
+			// Wait for the connection attempt to complete or the timer to fire
+			// before attempting the next address.
+			b.scheduleNextConnectionLocked()
+			return
+		default:
+			b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
+			return
+		}
+	}
+
+	// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+	// first pass if possible.
+	b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+	b.cancelConnectionTimer()
+	if !b.addressList.hasNext() {
+		return
+	}
+	curAddr := b.addressList.currentAddress()
+	cancelled := false // Access to this is protected by the balancer's mutex.
+	closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+		b.mu.Lock()
+		defer b.mu.Unlock()
+		// If the scheduled task is cancelled while acquiring the mutex, return.
+		if cancelled {
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+		}
+		if b.addressList.increment() {
+			b.requestConnectionLocked()
+		}
+	})
+	// Access to the cancellation callback held by the balancer is guarded by
+	// the balancer's mutex, so it's safe to set the boolean from the callback.
+	b.cancelConnectionTimer = sync.OnceFunc(func() {
+		cancelled = true
+		closeFn()
+	})
+}
+
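+// An illustrative sketch of the cancellation idiom in
+// scheduleNextConnectionLocked above, using only the standard library and
+// assuming internal.TimeAfterFunc behaves like time.AfterFunc (mu and delay
+// stand in for the balancer's mutex and connectionDelayInterval):
+//
+//	var mu sync.Mutex
+//	cancelled := false
+//	timer := time.AfterFunc(delay, func() {
+//		mu.Lock()
+//		defer mu.Unlock()
+//		if cancelled { // cancellation won the race for the mutex
+//			return
+//		}
+//		// ... try the next address ...
+//	})
+//	cancel := sync.OnceFunc(func() {
+//		mu.Lock()
+//		cancelled = true
+//		mu.Unlock()
+//		timer.Stop()
+//	})
+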
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	oldState := sd.rawConnectivityState
+	sd.rawConnectivityState = newState.ConnectivityState
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes this
+	// SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	if newState.ConnectivityState == connectivity.Shutdown {
+		sd.effectiveState = connectivity.Shutdown
+		return
+	}
+
+	// Record a connection attempt when exiting CONNECTING.
+	if newState.ConnectivityState == connectivity.TransientFailure {
+		sd.connectionFailedInFirstPass = true
+		connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+	}
+
+	if newState.ConnectivityState == connectivity.Ready {
+		connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		b.shutdownRemainingLocked(sd)
+		if !b.addressList.seekTo(sd.addr) {
+			// This should not fail as we should have only one SubConn after
+			// entering READY. The SubConn should be present in the addressList.
+			b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
+			return
+		}
+		if !b.healthCheckingEnabled {
+			if b.logger.V(2) {
+				b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+			}
+
+			sd.effectiveState = connectivity.Ready
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.Ready,
+				Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+			})
+			return
+		}
+		if b.logger.V(2) {
+			b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+		}
+		// Send a CONNECTING update to take the SubConn out of sticky-TF if
+		// required.
+		sd.effectiveState = connectivity.Connecting
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+		sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+			b.updateSubConnHealthState(sd, scs)
+		})
+		return
+	}
+
+	// If the LB policy is READY, and it receives a subchannel state change,
+	// it means that the READY subchannel has failed.
+	// A SubConn can also transition from CONNECTING directly to IDLE when
+	// a transport is successfully created, but the connection fails
+	// before the SubConn can send the notification for READY. We treat
+	// this as a successful connection and transition to IDLE.
+	// TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+	// part of the if condition below once the issue is fixed.
+	if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+		// Once a transport fails, the balancer enters IDLE and starts from
+		// the first address when the picker is used.
+		b.shutdownRemainingLocked(sd)
+		sd.effectiveState = newState.ConnectivityState
+		// A READY state may have been interspliced between CONNECTING and
+		// IDLE, so account for the missed successful connection.
+		if oldState == connectivity.Connecting {
+			// A known issue (https://github.com/grpc/grpc-go/issues/7862)
+			// causes a race that prevents the READY state change notification.
+			// This works around it.
+			connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+		}
+		disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+		b.addressList.reset()
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Idle,
+			Picker:            &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+		})
+		return
+	}
+
+	if b.firstPass {
+		switch newState.ConnectivityState {
+		case connectivity.Connecting:
+			// The effective state can be IDLE, CONNECTING or TRANSIENT_FAILURE.
+			// If it's TRANSIENT_FAILURE, stay in TRANSIENT_FAILURE until it's
+			// READY. See A62.
+			if sd.effectiveState != connectivity.TransientFailure {
+				sd.effectiveState = connectivity.Connecting
+				b.updateBalancerState(balancer.State{
+					ConnectivityState: connectivity.Connecting,
+					Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+				})
+			}
+		case connectivity.TransientFailure:
+			sd.lastErr = newState.ConnectionError
+			sd.effectiveState = connectivity.TransientFailure
+			// Since we're re-using common SubConns while handling resolver
+			// updates, we could receive an out-of-turn TRANSIENT_FAILURE from
+			// a pass over the previous address list. Happy Eyeballs will also
+			// cause out-of-order updates to arrive.
+			if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+				b.cancelConnectionTimer()
+				if b.addressList.increment() {
+					b.requestConnectionLocked()
+					return
+				}
+			}
+
+			// End the first pass if we've seen a TRANSIENT_FAILURE from all
+			// SubConns once.
+			b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+		}
+		return
+	}
+
+	// We have finished the first pass, keep re-connecting failing SubConns.
+	switch newState.ConnectivityState {
+	case connectivity.TransientFailure:
+		b.numTF = (b.numTF + 1) % b.subConns.Len()
+		sd.lastErr = newState.ConnectionError
+		if b.numTF%b.subConns.Len() == 0 {
+			b.updateBalancerState(balancer.State{
+				ConnectivityState: connectivity.TransientFailure,
+				Picker:            &picker{err: newState.ConnectionError},
+			})
+		}
+		// We don't need to request re-resolution since the SubConn already
+		// does that before reporting TRANSIENT_FAILURE.
+		// TODO: #7534 - Move re-resolution requests from SubConn into
+		// pick_first.
+	case connectivity.Idle:
+		sd.subConn.Connect()
+	}
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+	// If the address list still has addresses to try, the first pass cannot
+	// end yet; returning early avoids iterating over the entire SubConn map.
+	if b.addressList.isValid() {
+		return
+	}
+	// Connect() has been called on all the SubConns. The first pass can be
+	// ended if all the SubConns have reported a failure.
+	for _, sd := range b.subConns.Values() {
+		if !sd.connectionFailedInFirstPass {
+			return
+		}
+	}
+	b.firstPass = false
+	b.updateBalancerState(balancer.State{
+		ConnectivityState: connectivity.TransientFailure,
+		Picker:            &picker{err: lastErr},
+	})
+	// Start re-connecting all the SubConns that are already in IDLE.
+	for _, sd := range b.subConns.Values() {
+		if sd.rawConnectivityState == connectivity.Idle {
+			sd.subConn.Connect()
+		}
+	}
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+	activeSD, found := b.subConns.Get(sd.addr)
+	return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	// Previously relevant SubConns can still call back with state updates.
+	// To prevent pickers from returning these obsolete SubConns, this logic
+	// is included to check if the current list of active SubConns includes
+	// this SubConn.
+	if !b.isActiveSCData(sd) {
+		return
+	}
+	sd.effectiveState = state.ConnectivityState
+	switch state.ConnectivityState {
+	case connectivity.Ready:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Ready,
+			Picker:            &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+		})
+	case connectivity.TransientFailure:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+		})
+	case connectivity.Connecting:
+		b.updateBalancerState(balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            &picker{err: balancer.ErrNoSubConnAvailable},
+		})
+	default:
+		b.logger.Errorf("Got unexpected health update for SubConn %p: %v", sd.subConn, state)
+	}
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+	// In case of TransientFailures, allow the picker to be updated so that the
+	// connectivity error is refreshed; in all other cases, don't send
+	// duplicate state updates.
+	if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+		return
+	}
+	b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force-update the ClientConn state since
+// the channel doesn't correctly assume that LB policies start in CONNECTING
+// and instead relies on the LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+	b.state = newState.ConnectivityState
+	b.cc.UpdateState(newState)
+}
+
+type picker struct {
+	result balancer.PickResult
+	err    error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+	exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+	i.exitIdle()
+	return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
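+// An illustrative note on how the channel consumes these pickers: Pick runs
+// for every RPC. A picker with a fixed result returns the same SubConn each
+// time, while idlePicker triggers ExitIdle exactly once and asks the RPC to
+// wait for the next picker:
+//
+//	res, err := p.Pick(balancer.PickInfo{})
+//	if err == balancer.ErrNoSubConnAvailable {
+//		// the RPC blocks until the LB policy produces a new picker
+//	}
+//	_ = res
+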
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a one-dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+	addresses []resolver.Address
+	idx       int
+}
+
+func (al *addressList) isValid() bool {
+	return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+	return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+	if !al.isValid() {
+		return false
+	}
+	al.idx++
+	return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+	if !al.isValid() {
+		return resolver.Address{}
+	}
+	return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+	al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+	al.addresses = addrs
+	al.reset()
+}
+
+// seekTo moves the index to the first address equal to the needle. It returns
+// false, leaving the current index unchanged, if the needle is not found.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+	for ai, addr := range al.addresses {
+		if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+			continue
+		}
+		al.idx = ai
+		return true
+	}
+	return false
+}
+
+// hasNext returns whether there is another address after the current one,
+// i.e. whether incrementing the addressList will keep it within the list. If
+// the list has already moved past the end, it returns false.
+func (al *addressList) hasNext() bool {
+	if !al.isValid() {
+		return false
+	}
+	return al.idx+1 < len(al.addresses)
+}
+
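+// An illustrative walk-through of the iterator semantics above for a
+// two-element list:
+//
+//	al := addressList{addresses: []resolver.Address{{Addr: "a"}, {Addr: "b"}}}
+//	al.currentAddress() // {Addr: "a"}
+//	al.hasNext()        // true
+//	al.increment()      // true; now pointing at "b"
+//	al.increment()      // false; moved past the end, isValid() is now false
+//	al.reset()          // back to "a"
+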
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes)
+}
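+
+// For illustration: two addresses that differ only in their
+// BalancerAttributes compare equal here, unlike with resolver.Address.Equal:
+//
+//	a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "foo"}
+//	b := a
+//	b.BalancerAttributes = attributes.New("weight", 10)
+//	equalAddressIgnoringBalAttributes(&a, &b) // true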
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 29f7a4d..22045bf 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,62 +22,51 @@
 package roundrobin
 
 import (
-	"context"
-	"sync"
+	"fmt"
 
 	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/balancer/endpointsharding"
+	"google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
 	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal/grpcrand"
-	"google.golang.org/grpc/resolver"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
 )
 
 // Name is the name of round_robin balancer.
 const Name = "round_robin"
 
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
-	return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
-}
+var logger = grpclog.Component("roundrobin")
 
 func init() {
-	balancer.Register(newBuilder())
+	balancer.Register(builder{})
 }
 
-type rrPickerBuilder struct{}
+type builder struct{}
 
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
-	grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
-	if len(readySCs) == 0 {
-		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
-	}
-	var scs []balancer.SubConn
-	for _, sc := range readySCs {
-		scs = append(scs, sc)
-	}
-	return &rrPicker{
-		subConns: scs,
-		// Start at a random index, as the same RR balancer rebuilds a new
-		// picker when SubConn states change, and we don't want to apply excess
-		// load to the first server in the list.
-		next: grpcrand.Intn(len(scs)),
-	}
+func (bb builder) Name() string {
+	return Name
 }
 
-type rrPicker struct {
-	// subConns is the snapshot of the roundrobin balancer when this picker was
-	// created. The slice is immutable. Each Get() will do a round robin
-	// selection from it and return the selected SubConn.
-	subConns []balancer.SubConn
-
-	mu   sync.Mutex
-	next int
+func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+	childBuilder := balancer.Get(pickfirstleaf.Name).Build
+	bal := &rrBalancer{
+		cc:       cc,
+		Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
+	}
+	bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
+	bal.logger.Infof("Created")
+	return bal
 }
 
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	p.mu.Lock()
-	sc := p.subConns[p.next]
-	p.next = (p.next + 1) % len(p.subConns)
-	p.mu.Unlock()
-	return sc, nil, nil
+type rrBalancer struct {
+	balancer.Balancer
+	cc     balancer.ClientConn
+	logger *internalgrpclog.PrefixLogger
+}
+
+func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
+	return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
+		// Enable the health listener in pickfirst children for client side health
+		// checks and outlier detection, if configured.
+		ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+	})
 }
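+
+// Note: round_robin is now a thin wrapper. The endpointsharding balancer
+// manages one pickfirstleaf child per endpoint and aggregates the children's
+// states, so this file only needs to name the policy and enable the health
+// listener on resolver updates.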
diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
new file mode 100644
index 0000000..9ee44d4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/subconn.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import (
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE.  If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
+//
+// Each SubConn contains a list of addresses.  gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful.  However, this behavior is deprecated.  SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies.  Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface {
+	// UpdateAddresses updates the addresses used in this SubConn.
+	// gRPC checks if the currently-connected address is still in the new list.
+	// If it's in the list, the connection will be kept.
+	// If it's not in the list, the connection will gracefully close, and
+	// a new connection will be created.
+	//
+	// This will trigger a state transition for the SubConn.
+	//
+	// Deprecated: this method will be removed.  Create new SubConns for new
+	// addresses instead.
+	UpdateAddresses([]resolver.Address)
+	// Connect starts the connecting for this SubConn.
+	Connect()
+	// GetOrBuildProducer returns a reference to the existing Producer for this
+	// ProducerBuilder in this SubConn, or, if one does not currently exist,
+	// creates a new one and returns it.  Returns a close function which may be
+	// called when the Producer is no longer needed.  Otherwise the producer
+	// will automatically be closed upon connection loss or subchannel close.
+	// Should only be called on a SubConn in state Ready.  Otherwise the
+	// producer will be unable to create streams.
+	GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+	// Shutdown shuts down the SubConn gracefully.  Any started RPCs will be
+	// allowed to complete.  No future calls should be made on the SubConn.
+	// One final state update will be delivered to the StateListener (or
+	// UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+	// indicate the shutdown operation.  This may be delivered before
+	// in-progress RPCs are complete and the actual connection is closed.
+	Shutdown()
+	// RegisterHealthListener registers a health listener that receives health
+	// updates for a Ready SubConn. Only one health listener can be registered
+	// at a time. A health listener should be registered each time the SubConn's
+	// connectivity state changes to READY. Registering a health listener when
+	// the connectivity state is not READY may result in undefined behaviour.
+	// This method must not be called synchronously while handling an update
+	// from a previously registered health listener.
+	RegisterHealthListener(func(SubConnState))
+	// EnforceSubConnEmbedding is included to force implementers to embed
+	// another implementation of this interface, allowing gRPC to add methods
+	// without breaking users.
+	internal.EnforceSubConnEmbedding
+}
+
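+// An illustrative sketch of the delegation pattern described in the NOTICE
+// above (loggingSubConn is a hypothetical wrapper type):
+//
+//	type loggingSubConn struct {
+//		balancer.SubConn // always a SubConn returned by ClientConn.NewSubConn
+//	}
+//
+//	func (s *loggingSubConn) Connect() {
+//		log.Println("connect requested")
+//		s.SubConn.Connect()
+//	}
+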
+// A ProducerBuilder is a simple constructor for a Producer.  It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+	// Build creates a Producer.  The first parameter is always a
+	// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+	// associated SubConn), but is declared as `any` to avoid a dependency
+	// cycle.  Build also returns a close function that will be called when all
+	// references to the Producer have been given up for a SubConn, or when a
+	// connectivity state change occurs on the SubConn.  The close function
+	// should always block until all asynchronous cleanup work is completed.
+	Build(grpcClientConnInterface any) (p Producer, close func())
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+	// ConnectivityState is the connectivity state of the SubConn.
+	ConnectivityState connectivity.State
+	// ConnectionError is set if the ConnectivityState is TransientFailure,
+	// describing the reason the SubConn failed.  Otherwise, it is nil.
+	ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState is
+	// Ready. Otherwise, it is indeterminate.
+	connectedAddress resolver.Address
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+	return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+	scs.connectedAddress = addr
+}
+
+// A Producer is a type shared among potentially many consumers.  It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
deleted file mode 100644
index 5356194..0000000
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"sync"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal/buffer"
-	"google.golang.org/grpc/internal/grpcsync"
-	"google.golang.org/grpc/resolver"
-)
-
-// scStateUpdate contains the subConn and the new state it changed to.
-type scStateUpdate struct {
-	sc    balancer.SubConn
-	state connectivity.State
-}
-
-// ccBalancerWrapper is a wrapper on top of cc for balancers.
-// It implements balancer.ClientConn interface.
-type ccBalancerWrapper struct {
-	cc         *ClientConn
-	balancerMu sync.Mutex // synchronizes calls to the balancer
-	balancer   balancer.Balancer
-	scBuffer   *buffer.Unbounded
-	done       *grpcsync.Event
-
-	mu       sync.Mutex
-	subConns map[*acBalancerWrapper]struct{}
-}
-
-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
-	ccb := &ccBalancerWrapper{
-		cc:       cc,
-		scBuffer: buffer.NewUnbounded(),
-		done:     grpcsync.NewEvent(),
-		subConns: make(map[*acBalancerWrapper]struct{}),
-	}
-	go ccb.watcher()
-	ccb.balancer = b.Build(ccb, bopts)
-	return ccb
-}
-
-// watcher balancer functions sequentially, so the balancer can be implemented
-// lock-free.
-func (ccb *ccBalancerWrapper) watcher() {
-	for {
-		select {
-		case t := <-ccb.scBuffer.Get():
-			ccb.scBuffer.Load()
-			if ccb.done.HasFired() {
-				break
-			}
-			ccb.balancerMu.Lock()
-			su := t.(*scStateUpdate)
-			if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-				ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state})
-			} else {
-				ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
-			}
-			ccb.balancerMu.Unlock()
-		case <-ccb.done.Done():
-		}
-
-		if ccb.done.HasFired() {
-			ccb.balancer.Close()
-			ccb.mu.Lock()
-			scs := ccb.subConns
-			ccb.subConns = nil
-			ccb.mu.Unlock()
-			for acbw := range scs {
-				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-			}
-			ccb.UpdateBalancerState(connectivity.Connecting, nil)
-			return
-		}
-	}
-}
-
-func (ccb *ccBalancerWrapper) close() {
-	ccb.done.Fire()
-}
-
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	// When updating addresses for a SubConn, if the address in use is not in
-	// the new addresses, the old ac will be tearDown() and a new ac will be
-	// created. tearDown() generates a state change with Shutdown state, we
-	// don't want the balancer to receive this state change. So before
-	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
-	// this function will be called with (nil, Shutdown). We don't need to call
-	// balancer method in this case.
-	if sc == nil {
-		return
-	}
-	ccb.scBuffer.Put(&scStateUpdate{
-		sc:    sc,
-		state: s,
-	})
-}
-
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
-	ccb.balancerMu.Lock()
-	defer ccb.balancerMu.Unlock()
-	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-		return ub.UpdateClientConnState(*ccs)
-	}
-	ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
-	return nil
-}
-
-func (ccb *ccBalancerWrapper) resolverError(err error) {
-	if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
-		ccb.balancerMu.Lock()
-		ub.ResolverError(err)
-		ccb.balancerMu.Unlock()
-	}
-}
-
-func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
-	if len(addrs) <= 0 {
-		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
-	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
-	}
-	ac, err := ccb.cc.newAddrConn(addrs, opts)
-	if err != nil {
-		return nil, err
-	}
-	acbw := &acBalancerWrapper{ac: ac}
-	acbw.ac.mu.Lock()
-	ac.acbw = acbw
-	acbw.ac.mu.Unlock()
-	ccb.subConns[acbw] = struct{}{}
-	return acbw, nil
-}
-
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
-	acbw, ok := sc.(*acBalancerWrapper)
-	if !ok {
-		return
-	}
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
-	delete(ccb.subConns, acbw)
-	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-}
-
-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
-	ccb.mu.Lock()
-	defer ccb.mu.Unlock()
-	if ccb.subConns == nil {
-		return
-	}
-	// Update picker before updating state.  Even though the ordering here does
-	// not matter, it can lead to multiple calls of Pick in the common start-up
-	// case where we wait for ready and then perform an RPC.  If the picker is
-	// updated later, we could call the "connecting" picker when the state is
-	// updated, and then call the "ready" picker after the picker gets updated.
-	ccb.cc.blockingpicker.updatePicker(p)
-	ccb.cc.csMgr.updateState(s)
-}
-
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
-	ccb.cc.resolveNow(o)
-}
-
-func (ccb *ccBalancerWrapper) Target() string {
-	return ccb.cc.target
-}
-
-// acBalancerWrapper is a wrapper on top of ac for balancers.
-// It implements balancer.SubConn interface.
-type acBalancerWrapper struct {
-	mu sync.Mutex
-	ac *addrConn
-}
-
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
-	acbw.mu.Lock()
-	defer acbw.mu.Unlock()
-	if len(addrs) <= 0 {
-		acbw.ac.tearDown(errConnDrain)
-		return
-	}
-	if !acbw.ac.tryUpdateAddrs(addrs) {
-		cc := acbw.ac.cc
-		opts := acbw.ac.scopts
-		acbw.ac.mu.Lock()
-		// Set old ac.acbw to nil so the Shutdown state update will be ignored
-		// by balancer.
-		//
-		// TODO(bar) the state transition could be wrong when tearDown() old ac
-		// and creating new ac, fix the transition.
-		acbw.ac.acbw = nil
-		acbw.ac.mu.Unlock()
-		acState := acbw.ac.getState()
-		acbw.ac.tearDown(errConnDrain)
-
-		if acState == connectivity.Shutdown {
-			return
-		}
-
-		ac, err := cc.newAddrConn(addrs, opts)
-		if err != nil {
-			grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
-			return
-		}
-		acbw.ac = ac
-		ac.mu.Lock()
-		ac.acbw = acbw
-		ac.mu.Unlock()
-		if acState != connectivity.Idle {
-			ac.connect()
-		}
-	}
-}
-
-func (acbw *acBalancerWrapper) Connect() {
-	acbw.mu.Lock()
-	defer acbw.mu.Unlock()
-	acbw.ac.connect()
-}
-
-func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
-	acbw.mu.Lock()
-	defer acbw.mu.Unlock()
-	return acbw.ac
-}
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
deleted file mode 100644
index 66e9a44..0000000
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-	"sync"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-type balancerWrapperBuilder struct {
-	b Balancer // The v1 balancer.
-}
-
-func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
-	bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
-		DialCreds: opts.DialCreds,
-		Dialer:    opts.Dialer,
-	})
-	_, pickfirst := bwb.b.(*pickFirst)
-	bw := &balancerWrapper{
-		balancer:   bwb.b,
-		pickfirst:  pickfirst,
-		cc:         cc,
-		targetAddr: opts.Target.Endpoint,
-		startCh:    make(chan struct{}),
-		conns:      make(map[resolver.Address]balancer.SubConn),
-		connSt:     make(map[balancer.SubConn]*scState),
-		csEvltr:    &balancer.ConnectivityStateEvaluator{},
-		state:      connectivity.Idle,
-	}
-	cc.UpdateBalancerState(connectivity.Idle, bw)
-	go bw.lbWatcher()
-	return bw
-}
-
-func (bwb *balancerWrapperBuilder) Name() string {
-	return "wrapper"
-}
-
-type scState struct {
-	addr Address // The v1 address type.
-	s    connectivity.State
-	down func(error)
-}
-
-type balancerWrapper struct {
-	balancer  Balancer // The v1 balancer.
-	pickfirst bool
-
-	cc         balancer.ClientConn
-	targetAddr string // Target without the scheme.
-
-	mu     sync.Mutex
-	conns  map[resolver.Address]balancer.SubConn
-	connSt map[balancer.SubConn]*scState
-	// This channel is closed when handling the first resolver result.
-	// lbWatcher blocks until this is closed, to avoid race between
-	// - NewSubConn is created, cc wants to notify balancer of state changes;
-	// - Build hasn't return, cc doesn't have access to balancer.
-	startCh chan struct{}
-
-	// To aggregate the connectivity state.
-	csEvltr *balancer.ConnectivityStateEvaluator
-	state   connectivity.State
-}
-
-// lbWatcher watches the Notify channel of the balancer and manages
-// connections accordingly.
-func (bw *balancerWrapper) lbWatcher() {
-	<-bw.startCh
-	notifyCh := bw.balancer.Notify()
-	if notifyCh == nil {
-		// There's no resolver in the balancer. Connect directly.
-		a := resolver.Address{
-			Addr: bw.targetAddr,
-			Type: resolver.Backend,
-		}
-		sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-		if err != nil {
-			grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
-		} else {
-			bw.mu.Lock()
-			bw.conns[a] = sc
-			bw.connSt[sc] = &scState{
-				addr: Address{Addr: bw.targetAddr},
-				s:    connectivity.Idle,
-			}
-			bw.mu.Unlock()
-			sc.Connect()
-		}
-		return
-	}
-
-	for addrs := range notifyCh {
-		grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
-		if bw.pickfirst {
-			var (
-				oldA  resolver.Address
-				oldSC balancer.SubConn
-			)
-			bw.mu.Lock()
-			for oldA, oldSC = range bw.conns {
-				break
-			}
-			bw.mu.Unlock()
-			if len(addrs) <= 0 {
-				if oldSC != nil {
-					// Teardown old sc.
-					bw.mu.Lock()
-					delete(bw.conns, oldA)
-					delete(bw.connSt, oldSC)
-					bw.mu.Unlock()
-					bw.cc.RemoveSubConn(oldSC)
-				}
-				continue
-			}
-
-			var newAddrs []resolver.Address
-			for _, a := range addrs {
-				newAddr := resolver.Address{
-					Addr:       a.Addr,
-					Type:       resolver.Backend, // All addresses from balancer are all backends.
-					ServerName: "",
-					Metadata:   a.Metadata,
-				}
-				newAddrs = append(newAddrs, newAddr)
-			}
-			if oldSC == nil {
-				// Create new sc.
-				sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
-				if err != nil {
-					grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
-				} else {
-					bw.mu.Lock()
-					// For pickfirst, there should be only one SubConn, so the
-					// address doesn't matter. All states updating (up and down)
-					// and picking should all happen on that only SubConn.
-					bw.conns[resolver.Address{}] = sc
-					bw.connSt[sc] = &scState{
-						addr: addrs[0], // Use the first address.
-						s:    connectivity.Idle,
-					}
-					bw.mu.Unlock()
-					sc.Connect()
-				}
-			} else {
-				bw.mu.Lock()
-				bw.connSt[oldSC].addr = addrs[0]
-				bw.mu.Unlock()
-				oldSC.UpdateAddresses(newAddrs)
-			}
-		} else {
-			var (
-				add []resolver.Address // Addresses need to setup connections.
-				del []balancer.SubConn // Connections need to tear down.
-			)
-			resAddrs := make(map[resolver.Address]Address)
-			for _, a := range addrs {
-				resAddrs[resolver.Address{
-					Addr:       a.Addr,
-					Type:       resolver.Backend, // All addresses from balancer are all backends.
-					ServerName: "",
-					Metadata:   a.Metadata,
-				}] = a
-			}
-			bw.mu.Lock()
-			for a := range resAddrs {
-				if _, ok := bw.conns[a]; !ok {
-					add = append(add, a)
-				}
-			}
-			for a, c := range bw.conns {
-				if _, ok := resAddrs[a]; !ok {
-					del = append(del, c)
-					delete(bw.conns, a)
-					// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
-				}
-			}
-			bw.mu.Unlock()
-			for _, a := range add {
-				sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
-				if err != nil {
-					grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
-				} else {
-					bw.mu.Lock()
-					bw.conns[a] = sc
-					bw.connSt[sc] = &scState{
-						addr: resAddrs[a],
-						s:    connectivity.Idle,
-					}
-					bw.mu.Unlock()
-					sc.Connect()
-				}
-			}
-			for _, c := range del {
-				bw.cc.RemoveSubConn(c)
-			}
-		}
-	}
-}
-
-func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	scSt, ok := bw.connSt[sc]
-	if !ok {
-		return
-	}
-	if s == connectivity.Idle {
-		sc.Connect()
-	}
-	oldS := scSt.s
-	scSt.s = s
-	if oldS != connectivity.Ready && s == connectivity.Ready {
-		scSt.down = bw.balancer.Up(scSt.addr)
-	} else if oldS == connectivity.Ready && s != connectivity.Ready {
-		if scSt.down != nil {
-			scSt.down(errConnClosing)
-		}
-	}
-	sa := bw.csEvltr.RecordTransition(oldS, s)
-	if bw.state != sa {
-		bw.state = sa
-	}
-	bw.cc.UpdateBalancerState(bw.state, bw)
-	if s == connectivity.Shutdown {
-		// Remove state for this sc.
-		delete(bw.connSt, sc)
-	}
-}
-
-func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	select {
-	case <-bw.startCh:
-	default:
-		close(bw.startCh)
-	}
-	// There should be a resolver inside the balancer.
-	// All updates here, if any, are ignored.
-}
-
-func (bw *balancerWrapper) Close() {
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	select {
-	case <-bw.startCh:
-	default:
-		close(bw.startCh)
-	}
-	bw.balancer.Close()
-}
-
-// The picker is the balancerWrapper itself.
-// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
-	failfast := true // Default failfast is true.
-	if ss, ok := rpcInfoFromContext(ctx); ok {
-		failfast = ss.failfast
-	}
-	a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
-	if err != nil {
-		return nil, nil, err
-	}
-	if p != nil {
-		done = func(balancer.DoneInfo) { p() }
-		defer func() {
-			if err != nil {
-				p()
-			}
-		}()
-	}
-
-	bw.mu.Lock()
-	defer bw.mu.Unlock()
-	if bw.pickfirst {
-		// Get the first sc in conns.
-		for _, sc := range bw.conns {
-			return sc, done, nil
-		}
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-	sc, ok1 := bw.conns[resolver.Address{
-		Addr:       a.Addr,
-		Type:       resolver.Backend,
-		ServerName: "",
-		Metadata:   a.Metadata,
-	}]
-	s, ok2 := bw.connSt[sc]
-	if !ok1 || !ok2 {
-		// This can only happen due to a race where Get() returned an address
-		// that was subsequently removed by Notify.  In this case we should
-		// retry always.
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-	switch s.s {
-	case connectivity.Ready, connectivity.Idle:
-		return sc, done, nil
-	case connectivity.Shutdown, connectivity.TransientFailure:
-		// If the returned sc has been shut down or is in transient failure,
-		// return error, and this RPC will fail or wait for another picker (if
-		// non-failfast).
-		return nil, nil, balancer.ErrTransientFailure
-	default:
-		// For other states (connecting or unknown), the v1 balancer would
-		// traditionally wait until ready and then issue the RPC.  Returning
-		// ErrNoSubConnAvailable will be a slight improvement in that it will
-		// allow the balancer to choose another address in case others are
-		// connected.
-		return nil, nil, balancer.ErrNoSubConnAvailable
-	}
-}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
new file mode 100644
index 0000000..948a21e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -0,0 +1,520 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/status"
+)
+
+var (
+	setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+	// noOpRegisterHealthListenerFn is used when client side health checking is
+	// disabled. It sends a single READY update on the registered listener.
+	noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
+		listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+		return func() {}
+	}
+)
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen in order by performing them in the serializer, without
+// any mutexes held.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+	internal.EnforceClientConnEmbedding
+	// The following fields are initialized when the wrapper is created and are
+	// read-only afterwards, and therefore can be accessed without a mutex.
+	cc               *ClientConn
+	opts             balancer.BuildOptions
+	serializer       *grpcsync.CallbackSerializer
+	serializerCancel context.CancelFunc
+
+	// The following fields are only accessed within the serializer or during
+	// initialization.
+	curBalancerName string
+	balancer        *gracefulswitch.Balancer
+
+	// The following field is protected by mu.  Caller must take cc.mu before
+	// taking mu.
+	mu     sync.Mutex
+	closed bool
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
+// underlying balancer is not created until the updateClientConnState() method
+// is invoked.
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
+	ctx, cancel := context.WithCancel(cc.ctx)
+	ccb := &ccBalancerWrapper{
+		cc: cc,
+		opts: balancer.BuildOptions{
+			DialCreds:       cc.dopts.copts.TransportCredentials,
+			CredsBundle:     cc.dopts.copts.CredsBundle,
+			Dialer:          cc.dopts.copts.Dialer,
+			Authority:       cc.authority,
+			CustomUserAgent: cc.dopts.copts.UserAgent,
+			ChannelzParent:  cc.channelz,
+			Target:          cc.parsedTarget,
+		},
+		serializer:       grpcsync.NewCallbackSerializer(ctx),
+		serializerCancel: cancel,
+	}
+	ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+	return ccb
+}
+
+func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
+	return ccb.cc.metricsRecorderList
+}
+
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer.  The update is always delivered to the balancer
+// from within the serializer, so the call into the balancer is safe.
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+	errCh := make(chan error)
+	uccs := func(ctx context.Context) {
+		defer close(errCh)
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		name := gracefulswitch.ChildName(ccs.BalancerConfig)
+		if ccb.curBalancerName != name {
+			ccb.curBalancerName = name
+			channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
+		}
+		err := ccb.balancer.UpdateClientConnState(*ccs)
+		if logger.V(2) && err != nil {
+			logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+		}
+		errCh <- err
+	}
+	onFailure := func() { close(errCh) }
+
+	// UpdateClientConnState can race with Close, and when the latter wins, the
+	// serializer is closed, and the attempt to schedule the callback will fail.
+	// It is acceptable to ignore this failure. But since we want to handle the
+	// state update in a blocking fashion (when we successfully schedule the
+	// callback), we have to use the ScheduleOr method and not the MaybeSchedule
+	// method on the serializer.
+	ccb.serializer.ScheduleOr(uccs, onFailure)
+	return <-errCh
+}
+
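+// An illustrative note on the pattern above: ScheduleOr guarantees that
+// exactly one of the two callbacks runs, so <-errCh always returns, yielding
+// the balancer's error when uccs ran to completion, or nil when the update
+// was skipped because the serializer was closed or its context was cancelled.
+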
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer.  The call to the balancer is executed from the serializer.
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ResolverError(err)
+	})
+}
+
+// close initiates async shutdown of the wrapper.  cc.mu must be held when
+// calling this function.  To determine whether the wrapper has finished
+// shutting down, callers should block on ccb.serializer.Done() without cc.mu
+// held.
+func (ccb *ccBalancerWrapper) close() {
+	ccb.mu.Lock()
+	ccb.closed = true
+	ccb.mu.Unlock()
+	channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
+	ccb.serializer.TrySchedule(func(context.Context) {
+		if ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.Close()
+		ccb.balancer = nil
+	})
+	ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+	ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccb.balancer == nil {
+			return
+		}
+		ccb.balancer.ExitIdle()
+	})
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+	}
+	ccb.mu.Unlock()
+
+	if len(addrs) == 0 {
+		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+	}
+	ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+	if err != nil {
+		channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+		return nil, err
+	}
+	acbw := &acBalancerWrapper{
+		ccb:           ccb,
+		ac:            ac,
+		producers:     make(map[balancer.ProducerBuilder]*refCountedProducer),
+		stateListener: opts.StateListener,
+		healthData:    newHealthData(connectivity.Idle),
+	}
+	ac.acbw = acbw
+	return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	acbw, ok := sc.(*acBalancerWrapper)
+	if !ok {
+		return
+	}
+	acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+	ccb.cc.mu.Lock()
+	defer ccb.cc.mu.Unlock()
+	if ccb.cc.conns == nil {
+		// The CC has been closed; ignore this update.
+		return
+	}
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+	// Update picker before updating state.  Even though the ordering here does
+	// not matter, it can lead to multiple calls of Pick in the common start-up
+	// case where we wait for ready and then perform an RPC.  If the picker is
+	// updated later, we could call the "connecting" picker when the state is
+	// updated, and then call the "ready" picker after the picker gets updated.
+
+	// Note that there is no need to check if the balancer wrapper was closed,
+	// as we know the graceful switch LB policy will not call cc if it has been
+	// closed.
+	ccb.cc.pickerWrapper.updatePicker(s.Picker)
+	ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+	ccb.cc.mu.RLock()
+	defer ccb.cc.mu.RUnlock()
+
+	ccb.mu.Lock()
+	if ccb.closed {
+		ccb.mu.Unlock()
+		return
+	}
+	ccb.mu.Unlock()
+	ccb.cc.resolveNowLocked(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+	return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+	internal.EnforceSubConnEmbedding
+	ac            *addrConn          // read-only
+	ccb           *ccBalancerWrapper // read-only
+	stateListener func(balancer.SubConnState)
+
+	producersMu sync.Mutex
+	producers   map[balancer.ProducerBuilder]*refCountedProducer
+
+	// Access to healthData is protected by healthMu.
+	healthMu sync.Mutex
+	// healthData is stored as a pointer to detect when the health listener is
+	// dropped or updated. This is required as closures can't be compared for
+	// equality.
+	healthData *healthData
+}
+
+// healthData holds data related to health state reporting.
+type healthData struct {
+	// connectivityState stores the most recent connectivity state delivered
+	// to the LB policy. This is stored to avoid sending updates when the
+	// SubConn has already exited connectivity state READY.
+	connectivityState connectivity.State
+	// closeHealthProducer stores function to close the ref counted health
+	// producer. The health producer is automatically closed when the SubConn
+	// state changes.
+	closeHealthProducer func()
+}
+
+func newHealthData(s connectivity.State) *healthData {
+	return &healthData{
+		connectivityState:   s,
+		closeHealthProducer: func() {},
+	}
+}
+
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || acbw.ccb.balancer == nil {
+			return
+		}
+		// Invalidate all producers on any state change.
+		acbw.closeProducers()
+
+		// Even though it is optional for balancers, gracefulswitch ensures
+		// opts.StateListener is set, so this cannot ever be nil.
+		// TODO: delete this comment when UpdateSubConnState is removed.
+		scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+		if s == connectivity.Ready {
+			setConnectedAddress(&scs, curAddr)
+		}
+		// Invalidate the health listener by updating the healthData.
+		acbw.healthMu.Lock()
+		// A race may occur if a health listener is registered soon after the
+		// connectivity state is set but before the stateListener is called.
+		// Two cases may arise:
+		// 1. The new state is not READY: RegisterHealthListener has checks to
+		//    ensure no updates are sent when the connectivity state is not
+		//    READY.
+		// 2. The new state is READY: This means that the old state wasn't Ready.
+		//    The RegisterHealthListener API mentions that a health listener
+		//    must not be registered when a SubConn is not ready to avoid such
+		//    races. When this happens, the LB policy would get health updates
+		//    on the old listener. When the LB policy registers a new listener
+		//    on receiving the connectivity update, the health updates will be
+		//    sent to the new health listener.
+		acbw.healthData = newHealthData(scs.ConnectivityState)
+		acbw.healthMu.Unlock()
+
+		acbw.stateListener(scs)
+	})
+}
+
+func (acbw *acBalancerWrapper) String() string {
+	return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+	acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+	go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+	acbw.closeProducers()
+	acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn.  If the addrConn is not
+// ready, blocks until it is or ctx expires.  Returns an error when the context
+// expires or the addrConn is shut down.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+	transport := acbw.ac.getReadyTransport()
+	if transport == nil {
+		return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
+	}
+	return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC.  If the addrConn is not ready, returns
+// errSubConnNotReady.
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
+	cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+	if err != nil {
+		return err
+	}
+	if err := cs.SendMsg(args); err != nil {
+		return err
+	}
+	return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+	producer balancer.Producer
+	refs     int    // number of current refs to the producer
+	close    func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+	acbw.producersMu.Lock()
+	defer acbw.producersMu.Unlock()
+
+	// Look up existing producer from this builder.
+	pData := acbw.producers[pb]
+	if pData == nil {
+		// Not found; create a new one and add it to the producers map.
+		p, closeFn := pb.Build(acbw)
+		pData = &refCountedProducer{producer: p, close: closeFn}
+		acbw.producers[pb] = pData
+	}
+	// Account for this new reference.
+	pData.refs++
+
+	// Return a cleanup function wrapped in a OnceFunc to remove this reference
+	// and delete the refCountedProducer from the map if the total reference
+	// count goes to zero.
+	unref := func() {
+		acbw.producersMu.Lock()
+		// If closeProducers has already closed this producer instance, refs is
+		// set to 0, so the check after decrementing will never pass, and the
+		// producer will not be double-closed.
+		pData.refs--
+		if pData.refs == 0 {
+			defer pData.close() // Run outside the acbw mutex
+			delete(acbw.producers, pb)
+		}
+		acbw.producersMu.Unlock()
+	}
+	return pData.producer, sync.OnceFunc(unref)
+}
+
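+// An illustrative usage sketch (pb is a hypothetical ProducerBuilder): two
+// lookups on the same SubConn share one Producer, and the producer's close
+// function runs only after both references are released.
+//
+//	p1, close1 := sc.GetOrBuildProducer(pb)
+//	p2, close2 := sc.GetOrBuildProducer(pb) // p2 == p1, refs == 2
+//	close1()                                // refs == 1, producer stays open
+//	close2()                                // refs == 0, close function runs
+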
+func (acbw *acBalancerWrapper) closeProducers() {
+	acbw.producersMu.Lock()
+	defer acbw.producersMu.Unlock()
+	for pb, pData := range acbw.producers {
+		pData.refs = 0
+		pData.close()
+		delete(acbw.producers, pb)
+	}
+}
+
+// healthProducerRegisterFn is a type alias for the health producer's function
+// for registering listeners.
+type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()
+
+// healthListenerRegFn returns a function to register a listener for health
+// updates. If client side health checks are disabled, the registered listener
+// will get a single READY (raw connectivity state) update.
+//
+// Client side health checking is enabled when all the following
+// conditions are satisfied:
+// 1. Health checking is not disabled using the dial option.
+// 2. The health package is imported.
+// 3. The health check config is present in the service config.
+func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
+	if acbw.ccb.cc.dopts.disableHealthCheck {
+		return noOpRegisterHealthListenerFn
+	}
+	regHealthLisFn := internal.RegisterClientHealthCheckListener
+	if regHealthLisFn == nil {
+		// The health package is not imported.
+		return noOpRegisterHealthListenerFn
+	}
+	cfg := acbw.ac.cc.healthCheckConfig()
+	if cfg == nil {
+		return noOpRegisterHealthListenerFn
+	}
+	return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
+		return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
+	}
+}
+
+// RegisterHealthListener accepts a health listener from the LB policy. It sends
+// updates to the health listener as long as the SubConn's connectivity state
+// doesn't change and a new health listener is not registered. To invalidate
+// the currently registered health listener, acbw updates the healthData. If a
+// nil listener is registered, the active health listener is dropped.
+func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
+	acbw.healthMu.Lock()
+	defer acbw.healthMu.Unlock()
+	acbw.healthData.closeHealthProducer()
+	// Listeners should not be registered when the connectivity state
+	// isn't Ready. This may happen when the balancer registers a listener
+	// after the connectivityState is updated, but before it is notified
+	// of the update.
+	if acbw.healthData.connectivityState != connectivity.Ready {
+		return
+	}
+	// Replace the health data to stop sending updates to any previously
+	// registered health listeners.
+	hd := newHealthData(connectivity.Ready)
+	acbw.healthData = hd
+	if listener == nil {
+		return
+	}
+
+	registerFn := acbw.healthListenerRegFn()
+	acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || acbw.ccb.balancer == nil {
+			return
+		}
+		// Don't send updates if a new listener is registered.
+		acbw.healthMu.Lock()
+		defer acbw.healthMu.Unlock()
+		if acbw.healthData != hd {
+			return
+		}
+		// Serialize the health updates from the health producer with
+		// other calls into the LB policy.
+		listenerWrapper := func(scs balancer.SubConnState) {
+			acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+				if ctx.Err() != nil || acbw.ccb.balancer == nil {
+					return
+				}
+				acbw.healthMu.Lock()
+				defer acbw.healthMu.Unlock()
+				if acbw.healthData != hd {
+					return
+				}
+				listener(scs)
+			})
+		}
+
+		hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
+	})
+}
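+
+// Illustrative sketch only: a pick_first-style LB policy might register a
+// health listener once the SubConn reports READY (the balancer and method
+// names are assumptions):
+//
+//	sc.RegisterHealthListener(func(scs balancer.SubConnState) {
+//		// Fires once with READY when health checking is disabled; otherwise
+//		// it tracks the health service's serving status for the SubConn.
+//		b.updateState(scs)
+//	})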
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index f393bb6..b1364a0 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -1,24 +1,45 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
+// versions:
+// 	protoc-gen-go v1.36.6
+// 	protoc        v5.27.1
+// source: grpc/binlog/v1/binarylog.proto
 
-package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+package grpc_binarylog_v1
 
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import duration "github.com/golang/protobuf/ptypes/duration"
-import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
 
 // Enumerates the type of event
 // Note the terminology is different from the RPC semantics
@@ -54,32 +75,55 @@
 	GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
 )
 
-var GrpcLogEntry_EventType_name = map[int32]string{
-	0: "EVENT_TYPE_UNKNOWN",
-	1: "EVENT_TYPE_CLIENT_HEADER",
-	2: "EVENT_TYPE_SERVER_HEADER",
-	3: "EVENT_TYPE_CLIENT_MESSAGE",
-	4: "EVENT_TYPE_SERVER_MESSAGE",
-	5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
-	6: "EVENT_TYPE_SERVER_TRAILER",
-	7: "EVENT_TYPE_CANCEL",
-}
-var GrpcLogEntry_EventType_value = map[string]int32{
-	"EVENT_TYPE_UNKNOWN":           0,
-	"EVENT_TYPE_CLIENT_HEADER":     1,
-	"EVENT_TYPE_SERVER_HEADER":     2,
-	"EVENT_TYPE_CLIENT_MESSAGE":    3,
-	"EVENT_TYPE_SERVER_MESSAGE":    4,
-	"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
-	"EVENT_TYPE_SERVER_TRAILER":    6,
-	"EVENT_TYPE_CANCEL":            7,
+// Enum value maps for GrpcLogEntry_EventType.
+var (
+	GrpcLogEntry_EventType_name = map[int32]string{
+		0: "EVENT_TYPE_UNKNOWN",
+		1: "EVENT_TYPE_CLIENT_HEADER",
+		2: "EVENT_TYPE_SERVER_HEADER",
+		3: "EVENT_TYPE_CLIENT_MESSAGE",
+		4: "EVENT_TYPE_SERVER_MESSAGE",
+		5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+		6: "EVENT_TYPE_SERVER_TRAILER",
+		7: "EVENT_TYPE_CANCEL",
+	}
+	GrpcLogEntry_EventType_value = map[string]int32{
+		"EVENT_TYPE_UNKNOWN":           0,
+		"EVENT_TYPE_CLIENT_HEADER":     1,
+		"EVENT_TYPE_SERVER_HEADER":     2,
+		"EVENT_TYPE_CLIENT_MESSAGE":    3,
+		"EVENT_TYPE_SERVER_MESSAGE":    4,
+		"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+		"EVENT_TYPE_SERVER_TRAILER":    6,
+		"EVENT_TYPE_CANCEL":            7,
+	}
+)
+
+func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
+	p := new(GrpcLogEntry_EventType)
+	*p = x
+	return p
 }
 
 func (x GrpcLogEntry_EventType) String() string {
-	return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
+
+func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
+}
+
+func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
+}
+
+func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
 func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
 }
 
 // Enumerates the entity that generates the log entry
@@ -91,22 +135,45 @@
 	GrpcLogEntry_LOGGER_SERVER  GrpcLogEntry_Logger = 2
 )
 
-var GrpcLogEntry_Logger_name = map[int32]string{
-	0: "LOGGER_UNKNOWN",
-	1: "LOGGER_CLIENT",
-	2: "LOGGER_SERVER",
-}
-var GrpcLogEntry_Logger_value = map[string]int32{
-	"LOGGER_UNKNOWN": 0,
-	"LOGGER_CLIENT":  1,
-	"LOGGER_SERVER":  2,
+// Enum value maps for GrpcLogEntry_Logger.
+var (
+	GrpcLogEntry_Logger_name = map[int32]string{
+		0: "LOGGER_UNKNOWN",
+		1: "LOGGER_CLIENT",
+		2: "LOGGER_SERVER",
+	}
+	GrpcLogEntry_Logger_value = map[string]int32{
+		"LOGGER_UNKNOWN": 0,
+		"LOGGER_CLIENT":  1,
+		"LOGGER_SERVER":  2,
+	}
+)
+
+func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
+	p := new(GrpcLogEntry_Logger)
+	*p = x
+	return p
 }
 
 func (x GrpcLogEntry_Logger) String() string {
-	return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
+
+func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
+}
+
+func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
 func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
 }
 
 type Address_Type int32
@@ -122,30 +189,54 @@
 	Address_TYPE_UNIX Address_Type = 3
 )
 
-var Address_Type_name = map[int32]string{
-	0: "TYPE_UNKNOWN",
-	1: "TYPE_IPV4",
-	2: "TYPE_IPV6",
-	3: "TYPE_UNIX",
-}
-var Address_Type_value = map[string]int32{
-	"TYPE_UNKNOWN": 0,
-	"TYPE_IPV4":    1,
-	"TYPE_IPV6":    2,
-	"TYPE_UNIX":    3,
+// Enum value maps for Address_Type.
+var (
+	Address_Type_name = map[int32]string{
+		0: "TYPE_UNKNOWN",
+		1: "TYPE_IPV4",
+		2: "TYPE_IPV6",
+		3: "TYPE_UNIX",
+	}
+	Address_Type_value = map[string]int32{
+		"TYPE_UNKNOWN": 0,
+		"TYPE_IPV4":    1,
+		"TYPE_IPV6":    2,
+		"TYPE_UNIX":    3,
+	}
+)
+
+func (x Address_Type) Enum() *Address_Type {
+	p := new(Address_Type)
+	*p = x
+	return p
 }
 
 func (x Address_Type) String() string {
-	return proto.EnumName(Address_Type_name, int32(x))
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
+
+func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
+	return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
+}
+
+func (Address_Type) Type() protoreflect.EnumType {
+	return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
+}
+
+func (x Address_Type) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Address_Type.Descriptor instead.
 func (Address_Type) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
 }
 
 // Log entry we store in binary logs
 type GrpcLogEntry struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The timestamp of the binary log message
-	Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+	Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
 	// Uniquely identifies a call. The value must not be 0 in order to disambiguate
 	// from an unset value.
 	// Each call may have several log entries, they will all have the same call_id.
@@ -158,11 +249,12 @@
 	// durability or ordering is not guaranteed.
 	SequenceIdWithinCall uint64                 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
 	Type                 GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
-	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
+	Logger               GrpcLogEntry_Logger    `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum values
 	// The logger uses one of the following fields to record the payload,
 	// according to the type of the log entry.
 	//
 	// Types that are valid to be assigned to Payload:
+	//
 	//	*GrpcLogEntry_ClientHeader
 	//	*GrpcLogEntry_ServerHeader
 	//	*GrpcLogEntry_Message
@@ -175,71 +267,133 @@
 	// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
 	// the case of trailers-only. On server side, peer is always
 	// logged on EVENT_TYPE_CLIENT_HEADER.
-	Peer                 *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	Peer          *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *GrpcLogEntry) Reset()         { *m = GrpcLogEntry{} }
-func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
-func (*GrpcLogEntry) ProtoMessage()    {}
+func (x *GrpcLogEntry) Reset() {
+	*x = GrpcLogEntry{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *GrpcLogEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcLogEntry) ProtoMessage() {}
+
+func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
 func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
-}
-func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
-}
-func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
-}
-func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
-}
-func (m *GrpcLogEntry) XXX_Size() int {
-	return xxx_messageInfo_GrpcLogEntry.Size(m)
-}
-func (m *GrpcLogEntry) XXX_DiscardUnknown() {
-	xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
 }
 
-var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
-
-func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
-	if m != nil {
-		return m.Timestamp
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+	if x != nil {
+		return x.Timestamp
 	}
 	return nil
 }
 
-func (m *GrpcLogEntry) GetCallId() uint64 {
-	if m != nil {
-		return m.CallId
+func (x *GrpcLogEntry) GetCallId() uint64 {
+	if x != nil {
+		return x.CallId
 	}
 	return 0
 }
 
-func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
-	if m != nil {
-		return m.SequenceIdWithinCall
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+	if x != nil {
+		return x.SequenceIdWithinCall
 	}
 	return 0
 }
 
-func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
-	if m != nil {
-		return m.Type
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+	if x != nil {
+		return x.Type
 	}
 	return GrpcLogEntry_EVENT_TYPE_UNKNOWN
 }
 
-func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
-	if m != nil {
-		return m.Logger
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+	if x != nil {
+		return x.Logger
 	}
 	return GrpcLogEntry_LOGGER_UNKNOWN
 }
 
+func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+	if x != nil {
+		return x.Payload
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+	if x != nil {
+		if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
+			return x.ClientHeader
+		}
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+	if x != nil {
+		if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
+			return x.ServerHeader
+		}
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+	if x != nil {
+		if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
+			return x.Message
+		}
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+	if x != nil {
+		if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
+			return x.Trailer
+		}
+	}
+	return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+	if x != nil {
+		return x.PayloadTruncated
+	}
+	return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+	if x != nil {
+		return x.Peer
+	}
+	return nil
+}
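+
+// Illustrative sketch only: consumers usually branch on the payload oneof,
+// either through the typed getters above or a type switch ("entry" is an
+// assumed *GrpcLogEntry):
+//
+//	switch p := entry.GetPayload().(type) {
+//	case *GrpcLogEntry_ClientHeader:
+//		_ = p.ClientHeader.GetMethodName()
+//	case *GrpcLogEntry_Message:
+//		_ = p.Message.GetData()
+//	}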
+
 type isGrpcLogEntry_Payload interface {
 	isGrpcLogEntry_Payload()
 }
@@ -253,6 +407,7 @@
 }
 
 type GrpcLogEntry_Message struct {
+	// Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
 	Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
 }
 
@@ -268,168 +423,8 @@
 
 func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
 
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
-	if m != nil {
-		return m.Payload
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
-		return x.ClientHeader
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
-		return x.ServerHeader
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetMessage() *Message {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
-		return x.Message
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetTrailer() *Trailer {
-	if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
-		return x.Trailer
-	}
-	return nil
-}
-
-func (m *GrpcLogEntry) GetPayloadTruncated() bool {
-	if m != nil {
-		return m.PayloadTruncated
-	}
-	return false
-}
-
-func (m *GrpcLogEntry) GetPeer() *Address {
-	if m != nil {
-		return m.Peer
-	}
-	return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
-		(*GrpcLogEntry_ClientHeader)(nil),
-		(*GrpcLogEntry_ServerHeader)(nil),
-		(*GrpcLogEntry_Message)(nil),
-		(*GrpcLogEntry_Trailer)(nil),
-	}
-}
-
-func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*GrpcLogEntry)
-	// payload
-	switch x := m.Payload.(type) {
-	case *GrpcLogEntry_ClientHeader:
-		b.EncodeVarint(6<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ClientHeader); err != nil {
-			return err
-		}
-	case *GrpcLogEntry_ServerHeader:
-		b.EncodeVarint(7<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.ServerHeader); err != nil {
-			return err
-		}
-	case *GrpcLogEntry_Message:
-		b.EncodeVarint(8<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Message); err != nil {
-			return err
-		}
-	case *GrpcLogEntry_Trailer:
-		b.EncodeVarint(9<<3 | proto.WireBytes)
-		if err := b.EncodeMessage(x.Trailer); err != nil {
-			return err
-		}
-	case nil:
-	default:
-		return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*GrpcLogEntry)
-	switch tag {
-	case 6: // payload.client_header
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ClientHeader)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_ClientHeader{msg}
-		return true, err
-	case 7: // payload.server_header
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(ServerHeader)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_ServerHeader{msg}
-		return true, err
-	case 8: // payload.message
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Message)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_Message{msg}
-		return true, err
-	case 9: // payload.trailer
-		if wire != proto.WireBytes {
-			return true, proto.ErrInternalBadWireType
-		}
-		msg := new(Trailer)
-		err := b.DecodeMessage(msg)
-		m.Payload = &GrpcLogEntry_Trailer{msg}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*GrpcLogEntry)
-	// payload
-	switch x := m.Payload.(type) {
-	case *GrpcLogEntry_ClientHeader:
-		s := proto.Size(x.ClientHeader)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *GrpcLogEntry_ServerHeader:
-		s := proto.Size(x.ServerHeader)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *GrpcLogEntry_Message:
-		s := proto.Size(x.Message)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case *GrpcLogEntry_Trailer:
-		s := proto.Size(x.Trailer)
-		n += 1 // tag and wire
-		n += proto.SizeVarint(uint64(s))
-		n += s
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
 type ClientHeader struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// This contains only the metadata from the application.
 	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
 	// The name of the RPC method, which looks something like:
@@ -438,109 +433,121 @@
 	MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
 	// A single process may be used to run multiple virtual
 	// servers with different identities.
-	// The authority is the name of such a server identitiy.
+	// The authority is the name of such a server identity.
 	// It is typically a portion of the URI in the form of
 	// <host> or <host>:<port> .
 	Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
 	// the RPC timeout
-	Timeout              *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
+	Timeout       *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *ClientHeader) Reset()         { *m = ClientHeader{} }
-func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
-func (*ClientHeader) ProtoMessage()    {}
+func (x *ClientHeader) Reset() {
+	*x = ClientHeader{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ClientHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
 func (*ClientHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
-}
-func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
-}
-func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
-}
-func (dst *ClientHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ClientHeader.Merge(dst, src)
-}
-func (m *ClientHeader) XXX_Size() int {
-	return xxx_messageInfo_ClientHeader.Size(m)
-}
-func (m *ClientHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
 }
 
-var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
-
-func (m *ClientHeader) GetMetadata() *Metadata {
-	if m != nil {
-		return m.Metadata
+func (x *ClientHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
 	}
 	return nil
 }
 
-func (m *ClientHeader) GetMethodName() string {
-	if m != nil {
-		return m.MethodName
+func (x *ClientHeader) GetMethodName() string {
+	if x != nil {
+		return x.MethodName
 	}
 	return ""
 }
 
-func (m *ClientHeader) GetAuthority() string {
-	if m != nil {
-		return m.Authority
+func (x *ClientHeader) GetAuthority() string {
+	if x != nil {
+		return x.Authority
 	}
 	return ""
 }
 
-func (m *ClientHeader) GetTimeout() *duration.Duration {
-	if m != nil {
-		return m.Timeout
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+	if x != nil {
+		return x.Timeout
 	}
 	return nil
 }
 
 type ServerHeader struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// This contains only the metadata from the application.
-	Metadata             *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
-	XXX_unrecognized     []byte    `json:"-"`
-	XXX_sizecache        int32     `json:"-"`
+	Metadata      *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *ServerHeader) Reset()         { *m = ServerHeader{} }
-func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
-func (*ServerHeader) ProtoMessage()    {}
+func (x *ServerHeader) Reset() {
+	*x = ServerHeader{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerHeader) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
 func (*ServerHeader) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
-}
-func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
-}
-func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
-}
-func (dst *ServerHeader) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerHeader.Merge(dst, src)
-}
-func (m *ServerHeader) XXX_Size() int {
-	return xxx_messageInfo_ServerHeader.Size(m)
-}
-func (m *ServerHeader) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServerHeader.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
 }
 
-var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
-
-func (m *ServerHeader) GetMetadata() *Metadata {
-	if m != nil {
-		return m.Metadata
+func (x *ServerHeader) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
 	}
 	return nil
 }
 
 type Trailer struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// This contains only the metadata from the application.
 	Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
 	// The gRPC status code.
@@ -550,110 +557,121 @@
 	StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
 	// The value of the 'grpc-status-details-bin' metadata key. If
 	// present, this is always an encoded 'google.rpc.Status' message.
-	StatusDetails        []byte   `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *Trailer) Reset()         { *m = Trailer{} }
-func (m *Trailer) String() string { return proto.CompactTextString(m) }
-func (*Trailer) ProtoMessage()    {}
+func (x *Trailer) Reset() {
+	*x = Trailer{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Trailer) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Trailer) ProtoMessage() {}
+
+func (x *Trailer) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
 func (*Trailer) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
-}
-func (m *Trailer) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Trailer.Unmarshal(m, b)
-}
-func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
-}
-func (dst *Trailer) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Trailer.Merge(dst, src)
-}
-func (m *Trailer) XXX_Size() int {
-	return xxx_messageInfo_Trailer.Size(m)
-}
-func (m *Trailer) XXX_DiscardUnknown() {
-	xxx_messageInfo_Trailer.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
 }
 
-var xxx_messageInfo_Trailer proto.InternalMessageInfo
-
-func (m *Trailer) GetMetadata() *Metadata {
-	if m != nil {
-		return m.Metadata
+func (x *Trailer) GetMetadata() *Metadata {
+	if x != nil {
+		return x.Metadata
 	}
 	return nil
 }
 
-func (m *Trailer) GetStatusCode() uint32 {
-	if m != nil {
-		return m.StatusCode
+func (x *Trailer) GetStatusCode() uint32 {
+	if x != nil {
+		return x.StatusCode
 	}
 	return 0
 }
 
-func (m *Trailer) GetStatusMessage() string {
-	if m != nil {
-		return m.StatusMessage
+func (x *Trailer) GetStatusMessage() string {
+	if x != nil {
+		return x.StatusMessage
 	}
 	return ""
 }
 
-func (m *Trailer) GetStatusDetails() []byte {
-	if m != nil {
-		return m.StatusDetails
+func (x *Trailer) GetStatusDetails() []byte {
+	if x != nil {
+		return x.StatusDetails
 	}
 	return nil
 }
 
 // Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
 type Message struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Length of the message. It may not be the same as the length of the
 	// data field, as the logging payload can be truncated or omitted.
 	Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
 	// May be truncated or omitted.
-	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	Data          []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *Message) Reset()         { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage()    {}
+func (x *Message) Reset() {
+	*x = Message{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Message) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
 func (*Message) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Message.Unmarshal(m, b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-}
-func (dst *Message) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Message.Merge(dst, src)
-}
-func (m *Message) XXX_Size() int {
-	return xxx_messageInfo_Message.Size(m)
-}
-func (m *Message) XXX_DiscardUnknown() {
-	xxx_messageInfo_Message.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
 }
 
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-func (m *Message) GetLength() uint32 {
-	if m != nil {
-		return m.Length
+func (x *Message) GetLength() uint32 {
+	if x != nil {
+		return x.Length
 	}
 	return 0
 }
 
-func (m *Message) GetData() []byte {
-	if m != nil {
-		return m.Data
+func (x *Message) GetData() []byte {
+	if x != nil {
+		return x.Data
 	}
 	return nil
 }
@@ -666,12 +684,12 @@
 // Header keys added by gRPC are omitted. To be more specific,
 // implementations will not log the following entries, and this is
 // not to be treated as a truncation:
-// - entries handled by grpc that are not user visible, such as those
-//   that begin with 'grpc-' (with exception of grpc-trace-bin)
-//   or keys like 'lb-token'
-// - transport specific entries, including but not limited to:
-//   ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
-// - entries added for call credentials
+//   - entries handled by grpc that are not user visible, such as those
+//     that begin with 'grpc-' (with the exception of grpc-trace-bin)
+//     or keys like 'lb-token'
+//   - transport specific entries, including but not limited to:
+//     ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
+//   - entries added for call credentials
 //
 // Implementations must always log grpc-trace-bin if it is present.
 // Practically speaking it will only be visible on server side because
@@ -680,221 +698,307 @@
 // header is just a normal metadata key.
 // The pair will not count towards the size limit.
 type Metadata struct {
-	Entry                []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
-	XXX_unrecognized     []byte           `json:"-"`
-	XXX_sizecache        int32            `json:"-"`
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Entry         []*MetadataEntry       `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *Metadata) Reset()         { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage()    {}
+func (x *Metadata) Reset() {
+	*x = Metadata{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Metadata) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
 func (*Metadata) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Metadata.Unmarshal(m, b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-}
-func (dst *Metadata) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Metadata.Merge(dst, src)
-}
-func (m *Metadata) XXX_Size() int {
-	return xxx_messageInfo_Metadata.Size(m)
-}
-func (m *Metadata) XXX_DiscardUnknown() {
-	xxx_messageInfo_Metadata.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
 }
 
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func (m *Metadata) GetEntry() []*MetadataEntry {
-	if m != nil {
-		return m.Entry
+func (x *Metadata) GetEntry() []*MetadataEntry {
+	if x != nil {
+		return x.Entry
 	}
 	return nil
 }
 
 // A metadata key value pair
 type MetadataEntry struct {
-	Key                  string   `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
-	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	Key           string                 `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Value         []byte                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *MetadataEntry) Reset()         { *m = MetadataEntry{} }
-func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
-func (*MetadataEntry) ProtoMessage()    {}
+func (x *MetadataEntry) Reset() {
+	*x = MetadataEntry{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *MetadataEntry) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataEntry) ProtoMessage() {}
+
+func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
 func (*MetadataEntry) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
-}
-func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
-}
-func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
-}
-func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_MetadataEntry.Merge(dst, src)
-}
-func (m *MetadataEntry) XXX_Size() int {
-	return xxx_messageInfo_MetadataEntry.Size(m)
-}
-func (m *MetadataEntry) XXX_DiscardUnknown() {
-	xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
 }
 
-var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
-
-func (m *MetadataEntry) GetKey() string {
-	if m != nil {
-		return m.Key
+func (x *MetadataEntry) GetKey() string {
+	if x != nil {
+		return x.Key
 	}
 	return ""
 }
 
-func (m *MetadataEntry) GetValue() []byte {
-	if m != nil {
-		return m.Value
+func (x *MetadataEntry) GetValue() []byte {
+	if x != nil {
+		return x.Value
 	}
 	return nil
 }
 
 // Address information
 type Address struct {
-	Type    Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
-	Address string       `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+	state   protoimpl.MessageState `protogen:"open.v1"`
+	Type    Address_Type           `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+	Address string                 `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
 	// only for TYPE_IPV4 and TYPE_IPV6
-	IpPort               uint32   `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	IpPort        uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *Address) Reset()         { *m = Address{} }
-func (m *Address) String() string { return proto.CompactTextString(m) }
-func (*Address) ProtoMessage()    {}
+func (x *Address) Reset() {
+	*x = Address{}
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *Address) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
 func (*Address) Descriptor() ([]byte, []int) {
-	return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
-}
-func (m *Address) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_Address.Unmarshal(m, b)
-}
-func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_Address.Marshal(b, m, deterministic)
-}
-func (dst *Address) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_Address.Merge(dst, src)
-}
-func (m *Address) XXX_Size() int {
-	return xxx_messageInfo_Address.Size(m)
-}
-func (m *Address) XXX_DiscardUnknown() {
-	xxx_messageInfo_Address.DiscardUnknown(m)
+	return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
 }
 
-var xxx_messageInfo_Address proto.InternalMessageInfo
-
-func (m *Address) GetType() Address_Type {
-	if m != nil {
-		return m.Type
+func (x *Address) GetType() Address_Type {
+	if x != nil {
+		return x.Type
 	}
 	return Address_TYPE_UNKNOWN
 }
 
-func (m *Address) GetAddress() string {
-	if m != nil {
-		return m.Address
+func (x *Address) GetAddress() string {
+	if x != nil {
+		return x.Address
 	}
 	return ""
 }
 
-func (m *Address) GetIpPort() uint32 {
-	if m != nil {
-		return m.IpPort
+func (x *Address) GetIpPort() uint32 {
+	if x != nil {
+		return x.IpPort
 	}
 	return 0
 }
 
-func init() {
-	proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
-	proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
-	proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
-	proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
-	proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
-	proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
-	proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
-	proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
-	proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
-	proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
-	proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
+var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
+
+const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" +
+	"\n" +
+	"\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" +
+	"\fGrpcLogEntry\x128\n" +
+	"\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" +
+	"\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" +
+	"\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" +
+	"\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" +
+	"\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" +
+	"\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" +
+	"\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" +
+	"\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" +
+	"\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" +
+	"\x11payload_truncated\x18\n" +
+	" \x01(\bR\x10payloadTruncated\x12.\n" +
+	"\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" +
+	"\tEventType\x12\x16\n" +
+	"\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" +
+	"\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" +
+	"\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" +
+	"\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" +
+	"\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" +
+	"\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" +
+	"\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" +
+	"\x11EVENT_TYPE_CANCEL\x10\a\"B\n" +
+	"\x06Logger\x12\x12\n" +
+	"\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" +
+	"\rLOGGER_CLIENT\x10\x01\x12\x11\n" +
+	"\rLOGGER_SERVER\x10\x02B\t\n" +
+	"\apayload\"\xbb\x01\n" +
+	"\fClientHeader\x127\n" +
+	"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
+	"\vmethod_name\x18\x02 \x01(\tR\n" +
+	"methodName\x12\x1c\n" +
+	"\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" +
+	"\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" +
+	"\fServerHeader\x127\n" +
+	"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" +
+	"\aTrailer\x127\n" +
+	"\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
+	"\vstatus_code\x18\x02 \x01(\rR\n" +
+	"statusCode\x12%\n" +
+	"\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" +
+	"\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" +
+	"\aMessage\x12\x16\n" +
+	"\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" +
+	"\x04data\x18\x02 \x01(\fR\x04data\"B\n" +
+	"\bMetadata\x126\n" +
+	"\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" +
+	"\rMetadataEntry\x12\x10\n" +
+	"\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" +
+	"\aAddress\x123\n" +
+	"\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" +
+	"\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" +
+	"\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" +
+	"\x04Type\x12\x10\n" +
+	"\fTYPE_UNKNOWN\x10\x00\x12\r\n" +
+	"\tTYPE_IPV4\x10\x01\x12\r\n" +
+	"\tTYPE_IPV6\x10\x02\x12\r\n" +
+	"\tTYPE_UNIX\x10\x03B\\\n" +
+	"\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3"
+
+var (
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
+	file_grpc_binlog_v1_binarylog_proto_rawDescData []byte
+)
+
+func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
+	file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
+		file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)))
+	})
+	return file_grpc_binlog_v1_binarylog_proto_rawDescData
 }
 
-func init() {
-	proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
+var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
+	(GrpcLogEntry_EventType)(0),   // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
+	(GrpcLogEntry_Logger)(0),      // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
+	(Address_Type)(0),             // 2: grpc.binarylog.v1.Address.Type
+	(*GrpcLogEntry)(nil),          // 3: grpc.binarylog.v1.GrpcLogEntry
+	(*ClientHeader)(nil),          // 4: grpc.binarylog.v1.ClientHeader
+	(*ServerHeader)(nil),          // 5: grpc.binarylog.v1.ServerHeader
+	(*Trailer)(nil),               // 6: grpc.binarylog.v1.Trailer
+	(*Message)(nil),               // 7: grpc.binarylog.v1.Message
+	(*Metadata)(nil),              // 8: grpc.binarylog.v1.Metadata
+	(*MetadataEntry)(nil),         // 9: grpc.binarylog.v1.MetadataEntry
+	(*Address)(nil),               // 10: grpc.binarylog.v1.Address
+	(*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+	(*durationpb.Duration)(nil),   // 12: google.protobuf.Duration
+}
+var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
+	11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
+	0,  // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
+	1,  // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
+	4,  // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
+	5,  // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
+	7,  // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
+	6,  // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
+	10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
+	8,  // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
+	8,  // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+	8,  // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
+	9,  // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
+	2,  // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
+	14, // [14:14] is the sub-list for method output_type
+	14, // [14:14] is the sub-list for method input_type
+	14, // [14:14] is the sub-list for extension type_name
+	14, // [14:14] is the sub-list for extension extendee
+	0,  // [0:14] is the sub-list for field type_name
 }
 
-var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
-	// 900 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
-	0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
-	0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
-	0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
-	0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
-	0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
-	0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
-	0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
-	0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
-	0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
-	0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
-	0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
-	0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
-	0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
-	0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
-	0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
-	0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
-	0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
-	0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
-	0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
-	0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
-	0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
-	0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
-	0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
-	0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
-	0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
-	0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
-	0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
-	0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
-	0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
-	0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
-	0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
-	0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
-	0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
-	0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
-	0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
-	0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
-	0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
-	0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
-	0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
-	0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
-	0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
-	0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
-	0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
-	0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
-	0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
-	0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
-	0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
-	0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
-	0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
-	0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
-	0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
-	0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
-	0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
-	0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
-	0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
-	0xd4, 0x07, 0x00, 0x00,
+func init() { file_grpc_binlog_v1_binarylog_proto_init() }
+func file_grpc_binlog_v1_binarylog_proto_init() {
+	if File_grpc_binlog_v1_binarylog_proto != nil {
+		return
+	}
+	file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
+		(*GrpcLogEntry_ClientHeader)(nil),
+		(*GrpcLogEntry_ServerHeader)(nil),
+		(*GrpcLogEntry_Message)(nil),
+		(*GrpcLogEntry_Trailer)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)),
+			NumEnums:      3,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_grpc_binlog_v1_binarylog_proto_goTypes,
+		DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
+		EnumInfos:         file_grpc_binlog_v1_binarylog_proto_enumTypes,
+		MessageInfos:      file_grpc_binlog_v1_binarylog_proto_msgTypes,
+	}.Build()
+	File_grpc_binlog_v1_binarylog_proto = out.File
+	file_grpc_binlog_v1_binarylog_proto_goTypes = nil
+	file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
 }
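For context, the OneofWrappers registration above is what tells the protobuf runtime which concrete wrapper types back the GrpcLogEntry payload oneof. A minimal consumer-side sketch, assuming the generated accessors from the grpc_binarylog_v1 package (the case types are exactly the wrappers registered above; the accessor names are the usual protoc-gen-go output and are an assumption here):

package sketch

import (
	"fmt"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

// describePayload type-switches over the payload oneof of a decoded entry.
func describePayload(e *binlogpb.GrpcLogEntry) string {
	switch p := e.GetPayload().(type) {
	case *binlogpb.GrpcLogEntry_ClientHeader:
		return fmt.Sprintf("client header with %d metadata entries", len(p.ClientHeader.GetMetadata().GetEntry()))
	case *binlogpb.GrpcLogEntry_ServerHeader:
		return "server header"
	case *binlogpb.GrpcLogEntry_Message:
		return fmt.Sprintf("message of %d bytes", p.Message.GetLength())
	case *binlogpb.GrpcLogEntry_Trailer:
		return fmt.Sprintf("trailer with status code %d", p.Trailer.GetStatusCode())
	default:
		return "no payload set"
	}
}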
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 9e20e4d..788c89c 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -26,7 +26,7 @@
 // received.  This is typically called by generated code.
 //
 // All errors returned by Invoke are compatible with the status package.
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
 	// allow interceptor to see all applicable call options, which means those
 	// configured as defaults from dial option as well as per-call options
 	opts = combine(cc.dopts.callOptions, opts)
@@ -56,13 +56,13 @@
 // received.  This is typically called by generated code.
 //
 // DEPRECATED: Use ClientConn.Invoke instead.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
 	return cc.Invoke(ctx, method, args, reply, opts...)
 }
 
 var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
 
-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 	cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
 	if err != nil {
 		return err
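The interface{} → any swap in this file is purely mechanical (any has been an alias for interface{} since Go 1.18), but it is visible to callers who invoke unary RPCs without generated stubs. A hedged sketch of such a call; the method path is hypothetical, while ClientConn.Invoke itself is the API shown above:

package sketch

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/protobuf/proto"
)

// ping issues a unary RPC by hand: req is serialized by the channel's codec,
// and the response is deserialized into resp.
func ping(ctx context.Context, cc *grpc.ClientConn, req, resp proto.Message) error {
	return cc.Invoke(ctx, "/echo.Echo/Ping", req, resp)
}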
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 0000000..32b7fa5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec, as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided by
+// the `internal/channelz` package.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 4414ba8..a3c315f 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -23,8 +23,8 @@
 	"errors"
 	"fmt"
 	"math"
-	"net"
-	"reflect"
+	"net/url"
+	"slices"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -32,13 +32,15 @@
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/balancer/pickfirst"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/idle"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/stats"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/resolver"
@@ -46,15 +48,14 @@
 	"google.golang.org/grpc/status"
 
 	_ "google.golang.org/grpc/balancer/roundrobin"           // To register roundrobin.
-	_ "google.golang.org/grpc/internal/resolver/dns"         // To register dns resolver.
 	_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+	_ "google.golang.org/grpc/internal/resolver/unix"        // To register unix resolver.
+	_ "google.golang.org/grpc/resolver/dns"                  // To register dns resolver.
 )
 
 const (
 	// minimum time to give a connection to complete
 	minConnectTimeout = 20 * time.Second
-	// must match grpclbName in grpclb/grpclb.go
-	grpclbName = "grpclb"
 )
 
 var (
@@ -68,11 +69,14 @@
 	errConnDrain = errors.New("grpc: the connection is drained")
 	// errConnClosing indicates that the connection is closing.
 	errConnClosing = errors.New("grpc: the connection is closing")
-	// errBalancerClosed indicates that the balancer is closed.
-	errBalancerClosed = errors.New("grpc: balancer is closed")
+	// errConnIdling indicates the connection is being closed as the channel
+	// is moving to an idle mode due to inactivity.
+	errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
 	// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
 	// service config.
 	invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+	// PickFirstBalancerName is the name of the pick_first balancer.
+	PickFirstBalancerName = pickfirst.Name
 )
 
 // The following errors are returned from Dial and DialContext
@@ -80,17 +84,17 @@
 	// errNoTransportSecurity indicates that there is no transport security
 	// being set for ClientConn. Users should either set one or explicitly
 	// call WithInsecure DialOption to disable security.
-	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+	errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
 	// errTransportCredsAndBundle indicates that creds bundle is used together
 	// with other individual Transport Credentials.
 	errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
-	// errTransportCredentialsMissing indicates that users want to transmit security
-	// information (e.g., OAuth2 token) which requires secure connection on an insecure
-	// connection.
+	// errNoTransportCredsInBundle indicates that the configured creds bundle
+	// returned nil transport credentials.
+	errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+	// errTransportCredentialsMissing indicates that users want to transmit
+	// security information (e.g., OAuth2 token) which requires secure
+	// connection on an insecure connection.
 	errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
-	// errCredentialsConflict indicates that grpc.WithTransportCredentials()
-	// and grpc.WithInsecure() are both called for a connection.
-	errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
 )
 
 const (
@@ -101,114 +105,168 @@
 	defaultReadBufSize  = 32 * 1024
 )
 
-// Dial creates a client connection to the given target.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
-	return DialContext(context.Background(), target, opts...)
+type defaultConfigSelector struct {
+	sc *ServiceConfig
 }
 
-// DialContext creates a client connection to the given target. By default, it's
-// a non-blocking dial (the function won't wait for connections to be
-// established, and connecting happens in the background). To make it a blocking
-// dial, use WithBlock() dial option.
-//
-// In the non-blocking case, the ctx does not act against the connection. It
-// only controls the setup steps.
-//
-// In the blocking case, ctx can be used to cancel or expire the pending
-// connection. Once this function returns, the cancellation and expiration of
-// ctx will be noop. Users should call ClientConn.Close to terminate all the
-// pending operations after this function returns.
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+	return &iresolver.RPCConfig{
+		Context:      rpcInfo.Context,
+		MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+	}, nil
+}
+
+// NewClient creates a new gRPC "channel" for the target URI provided.  No I/O
+// is performed.  Use of the ClientConn for RPCs will automatically cause it to
+// connect.  The Connect method may be called to manually create a connection,
+// but for most users this should be unnecessary.
 //
 // The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+// https://github.com/grpc/grpc/blob/master/doc/naming.md.  E.g. to use the dns
+// name resolver, a "dns:///" prefix may be applied to the target.  The default
+// name resolver will be used if no scheme is detected, or if the parsed scheme
+// is not a registered name resolver.  The default resolver is "dns" but can be
+// overridden using the resolver package's SetDefaultScheme.
+//
+// Examples:
+//
+//   - "foo.googleapis.com:8080"
+//   - "dns:///foo.googleapis.com:8080"
+//   - "dns:///foo.googleapis.com"
+//   - "dns:///10.0.0.213:8080"
+//   - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"
+//   - "dns://8.8.8.8/foo.googleapis.com:8080"
+//   - "dns://8.8.8.8/foo.googleapis.com"
+//   - "zookeeper://zk.example.com:9900/example_service"
+//
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
 	cc := &ClientConn{
-		target:            target,
-		csMgr:             &connectivityStateManager{},
-		conns:             make(map[*addrConn]struct{}),
-		dopts:             defaultDialOptions(),
-		blockingpicker:    newPickerWrapper(),
-		czData:            new(channelzData),
-		firstResolveEvent: grpcsync.NewEvent(),
+		target: target,
+		conns:  make(map[*addrConn]struct{}),
+		dopts:  defaultDialOptions(),
 	}
+
 	cc.retryThrottler.Store((*retryThrottler)(nil))
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
 	cc.ctx, cc.cancel = context.WithCancel(context.Background())
 
+	// Apply dial options.
+	disableGlobalOpts := false
+	for _, opt := range opts {
+		if _, ok := opt.(*disableGlobalDialOptions); ok {
+			disableGlobalOpts = true
+			break
+		}
+	}
+
+	if !disableGlobalOpts {
+		for _, opt := range globalDialOptions {
+			opt.apply(&cc.dopts)
+		}
+	}
+
 	for _, opt := range opts {
 		opt.apply(&cc.dopts)
 	}
 
+	// Determine the resolver to use.
+	if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+		return nil, err
+	}
+
+	for _, opt := range globalPerTargetDialOptions {
+		opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+	}
+
 	chainUnaryClientInterceptors(cc)
 	chainStreamClientInterceptors(cc)
 
+	if err := cc.validateTransportCredentials(); err != nil {
+		return nil, err
+	}
+
+	if cc.dopts.defaultServiceConfigRawJSON != nil {
+		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
+		if scpr.Err != nil {
+			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+		}
+		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+	}
+	cc.keepaliveParams = cc.dopts.copts.KeepaliveParams
+
+	if err = cc.initAuthority(); err != nil {
+		return nil, err
+	}
+
+	// Register ClientConn with channelz. Note that this is done only after
+	// the point where channel creation can no longer fail.
+	cc.channelzRegistration(target)
+	channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+	channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
+	cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
+	cc.pickerWrapper = newPickerWrapper()
+
+	cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
+	cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+	cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
+	return cc, nil
+}
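A usage sketch of NewClient as documented above; the target is one of the example forms listed, and insecure credentials are used only to keep the sketch self-contained:

package sketch

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func newChannel() (*grpc.ClientConn, error) {
	// No I/O happens here: the channel starts idle and connects lazily on
	// the first RPC (or when Connect is called).
	cc, err := grpc.NewClient(
		"dns:///foo.googleapis.com:8080",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		return nil, err
	}
	return cc, nil // the caller must eventually call cc.Close()
}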
+
+// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead.  Will be supported throughout 1.x.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+	return DialContext(context.Background(), target, opts...)
+}
+
+// DialContext calls NewClient and then exits idle mode.  If the WithBlock
+// dial option is used, it calls Connect and WaitForStateChange until either
+// the context expires or the state of the ClientConn is Ready.
+//
+// One subtle difference between NewClient and Dial/DialContext is that the
+// former uses "dns" as the default name resolver, while the latter two use
+// "passthrough" for backward compatibility.  This distinction should not matter
+// to most users, but could matter to legacy users that specify a custom dialer
+// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead.  Will be supported throughout 1.x.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+	// At the end of this method, we kick the channel out of idle, rather than
+	// waiting for the first rpc.
+	//
+	// The WithLocalDNSResolution dial option in `grpc.Dial` preserves the
+	// legacy behavior: when the default "passthrough" scheme is used,
+	// hostname resolution is skipped; when "dns" is used, resolution is
+	// performed on the client.
+	opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...)
+	cc, err := NewClient(target, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	// We start the channel off in idle mode, but kick it out of idle now,
+	// instead of waiting for the first RPC.  This is the legacy behavior of
+	// Dial.
 	defer func() {
 		if err != nil {
 			cc.Close()
 		}
 	}()
 
-	if channelz.IsOn() {
-		if cc.dopts.channelzParentID != 0 {
-			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     "Channel Created",
-				Severity: channelz.CtINFO,
-				Parent: &channelz.TraceEventDesc{
-					Desc:     fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
-					Severity: channelz.CtINFO,
-				},
-			})
-		} else {
-			cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     "Channel Created",
-				Severity: channelz.CtINFO,
-			})
-		}
-		cc.csMgr.channelzID = cc.channelzID
+	// This creates the name resolver, load balancer, etc.
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		return nil, err
 	}
 
-	if !cc.dopts.insecure {
-		if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
-			return nil, errNoTransportSecurity
-		}
-		if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
-			return nil, errTransportCredsAndBundle
-		}
-	} else {
-		if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
-			return nil, errCredentialsConflict
-		}
-		for _, cd := range cc.dopts.copts.PerRPCCredentials {
-			if cd.RequireTransportSecurity() {
-				return nil, errTransportCredentialsMissing
-			}
-		}
-	}
-
-	if cc.dopts.defaultServiceConfigRawJSON != nil {
-		scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
-		if scpr.Err != nil {
-			return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
-		}
-		cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
-	}
-	cc.mkp = cc.dopts.copts.KeepaliveParams
-
-	if cc.dopts.copts.Dialer == nil {
-		cc.dopts.copts.Dialer = newProxyDialer(
-			func(ctx context.Context, addr string) (net.Conn, error) {
-				network, addr := parseDialTarget(addr)
-				return (&net.Dialer{}).DialContext(ctx, network, addr)
-			},
-		)
-	}
-
-	if cc.dopts.copts.UserAgent != "" {
-		cc.dopts.copts.UserAgent += " " + grpcUA
-	} else {
-		cc.dopts.copts.UserAgent = grpcUA
+	// Return now for non-blocking dials.
+	if !cc.dopts.block {
+		return cc, nil
 	}
 
 	if cc.dopts.timeout > 0 {
@@ -219,116 +277,186 @@
 	defer func() {
 		select {
 		case <-ctx.Done():
-			conn, err = nil, ctx.Err()
+			switch {
+			case ctx.Err() == err:
+				conn = nil
+			case err == nil || !cc.dopts.returnLastError:
+				conn, err = nil, ctx.Err()
+			default:
+				conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+			}
 		default:
 		}
 	}()
 
-	scSet := false
-	if cc.dopts.scChan != nil {
-		// Try to get an initial service config.
-		select {
-		case sc, ok := <-cc.dopts.scChan:
-			if ok {
-				cc.sc = &sc
-				scSet = true
-			}
-		default:
+	// A blocking dial blocks until the clientConn is ready.
+	for {
+		s := cc.GetState()
+		if s == connectivity.Idle {
+			cc.Connect()
 		}
-	}
-	if cc.dopts.bs == nil {
-		cc.dopts.bs = backoff.DefaultExponential
-	}
-	if cc.dopts.resolverBuilder == nil {
-		// Only try to parse target when resolver builder is not already set.
-		cc.parsedTarget = parseTarget(cc.target)
-		grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
-		cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
-		if cc.dopts.resolverBuilder == nil {
-			// If resolver builder is still nil, the parsed target's scheme is
-			// not registered. Fallback to default resolver and set Endpoint to
-			// the original target.
-			grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
-			cc.parsedTarget = resolver.Target{
-				Scheme:   resolver.GetDefaultScheme(),
-				Endpoint: target,
+		if s == connectivity.Ready {
+			return cc, nil
+		} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+			if err = cc.connectionError(); err != nil {
+				terr, ok := err.(interface {
+					Temporary() bool
+				})
+				if ok && !terr.Temporary() {
+					return nil, err
+				}
 			}
-			cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
 		}
-	} else {
-		cc.parsedTarget = resolver.Target{Endpoint: target}
-	}
-	creds := cc.dopts.copts.TransportCredentials
-	if creds != nil && creds.Info().ServerName != "" {
-		cc.authority = creds.Info().ServerName
-	} else if cc.dopts.insecure && cc.dopts.authority != "" {
-		cc.authority = cc.dopts.authority
-	} else {
-		// Use endpoint from "scheme://authority/endpoint" as the default
-		// authority for ClientConn.
-		cc.authority = cc.parsedTarget.Endpoint
-	}
-
-	if cc.dopts.scChan != nil && !scSet {
-		// Blocking wait for the initial service config.
-		select {
-		case sc, ok := <-cc.dopts.scChan:
-			if ok {
-				cc.sc = &sc
+		if !cc.WaitForStateChange(ctx, s) {
+			// ctx got timeout or canceled.
+			if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+				return nil, err
 			}
-		case <-ctx.Done():
 			return nil, ctx.Err()
 		}
 	}
-	if cc.dopts.scChan != nil {
-		go cc.scWatcher()
-	}
+}
 
-	var credsClone credentials.TransportCredentials
-	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
-		credsClone = creds.Clone()
+// addTraceEvent is a helper method to add a trace event on the channel. If the
+// channel is a nested one, the same event is also added on the parent channel.
+func (cc *ClientConn) addTraceEvent(msg string) {
+	ted := &channelz.TraceEvent{
+		Desc:     fmt.Sprintf("Channel %s", msg),
+		Severity: channelz.CtInfo,
 	}
-	cc.balancerBuildOpts = balancer.BuildOptions{
-		DialCreds:        credsClone,
-		CredsBundle:      cc.dopts.copts.CredsBundle,
-		Dialer:           cc.dopts.copts.Dialer,
-		ChannelzParentID: cc.channelzID,
-		Target:           cc.parsedTarget,
+	if cc.dopts.channelzParent != nil {
+		ted.Parent = &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
+			Severity: channelz.CtInfo,
+		}
 	}
+	channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+}
 
-	// Build the resolver.
-	rWrapper, err := newCCResolverWrapper(cc)
-	if err != nil {
-		return nil, fmt.Errorf("failed to build resolver: %v", err)
-	}
+type idler ClientConn
 
+func (i *idler) EnterIdleMode() {
+	(*ClientConn)(i).enterIdleMode()
+}
+
+func (i *idler) ExitIdleMode() error {
+	return (*ClientConn)(i).exitIdleMode()
+}
+
+// exitIdleMode moves the channel out of idle mode by recreating the name
+// resolver and load balancer.  This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
 	cc.mu.Lock()
-	cc.resolverWrapper = rWrapper
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return errConnClosing
+	}
 	cc.mu.Unlock()
-	// A blocking dial blocks until the clientConn is ready.
-	if cc.dopts.block {
-		for {
-			s := cc.GetState()
-			if s == connectivity.Ready {
-				break
-			} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
-				if err = cc.blockingpicker.connectionError(); err != nil {
-					terr, ok := err.(interface {
-						Temporary() bool
-					})
-					if ok && !terr.Temporary() {
-						return nil, err
-					}
-				}
-			}
-			if !cc.WaitForStateChange(ctx, s) {
-				// ctx got timeout or canceled.
-				return nil, ctx.Err()
+
+	// This needs to be called without cc.mu because it builds a new resolver,
+	// which might update state or report an error inline and would then need
+	// to acquire cc.mu.
+	if err := cc.resolverWrapper.start(); err != nil {
+		return err
+	}
+
+	cc.addTraceEvent("exiting idle mode")
+	return nil
+}
+
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+	cc.resolverWrapper = newCCResolverWrapper(cc)
+	cc.balancerWrapper = newCCBalancerWrapper(cc)
+	cc.firstResolveEvent = grpcsync.NewEvent()
+	// cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+	// of setting it to nil here, we recreate the map. This also means that we
+	// don't have to do this when exiting idle mode.
+	cc.conns = make(map[*addrConn]struct{})
+}
+
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer, and any subchannels.  This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
+func (cc *ClientConn) enterIdleMode() {
+	cc.mu.Lock()
+
+	if cc.conns == nil {
+		cc.mu.Unlock()
+		return
+	}
+
+	conns := cc.conns
+
+	rWrapper := cc.resolverWrapper
+	rWrapper.close()
+	cc.pickerWrapper.reset()
+	bWrapper := cc.balancerWrapper
+	bWrapper.close()
+	cc.csMgr.updateState(connectivity.Idle)
+	cc.addTraceEvent("entering idle mode")
+
+	cc.initIdleStateLocked()
+
+	cc.mu.Unlock()
+
+	// Block until the name resolver and LB policy are closed.
+	<-rWrapper.serializer.Done()
+	<-bWrapper.serializer.Done()
+
+	// Close all subchannels after the LB policy is closed.
+	for ac := range conns {
+		ac.tearDown(errConnIdling)
+	}
+}
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+//   - no transport creds and no creds bundle is configured
+//   - both transport creds and creds bundle are configured
+//   - creds bundle is configured, but it lacks transport credentials
+//   - insecure transport creds configured alongside call creds that require
+//     transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
+func (cc *ClientConn) validateTransportCredentials() error {
+	if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+		return errNoTransportSecurity
+	}
+	if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+		return errTransportCredsAndBundle
+	}
+	if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+		return errNoTransportCredsInBundle
+	}
+	transportCreds := cc.dopts.copts.TransportCredentials
+	if transportCreds == nil {
+		transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+	}
+	if transportCreds.Info().SecurityProtocol == "insecure" {
+		for _, cd := range cc.dopts.copts.PerRPCCredentials {
+			if cd.RequireTransportSecurity() {
+				return errTransportCredentialsMissing
 			}
 		}
 	}
+	return nil
+}
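Expressed from the caller's side, the rules above mean exactly one source of transport credentials must be supplied, and insecure transport cannot be combined with call credentials that demand security. A sketch of one valid and one rejected shape (target and token source are placeholders):

package sketch

import (
	"crypto/tls"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/credentials/oauth"
)

func credentialShapes(target string, ts oauth2.TokenSource) error {
	// Valid: a single set of transport credentials.
	cc, err := grpc.NewClient(target,
		grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{})))
	if err != nil {
		return err
	}
	defer cc.Close()

	// Rejected (the errTransportCredentialsMissing case above): insecure
	// transport plus per-RPC credentials whose RequireTransportSecurity
	// returns true.
	_, err = grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithPerRPCCredentials(oauth.TokenSource{TokenSource: ts}))
	return err // expected to be non-nil
}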
 
-	return cc, nil
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelz`.  A channelz trace event is
+// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e. a valid parent ClientConn ID is specified via a dial option, the
+// trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+	parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
+	cc.channelz = channelz.RegisterChannel(parentChannel, target)
+	cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
 }
 
 // chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -345,7 +473,7 @@
 	} else if len(interceptors) == 1 {
 		chainedInt = interceptors[0]
 	} else {
-		chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+		chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
 			return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
 		}
 	}
@@ -357,7 +485,7 @@
 	if curr == len(interceptors)-1 {
 		return finalInvoker
 	}
-	return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+	return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
 		return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
 	}
 }
@@ -393,13 +521,27 @@
 	}
 }
 
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified channel.
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
+	return &connectivityStateManager{
+		channelz: channel,
+		pubSub:   grpcsync.NewPubSub(ctx),
+	}
+}
+
 // connectivityStateManager keeps the connectivity.State of ClientConn.
 // This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state in two places.
 type connectivityStateManager struct {
 	mu         sync.Mutex
 	state      connectivity.State
 	notifyChan chan struct{}
-	channelzID int64
+	channelz   *channelz.Channel
+	pubSub     *grpcsync.PubSub
 }
 
 // updateState updates the connectivity.State of ClientConn.
@@ -415,12 +557,10 @@
 		return
 	}
 	csm.state = state
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
-			Desc:     fmt.Sprintf("Channel Connectivity change to %v", state),
-			Severity: channelz.CtINFO,
-		})
-	}
+	csm.channelz.ChannelMetrics.State.Store(&state)
+	csm.pubSub.Publish(state)
+
+	channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state)
 	if csm.notifyChan != nil {
 		// There are other goroutines waiting on this channel.
 		close(csm.notifyChan)
@@ -443,6 +583,20 @@
 	return csm.notifyChan
 }
 
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs.  It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+	// Invoke performs a unary RPC and returns after the response is received
+	// into reply.
+	Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
+	// NewStream begins a streaming RPC.
+	NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
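Generated stubs depend only on ClientConnInterface, which is what lets tests substitute a fake connection. A hedged sketch of the pattern, with hypothetical message types standing in for generated protos:

package sketch

import (
	"context"

	"google.golang.org/grpc"
)

// pingRequest/pingReply stand in for generated proto messages; a real stub
// would use types produced by protoc-gen-go.
type pingRequest struct{}
type pingReply struct{}

// echoClient mirrors protoc-gen-go-grpc output: it stores the interface,
// not *grpc.ClientConn, so any ClientConnInterface implementation works.
type echoClient struct {
	cc grpc.ClientConnInterface
}

func (c *echoClient) Ping(ctx context.Context, in *pingRequest, opts ...grpc.CallOption) (*pingReply, error) {
	out := new(pingReply)
	if err := c.cc.Invoke(ctx, "/echo.Echo/Ping", in, out, opts...); err != nil {
		return nil, err
	}
	return out, nil
}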
 // ClientConn represents a virtual connection to a conceptual endpoint, to
 // perform RPCs.
 //
@@ -456,37 +610,47 @@
 // handshakes. It also handles errors on established connections by
 // re-resolving the name and reconnecting.
 type ClientConn struct {
-	ctx    context.Context
-	cancel context.CancelFunc
+	ctx    context.Context    // Initialized using the background context at dial time.
+	cancel context.CancelFunc // Cancelled on close.
 
-	target       string
-	parsedTarget resolver.Target
-	authority    string
-	dopts        dialOptions
-	csMgr        *connectivityStateManager
+	// The following are initialized at dial time, and are read-only after that.
+	target              string            // User's dial target.
+	parsedTarget        resolver.Target   // See initParsedTargetAndResolverBuilder().
+	authority           string            // See initAuthority().
+	dopts               dialOptions       // Default and user specified dial options.
+	channelz            *channelz.Channel // Channelz object.
+	resolverBuilder     resolver.Builder  // See initParsedTargetAndResolverBuilder().
+	idlenessMgr         *idle.Manager
+	metricsRecorderList *stats.MetricsRecorderList
 
-	balancerBuildOpts balancer.BuildOptions
-	blockingpicker    *pickerWrapper
+	// The following provide their own synchronization, and therefore don't
+	// require cc.mu to be held to access them.
+	csMgr              *connectivityStateManager
+	pickerWrapper      *pickerWrapper
+	safeConfigSelector iresolver.SafeConfigSelector
+	retryThrottler     atomic.Value // Updated from service config.
 
+	// mu protects the following fields.
+	// TODO: split mu so the same mutex isn't used for everything.
 	mu              sync.RWMutex
-	resolverWrapper *ccResolverWrapper
-	sc              *ServiceConfig
-	conns           map[*addrConn]struct{}
-	// Keepalive parameter can be updated if a GoAway is received.
-	mkp             keepalive.ClientParameters
-	curBalancerName string
-	balancerWrapper *ccBalancerWrapper
-	retryThrottler  atomic.Value
-
+	resolverWrapper *ccResolverWrapper         // Always recreated whenever entering idle to simplify Close.
+	balancerWrapper *ccBalancerWrapper         // Always recreated whenever entering idle to simplify Close.
+	sc              *ServiceConfig             // Latest service config received from the resolver.
+	conns           map[*addrConn]struct{}     // Set to nil on close.
+	keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+	// firstResolveEvent is used to track whether the name resolver sent us at
+	// least one update. RPCs block on this event.  May be accessed without mu
+	// if we know we cannot be asked to enter idle mode while accessing it (e.g.
+	// when the idle manager has already been closed, or if we are already
+	// entering idle mode).
 	firstResolveEvent *grpcsync.Event
 
-	channelzID int64 // channelz unique identification number
-	czData     *channelzData
+	lceMu               sync.Mutex // protects lastConnectionError
+	lastConnectionError error
 }
 
 // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
 // ctx expires. A true value is returned in the former case and false in the latter.
-// This is an EXPERIMENTAL API.
 func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
 	ch := cc.csMgr.getNotifyChan()
 	if cc.csMgr.getState() != sourceState {
@@ -501,73 +665,92 @@
 }
 
 // GetState returns the connectivity.State of ClientConn.
-// This is an EXPERIMENTAL API.
 func (cc *ClientConn) GetState() connectivity.State {
 	return cc.csMgr.getState()
 }
 
-func (cc *ClientConn) scWatcher() {
-	for {
-		select {
-		case sc, ok := <-cc.dopts.scChan:
-			if !ok {
-				return
-			}
-			cc.mu.Lock()
-			// TODO: load balance policy runtime change is ignored.
-			// We may revisit this decision in the future.
-			cc.sc = &sc
-			cc.mu.Unlock()
-		case <-cc.ctx.Done():
-			return
-		}
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle.  Does not wait for the connection attempts to begin
+// before returning.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+	if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+		cc.addTraceEvent(err.Error())
+		return
 	}
+	// If the ClientConn was not in idle mode, we need to call ExitIdle on the
+	// LB policy so that connections can be created.
+	cc.mu.Lock()
+	cc.balancerWrapper.exitIdle()
+	cc.mu.Unlock()
 }
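Callers migrating off WithBlock can reproduce a blocking dial on top of Connect, essentially what the deprecated DialContext now does internally. A minimal sketch using only the exported state APIs:

package sketch

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitReady kicks an idle channel into connecting and watches state
// transitions until the channel is Ready or ctx expires.
func waitReady(ctx context.Context, cc *grpc.ClientConn) error {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if s == connectivity.Idle {
			cc.Connect()
		}
		if !cc.WaitForStateChange(ctx, s) {
			return ctx.Err() // context expired or was canceled
		}
	}
}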
 
-// waitForResolvedAddrs blocks until the resolver has provided addresses or the
-// context expires.  Returns nil unless the context expires first; otherwise
-// returns a status error based on the context.
-func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+// waitForResolvedAddrs blocks until the resolver provides addresses or the
+// context expires, whichever happens first.
+//
+// Error is nil unless the context expires first; otherwise returns a status
+// error based on the context.
+//
+// The returned boolean indicates whether the call blocked. If resolution has
+// already happened once before, it returns false without blocking. Otherwise,
+// it waits for resolution and returns true if resolution succeeded, or false
+// along with an error if resolution failed.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) {
 	// This is on the RPC path, so we use a fast path to avoid the
 	// more-expensive "select" below after the resolver has returned once.
 	if cc.firstResolveEvent.HasFired() {
-		return nil
+		return false, nil
 	}
+	internal.NewStreamWaitingForResolver()
 	select {
 	case <-cc.firstResolveEvent.Done():
-		return nil
+		return true, nil
 	case <-ctx.Done():
-		return status.FromContextError(ctx.Err()).Err()
+		return false, status.FromContextError(ctx.Err()).Err()
 	case <-cc.ctx.Done():
-		return ErrClientConnClosing
+		return false, ErrClientConnClosing
 	}
 }
 
 var emptyServiceConfig *ServiceConfig
 
 func init() {
-	cfg := parseServiceConfig("{}")
+	cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
 	if cfg.Err != nil {
 		panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
 	}
 	emptyServiceConfig = cfg.Config.(*ServiceConfig)
+
+	internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
+		return cc.csMgr.pubSub.Subscribe(s)
+	}
+	internal.EnterIdleModeForTesting = func(cc *ClientConn) {
+		cc.idlenessMgr.EnterIdleModeForTesting()
+	}
+	internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+		return cc.idlenessMgr.ExitIdleMode()
+	}
 }
 
-func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+func (cc *ClientConn) maybeApplyDefaultServiceConfig() {
 	if cc.sc != nil {
-		cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+		cc.applyServiceConfigAndBalancer(cc.sc, nil)
 		return
 	}
 	if cc.dopts.defaultServiceConfig != nil {
-		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+		cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig})
 	} else {
-		cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+		cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig})
 	}
 }
 
-func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
 	defer cc.firstResolveEvent.Fire()
-	cc.mu.Lock()
 	// Check if the ClientConn is already closed. Some fields (e.g.
 	// balancerWrapper) are set to nil when closing the ClientConn, and could
 	// cause nil pointer panic if we don't have this check.
@@ -580,11 +763,9 @@
 		// May need to apply the initial service config in case the resolver
 		// doesn't support service configs, or doesn't provide a service config
 		// with the new addresses.
-		cc.maybeApplyDefaultServiceConfig(nil)
+		cc.maybeApplyDefaultServiceConfig()
 
-		if cc.balancerWrapper != nil {
-			cc.balancerWrapper.resolverError(err)
-		}
+		cc.balancerWrapper.resolverError(err)
 
 		// No addresses are valid with err set; return early.
 		cc.mu.Unlock()
@@ -592,49 +773,40 @@
 	}
 
 	var ret error
-	if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
-		cc.maybeApplyDefaultServiceConfig(s.Addresses)
+	if cc.dopts.disableServiceConfig {
+		channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+		cc.maybeApplyDefaultServiceConfig()
+	} else if s.ServiceConfig == nil {
+		cc.maybeApplyDefaultServiceConfig()
 		// TODO: do we need to apply a failing LB policy if there is no
 		// default, per the error handling design?
 	} else {
 		if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
-			cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+			configSelector := iresolver.GetConfigSelector(s)
+			if configSelector != nil {
+				if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
+					channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector")
+				}
+			} else {
+				configSelector = &defaultConfigSelector{sc}
+			}
+			cc.applyServiceConfigAndBalancer(sc, configSelector)
 		} else {
 			ret = balancer.ErrBadResolverState
-			if cc.balancerWrapper == nil {
-				var err error
-				if s.ServiceConfig.Err != nil {
-					err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
-				} else {
-					err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
-				}
-				cc.blockingpicker.updatePicker(base.NewErrPicker(err))
-				cc.csMgr.updateState(connectivity.TransientFailure)
+			if cc.sc == nil {
+				// Apply the failing LB only if we haven't received valid service config
+				// from the name resolver in the past.
+				cc.applyFailingLBLocked(s.ServiceConfig)
 				cc.mu.Unlock()
 				return ret
 			}
 		}
 	}
 
-	var balCfg serviceconfig.LoadBalancingConfig
-	if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
-		balCfg = cc.sc.lbConfig.cfg
-	}
-
-	cbn := cc.curBalancerName
+	balCfg := cc.sc.lbConfig
 	bw := cc.balancerWrapper
 	cc.mu.Unlock()
-	if cbn != grpclbName {
-		// Filter any grpclb addresses since we don't have the grpclb balancer.
-		for i := 0; i < len(s.Addresses); {
-			if s.Addresses[i].Type == resolver.GRPCLB {
-				copy(s.Addresses[i:], s.Addresses[i+1:])
-				s.Addresses = s.Addresses[:len(s.Addresses)-1]
-				continue
-			}
-			i++
-		}
-	}
+
 	uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
 	if ret == nil {
 		ret = uccsErr // prefer ErrBadResolver state since any other error is
@@ -643,95 +815,65 @@
 	return ret
 }
 
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
-	if strings.EqualFold(cc.curBalancerName, name) {
-		return
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+	var err error
+	if sc.Err != nil {
+		err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
+	} else {
+		err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
 	}
-
-	grpclog.Infof("ClientConn switching balancer to %q", name)
-	if cc.dopts.balancerBuilder != nil {
-		grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
-		return
-	}
-	if cc.balancerWrapper != nil {
-		cc.balancerWrapper.close()
-	}
-
-	builder := balancer.Get(name)
-	if channelz.IsOn() {
-		if builder == nil {
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
-				Severity: channelz.CtWarning,
-			})
-		} else {
-			channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Channel switches to new LB policy %q", name),
-				Severity: channelz.CtINFO,
-			})
-		}
-	}
-	if builder == nil {
-		grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
-		builder = newPickfirstBuilder()
-	}
-
-	cc.curBalancerName = builder.Name()
-	cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+	cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+	cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+	cc.csMgr.updateState(connectivity.TransientFailure)
 }
 
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return
-	}
-	// TODO(bar switching) send updates to all balancer wrappers when balancer
-	// gracefully switching is supported.
-	cc.balancerWrapper.handleSubConnStateChange(sc, s)
-	cc.mu.Unlock()
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
+	out := make([]resolver.Address, len(in))
+	copy(out, in)
+	return out
 }
 
-// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
 //
 // Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+	if cc.conns == nil {
+		return nil, ErrClientConnClosing
+	}
+
 	ac := &addrConn{
+		state:        connectivity.Idle,
 		cc:           cc,
-		addrs:        addrs,
+		addrs:        copyAddresses(addrs),
 		scopts:       opts,
 		dopts:        cc.dopts,
-		czData:       new(channelzData),
+		channelz:     channelz.RegisterSubChannel(cc.channelz, ""),
 		resetBackoff: make(chan struct{}),
 	}
 	ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+	// Start with our address set to the first address; this may be updated if
+	// we connect to different addresses.
+	ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
+
+	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+		Desc:     "Subchannel created",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
+			Severity: channelz.CtInfo,
+		},
+	})
+
 	// Track ac in cc. This needs to be done before any getTransport(...) is called.
-	cc.mu.Lock()
-	if cc.conns == nil {
-		cc.mu.Unlock()
-		return nil, ErrClientConnClosing
-	}
-	if channelz.IsOn() {
-		ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
-		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-			Desc:     "Subchannel Created",
-			Severity: channelz.CtINFO,
-			Parent: &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
-				Severity: channelz.CtINFO,
-			},
-		})
-	}
 	cc.conns[ac] = struct{}{}
-	cc.mu.Unlock()
 	return ac, nil
 }
 
@@ -748,34 +890,33 @@
 	ac.tearDown(err)
 }
 
-func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
-	return &channelz.ChannelInternalMetric{
-		State:                    cc.GetState(),
-		Target:                   cc.target,
-		CallsStarted:             atomic.LoadInt64(&cc.czData.callsStarted),
-		CallsSucceeded:           atomic.LoadInt64(&cc.czData.callsSucceeded),
-		CallsFailed:              atomic.LoadInt64(&cc.czData.callsFailed),
-		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
-	}
-}
-
 // Target returns the target string of the ClientConn.
-// This is an EXPERIMENTAL API.
 func (cc *ClientConn) Target() string {
 	return cc.target
 }
 
+// CanonicalTarget returns the canonical target string used when creating cc.
+//
+// This always has the form "<scheme>://[authority]/<endpoint>".  For example:
+//
+//   - "dns:///example.com:42"
+//   - "dns://8.8.8.8/example.com:42"
+//   - "unix:///path/to/socket"
+func (cc *ClientConn) CanonicalTarget() string {
+	return cc.parsedTarget.String()
+}
+
 func (cc *ClientConn) incrCallsStarted() {
-	atomic.AddInt64(&cc.czData.callsStarted, 1)
-	atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
+	cc.channelz.ChannelMetrics.CallsStarted.Add(1)
+	cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
 }
 
 func (cc *ClientConn) incrCallsSucceeded() {
-	atomic.AddInt64(&cc.czData.callsSucceeded, 1)
+	cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
 }
 
 func (cc *ClientConn) incrCallsFailed() {
-	atomic.AddInt64(&cc.czData.callsFailed, 1)
+	cc.channelz.ChannelMetrics.CallsFailed.Add(1)
 }
 
 // connect starts creating a transport.
@@ -784,89 +925,146 @@
 func (ac *addrConn) connect() error {
 	ac.mu.Lock()
 	if ac.state == connectivity.Shutdown {
+		if logger.V(2) {
+			logger.Infof("connect called on shutdown addrConn; ignoring.")
+		}
 		ac.mu.Unlock()
 		return errConnClosing
 	}
 	if ac.state != connectivity.Idle {
+		if logger.V(2) {
+			logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+		}
 		ac.mu.Unlock()
 		return nil
 	}
-	// Update connectivity state within the lock to prevent subsequent or
-	// concurrent calls from resetting the transport more than once.
-	ac.updateConnectivityState(connectivity.Connecting)
-	ac.mu.Unlock()
 
-	// Start a goroutine connecting to the server asynchronously.
-	go ac.resetTransport()
+	ac.resetTransportAndUnlock()
 	return nil
 }
 
-// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
-//
-// If ac is Connecting, it returns false. The caller should tear down the ac and
-// create a new one. Note that the backoff will be reset when this happens.
-//
-// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
-// addresses will be picked up by retry in the next iteration after backoff.
-//
-// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
-//
-// If ac is Ready, it checks whether current connected address of ac is in the
-// new addrs list.
-//  - If true, it updates ac.addrs and returns true. The ac will keep using
-//    the existing connection.
-//  - If false, it does nothing and returns false.
-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+	return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+		a.Attributes.Equal(b.Attributes) &&
+		a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+	return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
+}
+
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
+	addrs = copyAddresses(addrs)
+	limit := len(addrs)
+	if limit > 5 {
+		limit = 5
+	}
+	channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
+
 	ac.mu.Lock()
-	defer ac.mu.Unlock()
-	grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+	if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
+		ac.mu.Unlock()
+		return
+	}
+
+	ac.addrs = addrs
+
 	if ac.state == connectivity.Shutdown ||
 		ac.state == connectivity.TransientFailure ||
 		ac.state == connectivity.Idle {
-		ac.addrs = addrs
-		return true
+		// We were not connecting, so do nothing but update the addresses.
+		ac.mu.Unlock()
+		return
 	}
 
-	if ac.state == connectivity.Connecting {
-		return false
-	}
-
-	// ac.state is Ready, try to find the connected address.
-	var curAddrFound bool
-	for _, a := range addrs {
-		if reflect.DeepEqual(ac.curAddr, a) {
-			curAddrFound = true
-			break
+	if ac.state == connectivity.Ready {
+		// Try to find the connected address.
+		for _, a := range addrs {
+			a.ServerName = ac.cc.getServerName(a)
+			if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
+				// We are connected to a valid address, so do nothing but
+				// update the addresses.
+				ac.mu.Unlock()
+				return
+			}
 		}
 	}
-	grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
-	if curAddrFound {
-		ac.addrs = addrs
+
+	// We are either connected to the wrong address or currently connecting.
+	// Stop the current iteration and restart.
+
+	ac.cancel()
+	ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+	// We have to defer here because GracefulClose => onClose, which requires
+	// locking ac.mu.
+	if ac.transport != nil {
+		defer ac.transport.GracefulClose()
+		ac.transport = nil
 	}
 
-	return curAddrFound
+	if len(addrs) == 0 {
+		ac.updateConnectivityState(connectivity.Idle, nil)
+	}
+
+	// Since we were connecting/connected, we should start a new connection
+	// attempt.
+	go ac.resetTransportAndUnlock()
+}
+
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+	if cc.dopts.authority != "" {
+		return cc.dopts.authority
+	}
+	if addr.ServerName != "" {
+		return addr.ServerName
+	}
+	return cc.authority
+}
+
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+	if sc == nil {
+		return MethodConfig{}
+	}
+	if m, ok := sc.Methods[method]; ok {
+		return m
+	}
+	i := strings.LastIndex(method, "/")
+	if m, ok := sc.Methods[method[:i+1]]; ok {
+		return m
+	}
+	return sc.Methods[""]
 }
 
 // GetMethodConfig gets the method config of the input method.
 // If there's an exact match for input method (i.e. /service/method), we return
 // the corresponding MethodConfig.
-// If there isn't an exact match for the input method, we look for the default config
-// under the service (i.e /service/). If there is a default MethodConfig for
-// the service, we return it.
+// If there isn't an exact match for the input method, we look for the service's
+// default config under the service (i.e. /service/) and then for the default
+// for all services (empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
 // Otherwise, we return an empty MethodConfig.
 func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
 	// TODO: Avoid the locking here.
 	cc.mu.RLock()
 	defer cc.mu.RUnlock()
-	if cc.sc == nil {
-		return MethodConfig{}
-	}
-	m, ok := cc.sc.Methods[method]
-	if !ok {
-		i := strings.LastIndex(method, "/")
-		m = cc.sc.Methods[method[:i+1]]
-	}
-	return m
+	return getMethodConfig(cc.sc, method)
 }
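The lookup above falls through three keys: the exact "/service/method", the per-service default "/service/", and finally the global default "". A standalone sketch of the same order over a plain map, with strings standing in for MethodConfig values:

package sketch

import "strings"

// lookupMethodConfig mirrors getMethodConfig's fallback order.
func lookupMethodConfig(methods map[string]string, method string) string {
	if m, ok := methods[method]; ok {
		return m // exact match, e.g. "/echo.Echo/Ping"
	}
	i := strings.LastIndex(method, "/")
	if m, ok := methods[method[:i+1]]; ok {
		return m // service default, e.g. "/echo.Echo/"
	}
	return methods[""] // global default across all services
}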
 
 func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
@@ -878,22 +1076,15 @@
 	return cc.sc.healthCheckConfig
 }
 
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
-	t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
-		FullMethodName: method,
-	})
-	if err != nil {
-		return nil, nil, toRPCErr(err)
-	}
-	return t, done, nil
-}
-
-func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
 	if sc == nil {
 		// should never reach here.
 		return
 	}
 	cc.sc = sc
+	if configSelector != nil {
+		cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+	}
 
 	if cc.sc.retryThrottling != nil {
 		newThrottler := &retryThrottler{
@@ -906,46 +1097,16 @@
 	} else {
 		cc.retryThrottler.Store((*retryThrottler)(nil))
 	}
-
-	if cc.dopts.balancerBuilder == nil {
-		// Only look at balancer types and switch balancer if balancer dial
-		// option is not set.
-		var newBalancerName string
-		if cc.sc != nil && cc.sc.lbConfig != nil {
-			newBalancerName = cc.sc.lbConfig.name
-		} else {
-			var isGRPCLB bool
-			for _, a := range addrs {
-				if a.Type == resolver.GRPCLB {
-					isGRPCLB = true
-					break
-				}
-			}
-			if isGRPCLB {
-				newBalancerName = grpclbName
-			} else if cc.sc != nil && cc.sc.LB != nil {
-				newBalancerName = *cc.sc.LB
-			} else {
-				newBalancerName = PickFirstBalancerName
-			}
-		}
-		cc.switchBalancer(newBalancerName)
-	} else if cc.balancerWrapper == nil {
-		// Balancer dial option was set, and this is the first time handling
-		// resolved addresses. Build a balancer with dopts.balancerBuilder.
-		cc.curBalancerName = cc.dopts.balancerBuilder.Name()
-		cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
-	}
 }
 
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
 	cc.mu.RLock()
-	r := cc.resolverWrapper
+	cc.resolverWrapper.resolveNow(o)
 	cc.mu.RUnlock()
-	if r == nil {
-		return
-	}
-	go r.resolveNow(o)
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+	cc.resolverWrapper.resolveNow(o)
 }
 
 // ResetConnectBackoff wakes up all subchannels in transient failure and causes
@@ -957,7 +1118,10 @@
 // However, if a previously unavailable network becomes available, this may be
 // used to trigger an immediate reconnect.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func (cc *ClientConn) ResetConnectBackoff() {
 	cc.mu.Lock()
 	conns := cc.conns
@@ -969,51 +1133,52 @@
 
 // Close tears down the ClientConn and all underlying connections.
 func (cc *ClientConn) Close() error {
-	defer cc.cancel()
+	defer func() {
+		cc.cancel()
+		<-cc.csMgr.pubSub.Done()
+	}()
+
+	// Prevent calls to enter/exit idle immediately, and ensure we are not
+	// currently entering/exiting idle mode.
+	cc.idlenessMgr.Close()
 
 	cc.mu.Lock()
 	if cc.conns == nil {
 		cc.mu.Unlock()
 		return ErrClientConnClosing
 	}
+
 	conns := cc.conns
 	cc.conns = nil
 	cc.csMgr.updateState(connectivity.Shutdown)
 
-	rWrapper := cc.resolverWrapper
-	cc.resolverWrapper = nil
-	bWrapper := cc.balancerWrapper
-	cc.balancerWrapper = nil
+	// We can safely unlock and continue to access all fields now as
+	// cc.conns==nil, preventing any further operations on cc.
 	cc.mu.Unlock()
 
-	cc.blockingpicker.close()
+	cc.resolverWrapper.close()
+	// The order of closing matters here since the balancer wrapper assumes the
+	// picker is closed before it is closed.
+	cc.pickerWrapper.close()
+	cc.balancerWrapper.close()
 
-	if rWrapper != nil {
-		rWrapper.close()
-	}
-	if bWrapper != nil {
-		bWrapper.close()
-	}
-
+	<-cc.resolverWrapper.serializer.Done()
+	<-cc.balancerWrapper.serializer.Done()
+	var wg sync.WaitGroup
 	for ac := range conns {
-		ac.tearDown(ErrClientConnClosing)
+		wg.Add(1)
+		go func(ac *addrConn) {
+			defer wg.Done()
+			ac.tearDown(ErrClientConnClosing)
+		}(ac)
 	}
-	if channelz.IsOn() {
-		ted := &channelz.TraceEventDesc{
-			Desc:     "Channel Deleted",
-			Severity: channelz.CtINFO,
-		}
-		if cc.dopts.channelzParentID != 0 {
-			ted.Parent = &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
-				Severity: channelz.CtINFO,
-			}
-		}
-		channelz.AddTraceEvent(cc.channelzID, ted)
-		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity being deleted, and thus prevent it from being deleted right away.
-		channelz.RemoveEntry(cc.channelzID)
-	}
+	wg.Wait()
+	cc.addTraceEvent("deleted")
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from being
+	// deleted right away.
+	channelz.RemoveEntry(cc.channelz.ID)
+
 	return nil
 }
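
The teardown here fans out one goroutine per subchannel and gates Close on a sync.WaitGroup, so Close cannot return before every transport is down. The same pattern in isolation, with placeholder connection names standing in for ac.tearDown:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// teardownAll shuts down a set of connections concurrently and blocks until
// every teardown has finished.
func teardownAll(conns []string) {
	var wg sync.WaitGroup
	for _, c := range conns {
		wg.Add(1)
		go func(c string) {
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for ac.tearDown(ErrClientConnClosing)
			fmt.Println("closed", c)
		}(c)
	}
	wg.Wait() // Close must not return before all subchannels are torn down.
}

func main() {
	teardownAll([]string{"conn-1", "conn-2", "conn-3"})
}
```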
 
@@ -1024,7 +1189,7 @@
 
 	cc     *ClientConn
 	dopts  dialOptions
-	acbw   balancer.SubConn
+	acbw   *acBalancerWrapper
 	scopts balancer.NewSubConnOptions
 
 	// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
@@ -1033,6 +1198,10 @@
 	// is received, transport is closed, ac has been torn down).
 	transport transport.ClientTransport // The current transport.
 
+	// This mutex is used on the RPC path, so its usage should be minimized as
+	// much as possible.
+	// TODO: Find a lock-free way to retrieve the transport and state from the
+	// addrConn.
 	mu      sync.Mutex
 	curAddr resolver.Address   // The current address.
 	addrs   []resolver.Address // All addresses that the resolver resolved to.
@@ -1043,151 +1212,128 @@
 	backoffIdx   int // Needs to be stateful for resetConnectBackoff.
 	resetBackoff chan struct{}
 
-	channelzID int64 // channelz unique identification number.
-	czData     *channelzData
+	channelz *channelz.SubChannel
 }
 
 // Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
 	if ac.state == s {
 		return
 	}
-
-	updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
 	ac.state = s
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-			Desc:     updateMsg,
-			Severity: channelz.CtINFO,
-		})
+	ac.channelz.ChannelMetrics.State.Store(&s)
+	if lastErr == nil {
+		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
+	} else {
+		channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
 	}
-	ac.cc.handleSubConnStateChange(ac.acbw, s)
+	ac.acbw.updateState(s, ac.curAddr, lastErr)
 }
 
 // adjustParams updates parameters used to create transports upon
 // receiving a GoAway.
 func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
-	switch r {
-	case transport.GoAwayTooManyPings:
+	if r == transport.GoAwayTooManyPings {
 		v := 2 * ac.dopts.copts.KeepaliveParams.Time
 		ac.cc.mu.Lock()
-		if v > ac.cc.mkp.Time {
-			ac.cc.mkp.Time = v
+		if v > ac.cc.keepaliveParams.Time {
+			ac.cc.keepaliveParams.Time = v
 		}
 		ac.cc.mu.Unlock()
 	}
 }
 
-func (ac *addrConn) resetTransport() {
-	for i := 0; ; i++ {
-		if i > 0 {
-			ac.cc.resolveNow(resolver.ResolveNowOption{})
-		}
-
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			return
-		}
-
-		addrs := ac.addrs
-		backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
-		// This will be the duration that dial gets to finish.
-		dialDuration := minConnectTimeout
-		if ac.dopts.minConnectTimeout != nil {
-			dialDuration = ac.dopts.minConnectTimeout()
-		}
-
-		if dialDuration < backoffFor {
-			// Give dial more time as we keep failing to connect.
-			dialDuration = backoffFor
-		}
-		// We can potentially spend all the time trying the first address, and
-		// if the server accepts the connection and then hangs, the following
-		// addresses will never be tried.
-		//
-		// The spec doesn't mention what should be done for multiple addresses.
-		// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
-		connectDeadline := time.Now().Add(dialDuration)
-
-		ac.updateConnectivityState(connectivity.Connecting)
-		ac.transport = nil
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
+	acCtx := ac.ctx
+	if acCtx.Err() != nil {
 		ac.mu.Unlock()
-
-		newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
-		if err != nil {
-			// After exhausting all addresses, the addrConn enters
-			// TRANSIENT_FAILURE.
-			ac.mu.Lock()
-			if ac.state == connectivity.Shutdown {
-				ac.mu.Unlock()
-				return
-			}
-			ac.updateConnectivityState(connectivity.TransientFailure)
-
-			// Backoff.
-			b := ac.resetBackoff
-			ac.mu.Unlock()
-
-			timer := time.NewTimer(backoffFor)
-			select {
-			case <-timer.C:
-				ac.mu.Lock()
-				ac.backoffIdx++
-				ac.mu.Unlock()
-			case <-b:
-				timer.Stop()
-			case <-ac.ctx.Done():
-				timer.Stop()
-				return
-			}
-			continue
-		}
-
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			newTr.Close()
-			return
-		}
-		ac.curAddr = addr
-		ac.transport = newTr
-		ac.backoffIdx = 0
-
-		hctx, hcancel := context.WithCancel(ac.ctx)
-		ac.startHealthCheck(hctx)
-		ac.mu.Unlock()
-
-		// Block until the created transport is down. And when this happens,
-		// we restart from the top of the addr list.
-		<-reconnect.Done()
-		hcancel()
-		// restart connecting - the top of the loop will set state to
-		// CONNECTING.  This is against the current connectivity semantics doc,
-		// however it allows for graceful behavior for RPCs not yet dispatched
-		// - unfortunate timing would otherwise lead to the RPC failing even
-		// though the TRANSIENT_FAILURE state (called for by the doc) would be
-		// instantaneous.
-		//
-		// Ideally we should transition to Idle here and block until there is
-		// RPC activity that leads to the balancer requesting a reconnect of
-		// the associated SubConn.
+		return
 	}
+
+	addrs := ac.addrs
+	backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+	// This will be the duration that dial gets to finish.
+	dialDuration := minConnectTimeout
+	if ac.dopts.minConnectTimeout != nil {
+		dialDuration = ac.dopts.minConnectTimeout()
+	}
+
+	if dialDuration < backoffFor {
+		// Give dial more time as we keep failing to connect.
+		dialDuration = backoffFor
+	}
+	// We can potentially spend all the time trying the first address, and
+	// if the server accepts the connection and then hangs, the following
+	// addresses will never be tried.
+	//
+	// The spec doesn't mention what should be done for multiple addresses.
+	// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+	connectDeadline := time.Now().Add(dialDuration)
+
+	ac.updateConnectivityState(connectivity.Connecting, nil)
+	ac.mu.Unlock()
+
+	if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+		// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+		// to ensure one resolution request per pass instead of per subconn failure.
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		ac.mu.Lock()
+		if acCtx.Err() != nil {
+			// addrConn was torn down.
+			ac.mu.Unlock()
+			return
+		}
+		// After exhausting all addresses, the addrConn enters
+		// TRANSIENT_FAILURE.
+		ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+		// Backoff.
+		b := ac.resetBackoff
+		ac.mu.Unlock()
+
+		timer := time.NewTimer(backoffFor)
+		select {
+		case <-timer.C:
+			ac.mu.Lock()
+			ac.backoffIdx++
+			ac.mu.Unlock()
+		case <-b:
+			timer.Stop()
+		case <-acCtx.Done():
+			timer.Stop()
+			return
+		}
+
+		ac.mu.Lock()
+		if acCtx.Err() == nil {
+			ac.updateConnectivityState(connectivity.Idle, err)
+		}
+		ac.mu.Unlock()
+		return
+	}
+	// Success; reset backoff.
+	ac.mu.Lock()
+	ac.backoffIdx = 0
+	ac.mu.Unlock()
 }
 
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at the
-// first successful one. It returns the transport, the address and a Event in
-// the successful case. The Event fires when the returned transport disconnects.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+// tryAllAddrs tries to create a connection to the addresses, stopping at the
+// first successful one. It returns an error if no address was successfully
+// connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
+	var firstConnErr error
 	for _, addr := range addrs {
-		ac.mu.Lock()
-		if ac.state == connectivity.Shutdown {
-			ac.mu.Unlock()
-			return nil, resolver.Address{}, nil, errConnClosing
+		ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
+		if ctx.Err() != nil {
+			return errConnClosing
 		}
+		ac.mu.Lock()
 
 		ac.cc.mu.RLock()
-		ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+		ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams
 		ac.cc.mu.RUnlock()
 
 		copts := ac.dopts.copts
@@ -1196,108 +1342,103 @@
 		}
 		ac.mu.Unlock()
 
-		if channelz.IsOn() {
-			channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
-				Severity: channelz.CtINFO,
-			})
-		}
+		channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr)
 
-		newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
+		err := ac.createTransport(ctx, addr, copts, connectDeadline)
 		if err == nil {
-			return newTr, addr, reconnect, nil
+			return nil
 		}
-		ac.cc.blockingpicker.updateConnectionError(err)
+		if firstConnErr == nil {
+			firstConnErr = err
+		}
+		ac.cc.updateConnectionError(err)
 	}
 
 	// Couldn't connect to any address.
-	return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+	return firstConnErr
 }
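
Returning firstConnErr rather than the most recent error means the caller sees the failure from the first address tried, which is usually the most relevant one. The loop shape in isolation, with a stubbed connect function:

```go
package main

import (
	"errors"
	"fmt"
)

// tryAll attempts each address in order, stops at the first success, and
// otherwise reports the first (not the last) connection error.
func tryAll(addrs []string, connect func(string) error) error {
	var firstErr error
	for _, a := range addrs {
		err := connect(a)
		if err == nil {
			return nil
		}
		if firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}

func main() {
	err := tryAll([]string{"10.0.0.1:443", "10.0.0.2:443"}, func(a string) error {
		return errors.New("connection refused: " + a)
	})
	fmt.Println(err) // connection refused: 10.0.0.1:443
}
```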
 
-// createTransport creates a connection to addr. It returns the transport and a
-// Event in the successful case. The Event fires when the returned transport
-// disconnects.
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
-	prefaceReceived := make(chan struct{})
-	onCloseCalled := make(chan struct{})
-	reconnect := grpcsync.NewEvent()
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+	addr.ServerName = ac.cc.getServerName(addr)
+	hctx, hcancel := context.WithCancel(ctx)
 
-	authority := ac.cc.authority
-	// addr.ServerName takes precedent over ClientConn authority, if present.
-	if addr.ServerName != "" {
-		authority = addr.ServerName
-	}
-
-	target := transport.TargetInfo{
-		Addr:      addr.Addr,
-		Metadata:  addr.Metadata,
-		Authority: authority,
-	}
-
-	once := sync.Once{}
-	onGoAway := func(r transport.GoAwayReason) {
+	onClose := func(r transport.GoAwayReason) {
 		ac.mu.Lock()
+		defer ac.mu.Unlock()
+		// adjust params based on GoAwayReason
 		ac.adjustParams(r)
-		once.Do(func() {
-			if ac.state == connectivity.Ready {
-				// Prevent this SubConn from being used for new RPCs by setting its
-				// state to Connecting.
-				//
-				// TODO: this should be Idle when grpc-go properly supports it.
-				ac.updateConnectivityState(connectivity.Connecting)
-			}
-		})
-		ac.mu.Unlock()
-		reconnect.Fire()
+		if ctx.Err() != nil {
+			// Already shut down or connection attempt canceled.  tearDown() or
+			// updateAddrs() already cleared the transport and canceled hctx
+			// via ac.ctx, and we expected this connection to be closed, so do
+			// nothing here.
+			return
+		}
+		hcancel()
+		if ac.transport == nil {
+			// We're still connecting to this address, which could error.  Do
+			// not update the connectivity state or resolve; these will happen
+			// at the end of the tryAllAddrs connection loop in the event of an
+			// error.
+			return
+		}
+		ac.transport = nil
+		// Refresh the name resolver on any connection loss.
+		ac.cc.resolveNow(resolver.ResolveNowOptions{})
+		// Always go idle and wait for the LB policy to initiate a new
+		// connection attempt.
+		ac.updateConnectivityState(connectivity.Idle, nil)
 	}
 
-	onClose := func() {
-		ac.mu.Lock()
-		once.Do(func() {
-			if ac.state == connectivity.Ready {
-				// Prevent this SubConn from being used for new RPCs by setting its
-				// state to Connecting.
-				//
-				// TODO: this should be Idle when grpc-go properly supports it.
-				ac.updateConnectivityState(connectivity.Connecting)
-			}
-		})
-		ac.mu.Unlock()
-		close(onCloseCalled)
-		reconnect.Fire()
-	}
-
-	onPrefaceReceipt := func() {
-		close(prefaceReceived)
-	}
-
-	connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+	connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
 	defer cancel()
-	if channelz.IsOn() {
-		copts.ChannelzParentID = ac.channelzID
-	}
+	copts.ChannelzParent = ac.channelz
 
-	newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
+	newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
 	if err != nil {
+		if logger.V(2) {
+			logger.Infof("Creating new client transport to %q: %v", addr, err)
+		}
 		// newTr is either nil, or closed.
-		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
-		return nil, nil, err
+		hcancel()
+		channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
+		return err
 	}
 
-	select {
-	case <-time.After(connectDeadline.Sub(time.Now())):
-		// We didn't get the preface in time.
-		newTr.Close()
-		grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
-		return nil, nil, errors.New("timed out waiting for server handshake")
-	case <-prefaceReceived:
-		// We got the preface - huzzah! things are good.
-	case <-onCloseCalled:
-		// The transport has already closed - noop.
-		return nil, nil, errors.New("connection closed")
-		// TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
+	ac.mu.Lock()
+	defer ac.mu.Unlock()
+	if ctx.Err() != nil {
+		// This can happen if the subConn was removed while in `Connecting`
+		// state. tearDown() would have set the state to `Shutdown`, but
+		// would not have closed the transport since ac.transport would not
+		// have been set at that point.
+		//
+		// We run this in a goroutine because newTr.Close() calls onClose()
+		// inline, which requires locking ac.mu.
+		//
+		// The error we pass to Close() is immaterial since there are no open
+		// streams at this point, so no trailers with error details will be sent
+		// out. We just need to pass a non-nil error.
+		//
+		// This can also happen when updateAddrs is called during a connection
+		// attempt.
+		go newTr.Close(transport.ErrConnClosing)
+		return nil
 	}
-	return newTr, reconnect, nil
+	if hctx.Err() != nil {
+		// onClose was already called for this connection, but the connection
+		// was successfully established first.  Consider it a success and set
+		// the new state to Idle.
+		ac.updateConnectivityState(connectivity.Idle, nil)
+		return nil
+	}
+	ac.curAddr = addr
+	ac.transport = newTr
+	ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+	return nil
 }
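
The per-attempt time limit is enforced with context.WithDeadline plus a deferred cancel, so the attempt's resources are released on every return path. A minimal sketch of that shape, using net.Dialer as a stand-in for transport.NewHTTP2Client:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// dialWithDeadline bounds one connection attempt: the child context carries
// the deadline, and cancel is deferred so it always runs.
func dialWithDeadline(ctx context.Context, addr string, deadline time.Time) (net.Conn, error) {
	connectCtx, cancel := context.WithDeadline(ctx, deadline)
	defer cancel()

	var d net.Dialer
	return d.DialContext(connectCtx, "tcp", addr)
}

func main() {
	conn, err := dialWithDeadline(context.Background(), "example.com:80", time.Now().Add(2*time.Second))
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected to", conn.RemoteAddr())
}
```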
 
 // startHealthCheck starts the health checking stream (RPC) to watch the health
@@ -1305,7 +1446,7 @@
 //
 // LB channel health checking is enabled when all requirements below are met:
 // 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
-// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
+// 2. internal.HealthCheckFunc is set by importing the grpc/health package
 // 3. a service config with non-empty healthCheckConfig field is provided
 // 4. the load balancer requests it
 //
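
Assuming the usual grpc-go client setup, requirements 2 through 4 are typically satisfied as in this sketch: the blank import registers the health check function, and the service config both enables health checking and picks a policy (round_robin) that requests it. The target and serviceName are placeholders:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/health" // requirement 2: sets internal.HealthCheckFunc
)

func main() {
	cc, err := grpc.NewClient("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// Requirements 3 and 4: a non-empty healthCheckConfig, plus a load
		// balancing policy (round_robin) that enables health checking.
		grpc.WithDefaultServiceConfig(`{
			"loadBalancingConfig": [{"round_robin":{}}],
			"healthCheckConfig": {"serviceName": ""}
		}`),
	)
	if err != nil {
		panic(err)
	}
	defer cc.Close()
}
```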
@@ -1316,7 +1457,7 @@
 	var healthcheckManagingState bool
 	defer func() {
 		if !healthcheckManagingState {
-			ac.updateConnectivityState(connectivity.Ready)
+			ac.updateConnectivityState(connectivity.Ready, nil)
 		}
 	}()
 
@@ -1330,12 +1471,12 @@
 	if !ac.scopts.HealthCheckEnabled {
 		return
 	}
-	healthCheckFunc := ac.cc.dopts.healthCheckFunc
+	healthCheckFunc := internal.HealthCheckFunc
 	if healthCheckFunc == nil {
 		// The health package is not imported to set health check function.
 		//
 		// TODO: add a link to the health check doc in the error message.
-		grpclog.Error("Health check is requested but health check function is not set.")
+		channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.")
 		return
 	}
 
@@ -1343,7 +1484,7 @@
 
 	// Set up the health check helper functions.
 	currentTr := ac.transport
-	newStream := func(method string) (interface{}, error) {
+	newStream := func(method string) (any, error) {
 		ac.mu.Lock()
 		if ac.transport != currentTr {
 			ac.mu.Unlock()
@@ -1352,28 +1493,22 @@
 		ac.mu.Unlock()
 		return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
 	}
-	setConnectivityState := func(s connectivity.State) {
+	setConnectivityState := func(s connectivity.State, lastErr error) {
 		ac.mu.Lock()
 		defer ac.mu.Unlock()
 		if ac.transport != currentTr {
 			return
 		}
-		ac.updateConnectivityState(s)
+		ac.updateConnectivityState(s, lastErr)
 	}
 	// Start the health checking stream.
 	go func() {
-		err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+		err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
 		if err != nil {
 			if status.Code(err) == codes.Unimplemented {
-				if channelz.IsOn() {
-					channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-						Desc:     "Subchannel health check is unimplemented at server side, thus health check is disabled",
-						Severity: channelz.CtError,
-					})
-				}
-				grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
+				channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
 			} else {
-				grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
+				channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err)
 			}
 		}
 	}()
@@ -1387,33 +1522,20 @@
 	ac.mu.Unlock()
 }
 
-// getReadyTransport returns the transport if ac's state is READY.
-// Otherwise it returns nil, false.
-// If ac's state is IDLE, it will trigger ac to connect.
-func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+// getReadyTransport returns the transport if ac's state is READY or nil if not.
+func (ac *addrConn) getReadyTransport() transport.ClientTransport {
 	ac.mu.Lock()
-	if ac.state == connectivity.Ready && ac.transport != nil {
-		t := ac.transport
-		ac.mu.Unlock()
-		return t, true
+	defer ac.mu.Unlock()
+	if ac.state == connectivity.Ready {
+		return ac.transport
 	}
-	var idle bool
-	if ac.state == connectivity.Idle {
-		idle = true
-	}
-	ac.mu.Unlock()
-	// Trigger idle ac to connect.
-	if idle {
-		ac.connect()
-	}
-	return nil, false
+	return nil
 }
 
 // tearDown starts to tear down the addrConn.
-// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
-// some edge cases (e.g., the caller opens and closes many addrConn's in a
-// tight loop.
-// tearDown doesn't remove ac from ac.cc.conns.
+//
+// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
+// will leak. In most cases, call cc.removeAddrConn() instead.
 func (ac *addrConn) tearDown(err error) {
 	ac.mu.Lock()
 	if ac.state == connectivity.Shutdown {
@@ -1424,68 +1546,48 @@
 	ac.transport = nil
 	// We have to set the state to Shutdown before anything else to prevent races
 	// between setting the state and logic that waits on context cancellation / etc.
-	ac.updateConnectivityState(connectivity.Shutdown)
+	ac.updateConnectivityState(connectivity.Shutdown, nil)
 	ac.cancel()
 	ac.curAddr = resolver.Address{}
-	if err == errConnDrain && curTr != nil {
-		// GracefulClose(...) may be executed multiple times when
-		// i) receiving multiple GoAway frames from the server; or
-		// ii) there are concurrent name resolver/Balancer triggered
-		// address removal and GoAway.
-		// We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
-		ac.mu.Unlock()
-		curTr.GracefulClose()
-		ac.mu.Lock()
-	}
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
-			Desc:     "Subchannel Deleted",
-			Severity: channelz.CtINFO,
-			Parent: &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
-				Severity: channelz.CtINFO,
-			},
-		})
-		// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
-		// the entity being deleted, and thus prevent it from being deleted right away.
-		channelz.RemoveEntry(ac.channelzID)
-	}
+
+	channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+		Desc:     "Subchannel deleted",
+		Severity: channelz.CtInfo,
+		Parent: &channelz.TraceEvent{
+			Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID),
+			Severity: channelz.CtInfo,
+		},
+	})
+	// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+	// trace reference to the entity being deleted, and thus prevent it from
+	// being deleted right away.
+	channelz.RemoveEntry(ac.channelz.ID)
 	ac.mu.Unlock()
-}
 
-func (ac *addrConn) getState() connectivity.State {
-	ac.mu.Lock()
-	defer ac.mu.Unlock()
-	return ac.state
-}
-
-func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
-	ac.mu.Lock()
-	addr := ac.curAddr.Addr
-	ac.mu.Unlock()
-	return &channelz.ChannelInternalMetric{
-		State:                    ac.getState(),
-		Target:                   addr,
-		CallsStarted:             atomic.LoadInt64(&ac.czData.callsStarted),
-		CallsSucceeded:           atomic.LoadInt64(&ac.czData.callsSucceeded),
-		CallsFailed:              atomic.LoadInt64(&ac.czData.callsFailed),
-		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
+	// We have to release the lock before the call to GracefulClose/Close here
+	// because both of them call onClose(), which requires locking ac.mu.
+	if curTr != nil {
+		if err == errConnDrain {
+			// Close the transport gracefully when the subConn is being shutdown.
+			//
+			// GracefulClose() may be executed multiple times if:
+			// - multiple GoAway frames are received from the server
+			// - there are concurrent name resolver or balancer triggered
+			//   address removal and GoAway
+			curTr.GracefulClose()
+		} else {
+			// Hard close the transport when the channel is entering idle or is
+			// being shutdown. In the case where the channel is being shutdown,
+			// closing of transports is also taken care of by cancellation of cc.ctx.
+			// But in the case where the channel is entering idle, we need to
+			// explicitly close the transports here. Instead of distinguishing
+			// between these two cases, it is simpler to close the transport
+			// unconditionally here.
+			curTr.Close(err)
+		}
 	}
 }
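
The branch above reduces to a simple rule: drain (GracefulClose) only when the subchannel is shut down with errConnDrain, otherwise close hard. A toy sketch; the transport type and messages are stand-ins for the real ClientTransport:

```go
package main

import (
	"errors"
	"fmt"
)

var errConnDrain = errors.New("grpc: the connection is drained")

type transport struct{ name string }

func (t *transport) GracefulClose()  { fmt.Println(t.name, "draining; in-flight streams may finish") }
func (t *transport) Close(err error) { fmt.Println(t.name, "hard close:", err) }

// shutdown mirrors tearDown's choice between the two close paths.
func shutdown(t *transport, err error) {
	if err == errConnDrain {
		t.GracefulClose()
		return
	}
	t.Close(err)
}

func main() {
	shutdown(&transport{name: "t1"}, errConnDrain)
	shutdown(&transport{name: "t2"}, errors.New("client conn closing"))
}
```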
 
-func (ac *addrConn) incrCallsStarted() {
-	atomic.AddInt64(&ac.czData.callsStarted, 1)
-	atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (ac *addrConn) incrCallsSucceeded() {
-	atomic.AddInt64(&ac.czData.callsSucceeded, 1)
-}
-
-func (ac *addrConn) incrCallsFailed() {
-	atomic.AddInt64(&ac.czData.callsFailed, 1)
-}
-
 type retryThrottler struct {
 	max    float64
 	thresh float64
@@ -1523,12 +1625,17 @@
 	}
 }
 
-type channelzChannel struct {
-	cc *ClientConn
+func (ac *addrConn) incrCallsStarted() {
+	ac.channelz.ChannelMetrics.CallsStarted.Add(1)
+	ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
 }
 
-func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
-	return c.cc.channelzMetric()
+func (ac *addrConn) incrCallsSucceeded() {
+	ac.channelz.ChannelMetrics.CallsSucceeded.Add(1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+	ac.channelz.ChannelMetrics.CallsFailed.Add(1)
 }
 
 // ErrClientConnTimeout indicates that the ClientConn cannot establish the
@@ -1537,3 +1644,189 @@
 // Deprecated: This error is never returned by grpc and should not be
 // referenced by users.
 var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+// getResolver finds the scheme in the cc's resolvers or the global registry.
+// scheme should always be lowercase (typically by virtue of url.Parse()
+// performing proper RFC3986 behavior).
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+	for _, rb := range cc.dopts.resolvers {
+		if scheme == rb.Scheme() {
+			return rb
+		}
+	}
+	return resolver.Get(scheme)
+}
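
Per-channel resolvers registered with grpc.WithResolvers win over the global registry, which is exactly the order this loop implements. A toy globally registered builder for a hypothetical "static" scheme, so that dialing "static:///anything" resolves to one fixed address:

```go
package main

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder always reports a single fixed address.
type staticBuilder struct{ addr string }

func (b *staticBuilder) Scheme() string { return "static" }

func (b *staticBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	err := cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: b.addr}}})
	return &staticResolver{}, err
}

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func main() {
	// Global registration; a resolver passed via grpc.WithResolvers for the
	// same scheme would take precedence on that channel.
	resolver.Register(&staticBuilder{addr: "127.0.0.1:50051"})
}
```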
+
+func (cc *ClientConn) updateConnectionError(err error) {
+	cc.lceMu.Lock()
+	cc.lastConnectionError = err
+	cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+	cc.lceMu.Lock()
+	defer cc.lceMu.Unlock()
+	return cc.lastConnectionError
+}
+
+// initParsedTargetAndResolverBuilder parses the user's dial target and stores
+// the parsed target in `cc.parsedTarget`.
+//
+// The resolver to use is determined based on the scheme in the parsed target
+// and the same is stored in `cc.resolverBuilder`.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
+	logger.Infof("original dial target is: %q", cc.target)
+
+	var rb resolver.Builder
+	parsedTarget, err := parseTarget(cc.target)
+	if err == nil {
+		rb = cc.getResolver(parsedTarget.URL.Scheme)
+		if rb != nil {
+			cc.parsedTarget = parsedTarget
+			cc.resolverBuilder = rb
+			return nil
+		}
+	}
+
+	// We are here because the user's dial target did not contain a scheme or
+	// specified an unregistered scheme. We should fall back to the default
+	// scheme, except when a custom dialer is specified, in which case we
+	// should always use the passthrough scheme. In either case, we need to
+	// respect any overridden global defaults set by the user.
+	defScheme := cc.dopts.defaultScheme
+	if internal.UserSetDefaultScheme {
+		defScheme = resolver.GetDefaultScheme()
+	}
+
+	canonicalTarget := defScheme + ":///" + cc.target
+
+	parsedTarget, err = parseTarget(canonicalTarget)
+	if err != nil {
+		return err
+	}
+	rb = cc.getResolver(parsedTarget.URL.Scheme)
+	if rb == nil {
+		return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
+	}
+	cc.parsedTarget = parsedTarget
+	cc.resolverBuilder = rb
+	return nil
+}
+
+// parseTarget uses RFC 3986 semantics to parse the given target into a
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
+func parseTarget(target string) (resolver.Target, error) {
+	u, err := url.Parse(target)
+	if err != nil {
+		return resolver.Target{}, err
+	}
+
+	return resolver.Target{URL: *u}, nil
+}
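
Because parseTarget defers entirely to url.Parse, target handling follows RFC 3986 directly; the fallback mentioned above kicks in when the parsed scheme has no registered resolver. A quick sketch of how typical dial targets parse:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, target := range []string{
		"dns:///example.com:50051", // scheme "dns", endpoint "example.com:50051"
		"unix:///tmp/grpc.sock",    // scheme "unix", endpoint is a filesystem path
		"localhost:50051",          // parses as scheme "localhost" (unregistered),
		// so Dial falls back to defaultScheme + ":///" + target.
	} {
		u, err := url.Parse(target)
		if err != nil {
			fmt.Println(target, "->", err)
			continue
		}
		fmt.Printf("%-26s scheme=%q host=%q path=%q opaque=%q\n",
			target, u.Scheme, u.Host, u.Path, u.Opaque)
	}
}
```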
+
+// encodeAuthority escapes the authority string based on valid chars defined in
+// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
+func encodeAuthority(authority string) string {
+	const upperhex = "0123456789ABCDEF"
+
+	// Return true for characters that must be escaped. The set of valid
+	// (unescaped) characters is defined in
+	// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+	shouldEscape := func(c byte) bool {
+		// Alphanum are always allowed.
+		if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+			return false
+		}
+		switch c {
+		case '-', '_', '.', '~': // Unreserved characters
+			return false
+		case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+			return false
+		case ':', '[', ']', '@': // Authority related delimiters
+			return false
+		}
+		// Everything else must be escaped.
+		return true
+	}
+
+	hexCount := 0
+	for i := 0; i < len(authority); i++ {
+		c := authority[i]
+		if shouldEscape(c) {
+			hexCount++
+		}
+	}
+
+	if hexCount == 0 {
+		return authority
+	}
+
+	required := len(authority) + 2*hexCount
+	t := make([]byte, required)
+
+	j := 0
+	// This logic is a barebones version of escape in the go net/url library.
+	for i := 0; i < len(authority); i++ {
+		switch c := authority[i]; {
+		case shouldEscape(c):
+			t[j] = '%'
+			t[j+1] = upperhex[c>>4]
+			t[j+2] = upperhex[c&15]
+			j += 3
+		default:
+			t[j] = authority[i]
+			j++
+		}
+	}
+	return string(t)
+}
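
Assuming encodeAuthority above is in scope, its behavior on a few authorities follows directly from the escape rules (a fragment, not a full program; "fmt" is assumed imported):

```go
// demoEncodeAuthority illustrates the escape rules implemented above.
func demoEncodeAuthority() {
	fmt.Println(encodeAuthority("[::1]:50051"))    // "[::1]:50051": ':', '[', ']' pass through
	fmt.Println(encodeAuthority("my host:50051"))  // "my%20host:50051": space must be escaped
	fmt.Println(encodeAuthority("host_1.example")) // unchanged: unreserved characters only
}
```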
+
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials does not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initAuthority() error {
+	dopts := cc.dopts
+	// Historically, we had two options for users to specify the serverName or
+	// authority for a channel. One was through the transport credentials
+	// (either in its constructor, or through the OverrideServerName() method).
+	// The other option (for cases where WithInsecure() dial option was used)
+	// was to use the WithAuthority() dial option.
+	//
+	// A few things have changed since:
+	// - `insecure` package with an implementation of the `TransportCredentials`
+	//   interface for the insecure case
+	// - WithAuthority() dial option support for secure credentials
+	authorityFromCreds := ""
+	if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+		authorityFromCreds = creds.Info().ServerName
+	}
+	authorityFromDialOption := dopts.authority
+	if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+		return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+	}
+
+	endpoint := cc.parsedTarget.Endpoint()
+	if authorityFromDialOption != "" {
+		cc.authority = authorityFromDialOption
+	} else if authorityFromCreds != "" {
+		cc.authority = authorityFromCreds
+	} else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+		cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+	} else if strings.HasPrefix(endpoint, ":") {
+		cc.authority = "localhost" + encodeAuthority(endpoint)
+	} else {
+		cc.authority = encodeAuthority(endpoint)
+	}
+	return nil
+}
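
A condensed sketch of the precedence described above, omitting the resolver.AuthorityOverrider hook and the percent-encoding step for brevity; the function and argument names are illustrative:

```go
package main

import (
	"fmt"
	"strings"
)

// pickAuthority mirrors initAuthority's order: dial option first, then the
// credentials' server name, then the endpoint, with ":port" endpoints
// mapped to "localhost:port".
func pickAuthority(fromDialOption, fromCreds, endpoint string) string {
	switch {
	case fromDialOption != "":
		return fromDialOption
	case fromCreds != "":
		return fromCreds
	case strings.HasPrefix(endpoint, ":"):
		return "localhost" + endpoint
	default:
		return endpoint
	}
}

func main() {
	fmt.Println(pickAuthority("", "", ":50051"))                  // localhost:50051
	fmt.Println(pickAuthority("", "tls.example.com", "10.0.0.1")) // tls.example.com
}
```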
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 1297765..959c2f9 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@
 import (
 	"google.golang.org/grpc/encoding"
 	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+	"google.golang.org/grpc/mem"
 )
 
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
 type baseCodec interface {
-	Marshal(v interface{}) ([]byte, error)
-	Unmarshal(data []byte, v interface{}) error
+	Marshal(v any) (mem.BufferSlice, error)
+	Unmarshal(data mem.BufferSlice, v any) error
 }
 
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise it returns the result of encoding.GetCodecV2,
+// which is nil if no codec of that name is registered.
+func getCodec(name string) encoding.CodecV2 {
+	if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+		return newCodecV1Bridge(codecV1)
+	}
+
+	return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+	return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+	return codecV1Bridge{
+		codecV0Bridge: codecV0Bridge{codec: c},
+		name:          c.Name(),
+	}
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+	codec interface {
+		Marshal(v any) ([]byte, error)
+		Unmarshal(data []byte, v any) error
+	}
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+	data, err := c.codec.Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return mem.BufferSlice{mem.SliceBuffer(data)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+	return c.codec.Unmarshal(data.Materialize(), v)
+}
+
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+	codecV0Bridge
+	name string
+}
+
+func (c codecV1Bridge) Name() string {
+	return c.name
+}
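
Both bridges hinge on the two mem conversions used above: a V1 []byte becomes a one-element BufferSlice on Marshal, and Materialize flattens a BufferSlice back into a contiguous []byte for a V1 Unmarshal. A tiny demonstration:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// What codecV0Bridge.Marshal produces from a V1 codec's []byte:
	bs := mem.BufferSlice{mem.SliceBuffer([]byte("hello"))}
	// What codecV0Bridge.Unmarshal hands to a V1 codec:
	fmt.Printf("%s (%d bytes)\n", bs.Materialize(), bs.Len()) // hello (5 bytes)
}
```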
 
 // Codec defines the interface gRPC uses to encode and decode messages.
 // Note that implementations of this interface must be thread safe;
@@ -41,9 +96,9 @@
 // Deprecated: use encoding.Codec instead.
 type Codec interface {
 	// Marshal returns the wire format of v.
-	Marshal(v interface{}) ([]byte, error)
+	Marshal(v any) ([]byte, error)
 	// Unmarshal parses the wire format into v.
-	Unmarshal(data []byte, v interface{}) error
+	Unmarshal(data []byte, v any) error
 	// String returns the name of the Codec implementation.  This is unused by
 	// gRPC.
 	String() string
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
deleted file mode 100644
index 4cdc6ba..0000000
--- a/vendor/google.golang.org/grpc/codegen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# This script serves as an example to demonstrate how to generate the gRPC-Go
-# interface and the related messages from .proto file.
-#
-# It assumes the installation of i) Google proto buffer compiler at
-# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
-# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
-# not, please install them first.
-#
-# We recommend running this script at $GOPATH/src.
-#
-# If this is not what you need, feel free to make your own scripts. Again, this
-# script is for demonstration purpose.
-#
-proto=$1
-protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
index 0b206a5..934fac2 100644
--- a/vendor/google.golang.org/grpc/codes/code_string.go
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -18,7 +18,15 @@
 
 package codes
 
-import "strconv"
+import (
+	"strconv"
+
+	"google.golang.org/grpc/internal"
+)
+
+func init() {
+	internal.CanonicalString = canonicalString
+}
 
 func (c Code) String() string {
 	switch c {
@@ -60,3 +68,44 @@
 		return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
 	}
 }
+
+func canonicalString(c Code) string {
+	switch c {
+	case OK:
+		return "OK"
+	case Canceled:
+		return "CANCELLED"
+	case Unknown:
+		return "UNKNOWN"
+	case InvalidArgument:
+		return "INVALID_ARGUMENT"
+	case DeadlineExceeded:
+		return "DEADLINE_EXCEEDED"
+	case NotFound:
+		return "NOT_FOUND"
+	case AlreadyExists:
+		return "ALREADY_EXISTS"
+	case PermissionDenied:
+		return "PERMISSION_DENIED"
+	case ResourceExhausted:
+		return "RESOURCE_EXHAUSTED"
+	case FailedPrecondition:
+		return "FAILED_PRECONDITION"
+	case Aborted:
+		return "ABORTED"
+	case OutOfRange:
+		return "OUT_OF_RANGE"
+	case Unimplemented:
+		return "UNIMPLEMENTED"
+	case Internal:
+		return "INTERNAL"
+	case Unavailable:
+		return "UNAVAILABLE"
+	case DataLoss:
+		return "DATA_LOSS"
+	case Unauthenticated:
+		return "UNAUTHENTICATED"
+	default:
+		return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
+	}
+}
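
Note the casing difference from String in code_string.go: the canonical form is the upper-snake wire name ("CANCELLED"), not the Go identifier ("Canceled"). canonicalString itself is unexported and only reachable through internal.CanonicalString, so this sketch shows just the public side:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(codes.Canceled.String()) // "Canceled" (Go-style, used in logs)
	// canonicalString(codes.Canceled) would yield "CANCELLED", the canonical
	// wire name; it is wired up via internal.CanonicalString in init above.
}
```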
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 0273883..0b42c30 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -25,7 +25,13 @@
 	"strconv"
 )
 
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+// A Code is a status code defined according to the [gRPC documentation].
+//
+// Only the codes defined as consts in this package are valid codes. Do not use
+// other code values.  Behavior of other codes is implementation-specific and
+// interoperability between implementations is not guaranteed.
+//
+// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
 type Code uint32
 
 const (
@@ -33,6 +39,9 @@
 	OK Code = 0
 
 	// Canceled indicates the operation was canceled (typically by the caller).
+	//
+	// The gRPC framework will generate this error code when cancellation
+	// is requested.
 	Canceled Code = 1
 
 	// Unknown error. An example of where this error may be returned is
@@ -40,12 +49,17 @@
 	// an error-space that is not known in this address space. Also
 	// errors raised by APIs that do not return enough error information
 	// may be converted to this error.
+	//
+	// The gRPC framework will generate this error code in the two cases
+	// mentioned above.
 	Unknown Code = 2
 
 	// InvalidArgument indicates client specified an invalid argument.
 	// Note that this differs from FailedPrecondition. It indicates arguments
 	// that are problematic regardless of the state of the system
 	// (e.g., a malformed file name).
+	//
+	// This error code will not be generated by the gRPC framework.
 	InvalidArgument Code = 3
 
 	// DeadlineExceeded means operation expired before completion.
@@ -53,14 +67,21 @@
 	// returned even if the operation has completed successfully. For
 	// example, a successful response from a server could have been delayed
 	// long enough for the deadline to expire.
+	//
+	// The gRPC framework will generate this error code when the deadline is
+	// exceeded.
 	DeadlineExceeded Code = 4
 
 	// NotFound means some requested entity (e.g., file or directory) was
 	// not found.
+	//
+	// This error code will not be generated by the gRPC framework.
 	NotFound Code = 5
 
 	// AlreadyExists means an attempt to create an entity failed because one
 	// already exists.
+	//
+	// This error code will not be generated by the gRPC framework.
 	AlreadyExists Code = 6
 
 	// PermissionDenied indicates the caller does not have permission to
@@ -69,10 +90,17 @@
 	// instead for those errors). It must not be
 	// used if the caller cannot be identified (use Unauthenticated
 	// instead for those errors).
+	//
+	// This error code will not be generated by the gRPC core framework,
+	// but expect authentication middleware to use it.
 	PermissionDenied Code = 7
 
 	// ResourceExhausted indicates some resource has been exhausted, perhaps
 	// a per-user quota, or perhaps the entire file system is out of space.
+	//
+	// This error code will be generated by the gRPC framework in
+	// out-of-memory and server overload situations, or when a message is
+	// larger than the configured maximum size.
 	ResourceExhausted Code = 8
 
 	// FailedPrecondition indicates operation was rejected because the
@@ -94,6 +122,8 @@
 	//      REST Get/Update/Delete on a resource and the resource on the
 	//      server does not match the condition. E.g., conflicting
 	//      read-modify-write on the same resource.
+	//
+	// This error code will not be generated by the gRPC framework.
 	FailedPrecondition Code = 9
 
 	// Aborted indicates the operation was aborted, typically due to a
@@ -102,6 +132,8 @@
 	//
 	// See litmus test above for deciding between FailedPrecondition,
 	// Aborted, and Unavailable.
+	//
+	// This error code will not be generated by the gRPC framework.
 	Aborted Code = 10
 
 	// OutOfRange means operation was attempted past the valid range.
@@ -119,15 +151,26 @@
 	// error) when it applies so that callers who are iterating through
 	// a space can easily look for an OutOfRange error to detect when
 	// they are done.
+	//
+	// This error code will not be generated by the gRPC framework.
 	OutOfRange Code = 11
 
 	// Unimplemented indicates operation is not implemented or not
 	// supported/enabled in this service.
+	//
+	// This error code will be generated by the gRPC framework. Most
+	// commonly, you will see this error code when a method implementation
+	// is missing on the server. It can also be generated for unknown
+	// compression algorithms or a disagreement as to whether an RPC should
+	// be streaming.
 	Unimplemented Code = 12
 
 	// Internal errors. Means some invariants expected by the underlying
 	// system have been broken. If you see one of these errors,
 	// something is very broken.
+	//
+	// This error code will be generated by the gRPC framework in several
+	// internal error conditions.
 	Internal Code = 13
 
 	// Unavailable indicates the service is currently unavailable.
@@ -137,13 +180,22 @@
 	//
 	// See litmus test above for deciding between FailedPrecondition,
 	// Aborted, and Unavailable.
+	//
+	// This error code will be generated by the gRPC framework during
+	// abrupt shutdown of a server process or network connection.
 	Unavailable Code = 14
 
 	// DataLoss indicates unrecoverable data loss or corruption.
+	//
+	// This error code will not be generated by the gRPC framework.
 	DataLoss Code = 15
 
 	// Unauthenticated indicates the request does not have valid
 	// authentication credentials for the operation.
+	//
+	// The gRPC framework will generate this error code when the
+	// authentication metadata is invalid or a Credentials callback fails,
+	// but also expect authentication middleware to generate it.
 	Unauthenticated Code = 16
 
 	_maxCode = 17
@@ -183,7 +235,7 @@
 
 	if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
 		if ci >= _maxCode {
-			return fmt.Errorf("invalid code: %q", ci)
+			return fmt.Errorf("invalid code: %d", ci)
 		}
 
 		*c = Code(ci)
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
index 34ec36f..4a89926 100644
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -18,15 +18,14 @@
 
 // Package connectivity defines connectivity semantics.
 // For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
-// All APIs in this package are experimental.
 package connectivity
 
 import (
-	"context"
-
 	"google.golang.org/grpc/grpclog"
 )
 
+var logger = grpclog.Component("core")
+
 // State indicates the state of connectivity.
 // It can be the state of a ClientConn or SubConn.
 type State int
@@ -44,8 +43,8 @@
 	case Shutdown:
 		return "SHUTDOWN"
 	default:
-		grpclog.Errorf("unknown connectivity state: %d", s)
-		return "Invalid-State"
+		logger.Errorf("unknown connectivity state: %d", s)
+		return "INVALID_STATE"
 	}
 }
 
@@ -62,12 +61,34 @@
 	Shutdown
 )
 
-// Reporter reports the connectivity states.
-type Reporter interface {
-	// CurrentState returns the current state of the reporter.
-	CurrentState() State
-	// WaitForStateChange blocks until the reporter's state is different from the given state,
-	// and returns true.
-	// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
-	WaitForStateChange(context.Context, State) bool
+// ServingMode indicates the current mode of operation of the server.
+//
+// Only xDS enabled gRPC servers currently report their serving mode.
+type ServingMode int
+
+const (
+	// ServingModeStarting indicates that the server is starting up.
+	ServingModeStarting ServingMode = iota
+	// ServingModeServing indicates that the server contains all required
+	// configuration and is serving RPCs.
+	ServingModeServing
+	// ServingModeNotServing indicates that the server is not accepting new
+	// connections. Existing connections will be closed gracefully, allowing
+	// in-progress RPCs to complete. A server enters this mode when it does not
+	// contain the required configuration to serve RPCs.
+	ServingModeNotServing
+)
+
+func (s ServingMode) String() string {
+	switch s {
+	case ServingModeStarting:
+		return "STARTING"
+	case ServingModeServing:
+		return "SERVING"
+	case ServingModeNotServing:
+		return "NOT_SERVING"
+	default:
+		logger.Errorf("unknown serving mode: %d", s)
+		return "INVALID_MODE"
+	}
 }
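
Client code can observe these connectivity transitions through the public ClientConn API. A minimal watcher sketch with a placeholder target; note that GetState and WaitForStateChange are themselves experimental APIs:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// watch logs every channel state change until ctx expires.
func watch(ctx context.Context, cc *grpc.ClientConn) {
	for {
		s := cc.GetState()
		fmt.Println("state:", s)
		if !cc.WaitForStateChange(ctx, s) {
			return // ctx is done
		}
	}
}

func main() {
	cc, err := grpc.NewClient("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	cc.Connect() // the channel starts Idle; kick off a connection attempt
	watch(ctx, cc)
}
```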
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index c690161..c8e337c 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -24,56 +24,126 @@
 
 import (
 	"context"
-	"crypto/tls"
-	"crypto/x509"
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"net"
 
-	"github.com/golang/protobuf/proto"
-
-	"google.golang.org/grpc/credentials/internal"
-	ginternal "google.golang.org/grpc/internal"
+	"google.golang.org/grpc/attributes"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/protobuf/proto"
 )
 
 // PerRPCCredentials defines the common interface for the credentials which need to
 // attach security information to every RPC (e.g., oauth2).
 type PerRPCCredentials interface {
-	// GetRequestMetadata gets the current request metadata, refreshing
-	// tokens if required. This should be called by the transport layer on
-	// each request, and the data should be populated in headers or other
-	// context. If a status code is returned, it will be used as the status
-	// for the RPC. uri is the URI of the entry point for the request.
-	// When supported by the underlying implementation, ctx can be used for
-	// timeout and cancellation. Additionally, RequestInfo data will be
-	// available via ctx to this call.
-	// TODO(zhaoq): Define the set of the qualified keys instead of leaving
-	// it as an arbitrary string.
+	// GetRequestMetadata gets the current request metadata, refreshing tokens
+	// if required. This should be called by the transport layer on each
+	// request, and the data should be populated in headers or other
+	// context. If a status code is returned, it will be used as the status for
+	// the RPC (restricted to an allowable set of codes as defined by gRFC
+	// A54). uri is the URI of the entry point for the request. When supported
+	// by the underlying implementation, ctx can be used for timeout and
+	// cancellation. Additionally, RequestInfo data will be available via ctx
+	// to this call.
+	// TODO(zhaoq): Define the set of the qualified keys instead of leaving it as an arbitrary string.
 	GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
 	// RequireTransportSecurity indicates whether the credentials require
 	// transport security.
 	RequireTransportSecurity() bool
 }
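
A minimal PerRPCCredentials implementation matching this interface; staticToken and its header value are illustrative, and a real implementation would refresh tokens and honor ctx:

```go
package main

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// staticToken attaches a fixed bearer token to every RPC.
type staticToken struct{ token string }

func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity returns true so the token is never sent over an
// insecure connection.
func (staticToken) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = staticToken{}

func main() {
	_ = staticToken{token: "t0ps3cret"} // pass via grpc.WithPerRPCCredentials(...)
}
```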
 
-// ProtocolInfo provides information regarding the gRPC wire protocol version,
-// security protocol, security protocol version in use, server name, etc.
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+	// InvalidSecurityLevel indicates an invalid security level.
+	// The zero SecurityLevel value is invalid for backward compatibility.
+	InvalidSecurityLevel SecurityLevel = iota
+	// NoSecurity indicates a connection is insecure.
+	NoSecurity
+	// IntegrityOnly indicates a connection only provides integrity protection.
+	IntegrityOnly
+	// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+	PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+	switch s {
+	case NoSecurity:
+		return "NoSecurity"
+	case IntegrityOnly:
+		return "IntegrityOnly"
+	case PrivacyAndIntegrity:
+		return "PrivacyAndIntegrity"
+	}
+	return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+	SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
+func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
+	return c
+}
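
A sketch of the embedding described here; myAuthInfo and its fields are hypothetical:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/credentials"
)

// myAuthInfo is a toy AuthInfo for a custom transport credential. Embedding
// CommonAuthInfo lets gRPC discover the connection's SecurityLevel.
type myAuthInfo struct {
	credentials.CommonAuthInfo
	peerID string
}

func (myAuthInfo) AuthType() string { return "my-auth" }

func main() {
	ai := myAuthInfo{
		CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity},
		peerID:         "spiffe://example.org/peer",
	}
	fmt.Println(ai.AuthType(), ai.GetCommonAuthInfo().SecurityLevel) // my-auth PrivacyAndIntegrity
}
```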
+
+// ProtocolInfo provides static information regarding transport credentials.
 type ProtocolInfo struct {
 	// ProtocolVersion is the gRPC wire protocol version.
+	//
+	// Deprecated: this is unused by gRPC.
 	ProtocolVersion string
 	// SecurityProtocol is the security protocol in use.
 	SecurityProtocol string
-	// SecurityVersion is the security protocol version.
+	// SecurityVersion is the security protocol version.  It is a static version string from the
+	// credentials, not a value that reflects per-connection protocol negotiation.  To retrieve
+	// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
+	//
+	// Deprecated: please use Peer.AuthInfo.
 	SecurityVersion string
-	// ServerName is the user-configured server name.
+	// ServerName is the user-configured server name.  If set, this overrides
+	// the default :authority header used for all RPCs on the channel using the
+	// containing credentials, unless grpc.WithAuthority is set on the channel,
+	// in which case that setting will take precedence.
+	//
+	// This must be a valid `:authority` header according to
+	// [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
+	//
+	// Deprecated: Users should use grpc.WithAuthority to override the authority
+	// on a channel instead of configuring the credentials.
 	ServerName string
 }
 
 // AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo to provide
+// additional information about the credentials.
 type AuthInfo interface {
 	AuthType() string
 }
 
+// AuthorityValidator validates the authority used to override the `:authority`
+// header. This is an optional interface that implementations of AuthInfo can
+// implement if they support per-RPC authority overrides. It is invoked when the
+// application attempts to override the HTTP/2 `:authority` header using the
+// CallAuthority call option.
+type AuthorityValidator interface {
+	// ValidateAuthority checks the authority value used to override the
+	// `:authority` header. The authority parameter is the override value
+	// provided by the application via the CallAuthority option. This value
+	// typically corresponds to the server hostname or endpoint the RPC is
+	// targeting. It returns a non-nil error if validation fails.
+	ValidateAuthority(authority string) error
+}
+
 // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
 // and the caller should not close rawConn.
 var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
@@ -81,20 +151,30 @@
 // TransportCredentials defines the common interface for all the live gRPC wire
 // protocols and supported transport security protocols (e.g., TLS, SSL).
 type TransportCredentials interface {
-	// ClientHandshake does the authentication handshake specified by the corresponding
-	// authentication protocol on rawConn for clients. It returns the authenticated
-	// connection and the corresponding auth information about the connection.
-	// Implementations must use the provided context to implement timely cancellation.
-	// gRPC will try to reconnect if the error returned is a temporary error
-	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
-	// If the returned error is a wrapper error, implementations should make sure that
+	// ClientHandshake does the authentication handshake specified by the
+	// corresponding authentication protocol on rawConn for clients. It returns
+	// the authenticated connection and the corresponding auth information
+	// about the connection.  The auth information should embed CommonAuthInfo
+	// to return additional information about the credentials. Implementations
+	// must use the provided context to implement timely cancellation.  gRPC
+	// will try to reconnect if the error returned is a temporary error
+	// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).  If the
+	// returned error is a wrapper error, implementations should make sure that
 	// the error implements Temporary() to have the correct retry behaviors.
+	// Additionally, ClientHandshakeInfo data will be available via the context
+	// passed to this call.
+	//
+	// The second argument to this method is the `:authority` header value used
+	// while creating new streams on this connection after authentication
+	// succeeds. Implementations must use this as the server name during the
+	// authentication handshake.
 	//
 	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
 	ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
 	// ServerHandshake does the authentication handshake for servers. It returns
 	// the authenticated connection and the corresponding auth information about
-	// the connection.
+	// the connection. The auth information should embed CommonAuthInfo to return additional information
+	// about the credentials.
 	//
 	// If the returned net.Conn is closed, it MUST close the net.Conn provided.
 	ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
@@ -102,9 +182,18 @@
 	Info() ProtocolInfo
 	// Clone makes a copy of this TransportCredentials.
 	Clone() TransportCredentials
-	// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
-	// gRPC internals also use it to override the virtual hosting name if it is set.
-	// It must be called before dialing. Currently, this is only used by grpclb.
+	// OverrideServerName specifies the value used for the following:
+	//
+	// - verifying the hostname on the returned certificates
+	// - as SNI in the client's handshake to support virtual hosting
+	// - as the value for `:authority` header at stream creation time
+	//
+	// The provided string should be a valid `:authority` header according to
+	// [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
+	//
+	// Deprecated: this method is unused by gRPC.  Users should use
+	// grpc.WithAuthority to override the authority on a channel instead of
+	// configuring the credentials.
 	OverrideServerName(string) error
 }
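With OverrideServerName deprecated, the supported path is the channel-level option named in the comment above. A minimal sketch, assuming grpc.NewClient is available; the target address and authority are made up:

```go
package main

import (
	"crypto/x509"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	cp, err := x509.SystemCertPool()
	if err != nil {
		log.Fatalf("system cert pool: %v", err)
	}
	creds := credentials.NewClientTLSFromCert(cp, "")
	// WithAuthority sets the :authority header and the TLS server name,
	// replacing the deprecated TransportCredentials.OverrideServerName.
	conn, err := grpc.NewClient("dns:///10.0.0.5:443",
		grpc.WithTransportCredentials(creds),
		grpc.WithAuthority("api.example.com"))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}
```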
 
@@ -118,8 +207,18 @@
 //
 // This API is experimental.
 type Bundle interface {
+	// TransportCredentials returns the transport credentials from the Bundle.
+	//
+	// Implementations must return non-nil transport credentials. If transport
+	// security is not needed by the Bundle, implementations may choose to
+	// return insecure.NewCredentials().
 	TransportCredentials() TransportCredentials
+
+	// PerRPCCredentials returns the per-RPC credentials from the Bundle.
+	//
+	// May be nil if per-RPC credentials are not needed.
 	PerRPCCredentials() PerRPCCredentials
+
 	// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
 	// existing Bundle may cause races.
 	//
@@ -127,226 +226,18 @@
 	NewWithMode(mode string) (Bundle, error)
 }
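A hedged sketch of a Bundle satisfying the contract documented above; the type name is hypothetical:

```go
package bundledemo

import (
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/credentials/insecure"
)

// demoBundle pairs insecure transport credentials with no per-RPC
// credentials, matching the interface contract spelled out above.
type demoBundle struct{}

var _ credentials.Bundle = demoBundle{}

// TransportCredentials must return non-nil; when no transport security is
// needed, insecure.NewCredentials() is the documented choice.
func (demoBundle) TransportCredentials() credentials.TransportCredentials {
	return insecure.NewCredentials()
}

// PerRPCCredentials may be nil when per-RPC credentials are not needed.
func (demoBundle) PerRPCCredentials() credentials.PerRPCCredentials {
	return nil
}

// NewWithMode returns a fresh copy rather than mutating the receiver, since
// modifying the existing Bundle may cause races.
func (demoBundle) NewWithMode(string) (credentials.Bundle, error) {
	return demoBundle{}, nil
}
```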
 
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
-	State tls.ConnectionState
-}
-
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
-	return "tls"
-}
-
-// GetSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
-	v := &TLSChannelzSecurityValue{
-		StandardName: cipherSuiteLookup[t.State.CipherSuite],
-	}
-	// Currently there's no way to get LocalCertificate info from tls package.
-	if len(t.State.PeerCertificates) > 0 {
-		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
-	}
-	return v
-}
-
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
-	// TLS configuration
-	config *tls.Config
-}
-
-func (c tlsCreds) Info() ProtocolInfo {
-	return ProtocolInfo{
-		SecurityProtocol: "tls",
-		SecurityVersion:  "1.2",
-		ServerName:       c.config.ServerName,
-	}
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
-	// use local cfg to avoid clobbering ServerName if using multiple endpoints
-	cfg := cloneTLSConfig(c.config)
-	if cfg.ServerName == "" {
-		serverName, _, err := net.SplitHostPort(authority)
-		if err != nil {
-			// If the authority had no host port or if the authority cannot be parsed, use it as-is.
-			serverName = authority
-		}
-		cfg.ServerName = serverName
-	}
-	conn := tls.Client(rawConn, cfg)
-	errChannel := make(chan error, 1)
-	go func() {
-		errChannel <- conn.Handshake()
-	}()
-	select {
-	case err := <-errChannel:
-		if err != nil {
-			return nil, nil, err
-		}
-	case <-ctx.Done():
-		return nil, nil, ctx.Err()
-	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
-	conn := tls.Server(rawConn, c.config)
-	if err := conn.Handshake(); err != nil {
-		return nil, nil, err
-	}
-	return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
-	return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
-	c.config.ServerName = serverNameOverride
-	return nil
-}
-
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
-	for _, p := range ps {
-		if p == alpnProtoStrH2 {
-			return ps
-		}
-	}
-	ret := make([]string, 0, len(ps)+1)
-	ret = append(ret, ps...)
-	return append(ret, alpnProtoStrH2)
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
-	tc := &tlsCreds{cloneTLSConfig(c)}
-	tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
-	return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
-	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
-	b, err := ioutil.ReadFile(certFile)
-	if err != nil {
-		return nil, err
-	}
-	cp := x509.NewCertPool()
-	if !cp.AppendCertsFromPEM(b) {
-		return nil, fmt.Errorf("credentials: failed to append certificates")
-	}
-	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
-	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
-}
-
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
-	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
-	if err != nil {
-		return nil, err
-	}
-	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
-}
-
-// ChannelzSecurityInfo defines the interface that security protocols should implement
-// in order to provide security info to channelz.
-type ChannelzSecurityInfo interface {
-	GetSecurityValue() ChannelzSecurityValue
-}
-
-// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
-// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
-// and *OtherChannelzSecurityValue.
-type ChannelzSecurityValue interface {
-	isChannelzSecurityValue()
-}
-
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
-	ChannelzSecurityValue
-	StandardName      string
-	LocalCertificate  []byte
-	RemoteCertificate []byte
-}
-
-// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
-// from GetSecurityValue(), which contains protocol specific security info. Note
-// the Value field will be sent to users of channelz requesting channel info, and
-// thus sensitive info should better be avoided.
-type OtherChannelzSecurityValue struct {
-	ChannelzSecurityValue
-	Name  string
-	Value proto.Message
-}
-
-var cipherSuiteLookup = map[uint16]string{
-	tls.TLS_RSA_WITH_RC4_128_SHA:                "TLS_RSA_WITH_RC4_128_SHA",
-	tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA:           "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA:            "TLS_RSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_256_CBC_SHA:            "TLS_RSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:         "TLS_RSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:         "TLS_RSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:        "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:    "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA:          "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
-	tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:     "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:      "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:   "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
-	tls.TLS_FALLBACK_SCSV:                       "TLS_FALLBACK_SCSV",
-	tls.TLS_RSA_WITH_AES_128_CBC_SHA256:         "TLS_RSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:   "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
-	tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305:    "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
-	tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305:  "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
-	if cfg == nil {
-		return &tls.Config{}
-	}
-
-	return cfg.Clone()
-}
-
 // RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
 //
 // This API is experimental.
 type RequestInfo struct {
 	// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
 	Method string
+	// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+	AuthInfo AuthInfo
 }
 
-// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
 type requestInfoKey struct{}
 
 // RequestInfoFromContext extracts the RequestInfo from the context if it exists.
@@ -354,11 +245,94 @@
 // This API is experimental.
 func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
 	ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
-	return
+	return ri, ok
 }
 
-func init() {
-	ginternal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
-		return context.WithValue(ctx, requestInfoKey{}, ri)
+// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it.
+//
+// This RequestInfo will be accessible via RequestInfoFromContext.
+//
+// Intended to be used from tests for PerRPCCredentials implementations (that
+// often need to check the connection's SecurityLevel). Should not be used from
+// non-test code: the gRPC client already prepares a context with the correct
+// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata.
+//
+// This API is experimental.
+func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context {
+	return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
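A short test-only usage sketch of the new constructor; the package, method, and service names are invented:

```go
package creds_test

import (
	"context"
	"testing"

	"google.golang.org/grpc/credentials"
)

// TestGetRequestMetadata hand-constructs the context a PerRPCCredentials
// implementation would normally receive from gRPC.
func TestGetRequestMetadata(t *testing.T) {
	ri := credentials.RequestInfo{
		Method:   "/pkg.Service/Method",
		AuthInfo: nil, // a real test would supply e.g. credentials.TLSInfo{...}
	}
	ctx := credentials.NewContextWithRequestInfo(context.Background(), ri)

	got, ok := credentials.RequestInfoFromContext(ctx)
	if !ok || got.Method != ri.Method {
		t.Fatalf("RequestInfoFromContext = %+v, %v; want %+v, true", got, ok, ri)
	}
}
```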
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, the
+// resolver, the balancer, etc. Individual credential implementations control
+// the actual format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+	// Attributes contains the attributes for the address. It could be provided
+	// by gRPC, the resolver, the balancer, etc.
+	Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+	chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+	return chi
+}
+
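A hedged sketch of how a custom TransportCredentials might consume ClientHandshakeInfo; the wrapper type and the "region" attribute key are purely illustrative:

```go
package handshakedemo

import (
	"context"
	"net"

	"google.golang.org/grpc/credentials"
)

// myCreds wraps another TransportCredentials and inspects the per-address
// attributes that gRPC exposes through the handshake context.
type myCreds struct {
	credentials.TransportCredentials
}

func (c *myCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
	chi := credentials.ClientHandshakeInfoFromContext(ctx)
	if chi.Attributes != nil {
		// Attribute keys and values are defined by whoever populated them
		// (resolver, balancer); "region" is a made-up example key.
		_ = chi.Attributes.Value("region")
	}
	// Delegate the actual handshake to the wrapped credentials.
	return c.TransportCredentials.ClientHandshake(ctx, authority, rawConn)
}
```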
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns nil if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the
+// GetCommonAuthInfo() method, or 3) CommonAuthInfo.SecurityLevel has the invalid zero value; cases
+// 2) and 3) are permitted for backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
+	type internalInfo interface {
+		GetCommonAuthInfo() CommonAuthInfo
 	}
+	if ai == nil {
+		return errors.New("AuthInfo is nil")
+	}
+	if ci, ok := ai.(internalInfo); ok {
+		// CommonAuthInfo.SecurityLevel has an invalid value.
+		if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
+			return nil
+		}
+		if ci.GetCommonAuthInfo().SecurityLevel < level {
+			return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+		}
+	}
+	// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+	return nil
+}
+
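A hedged sketch of the intended caller: a PerRPCCredentials implementation that refuses to attach a token unless the connection provides privacy and integrity. The type and header names are invented:

```go
package tokendemo

import (
	"context"

	"google.golang.org/grpc/credentials"
)

// tokenCreds attaches a static bearer token, but only over connections
// whose security level is at least PrivacyAndIntegrity.
type tokenCreds struct{ token string }

func (c tokenCreds) GetRequestMetadata(ctx context.Context, _ ...string) (map[string]string, error) {
	ri, _ := credentials.RequestInfoFromContext(ctx)
	if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
		return nil, err // e.g. the connection is insecure
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (tokenCreds) RequireTransportSecurity() bool { return true }
```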
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+	GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+	isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
+// from GetSecurityValue(), which contains protocol specific security info. Note
+// the Value field will be sent to users of channelz requesting channel info, and
+// thus sensitive info should better be avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	Name  string
+	Value proto.Message
 }
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 0000000..93156c0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+	"context"
+	"net"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns a credentials which disables transport security.
+//
+// Note that using these credentials with per-RPC credentials which require
+// transport security is incompatible and will cause RPCs to fail.
+func NewCredentials() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+	return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+	return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+	return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+	return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+	credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+	return "insecure"
+}
+
+// ValidateAuthority allows any value to be overridden for the :authority
+// header.
+func (info) ValidateAuthority(string) error {
+	return nil
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC credentials.
+func NewBundle() credentials.Bundle {
+	return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+	return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns a nil implementation as the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+	return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+	return NewCredentials()
+}
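A usage sketch for the new package, assuming grpc.NewClient and a local plaintext server; the bundle form is interchangeable with passing the transport credentials directly via grpc.WithTransportCredentials:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Plaintext connection for local development; the per-RPC half of
	// the insecure bundle is simply nil.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithCredentialsBundle(insecure.NewBundle()))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}
```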
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
deleted file mode 100644
index 2f4472b..0000000
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// +build !appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains credentials-internal code.
-package internal
-
-import (
-	"net"
-	"syscall"
-)
-
-type sysConn = syscall.Conn
-
-// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
-// SyscallConn() (the method in interface syscall.Conn) is explicitly
-// implemented on this type,
-//
-// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
-// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
-// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
-// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
-// help here).
-type syscallConn struct {
-	net.Conn
-	// sysConn is a type alias of syscall.Conn. It's necessary because the name
-	// `Conn` collides with `net.Conn`.
-	sysConn
-}
-
-// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
-// implements syscall.Conn. rawConn will be used to support syscall, and newConn
-// will be used for read/write.
-//
-// This function returns newConn if rawConn doesn't implement syscall.Conn.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
-	sysConn, ok := rawConn.(syscall.Conn)
-	if !ok {
-		return newConn
-	}
-	return &syscallConn{
-		Conn:    newConn,
-		sysConn: sysConn,
-	}
-}
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
deleted file mode 100644
index d4346e9..0000000
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
-	"net"
-)
-
-// WrapSyscallConn returns newConn on appengine.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
-	return newConn
-}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 0000000..8277be7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,320 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"crypto/x509"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"os"
+
+	"google.golang.org/grpc/grpclog"
+	credinternal "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/envconfig"
+)
+
+const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
+
+var logger = grpclog.Component("credentials")
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+	State tls.ConnectionState
+	CommonAuthInfo
+	// This API is experimental.
+	SPIFFEID *url.URL
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+	return "tls"
+}
+
+// ValidateAuthority validates the provided authority being used to override the
+// :authority header by verifying it against the peer certificates. It returns a
+// non-nil error if the validation fails.
+func (t TLSInfo) ValidateAuthority(authority string) error {
+	var errs []error
+	for _, cert := range t.State.PeerCertificates {
+		var err error
+		if err = cert.VerifyHostname(authority); err == nil {
+			return nil
+		}
+		errs = append(errs, err)
+	}
+	return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...))
+}
+
+// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
+func cipherSuiteLookup(cipherSuiteID uint16) string {
+	for _, s := range tls.CipherSuites() {
+		if s.ID == cipherSuiteID {
+			return s.Name
+		}
+	}
+	for _, s := range tls.InsecureCipherSuites() {
+		if s.ID == cipherSuiteID {
+			return s.Name
+		}
+	}
+	return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+	v := &TLSChannelzSecurityValue{
+		StandardName: cipherSuiteLookup(t.State.CipherSuite),
+	}
+	// Currently there's no way to get LocalCertificate info from tls package.
+	if len(t.State.PeerCertificates) > 0 {
+		v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+	}
+	return v
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+	// TLS configuration
+	config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+	return ProtocolInfo{
+		SecurityProtocol: "tls",
+		SecurityVersion:  "1.2",
+		ServerName:       c.config.ServerName,
+	}
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+	// use local cfg to avoid clobbering ServerName if using multiple endpoints
+	cfg := credinternal.CloneTLSConfig(c.config)
+
+	serverName, _, err := net.SplitHostPort(authority)
+	if err != nil {
+		// If the authority had no host port or if the authority cannot be parsed, use it as-is.
+		serverName = authority
+	}
+	cfg.ServerName = serverName
+
+	conn := tls.Client(rawConn, cfg)
+	errChannel := make(chan error, 1)
+	go func() {
+		errChannel <- conn.Handshake()
+		close(errChannel)
+	}()
+	select {
+	case err := <-errChannel:
+		if err != nil {
+			conn.Close()
+			return nil, nil, err
+		}
+	case <-ctx.Done():
+		conn.Close()
+		return nil, nil, ctx.Err()
+	}
+
+	// The negotiated protocol can be either of the following:
+	// 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
+	//    it is the only protocol advertised by the client during the handshake.
+	//    The tls library ensures that the server chooses a protocol advertised
+	//    by the client.
+	// 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
+	//    for using HTTP/2 over TLS. We can terminate the connection immediately.
+	np := conn.ConnectionState().NegotiatedProtocol
+	if np == "" {
+		if envconfig.EnforceALPNEnabled {
+			conn.Close()
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
+		}
+		logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
+	}
+	tlsInfo := TLSInfo{
+		State: conn.ConnectionState(),
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+	conn := tls.Server(rawConn, c.config)
+	if err := conn.Handshake(); err != nil {
+		conn.Close()
+		return nil, nil, err
+	}
+	cs := conn.ConnectionState()
+	// The negotiated application protocol can be empty only if the client doesn't
+	// support ALPN. In such cases, we can close the connection since ALPN is required
+	// for using HTTP/2 over TLS.
+	if cs.NegotiatedProtocol == "" {
+		if envconfig.EnforceALPNEnabled {
+			conn.Close()
+			return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
+		} else if logger.V(2) {
+			logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+		}
+	}
+	tlsInfo := TLSInfo{
+		State: cs,
+		CommonAuthInfo: CommonAuthInfo{
+			SecurityLevel: PrivacyAndIntegrity,
+		},
+	}
+	id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+	if id != nil {
+		tlsInfo.SPIFFEID = id
+	}
+	return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) Clone() TransportCredentials {
+	return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+	c.config.ServerName = serverNameOverride
+	return nil
+}
+
+// The following cipher suites are forbidden for use with HTTP/2 by
+// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+var tls12ForbiddenCipherSuites = map[uint16]struct{}{
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA:         {},
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA:         {},
+	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:      {},
+	tls.TLS_RSA_WITH_AES_256_GCM_SHA384:      {},
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:   {},
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:   {},
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+	config := applyDefaults(c)
+	if config.GetConfigForClient != nil {
+		oldFn := config.GetConfigForClient
+		config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
+			cfgForClient, err := oldFn(hello)
+			if err != nil || cfgForClient == nil {
+				return cfgForClient, err
+			}
+			return applyDefaults(cfgForClient), nil
+		}
+	}
+	return &tlsCreds{config: config}
+}
+
+func applyDefaults(c *tls.Config) *tls.Config {
+	config := credinternal.CloneTLSConfig(c)
+	config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
+	// If the user did not configure a MinVersion and did not configure a
+	// MaxVersion < 1.2, use MinVersion=1.2, which is required by
+	// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
+	if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
+		config.MinVersion = tls.VersionTLS12
+	}
+	// If the user did not configure CipherSuites, use all "secure" cipher
+	// suites reported by the TLS package, but remove some explicitly forbidden
+	// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+	if config.CipherSuites == nil {
+		for _, cs := range tls.CipherSuites() {
+			if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
+				config.CipherSuites = append(config.CipherSuites, cs.ID)
+			}
+		}
+	}
+	return config
+}
+
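A hedged illustration of what applyDefaults means for callers of NewTLS: a sparse user config is upgraded before use, without mutating the original. The server name is made up:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"google.golang.org/grpc/credentials"
)

func main() {
	// The caller sets no MinVersion, NextProtos, or CipherSuites...
	creds := credentials.NewTLS(&tls.Config{ServerName: "api.example.com"})
	// ...yet the credentials operate with the defaults described above:
	// MinVersion TLS 1.2, "h2" in NextProtos, and only cipher suites
	// permitted by RFC 7540 Appendix A.
	fmt.Println(creds.Info().SecurityProtocol) // prints "tls"
}
```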
+// NewClientTLSFromCert constructs TLS credentials from the provided root
+// certificate authority certificate(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (eg: for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+//
+// serverNameOverride is for testing only. If set to a non-empty string, it will
+// override the virtual host name of authority (e.g. :authority header field) in
+// requests.  Users should use grpc.WithAuthority passed to grpc.NewClient to
+// override the authority of the client instead.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the provided root
+// certificate authority certificate file(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (eg: for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+//
+// serverNameOverride is for testing only. If set to a non-empty string, it will
+// override the virtual host name of authority (e.g. :authority header field) in
+// requests.  Users should use grpc.WithAuthority passed to grpc.NewClient to
+// override the authority of the client instead.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+	b, err := os.ReadFile(certFile)
+	if err != nil {
+		return nil, err
+	}
+	cp := x509.NewCertPool()
+	if !cp.AppendCertsFromPEM(b) {
+		return nil, fmt.Errorf("credentials: failed to append certificates")
+	}
+	return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+	return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
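A server-side usage sketch for the constructors above; the certificate/key paths and port are placeholders:

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Hypothetical file paths; server.crt/server.key are assumed to exist.
	creds, err := credentials.NewServerTLSFromFile("server.crt", "server.key")
	if err != nil {
		log.Fatalf("load TLS credentials: %v", err)
	}
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(grpc.Creds(creds))
	// Register services here, then serve.
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}
```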
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TLSChannelzSecurityValue struct {
+	ChannelzSecurityValue
+	StandardName      string
+	LocalCertificate  []byte
+	RemoteCertificate []byte
+}
diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/tls13.go
deleted file mode 100644
index ccbf35b..0000000
--- a/vendor/google.golang.org/grpc/credentials/tls13.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build go1.12
-
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import "crypto/tls"
-
-// This init function adds cipher suite constants only defined in Go 1.12.
-func init() {
-	cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
-	cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
-	cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
-}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 9f872df..7a5ac2e 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -20,23 +20,50 @@
 
 import (
 	"context"
-	"fmt"
 	"net"
+	"net/url"
 	"time"
 
 	"google.golang.org/grpc/backoff"
-	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/channelz"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/credentials/insecure"
 	"google.golang.org/grpc/internal"
 	internalbackoff "google.golang.org/grpc/internal/backoff"
-	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 )
 
+const (
+	// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+	defaultMaxCallAttempts = 5
+)
+
+func init() {
+	internal.AddGlobalDialOptions = func(opt ...DialOption) {
+		globalDialOptions = append(globalDialOptions, opt...)
+	}
+	internal.ClearGlobalDialOptions = func() {
+		globalDialOptions = nil
+	}
+	internal.AddGlobalPerTargetDialOptions = func(opt any) {
+		if ptdo, ok := opt.(perTargetDialOption); ok {
+			globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+		}
+	}
+	internal.ClearGlobalPerTargetDialOptions = func() {
+		globalPerTargetDialOptions = nil
+	}
+	internal.WithBinaryLogger = withBinaryLogger
+	internal.JoinDialOptions = newJoinDialOption
+	internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
+	internal.WithBufferPool = withBufferPool
+}
+
 // dialOptions configure a Dial call. dialOptions are set by the DialOption
 // values passed to Dial.
 type dialOptions struct {
@@ -46,33 +73,29 @@
 	chainUnaryInts  []UnaryClientInterceptor
 	chainStreamInts []StreamClientInterceptor
 
-	cp          Compressor
-	dc          Decompressor
-	bs          internalbackoff.Strategy
-	block       bool
-	insecure    bool
-	timeout     time.Duration
-	scChan      <-chan ServiceConfig
-	authority   string
-	copts       transport.ConnectOptions
-	callOptions []CallOption
-	// This is used by v1 balancer dial option WithBalancer to support v1
-	// balancer, and also by WithBalancerName dial option.
-	balancerBuilder balancer.Builder
-	// This is to support grpclb.
-	resolverBuilder             resolver.Builder
-	channelzParentID            int64
+	compressorV0                Compressor
+	dc                          Decompressor
+	bs                          internalbackoff.Strategy
+	block                       bool
+	returnLastError             bool
+	timeout                     time.Duration
+	authority                   string
+	binaryLogger                binarylog.Logger
+	copts                       transport.ConnectOptions
+	callOptions                 []CallOption
+	channelzParent              channelz.Identifier
 	disableServiceConfig        bool
 	disableRetry                bool
 	disableHealthCheck          bool
-	healthCheckFunc             internal.HealthChecker
 	minConnectTimeout           func() time.Duration
 	defaultServiceConfig        *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
 	defaultServiceConfigRawJSON *string
-	// This is used by ccResolverWrapper to backoff between successive calls to
-	// resolver.ResolveNow(). The user will have no need to configure this, but
-	// we need to be able to configure this in tests.
-	resolveNowBackoff func(int) time.Duration
+	resolvers                   []resolver.Builder
+	idleTimeout                 time.Duration
+	defaultScheme               string
+	maxCallAttempts             int
+	enableLocalDNSResolution    bool // Specifies if target hostnames should be resolved when proxying is enabled.
+	useProxy                    bool // Specifies if a server should be connected via proxy.
 }
 
 // DialOption configures how we set up the connection.
@@ -80,14 +103,42 @@
 	apply(*dialOptions)
 }
 
+var globalDialOptions []DialOption
+
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration to be set through the returned DialOption. The DialOption will
+// not take effect if it specifies a resolver builder, as that DialOption is
+// factored in while parsing the target.
+type perTargetDialOption interface {
+	// DialOption returns a Dial Option to apply.
+	DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
 // EmptyDialOption does not alter the dial configuration. It can be embedded in
 // another structure to build custom dial options.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type EmptyDialOption struct{}
 
 func (EmptyDialOption) apply(*dialOptions) {}
 
+type disableGlobalDialOptions struct{}
+
+func (disableGlobalDialOptions) apply(*dialOptions) {}
+
+// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
+// from applying the global DialOptions (set via AddGlobalDialOptions).
+func newDisableGlobalDialOptions() DialOption {
+	return &disableGlobalDialOptions{}
+}
+
 // funcDialOption wraps a function that modifies dialOptions into an
 // implementation of the DialOption interface.
 type funcDialOption struct {
@@ -104,13 +155,40 @@
 	}
 }
 
-// WithWriteBufferSize determines how much data can be batched before doing a
-// write on the wire. The corresponding memory allocation for this buffer will
-// be twice the size to keep syscalls low. The default value for this buffer is
-// 32KB.
+type joinDialOption struct {
+	opts []DialOption
+}
+
+func (jdo *joinDialOption) apply(do *dialOptions) {
+	for _, opt := range jdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinDialOption(opts ...DialOption) DialOption {
+	return &joinDialOption{opts: opts}
+}
+
+// WithSharedWriteBuffer allows reusing the per-connection transport write
+// buffer. If this option is set to true, every connection will release the
+// buffer after flushing the data on the wire.
 //
-// Zero will disable the write buffer such that each write will be on underlying
-// connection. Note: A Send call may not directly translate to a write.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithSharedWriteBuffer(val bool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.SharedWriteBuffer = val
+	})
+}
+
+// WithWriteBufferSize determines how much data can be batched before doing a
+// write on the wire. The default value for this buffer is 32KB.
+//
+// Zero or negative values will disable the write buffer such that each write
+// will be on the underlying connection. Note: A Send call may not directly
+// translate to a write.
 func WithWriteBufferSize(s int) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.WriteBufferSize = s
@@ -120,8 +198,9 @@
 // WithReadBufferSize lets you set the size of read buffer, this determines how
 // much data can be read at most for each read syscall.
 //
-// The default value for this buffer is 32KB. Zero will disable read buffer for
-// a connection so data framer can access the underlying conn directly.
+// The default value for this buffer is 32KB. Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
 func WithReadBufferSize(s int) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.ReadBufferSize = s
@@ -134,6 +213,7 @@
 func WithInitialWindowSize(s int32) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.InitialWindowSize = s
+		o.copts.StaticWindowSize = true
 	})
 }
 
@@ -143,6 +223,26 @@
 func WithInitialConnWindowSize(s int32) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.InitialConnWindowSize = s
+		o.copts.StaticWindowSize = true
+	})
+}
+
+// WithStaticStreamWindowSize returns a DialOption which sets the initial
+// stream window size to the value provided and disables dynamic flow control.
+func WithStaticStreamWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialWindowSize = s
+		o.copts.StaticWindowSize = true
+	})
+}
+
+// WithStaticConnWindowSize returns a DialOption which sets the initial
+// connection window size to the value provided and disables dynamic flow
+// control.
+func WithStaticConnWindowSize(s int32) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.InitialConnWindowSize = s
+		o.copts.StaticWindowSize = true
 	})
 }
 
@@ -179,7 +279,7 @@
 // Deprecated: use UseCompressor instead.  Will be supported throughout 1.x.
 func WithCompressor(cp Compressor) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.cp = cp
+		o.compressorV0 = cp
 	})
 }
 
@@ -199,67 +299,14 @@
 	})
 }
 
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName.  Will be removed in a future 1.x release.
-func WithBalancer(b Balancer) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = &balancerWrapperBuilder{
-			b: b,
-		}
-	})
-}
-
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
-// instead.  Will be removed in a future 1.x release.
-func WithBalancerName(balancerName string) DialOption {
-	builder := balancer.Get(balancerName)
-	if builder == nil {
-		panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
-	}
-	return newFuncDialOption(func(o *dialOptions) {
-		o.balancerBuilder = builder
-	})
-}
-
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.resolverBuilder = b
-	})
-}
-
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver or via
-// WithDefaultServiceConfig, as specified at
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md.  Will be
-// removed in a future 1.x release.
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.scChan = c
-	})
-}
-
-// WithConnectParams configures the dialer to use the provided ConnectParams.
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
 //
 // The backoff configuration specified as part of the ConnectParams overrides
 // all defaults specified in
 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
 // using the backoff.DefaultConfig as a base, in cases where you want to
 // override only a subset of the backoff configuration.
-//
-// This API is EXPERIMENTAL.
 func WithConnectParams(p ConnectParams) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.bs = internalbackoff.Exponential{Config: p.Backoff}
@@ -297,21 +344,78 @@
 	})
 }
 
-// WithBlock returns a DialOption which makes caller of Dial blocks until the
+// WithBlock returns a DialOption which makes callers of Dial block until the
 // underlying connection is up. Without this, Dial returns immediately and
 // connecting to the server happens in the background.
+//
+// Use of this feature is not recommended.  For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
 func WithBlock() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.block = true
 	})
 }
 
+// WithReturnConnectionError returns a DialOption which makes the client
+// connection return an error containing both the last connection error that
+// occurred and the context.DeadlineExceeded error. It implies WithBlock().
+//
+// Use of this feature is not recommended.  For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
+func WithReturnConnectionError() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.block = true
+		o.returnLastError = true
+	})
+}
+
 // WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause RPCs to fail.
+//
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
+// instead. Will be supported throughout 1.x.
 func WithInsecure() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.insecure = true
+		o.copts.TransportCredentials = insecure.NewCredentials()
+	})
+}
+
+// WithNoProxy returns a DialOption which disables the use of proxies for this
+// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithNoProxy() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.useProxy = false
+	})
+}
+
+// WithLocalDNSResolution forces local DNS name resolution even when a proxy is
+// specified in the environment.  By default, the server name is provided
+// directly to the proxy as part of the CONNECT handshake. This is ignored if
+// WithNoProxy is used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithLocalDNSResolution() DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.enableLocalDNSResolution = true
 	})
 }
 
@@ -336,7 +440,10 @@
 // the ClientConn.WithCreds. This should not be used together with
 // WithTransportCredentials.
 //
-// This API is experimental.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithCredentialsBundle(b credentials.Bundle) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.CredsBundle = b
@@ -346,8 +453,8 @@
 // WithTimeout returns a DialOption that configures a timeout for dialing a
 // ClientConn initially. This is valid if and only if WithBlock() is present.
 //
-// Deprecated: use DialContext and context.WithTimeout instead.  Will be
-// supported throughout 1.x.
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
 func WithTimeout(d time.Duration) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.timeout = d
@@ -358,17 +465,28 @@
 // connections. If FailOnNonTempDialError() is set to true, and an error is
 // returned by f, gRPC checks the error's Temporary() method to decide if it
 // should try to reconnect to the network address.
+//
+// Note that gRPC by default performs name resolution on the target passed to
+// NewClient. To bypass name resolution and cause the target string to be
+// passed directly to the dialer here instead, use the "passthrough" resolver
+// by specifying it in the target string, e.g. "passthrough:target".
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+// option to true from the Control field. For a concrete example of how to do
+// this, see internal.NetDialerWithTCPKeepalive().
+//
+// For more information, please see [issue 23459] in the Go GitHub repo.
+//
+// [issue 23459]: https://github.com/golang/go/issues/23459
 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.Dialer = f
 	})
 }
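A hedged sketch of the passthrough note above: a custom dialer combined with the "passthrough" resolver so the raw target string reaches the dialer unresolved. The address is illustrative:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		// addr is the raw target because the "passthrough" resolver skips
		// name resolution; a Unix socket path could be handled here too.
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	}
	conn, err := grpc.NewClient("passthrough:localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(dialer))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}
```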
 
-func init() {
-	internal.WithResolverBuilder = withResolverBuilder
-	internal.WithHealthCheckFunc = withHealthCheckFunc
-}
-
 // WithDialer returns a DialOption that specifies a function to use for dialing
 // network addresses. If FailOnNonTempDialError() is set to true, and an error
 // is returned by f, gRPC checks the error's Temporary() method to decide if it
@@ -390,7 +508,21 @@
 // all the RPCs and underlying network connections in this ClientConn.
 func WithStatsHandler(h stats.Handler) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.copts.StatsHandler = h
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
+	})
+}
+
+// withBinaryLogger returns a DialOption that specifies the binary logger for
+// this ClientConn.
+func withBinaryLogger(bl binarylog.Logger) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.binaryLogger = bl
 	})
 }
 
@@ -402,7 +534,12 @@
 // FailOnNonTempDialError only affects the initial dial, and does not do
 // anything useful unless you are also using WithBlock().
 //
-// This is an EXPERIMENTAL API.
+// Use of this feature is not recommended.  For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// This API may be changed or removed in a
+// later release.
 func FailOnNonTempDialError(f bool) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.copts.FailOnNonTempDialError = f
@@ -413,15 +550,17 @@
 // the RPCs.
 func WithUserAgent(s string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.copts.UserAgent = s
+		o.copts.UserAgent = s + " " + grpcUA
 	})
 }
 
 // WithKeepaliveParams returns a DialOption that specifies keepalive parameters
 // for the client transport.
+//
+// Keepalive is disabled by default.
 func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
 	if kp.Time < internal.KeepaliveMinPingTime {
-		grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+		logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
 		kp.Time = internal.KeepaliveMinPingTime
 	}
 	return newFuncDialOption(func(o *dialOptions) {
@@ -457,7 +596,7 @@
 }
 
 // WithChainStreamInterceptor returns a DialOption that specifies the chained
-// interceptor for unary RPCs. The first interceptor will be the outer most,
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
 // while the last interceptor will be the innermost wrapper around the real call.
 // All interceptors added by this method will be chained, and the interceptor
 // defined by WithStreamInterceptor will always be prepended to the chain.
@@ -468,8 +607,9 @@
 }
 
 // WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header. This value only works with WithInsecure and has no
-// effect if TransportCredentials are present.
+// :authority pseudo-header and as the server name in authentication handshake.
+// This overrides all other ways of setting authority on the channel, but can be
+// overridden per-call by using grpc.CallAuthority.
 func WithAuthority(a string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.authority = a
@@ -479,9 +619,14 @@
 // WithChannelzParentID returns a DialOption that specifies the channelz ID of
 // current ClientConn's parent. This function is used in nested channel creation
 // (e.g. grpclb dial).
-func WithChannelzParentID(id int64) DialOption {
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithChannelzParentID(c channelz.Identifier) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.channelzParentID = id
+		o.channelzParent = c
 	})
 }
 
@@ -500,11 +645,16 @@
 // WithDefaultServiceConfig returns a DialOption that configures the default
 // service config, which will be used in cases where:
 //
-// 1. WithDisableServiceConfig is also used.
-// 2. Resolver does not return a service config or if the resolver returns an
-//    invalid service config.
+// 1. WithDisableServiceConfig is also used, or
 //
-// This API is EXPERIMENTAL.
+// 2. The name resolver does not provide a service config or provides an
+// invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
 func WithDefaultServiceConfig(s string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.defaultServiceConfigRawJSON = &s
@@ -515,59 +665,61 @@
 // service config enables them.  This does not impact transparent retries, which
 // will happen automatically if no data is written to the wire or if the RPC is
 // unprocessed by the remote server.
-//
-// Retry support is currently disabled by default, but will be enabled by
-// default in the future.  Until then, it may be enabled by setting the
-// environment variable "GRPC_GO_RETRY" to "on".
-//
-// This API is EXPERIMENTAL.
 func WithDisableRetry() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.disableRetry = true
 	})
 }
 
+// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+type MaxHeaderListSizeDialOption struct {
+	MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
+	do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
+}
+
 // WithMaxHeaderListSize returns a DialOption that specifies the maximum
 // (uncompressed) size of header list that the client is prepared to accept.
 func WithMaxHeaderListSize(s uint32) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.copts.MaxHeaderListSize = &s
-	})
+	return MaxHeaderListSizeDialOption{
+		MaxHeaderListSize: s,
+	}
 }
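A side effect of moving from a closure to an exported struct type is that the option can be recognized again later by a type assertion; a small sketch under that assumption:

    package main

    import (
    	"fmt"

    	"google.golang.org/grpc"
    )

    func main() {
    	opts := []grpc.DialOption{grpc.WithMaxHeaderListSize(1 << 20)}
    	for _, o := range opts {
    		// The concrete type is exported, so it can be recovered from the
    		// DialOption interface; a closure-based option could not be
    		// inspected this way.
    		if m, ok := o.(grpc.MaxHeaderListSizeDialOption); ok {
    			fmt.Printf("max header list size: %d\n", m.MaxHeaderListSize)
    		}
    	}
    }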
 
 // WithDisableHealthCheck disables the LB channel health checking for all
 // SubConns of this ClientConn.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func WithDisableHealthCheck() DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
 		o.disableHealthCheck = true
 	})
 }
 
-// withHealthCheckFunc replaces the default health check function with the
-// provided one. It makes tests easier to change the health check function.
-//
-// For testing purpose only.
-func withHealthCheckFunc(f internal.HealthChecker) DialOption {
-	return newFuncDialOption(func(o *dialOptions) {
-		o.healthCheckFunc = f
-	})
-}
-
 func defaultDialOptions() dialOptions {
 	return dialOptions{
-		disableRetry:    !envconfig.Retry,
-		healthCheckFunc: internal.HealthCheckFunc,
 		copts: transport.ConnectOptions{
-			WriteBufferSize: defaultWriteBufSize,
 			ReadBufferSize:  defaultReadBufSize,
+			WriteBufferSize: defaultWriteBufSize,
+			UserAgent:       grpcUA,
+			BufferPool:      mem.DefaultBufferPool(),
 		},
-		resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
+		bs:                       internalbackoff.DefaultExponential,
+		idleTimeout:              30 * time.Minute,
+		defaultScheme:            "dns",
+		maxCallAttempts:          defaultMaxCallAttempts,
+		useProxy:                 true,
+		enableLocalDNSResolution: false,
 	}
 }
 
-// withGetMinConnectDeadline specifies the function that clientconn uses to
+// withMinConnectDeadline specifies the function that clientconn uses to
 // get minConnectDeadline. This can be used to make connection attempts happen
 // faster/slower.
 //
@@ -578,12 +730,68 @@
 	})
 }
 
-// withResolveNowBackoff specifies the function that clientconn uses to backoff
-// between successive calls to resolver.ResolveNow().
-//
-// For testing purpose only.
-func withResolveNowBackoff(f func(int) time.Duration) DialOption {
+// withDefaultScheme is used to allow Dial to use "passthrough" as the default
+// name resolver, while NewClient uses "dns" otherwise.
+func withDefaultScheme(s string) DialOption {
 	return newFuncDialOption(func(o *dialOptions) {
-		o.resolveNowBackoff = f
+		o.defaultScheme = s
+	})
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register.  They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.resolvers = append(o.resolvers, rs...)
+	})
+}
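A sketch of pairing WithResolvers with the manual resolver helper (the "example" scheme and address are hypothetical; resolver/manual is assumed available as in upstream grpc-go):

    package main

    import (
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    	"google.golang.org/grpc/resolver"
    	"google.golang.org/grpc/resolver/manual"
    )

    func main() {
    	// A manual resolver for the hypothetical "example" scheme, visible
    	// to this ClientConn only; the global registry is untouched.
    	rb := manual.NewBuilderWithScheme("example")
    	rb.InitialState(resolver.State{
    		Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
    	})
    	conn, err := grpc.NewClient("example:///service",
    		grpc.WithResolvers(rb),
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    	)
    	if err != nil {
    		log.Fatalf("grpc.NewClient failed: %v", err)
    	}
    	defer conn.Close()
    }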
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e. there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time, and idleness can be disabled by passing a timeout of zero.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.idleTimeout = d
+	})
+}
+
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
+//
+// A value of 5 will be used if this dial option is not set or n < 2.
+func WithMaxCallAttempts(n int) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		if n < 2 {
+			n = defaultMaxCallAttempts
+		}
+		o.maxCallAttempts = n
+	})
+}
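A hedged usage sketch combining the two options above (the target and the chosen values are illustrative only):

    package main

    import (
    	"log"
    	"time"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"
    )

    func main() {
    	conn, err := grpc.NewClient(
    		"dns:///example.com:50051", // hypothetical target
    		grpc.WithTransportCredentials(insecure.NewCredentials()),
    		grpc.WithIdleTimeout(5*time.Minute), // zero would disable idleness entirely
    		grpc.WithMaxCallAttempts(3),         // values below 2 fall back to the default of 5
    	)
    	if err != nil {
    		log.Fatalf("grpc.NewClient failed: %v", err)
    	}
    	defer conn.Close()
    }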
+
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
+	return newFuncDialOption(func(o *dialOptions) {
+		o.copts.BufferPool = bufferPool
 	})
 }
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 187adbb..e7b532b 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,6 +16,8 @@
  *
  */
 
+//go:generate ./scripts/regenerate.sh
+
 /*
 Package grpc implements an RPC system called gRPC.
 
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 195e844..11d0ae1 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -19,12 +19,17 @@
 // Package encoding defines the interface for the compressor and codec, and
 // functions to register and retrieve compressors and codecs.
 //
-// This package is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package encoding
 
 import (
 	"io"
 	"strings"
+
+	"google.golang.org/grpc/internal/grpcutil"
 )
 
 // Identity specifies the optional encoding for uncompressed streams.
@@ -33,6 +38,10 @@
 
 // Compressor is used for compressing and decompressing when sending or
 // receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression.  A return value of -1 indicates unknown size.
 type Compressor interface {
 	// Compress writes the data written to wc to w after compressing it.  If an
 	// error occurs while initializing the compressor, that error is returned
@@ -46,10 +55,6 @@
 	// coding header.  The result must be static; the result cannot change
 	// between calls.
 	Name() string
-	// EXPERIMENTAL: if a Compressor implements
-	// DecompressedSize(compressedBytes []byte) int, gRPC will call it
-	// to determine the size of the buffer allocated for the result of decompression.
-	// Return -1 to indicate unknown size.
 }
 
 var registeredCompressor = make(map[string]Compressor)
@@ -65,6 +70,9 @@
 // registered with the same name, the one registered last will take effect.
 func RegisterCompressor(c Compressor) {
 	registeredCompressor[c.Name()] = c
+	if !grpcutil.IsCompressorNameRegistered(c.Name()) {
+		grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
+	}
 }
 
 // GetCompressor returns Compressor for the given compressor name.
@@ -77,16 +85,16 @@
 // methods can be called from concurrent goroutines.
 type Codec interface {
 	// Marshal returns the wire format of v.
-	Marshal(v interface{}) ([]byte, error)
+	Marshal(v any) ([]byte, error)
 	// Unmarshal parses the wire format into v.
-	Unmarshal(data []byte, v interface{}) error
+	Unmarshal(data []byte, v any) error
 	// Name returns the name of the Codec implementation. The returned string
 	// will be used as part of content type in transmission.  The result must be
 	// static; the result cannot change between calls.
 	Name() string
 }
 
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
 
 // RegisterCodec registers the provided Codec for use with all gRPC clients and
 // servers.
@@ -100,7 +108,7 @@
 // more details.
 //
 // NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe.  If multiple Compressors are
+// an init() function), and is not thread-safe.  If multiple Codecs are
 // registered with the same name, the one registered last will take effect.
 func RegisterCodec(codec Codec) {
 	if codec == nil {
@@ -118,5 +126,6 @@
 //
 // The content-subtype is expected to be lowercase.
 func GetCodec(contentSubtype string) Codec {
-	return registeredCodecs[contentSubtype]
+	c, _ := registeredCodecs[contentSubtype].(Codec)
+	return c
 }
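As an illustration of the Codec contract above, a minimal JSON codec registered at init time (the codec name and the use of encoding/json are assumptions of this sketch, not part of the change):

    package jsoncodec

    import (
    	"encoding/json"

    	"google.golang.org/grpc/encoding"
    )

    type jsonCodec struct{}

    func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
    func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
    func (jsonCodec) Name() string                       { return "json" }

    func init() {
    	// Must run at initialization time; RegisterCodec is not thread-safe.
    	encoding.RegisterCodec(jsonCodec{})
    }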
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 0000000..074c5e2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+	"strings"
+
+	"google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+	// Marshal returns the wire format of v. The buffers in the returned
+	// [mem.BufferSlice] must have at least one reference each, which will be freed
+	// by gRPC when they are no longer needed.
+	Marshal(v any) (out mem.BufferSlice, err error)
+	// Unmarshal parses the wire format into v. Note that data will be freed as soon
+	// as this function returns. If the codec wishes to guarantee access to the data
+	// after this function, it must take its own reference that it frees when it is
+	// no longer needed.
+	Unmarshal(data mem.BufferSlice, v any) error
+	// Name returns the name of the Codec implementation. The returned string
+	// will be used as part of content type in transmission.  The result must be
+	// static; the result cannot change between calls.
+	Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2.  This
+// is case-insensitive, and is stored and looked up as lowercase.  If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe.  If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+	if codec == nil {
+		panic("cannot register a nil CodecV2")
+	}
+	if codec.Name() == "" {
+		panic("cannot register CodecV2 with empty string result for Name()")
+	}
+	contentSubtype := strings.ToLower(codec.Name())
+	registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+	c, _ := registeredCodecs[contentSubtype].(CodecV2)
+	return c
+}
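A corresponding CodecV2 sketch, reusing the mem helpers that appear elsewhere in this change (again, the JSON codec is purely illustrative):

    package jsoncodecv2

    import (
    	"encoding/json"

    	"google.golang.org/grpc/encoding"
    	"google.golang.org/grpc/mem"
    )

    type jsonCodecV2 struct{}

    func (jsonCodecV2) Marshal(v any) (mem.BufferSlice, error) {
    	b, err := json.Marshal(v)
    	if err != nil {
    		return nil, err
    	}
    	// SliceBuffer wraps a plain []byte as a mem.Buffer.
    	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
    }

    func (jsonCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
    	// Flatten the buffer slice, mirroring the proto codec below; the
    	// data must not be retained after this call returns.
    	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
    	defer buf.Free()
    	return json.Unmarshal(buf.ReadOnlyData(), v)
    }

    func (jsonCodecV2) Name() string { return "json" }

    func init() {
    	encoding.RegisterCodecV2(jsonCodecV2{})
    }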
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66b97a6..1ab874c 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -21,90 +21,92 @@
 package proto
 
 import (
-	"math"
-	"sync"
+	"fmt"
 
-	"github.com/golang/protobuf/proto"
 	"google.golang.org/grpc/encoding"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/protoadapt"
 )
 
 // Name is the name registered for the proto compressor.
 const Name = "proto"
 
 func init() {
-	encoding.RegisterCodec(codec{})
+	encoding.RegisterCodecV2(&codecV2{})
 }
 
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec for
+// gRPC.
+type codecV2 struct{}
 
-type cachedProtoBuffer struct {
-	lastMarshaledSize uint32
-	proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
-	if val > math.MaxInt32 {
-		return uint32(math.MaxInt32)
-	}
-	return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
-	protoMsg := v.(proto.Message)
-	newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
-	cb.SetBuf(newSlice)
-	cb.Reset()
-	if err := cb.Marshal(protoMsg); err != nil {
-		return nil, err
-	}
-	out := cb.Bytes()
-	cb.lastMarshaledSize = capToMaxInt32(len(out))
-	return out, nil
-}
-
-func (codec) Marshal(v interface{}) ([]byte, error) {
-	if pm, ok := v.(proto.Marshaler); ok {
-		// object can marshal itself, no need for buffer
-		return pm.Marshal()
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
+	vv := messageV2Of(v)
+	if vv == nil {
+		return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
 	}
 
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	out, err := marshal(v, cb)
+	// Important: if we remove this Size call then we cannot use
+	// UseCachedSize in MarshalOptions below.
+	size := proto.Size(vv)
 
-	// put back buffer and lose the ref to the slice
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return out, err
-}
+	// MarshalOptions with UseCachedSize allows reusing the result from the
+	// previous Size call. This is safe here because:
+	//
+	// 1. We just computed the size.
+	// 2. We assume the message is not being mutated concurrently.
+	//
+	// Important: If the proto.Size call above is removed, using UseCachedSize
+	// becomes unsafe and may lead to incorrect marshaling.
+	//
+	// For more details, see the doc of UseCachedSize:
+	// https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
+	marshalOptions := proto.MarshalOptions{UseCachedSize: true}
 
-func (codec) Unmarshal(data []byte, v interface{}) error {
-	protoMsg := v.(proto.Message)
-	protoMsg.Reset()
-
-	if pu, ok := protoMsg.(proto.Unmarshaler); ok {
-		// object can unmarshal itself, no need for buffer
-		return pu.Unmarshal(data)
-	}
-
-	cb := protoBufferPool.Get().(*cachedProtoBuffer)
-	cb.SetBuf(data)
-	err := cb.Unmarshal(protoMsg)
-	cb.SetBuf(nil)
-	protoBufferPool.Put(cb)
-	return err
-}
-
-func (codec) Name() string {
-	return Name
-}
-
-var protoBufferPool = &sync.Pool{
-	New: func() interface{} {
-		return &cachedProtoBuffer{
-			Buffer:            proto.Buffer{},
-			lastMarshaledSize: 16,
+	if mem.IsBelowBufferPoolingThreshold(size) {
+		buf, err := marshalOptions.Marshal(vv)
+		if err != nil {
+			return nil, err
 		}
-	},
+		data = append(data, mem.SliceBuffer(buf))
+	} else {
+		pool := mem.DefaultBufferPool()
+		buf := pool.Get(size)
+		if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
+			pool.Put(buf)
+			return nil, err
+		}
+		data = append(data, mem.NewBuffer(buf, pool))
+	}
+
+	return data, nil
+}
+
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
+	vv := messageV2Of(v)
+	if vv == nil {
+		return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
+	}
+
+	buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+	defer buf.Free()
+	// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+	//  really possible without a major overhaul of the proto package, but the
+	//  vtprotobuf library may be able to support this.
+	return proto.Unmarshal(buf.ReadOnlyData(), vv)
+}
+
+func messageV2Of(v any) proto.Message {
+	switch v := v.(type) {
+	case protoadapt.MessageV1:
+		return protoadapt.MessageV2Of(v)
+	case protoadapt.MessageV2:
+		return v
+	}
+
+	return nil
+}
+
+func (c *codecV2) Name() string {
+	return Name
 }
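The pooled branch in Marshal above follows the mem.BufferPool contract; a standalone sketch of that Get/NewBuffer/Free pattern (the function name is hypothetical):

    package main

    import "google.golang.org/grpc/mem"

    // pooledCopy sketches the contract used by the codec above: Get hands out
    // a *[]byte from the pool, NewBuffer wraps it with reference counting, and
    // Free returns the slice to the pool once the count drops to zero.
    func pooledCopy(payload []byte) mem.Buffer {
    	pool := mem.DefaultBufferPool()
    	buf := pool.Get(len(payload)) // *[]byte sized for the payload
    	copy(*buf, payload)
    	return mem.NewBuffer(buf, pool) // caller must Free() when done
    }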
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 0000000..ad75313
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+	"maps"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/stats"
+)
+
+func init() {
+	internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = stats.NewMetricSet()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+	// The name of this metric. This name must be unique across the whole binary
+	// (including any per call metrics). See
+	// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+	// for metric naming conventions.
+	Name string
+	// The description of this metric.
+	Description string
+	// The unit (e.g. entries, seconds) of this metric.
+	Unit string
+	// The required label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler.
+	Labels []string
+	// The optional label keys for this metric. These are intended to be
+	// attached to metrics emitted from a stats handler if configured.
+	OptionalLabels []string
+	// Whether this metric is on by default.
+	Default bool
+	// The type of metric. This is set by the metric registry, and not intended
+	// to be set by a component registering a metric.
+	Type MetricType
+	// Bounds are the bounds of this metric. This only applies to histogram
+	// metrics. If unset or set with length 0, stats handlers will fall back to
+	// default bounds.
+	Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+	MetricTypeIntCount MetricType = iota
+	MetricTypeFloatCount
+	MetricTypeIntHisto
+	MetricTypeFloatHisto
+	MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+	recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+	return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+	recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[string]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[string]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if the MetricDescriptor is not present.
+func DescriptorForMetric(metricName string) *MetricDescriptor {
+	return metricsRegistry[metricName]
+}
+
+func registerMetric(metricName string, def bool) {
+	if registeredMetrics[metricName] {
+		logger.Fatalf("metric %v already registered", metricName)
+	}
+	registeredMetrics[metricName] = true
+	if def {
+		DefaultMetrics = DefaultMetrics.Add(metricName)
+	}
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64CountHandle)(descPtr)
+}
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatCount
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeFloatHisto
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use to record data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+	registerMetric(descriptor.Name, descriptor.Default)
+	descriptor.Type = MetricTypeIntGauge
+	descPtr := &descriptor
+	metricsRegistry[descriptor.Name] = descPtr
+	return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+	oldDefaultMetrics := DefaultMetrics
+	oldRegisteredMetrics := registeredMetrics
+	oldMetricsRegistry := metricsRegistry
+
+	registeredMetrics = make(map[string]bool)
+	metricsRegistry = make(map[string]*MetricDescriptor)
+	// Seed the fresh maps from the old contents so previously registered
+	// metrics stay visible without mutating the originals.
+	maps.Copy(registeredMetrics, oldRegisteredMetrics)
+	maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+	return func() {
+		DefaultMetrics = oldDefaultMetrics
+		registeredMetrics = oldRegisteredMetrics
+		metricsRegistry = oldMetricsRegistry
+	}
+}
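A sketch of how a component registers and records one of these instruments; the metric name and label are invented for the example, and registration happens at package init time as the docs require:

    package example

    import (
    	estats "google.golang.org/grpc/experimental/stats"
    )

    // Package-level var initialization counts as init time.
    var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
    	Name:        "grpc.example.picks", // hypothetical metric name
    	Description: "Number of picks performed.",
    	Unit:        "pick",
    	Labels:      []string{"grpc.target"},
    	Default:     false,
    })

    // record increments the counter on the provided recorder, typically a
    // stats handler supplied by the channel.
    func record(r estats.MetricsRecorder, target string) {
    	pickCount.Record(r, 1, target)
    }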
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000..ee14236
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "google.golang.org/grpc/stats"
+
+// MetricsRecorder records on metrics derived from metric registry.
+type MetricsRecorder interface {
+	// RecordInt64Count records the measurement alongside labels on the int
+	// count associated with the provided handle.
+	RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+	// RecordFloat64Count records the measurement alongside labels on the float
+	// count associated with the provided handle.
+	RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+	// RecordInt64Histo records the measurement alongside labels on the int
+	// histo associated with the provided handle.
+	RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+	// RecordFloat64Histo records the measurement alongside labels on the float
+	// histo associated with the provided handle.
+	RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+	// RecordInt64Gauge records the measurement alongside labels on the int
+	// gauge associated with the provided handle.
+	RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
+
+// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
+// Metrics will be deleted in a future release.
+type Metrics = stats.MetricSet
+
+// Metric was replaced by direct usage of strings.
+type Metric = string
+
+// NewMetrics is an experimental legacy alias of the now-stable
+// stats.NewMetricSet.  NewMetrics will be deleted in a future release.
+func NewMetrics(metrics ...Metric) *Metrics {
+	return stats.NewMetricSet(metrics...)
+}
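For tests, a do-nothing MetricsRecorder can satisfy the interface; a minimal sketch:

    package example

    import estats "google.golang.org/grpc/experimental/stats"

    // nopRecorder is a do-nothing MetricsRecorder, handy as a test double.
    type nopRecorder struct{}

    func (nopRecorder) RecordInt64Count(*estats.Int64CountHandle, int64, ...string)       {}
    func (nopRecorder) RecordFloat64Count(*estats.Float64CountHandle, float64, ...string) {}
    func (nopRecorder) RecordInt64Histo(*estats.Int64HistoHandle, int64, ...string)       {}
    func (nopRecorder) RecordFloat64Histo(*estats.Float64HistoHandle, float64, ...string) {}
    func (nopRecorder) RecordInt64Gauge(*estats.Int64GaugeHandle, int64, ...string)       {}

    // Compile-time check that the interface is satisfied.
    var _ estats.MetricsRecorder = nopRecorder{}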
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 0000000..f1ae080
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,115 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+	"fmt"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+	name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...any) {
+	args = append([]any{"[" + string(c.name) + "]"}, args...)
+	FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...any) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...any) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...any) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...any) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...any) {
+	c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...any) {
+	c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...any) {
+	c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...any) {
+	c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...any) {
+	c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...any) {
+	c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...any) {
+	c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...any) {
+	c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+	return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a component
+// with the name already exists, the existing component is returned instead of
+// creating a new one. SetLoggerV2 will panic if it is called with a logger
+// created by Component.
+func Component(componentName string) DepthLoggerV2 {
+	if cData, ok := cache[componentName]; ok {
+		return cData
+	}
+	c := &componentData{componentName}
+	cache[componentName] = c
+	return c
+}
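Typical usage of Component, assuming a hypothetical package and component name:

    package picker

    import "google.golang.org/grpc/grpclog"

    // One component logger per package; repeated Component calls with the
    // same name return the cached instance.
    var logger = grpclog.Component("picker") // hypothetical component name

    func warnSlow(ms int64) {
    	if logger.V(2) { // gate verbose output on the configured verbosity
    		logger.Warningf("pick took %dms", ms)
    	}
    }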
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 874ea6d..db32010 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,88 +18,91 @@
 
 // Package grpclog defines logging for grpc.
 //
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
 
-import "os"
+import (
+	"os"
 
-var logger = newLoggerV2()
+	"google.golang.org/grpc/grpclog/internal"
+)
+
+func init() {
+	SetLoggerV2(newLoggerV2())
+}
 
 // V reports whether verbosity level l is at least the requested verbose level.
 func V(l int) bool {
-	return logger.V(l)
+	return internal.LoggerV2Impl.V(l)
 }
 
 // Info logs to the INFO log.
-func Info(args ...interface{}) {
-	logger.Info(args...)
+func Info(args ...any) {
+	internal.LoggerV2Impl.Info(args...)
 }
 
 // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
-func Infof(format string, args ...interface{}) {
-	logger.Infof(format, args...)
+func Infof(format string, args ...any) {
+	internal.LoggerV2Impl.Infof(format, args...)
 }
 
 // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
-func Infoln(args ...interface{}) {
-	logger.Infoln(args...)
+func Infoln(args ...any) {
+	internal.LoggerV2Impl.Infoln(args...)
 }
 
 // Warning logs to the WARNING log.
-func Warning(args ...interface{}) {
-	logger.Warning(args...)
+func Warning(args ...any) {
+	internal.LoggerV2Impl.Warning(args...)
 }
 
 // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
-func Warningf(format string, args ...interface{}) {
-	logger.Warningf(format, args...)
+func Warningf(format string, args ...any) {
+	internal.LoggerV2Impl.Warningf(format, args...)
 }
 
 // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...interface{}) {
-	logger.Warningln(args...)
+func Warningln(args ...any) {
+	internal.LoggerV2Impl.Warningln(args...)
 }
 
 // Error logs to the ERROR log.
-func Error(args ...interface{}) {
-	logger.Error(args...)
+func Error(args ...any) {
+	internal.LoggerV2Impl.Error(args...)
 }
 
 // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...interface{}) {
-	logger.Errorf(format, args...)
+func Errorf(format string, args ...any) {
+	internal.LoggerV2Impl.Errorf(format, args...)
 }
 
 // Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...interface{}) {
-	logger.Errorln(args...)
+func Errorln(args ...any) {
+	internal.LoggerV2Impl.Errorln(args...)
 }
 
 // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
 // It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
-	logger.Fatal(args...)
+func Fatal(args ...any) {
+	internal.LoggerV2Impl.Fatal(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
 }
 
 // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
 // It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
-	logger.Fatalf(format, args...)
+func Fatalf(format string, args ...any) {
+	internal.LoggerV2Impl.Fatalf(format, args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
 }
 
 // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
-func Fatalln(args ...interface{}) {
-	logger.Fatalln(args...)
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...any) {
+	internal.LoggerV2Impl.Fatalln(args...)
 	// Make sure fatal logs will exit.
 	os.Exit(1)
 }
@@ -107,20 +110,77 @@
 // Print prints to the logger. Arguments are handled in the manner of fmt.Print.
 //
 // Deprecated: use Info.
-func Print(args ...interface{}) {
-	logger.Info(args...)
+func Print(args ...any) {
+	internal.LoggerV2Impl.Info(args...)
 }
 
 // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
 //
 // Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
-	logger.Infof(format, args...)
+func Printf(format string, args ...any) {
+	internal.LoggerV2Impl.Infof(format, args...)
 }
 
 // Println prints to the logger. Arguments are handled in the manner of fmt.Println.
 //
 // Deprecated: use Infoln.
-func Println(args ...interface{}) {
-	logger.Infoln(args...)
+func Println(args ...any) {
+	internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Infoln(args...)
+	}
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Warningln(args...)
+	}
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Errorln(args...)
+	}
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+	if internal.DepthLoggerV2Impl != nil {
+		internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+	} else {
+		internal.LoggerV2Impl.Fatalln(args...)
+	}
+	os.Exit(1)
 }
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 0000000..59c03bc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000..e524fdd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Fatalln(args ...any)
+	Print(args ...any)
+	Printf(format string, args ...any)
+	Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+	Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+	l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+	l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+	l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+	// Returns true for all verbose levels.
+	return true
+}
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
new file mode 100644
index 0000000..ed90060
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -0,0 +1,267 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"log"
+	"os"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+	Info(args ...any)
+	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+	Infoln(args ...any)
+	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+	Infof(format string, args ...any)
+	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+	Warning(args ...any)
+	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+	Warningln(args ...any)
+	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+	Warningf(format string, args ...any)
+	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	Error(args ...any)
+	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	Errorln(args ...any)
+	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	Errorf(format string, args ...any)
+	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatal(args ...any)
+	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalln(args ...any)
+	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
+	// Implementations may also call os.Exit() with a non-zero exit code.
+	Fatalf(format string, args ...any)
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+	LoggerV2
+	// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	InfoDepth(depth int, args ...any)
+	// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	WarningDepth(depth int, args ...any)
+	// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	ErrorDepth(depth int, args ...any)
+	// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+	FatalDepth(depth int, args ...any)
+}
+
+const (
+	// infoLog indicates Info severity.
+	infoLog int = iota
+	// warningLog indicates Warning severity.
+	warningLog
+	// errorLog indicates Error severity.
+	errorLog
+	// fatalLog indicates Fatal severity.
+	fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// sprintf is fmt.Sprintf.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintf = fmt.Sprintf
+
+// sprint is fmt.Sprint.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprint = fmt.Sprint
+
+// sprintln is fmt.Sprintln.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintln = fmt.Sprintln
+
+// exit is os.Exit.
+// This var exists to make it possible to test functions calling os.Exit.
+var exit = os.Exit
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+	m          []*log.Logger
+	v          int
+	jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+	sevStr := severityName[severity]
+	if !g.jsonFormat {
+		g.m[severity].Output(2, sevStr+": "+s)
+		return
+	}
+	// TODO: we can also include the logging component, but that needs more
+	// (API) changes.
+	b, _ := json.Marshal(map[string]string{
+		"severity": sevStr,
+		"message":  s,
+	})
+	g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) printf(severity int, format string, args ...any) {
+	// Note the discard check is duplicated in each print func, rather than in
+	// output, to avoid the expensive Sprint calls.
+	// De-duplicating this by moving to output would be a significant performance regression!
+	if lg := g.m[severity]; lg.Writer() == io.Discard {
+		return
+	}
+	g.output(severity, sprintf(format, args...))
+}
+
+func (g *loggerT) print(severity int, v ...any) {
+	if lg := g.m[severity]; lg.Writer() == io.Discard {
+		return
+	}
+	g.output(severity, sprint(v...))
+}
+
+func (g *loggerT) println(severity int, v ...any) {
+	if lg := g.m[severity]; lg.Writer() == io.Discard {
+		return
+	}
+	g.output(severity, sprintln(v...))
+}
+
+func (g *loggerT) Info(args ...any) {
+	g.print(infoLog, args...)
+}
+
+func (g *loggerT) Infoln(args ...any) {
+	g.println(infoLog, args...)
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+	g.printf(infoLog, format, args...)
+}
+
+func (g *loggerT) Warning(args ...any) {
+	g.print(warningLog, args...)
+}
+
+func (g *loggerT) Warningln(args ...any) {
+	g.println(warningLog, args...)
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+	g.printf(warningLog, format, args...)
+}
+
+func (g *loggerT) Error(args ...any) {
+	g.print(errorLog, args...)
+}
+
+func (g *loggerT) Errorln(args ...any) {
+	g.println(errorLog, args...)
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+	g.printf(errorLog, format, args...)
+}
+
+func (g *loggerT) Fatal(args ...any) {
+	g.print(fatalLog, args...)
+	exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+	g.println(fatalLog, args...)
+	exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+	g.printf(fatalLog, format, args...)
+	exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+	return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+	// Verbosity sets the verbosity level of the logger.
+	Verbosity int
+	// FormatJSON controls whether the logger should output logs in JSON format.
+	FormatJSON bool
+}
+
+// combineLoggers returns a combined logger for both higher & lower severity logs,
+// or only one if the other is io.Discard.
+//
+// This uses io.Discard instead of io.MultiWriter when all loggers
+// are set to io.Discard. Both this package and the standard log package have
+// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
+// this writing).
+func combineLoggers(lower, higher io.Writer) io.Writer {
+	if lower == io.Discard {
+		return higher
+	}
+	if higher == io.Discard {
+		return lower
+	}
+	return io.MultiWriter(lower, higher)
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+	flag := log.LstdFlags
+	if c.FormatJSON {
+		flag = 0
+	}
+
+	warningW = combineLoggers(infoW, warningW)
+	errorW = combineLoggers(errorW, warningW)
+
+	fatalW := errorW
+
+	m := []*log.Logger{
+		log.New(infoW, "", flag),
+		log.New(warningW, "", flag),
+		log.New(errorW, "", flag),
+		log.New(fatalW, "", flag),
+	}
+	return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
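Through the public grpclog API this wiring is reached as below; the verbosity value and writer choices are illustrative:

    package main

    import (
    	"io"
    	"os"

    	"google.golang.org/grpc/grpclog"
    )

    func init() {
    	// Discard INFO, send WARNING and above to stderr, verbosity 2.
    	// SetLoggerV2 must run before any other gRPC function is called.
    	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(io.Discard, os.Stderr, os.Stderr, 2))
    }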
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index 097494f..4b20358 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,68 +18,17 @@
 
 package grpclog
 
+import "google.golang.org/grpc/grpclog/internal"
+
 // Logger mimics golang's standard Logger as an interface.
 //
 // Deprecated: use LoggerV2.
-type Logger interface {
-	Fatal(args ...interface{})
-	Fatalf(format string, args ...interface{})
-	Fatalln(args ...interface{})
-	Print(args ...interface{})
-	Printf(format string, args ...interface{})
-	Println(args ...interface{})
-}
+type Logger internal.Logger
 
 // SetLogger sets the logger that is used in grpc. Call only from
 // init() functions.
 //
 // Deprecated: use SetLoggerV2.
 func SetLogger(l Logger) {
-	logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
-	Logger
-}
-
-func (g *loggerWrapper) Info(args ...interface{}) {
-	g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...interface{}) {
-	g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...interface{}) {
-	g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...interface{}) {
-	g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...interface{}) {
-	g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
-	g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...interface{}) {
-	g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...interface{}) {
-	g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
-	g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
-	// Returns true for all verbose level.
-	return true
+	internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
 }
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index d493257..892dc13 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -20,77 +20,24 @@
 
 import (
 	"io"
-	"io/ioutil"
-	"log"
 	"os"
 	"strconv"
+	"strings"
+
+	"google.golang.org/grpc/grpclog/internal"
 )
 
 // LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
-	// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
-	Info(args ...interface{})
-	// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
-	Infoln(args ...interface{})
-	// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
-	Infof(format string, args ...interface{})
-	// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
-	Warning(args ...interface{})
-	// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
-	Warningln(args ...interface{})
-	// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
-	Warningf(format string, args ...interface{})
-	// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
-	Error(args ...interface{})
-	// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
-	Errorln(args ...interface{})
-	// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
-	Errorf(format string, args ...interface{})
-	// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
-	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
-	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatal(args ...interface{})
-	// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
-	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
-	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatalln(args ...interface{})
-	// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
-	// gRPC ensures that all Fatal logs will exit with os.Exit(1).
-	// Implementations may also call os.Exit() with a non-zero exit code.
-	Fatalf(format string, args ...interface{})
-	// V reports whether verbosity level l is at least the requested verbose level.
-	V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
 
 // SetLoggerV2 sets logger that is used in grpc to a V2 logger.
 // Not mutex-protected, should be called before any gRPC functions.
 func SetLoggerV2(l LoggerV2) {
-	logger = l
-}
-
-const (
-	// infoLog indicates Info severity.
-	infoLog int = iota
-	// warningLog indicates Warning severity.
-	warningLog
-	// errorLog indicates Error severity.
-	errorLog
-	// fatalLog indicates Fatal severity.
-	fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
-	infoLog:    "INFO",
-	warningLog: "WARNING",
-	errorLog:   "ERROR",
-	fatalLog:   "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
-	m []*log.Logger
-	v int
+	if _, ok := l.(*componentData); ok {
+		panic("cannot use component logger as grpclog logger")
+	}
+	internal.LoggerV2Impl = l
+	internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
 }
 
 // NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -99,27 +46,21 @@
 // Warning logs will be written to warningW and infoW.
 // Info logs will be written to infoW.
 func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
-	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
+	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
 }
 
 // NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
 // verbosity level.
 func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
-	var m []*log.Logger
-	m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
-	m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
-	ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
-	m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
-	m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
-	return &loggerT{m: m, v: v}
+	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
 }
 
 // newLoggerV2 creates a loggerV2 to be used as default logger.
 // All logs are written to stderr.
 func newLoggerV2() LoggerV2 {
-	errorW := ioutil.Discard
-	warningW := ioutil.Discard
-	infoW := ioutil.Discard
+	errorW := io.Discard
+	warningW := io.Discard
+	infoW := io.Discard
 
 	logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
 	switch logLevel {
@@ -136,60 +77,21 @@
 	if vl, err := strconv.Atoi(vLevel); err == nil {
 		v = vl
 	}
-	return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+
+	jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+	return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+		Verbosity:  v,
+		FormatJSON: jsonFormat,
+	})
 }
 
-func (g *loggerT) Info(args ...interface{}) {
-	g.m[infoLog].Print(args...)
-}
-
-func (g *loggerT) Infoln(args ...interface{}) {
-	g.m[infoLog].Println(args...)
-}
-
-func (g *loggerT) Infof(format string, args ...interface{}) {
-	g.m[infoLog].Printf(format, args...)
-}
-
-func (g *loggerT) Warning(args ...interface{}) {
-	g.m[warningLog].Print(args...)
-}
-
-func (g *loggerT) Warningln(args ...interface{}) {
-	g.m[warningLog].Println(args...)
-}
-
-func (g *loggerT) Warningf(format string, args ...interface{}) {
-	g.m[warningLog].Printf(format, args...)
-}
-
-func (g *loggerT) Error(args ...interface{}) {
-	g.m[errorLog].Print(args...)
-}
-
-func (g *loggerT) Errorln(args ...interface{}) {
-	g.m[errorLog].Println(args...)
-}
-
-func (g *loggerT) Errorf(format string, args ...interface{}) {
-	g.m[errorLog].Printf(format, args...)
-}
-
-func (g *loggerT) Fatal(args ...interface{}) {
-	g.m[fatalLog].Fatal(args...)
-	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) Fatalln(args ...interface{}) {
-	g.m[fatalLog].Fatalln(args...)
-	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) Fatalf(format string, args ...interface{}) {
-	g.m[fatalLog].Fatalf(format, args...)
-	// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) V(l int) bool {
-	return l <= g.v
-}
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 internal.DepthLoggerV2
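
The default logger above is driven entirely by environment variables (GRPC_GO_LOG_SEVERITY_LEVEL, GRPC_GO_LOG_VERBOSITY_LEVEL, and now GRPC_GO_LOG_FORMATTER=json); applications wanting explicit control can construct and install a LoggerV2 themselves. A hedged sketch, noting that SetLoggerV2 is not mutex-protected and must run before any gRPC activity:

	package main

	import (
		"io"
		"os"

		"google.golang.org/grpc/grpclog"
	)

	func main() {
		// Discard INFO and WARNING, send ERROR (and FATAL) to stderr,
		// and enable verbosity level 2.
		l := grpclog.NewLoggerV2WithVerbosity(io.Discard, io.Discard, os.Stderr, 2)
		grpclog.SetLoggerV2(l)

		if l.V(2) {
			l.Infoln("verbose diagnostics enabled") // discarded: infoW is io.Discard
		}
	}
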
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
deleted file mode 100644
index 7c7bcad..0000000
--- a/vendor/google.golang.org/grpc/install_gae.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-TMP=$(mktemp -d /tmp/sdk.XXX) \
-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
-&& unzip -q $TMP.zip -d $TMP \
-&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 8b73500..877d78f 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -23,41 +23,68 @@
 )
 
 // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
+type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
 
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
-// and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When a unary interceptor(s) is set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
 
 // Streamer is called by StreamClientInterceptor to create a ClientStream.
 type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
 
-// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
-// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When a stream
+// interceptor(s) is set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
 type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
 
 // UnaryServerInfo consists of various information about a unary RPC on
 // server side. All per-rpc information may be mutated by the interceptor.
 type UnaryServerInfo struct {
 	// Server is the service implementation the user provides. This is read-only.
-	Server interface{}
+	Server any
 	// FullMethod is the full RPC method string, i.e., /package.service/method.
 	FullMethod string
 }
 
 // UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
-// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
-// the status message of the RPC.
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+// execution of a unary RPC.
+//
+// If a UnaryHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type UnaryHandler func(ctx context.Context, req any) (any, error)
 
 // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
 // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
 // of the service method implementation. It is the responsibility of the interceptor to invoke handler
 // to complete the RPC.
-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
 
 // StreamServerInfo consists of various information about a streaming RPC on
 // server side. All per-rpc information may be mutated by the interceptor.
@@ -74,4 +101,4 @@
 // info contains all the information of this RPC the interceptor can operate on. And handler is the
 // service method implementation. It is the responsibility of the interceptor to invoke handler to
 // complete the RPC.
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
+type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
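
As the expanded comments describe, a client interceptor is installed as a DialOption and must call the supplied invoker (or streamer) for the RPC to proceed. A minimal unary sketch; the target address and use of insecure credentials are illustrative, and on releases predating grpc.NewClient the equivalent entry point is grpc.Dial:

	package main

	import (
		"context"
		"log"

		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials/insecure"
	)

	// logUnary forwards the call via invoker, then logs the outcome. The
	// error it returns must be compatible with the status package.
	func logUnary(ctx context.Context, method string, req, reply any,
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		err := invoker(ctx, method, req, reply, cc, opts...)
		log.Printf("rpc %s finished: err=%v", method, err)
		return err
	}

	func main() {
		conn, err := grpc.NewClient("localhost:50051",
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithUnaryInterceptor(logUnary))
		if err != nil {
			log.Fatal(err)
		}
		defer conn.Close()
	}
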
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 5fc0ee3..b6ae7f2 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -23,10 +23,12 @@
 package backoff
 
 import (
+	"context"
+	"errors"
+	rand "math/rand/v2"
 	"time"
 
 	grpcbackoff "google.golang.org/grpc/backoff"
-	"google.golang.org/grpc/internal/grpcrand"
 )
 
 // Strategy defines the methodology for backing off after a grpc connection
@@ -65,9 +67,43 @@
 	}
 	// Randomize backoff delays so that if a cluster of requests start at
 	// the same time, they won't operate in lockstep.
-	backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+	backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
 	if backoff < 0 {
 		return 0
 	}
 	return time.Duration(backoff)
 }
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to back off.
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+	attempt := 0
+	timer := time.NewTimer(0)
+	for ctx.Err() == nil {
+		select {
+		case <-timer.C:
+		case <-ctx.Done():
+			timer.Stop()
+			return
+		}
+
+		err := f()
+		if errors.Is(err, ErrResetBackoff) {
+			timer.Reset(0)
+			attempt = 0
+			continue
+		}
+		if err != nil {
+			return
+		}
+		timer.Reset(backoff(attempt))
+		attempt++
+	}
+}
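
The jitter line above scales the computed delay by a factor drawn uniformly from [1-Jitter, 1+Jitter], so a cluster of clients retrying together spreads out rather than operating in lockstep. A standalone illustration of that property:

	package main

	import (
		"fmt"
		rand "math/rand/v2"
		"time"
	)

	func main() {
		const jitter = 0.2
		base := float64(time.Second)
		// backoff *= 1 + Jitter*(rand.Float64()*2-1) yields a delay
		// uniformly distributed in [0.8s, 1.2s] for Jitter = 0.2.
		for i := 0; i < 3; i++ {
			d := base * (1 + jitter*(rand.Float64()*2-1))
			fmt.Println(time.Duration(d))
		}
	}
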
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
new file mode 100644
index 0000000..85540f8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package gracefulswitch
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+type lbConfig struct {
+	serviceconfig.LoadBalancingConfig
+
+	childBuilder balancer.Builder
+	childConfig  serviceconfig.LoadBalancingConfig
+}
+
+// ChildName returns the name of the child balancer of the gracefulswitch
+// Balancer.
+func ChildName(l serviceconfig.LoadBalancingConfig) string {
+	return l.(*lbConfig).childBuilder.Name()
+}
+
+// ParseConfig parses a child config list and returns a LB config for the
+// gracefulswitch Balancer.
+//
+// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
+// names + configs as the format of the "loadBalancingConfig" field in
+// ServiceConfig.  It returns a type that should be passed to
+// UpdateClientConnState in the BalancerConfig field.
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+	var lbCfg []map[string]json.RawMessage
+	if err := json.Unmarshal(cfg, &lbCfg); err != nil {
+		return nil, err
+	}
+	for i, e := range lbCfg {
+		if len(e) != 1 {
+			return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
+		}
+
+		var name string
+		var jsonCfg json.RawMessage
+		for name, jsonCfg = range e {
+		}
+
+		builder := balancer.Get(name)
+		if builder == nil {
+			// Skip unregistered balancer names.
+			continue
+		}
+
+		parser, ok := builder.(balancer.ConfigParser)
+		if !ok {
+			// This is a valid child with no config.
+			return &lbConfig{childBuilder: builder}, nil
+		}
+
+		cfg, err := parser.ParseConfig(jsonCfg)
+		if err != nil {
+			return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
+		}
+		return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
+	}
+
+	return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
+}
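
ParseConfig thus expects the same shape as the service config's "loadBalancingConfig" field: a JSON array of single-entry objects scanned in order, where the first policy name registered with balancer.Get wins and later entries act as fallbacks. A data-only sketch of that scan (the policy names are illustrative; round_robin is registered by default in grpc-go):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	func main() {
		cfg := json.RawMessage(`[
			{"some_unregistered_policy": {}},
			{"round_robin": {}}
		]`)
		var entries []map[string]json.RawMessage
		if err := json.Unmarshal(cfg, &entries); err != nil {
			panic(err)
		}
		for i, e := range entries {
			for name := range e { // each entry must hold exactly one policy
				fmt.Printf("candidate %d: %s\n", i, name)
			}
		}
	}
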
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
new file mode 100644
index 0000000..ba25b89
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -0,0 +1,409 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gracefulswitch implements a graceful switch load balancer.
+package gracefulswitch
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/base"
+	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+)
+
+var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
+var _ balancer.Balancer = (*Balancer)(nil)
+
+// NewBalancer returns a graceful switch Balancer.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
+	return &Balancer{
+		cc:    cc,
+		bOpts: opts,
+	}
+}
+
+// Balancer is a utility to gracefully switch from one balancer to
+// a new balancer. It implements the balancer.Balancer interface.
+type Balancer struct {
+	bOpts balancer.BuildOptions
+	cc    balancer.ClientConn
+
+	// mu protects the following fields and all fields within balancerCurrent
+	// and balancerPending. mu does not need to be held when calling into the
+	// child balancers, as all calls into these children happen only as a direct
+	// result of a call into the gracefulSwitchBalancer, which are also
+	// guaranteed to be synchronous. There is one exception: an UpdateState call
+	// from a child balancer when current and pending are populated can lead to
+	// calling Close() on the current. To prevent that racing with an
+	// UpdateSubConnState from the channel, we hold currentMu during Close and
+	// UpdateSubConnState calls.
+	mu              sync.Mutex
+	balancerCurrent *balancerWrapper
+	balancerPending *balancerWrapper
+	closed          bool // set to true when this balancer is closed
+
+	// currentMu must be locked before mu. This mutex guards against this
+	// sequence of events: UpdateSubConnState() called, finds the
+	// balancerCurrent, gives up lock, updateState comes in, causes Close() on
+	// balancerCurrent before the UpdateSubConnState is called on the
+	// balancerCurrent.
+	currentMu sync.Mutex
+}
+
+// swap swaps out the current lb with the pending lb and updates the ClientConn.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) swap() {
+	gsb.cc.UpdateState(gsb.balancerPending.lastState)
+	cur := gsb.balancerCurrent
+	gsb.balancerCurrent = gsb.balancerPending
+	gsb.balancerPending = nil
+	go func() {
+		gsb.currentMu.Lock()
+		defer gsb.currentMu.Unlock()
+		cur.Close()
+	}()
+}
+
+// Helper function that checks if the balancer passed in is current or pending.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
+	return bw == gsb.balancerCurrent || bw == gsb.balancerPending
+}
+
+// SwitchTo initializes the graceful switch process, which completes based on
+// connectivity state changes on the current/pending balancer. Thus, the switch
+// process is not complete when this method returns. This method must be called
+// synchronously alongside the rest of the balancer.Balancer methods this
+// Graceful Switch Balancer implements.
+//
+// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
+// to cause the Balancer to automatically change to the new child when necessary.
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+	_, err := gsb.switchTo(builder)
+	return err
+}
+
+func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
+	gsb.mu.Lock()
+	if gsb.closed {
+		gsb.mu.Unlock()
+		return nil, errBalancerClosed
+	}
+	bw := &balancerWrapper{
+		ClientConn: gsb.cc,
+		builder:    builder,
+		gsb:        gsb,
+		lastState: balancer.State{
+			ConnectivityState: connectivity.Connecting,
+			Picker:            base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+		},
+		subconns: make(map[balancer.SubConn]bool),
+	}
+	balToClose := gsb.balancerPending // nil if there is no pending balancer
+	if gsb.balancerCurrent == nil {
+		gsb.balancerCurrent = bw
+	} else {
+		gsb.balancerPending = bw
+	}
+	gsb.mu.Unlock()
+	balToClose.Close()
+	// This function takes a builder instead of a balancer because builder.Build
+	// can call back inline, and this utility needs to handle the callbacks.
+	newBalancer := builder.Build(bw, gsb.bOpts)
+	if newBalancer == nil {
+		// This is illegal and should never happen; we clear the balancerWrapper
+		// we were constructing if it happens to avoid a potential panic.
+		gsb.mu.Lock()
+		if gsb.balancerPending != nil {
+			gsb.balancerPending = nil
+		} else {
+			gsb.balancerCurrent = nil
+		}
+		gsb.mu.Unlock()
+		return nil, balancer.ErrBadResolverState
+	}
+
+	// This write doesn't need to take gsb.mu because this field never gets read
+	// or written to on any calls from the current or pending. Calls from grpc
+	// to this balancer are guaranteed to be called synchronously, so this
+	// bw.Balancer field will never be forwarded to until this SwitchTo()
+	// function returns.
+	bw.Balancer = newBalancer
+	return bw, nil
+}
+
+// Returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+	gsb.mu.Lock()
+	defer gsb.mu.Unlock()
+	if gsb.balancerPending != nil {
+		return gsb.balancerPending
+	}
+	return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+//
+// If the state's BalancerConfig is the config returned by a call to
+// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
+// the balancer indicated by the config before forwarding its config to it, if
+// necessary.
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	gsbCfg, ok := state.BalancerConfig.(*lbConfig)
+	if ok {
+		// Switch to the child in the config unless it is already active.
+		if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
+			var err error
+			balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
+			if err != nil {
+				return fmt.Errorf("could not switch to new child balancer: %w", err)
+			}
+		}
+		// Unwrap the child balancer's config.
+		state.BalancerConfig = gsbCfg.childConfig
+	}
+
+	if balToUpdate == nil {
+		return errBalancerClosed
+	}
+
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+	// The resolver data is only relevant to the most recent LB Policy.
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		gsb.cc.UpdateState(balancer.State{
+			ConnectivityState: connectivity.TransientFailure,
+			Picker:            base.NewErrPicker(err),
+		})
+		return
+	}
+	// Perform this call without gsb.mu to prevent deadlocks if the child calls
+	// back into the channel. The latest balancer can never be closed during a
+	// call from the channel, even without gsb.mu held.
+	balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, the subConns are
+// re-connected to manually.
+func (gsb *Balancer) ExitIdle() {
+	balToUpdate := gsb.latestBalancer()
+	if balToUpdate == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as the write to the
+	// Balancer field happens in SwitchTo, which completes before this can be
+	// called.
+	balToUpdate.ExitIdle()
+}
+
+// updateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
+	gsb.currentMu.Lock()
+	defer gsb.currentMu.Unlock()
+	gsb.mu.Lock()
+	// Forward update to the appropriate child.  Even if there is a pending
+	// balancer, the current balancer should continue to get SubConn updates to
+	// maintain the proper state while the pending is still connecting.
+	var balToUpdate *balancerWrapper
+	if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+		balToUpdate = gsb.balancerCurrent
+	} else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+		balToUpdate = gsb.balancerPending
+	}
+	if balToUpdate == nil {
+		// SubConn belonged to a stale lb policy that has not yet fully closed,
+		// or the balancer was already closed.
+		gsb.mu.Unlock()
+		return
+	}
+	if state.ConnectivityState == connectivity.Shutdown {
+		delete(balToUpdate.subconns, sc)
+	}
+	gsb.mu.Unlock()
+	if cb != nil {
+		cb(state)
+	} else {
+		balToUpdate.UpdateSubConnState(sc, state)
+	}
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+	gsb.updateSubConnState(sc, state, nil)
+}
+
+// Close closes any active child balancers.
+func (gsb *Balancer) Close() {
+	gsb.mu.Lock()
+	gsb.closed = true
+	currentBalancerToClose := gsb.balancerCurrent
+	gsb.balancerCurrent = nil
+	pendingBalancerToClose := gsb.balancerPending
+	gsb.balancerPending = nil
+	gsb.mu.Unlock()
+
+	currentBalancerToClose.Close()
+	pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/shutdown
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+	balancer.ClientConn
+	balancer.Balancer
+	gsb     *Balancer
+	builder balancer.Builder
+
+	lastState balancer.State
+	subconns  map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held.  Does not panic with a nil
+// receiver.
+func (bw *balancerWrapper) Close() {
+	// The receiver can be nil; callers invoke Close on the current/pending wrappers without checking that they were ever set.
+	if bw == nil {
+		return
+	}
+	// There is no need to protect this read with a mutex, as Close() is
+	// impossible to be called concurrently with the write in SwitchTo(). The
+	// callsites of Close() for this balancer in Graceful Switch Balancer will
+	// never be called until SwitchTo() returns.
+	bw.Balancer.Close()
+	bw.gsb.mu.Lock()
+	for sc := range bw.subconns {
+		sc.Shutdown()
+	}
+	bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+	// Hold the mutex for this entire call to ensure it cannot occur
+	// concurrently with other updateState() calls. This causes updates to
+	// lastState and calls to cc.UpdateState to happen atomically.
+	bw.gsb.mu.Lock()
+	defer bw.gsb.mu.Unlock()
+	bw.lastState = state
+
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		return
+	}
+
+	if bw == bw.gsb.balancerCurrent {
+		// In the case that the current balancer exits READY, and there is a pending
+		// balancer, you can forward the pending balancer's cached State up to
+		// ClientConn and swap the pending into the current. This is because there
+		// is no reason to gracefully switch from and keep using the old policy as
+		// the ClientConn is not connected to any backends.
+		if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+			bw.gsb.swap()
+			return
+		}
+		// Even if there is a pending balancer waiting to be gracefully switched to,
+		// continue to forward current balancer updates to the Client Conn. Ignoring
+		// state + picker from the current would cause undefined behavior/cause the
+		// system to behave incorrectly from the current LB policies perspective.
+		// Also, the current LB is still being used by grpc to choose SubConns per
+		// RPC, and thus should use the most updated form of the current balancer.
+		bw.gsb.cc.UpdateState(state)
+		return
+	}
+	// This method is now dealing with a state update from the pending balancer.
+	// If the current balancer is currently in a state other than READY, the new
+	// policy can be swapped into place immediately. This is because there is no
+	// reason to gracefully switch from and keep using the old policy as the
+	// ClientConn is not connected to any backends.
+	if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+		bw.gsb.swap()
+	}
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.gsb.mu.Unlock()
+
+	var sc balancer.SubConn
+	oldListener := opts.StateListener
+	opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
+	sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+	if err != nil {
+		return nil, err
+	}
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+		sc.Shutdown()
+		bw.gsb.mu.Unlock()
+		return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+	}
+	bw.subconns[sc] = true
+	bw.gsb.mu.Unlock()
+	return sc, nil
+}
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+	// Ignore ResolveNow requests from anything other than the most recent
+	// balancer, because older balancers were already removed from the config.
+	if bw != bw.gsb.latestBalancer() {
+		return
+	}
+	bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// Note: existing third party balancers may call this, so it must remain
+	// until RemoveSubConn is fully removed.
+	sc.Shutdown()
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+	bw.gsb.mu.Lock()
+	if !bw.gsb.balancerCurrentOrPending(bw) {
+		bw.gsb.mu.Unlock()
+		return
+	}
+	bw.gsb.mu.Unlock()
+	bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
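
The two UpdateState branches above reduce to a single swap rule: the pending balancer takes over as soon as it reports any state other than CONNECTING, or as soon as the current balancer is no longer READY (at which point there is nothing worth draining gracefully). A small restatement of that predicate:

	package main

	import (
		"fmt"

		"google.golang.org/grpc/connectivity"
	)

	// shouldSwap restates the pending-balancer rule from UpdateState above.
	func shouldSwap(pending, current connectivity.State) bool {
		return pending != connectivity.Connecting || current != connectivity.Ready
	}

	func main() {
		fmt.Println(shouldSwap(connectivity.Connecting, connectivity.Ready)) // false: keep draining current
		fmt.Println(shouldSwap(connectivity.Ready, connectivity.Ready))      // true: pending is usable
		fmt.Println(shouldSwap(connectivity.Connecting, connectivity.Idle))  // true: current lost readiness
	}
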
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
index 3a905d9..94a08d6 100644
--- a/vendor/google.golang.org/grpc/internal/balancerload/load.go
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -25,7 +25,7 @@
 // Parser converts loads from metadata into a concrete type.
 type Parser interface {
 	// Parse parses loads from metadata.
-	Parse(md metadata.MD) interface{}
+	Parse(md metadata.MD) any
 }
 
 var parser Parser
@@ -38,7 +38,7 @@
 }
 
 // Parse calls parser.Read().
-func Parse(md metadata.MD) interface{} {
+func Parse(md metadata.MD) any {
 	if parser == nil {
 		return nil
 	}
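
The Parser hook lets a balancer extract per-RPC load reports from trailing metadata. Since the interface lives in an internal package and is not importable from outside the grpc module, the type below is a hypothetical standalone illustration of the contract (the metadata key is invented):

	package main

	import (
		"fmt"

		"google.golang.org/grpc/metadata"
	)

	// weightParser reads a custom metadata key and returns it as the
	// opaque load value, mirroring the internal Parser contract.
	type weightParser struct{}

	func (weightParser) Parse(md metadata.MD) any {
		if v := md.Get("x-server-load"); len(v) > 0 {
			return v[0]
		}
		return nil
	}

	func main() {
		md := metadata.Pairs("x-server-load", "0.73")
		fmt.Println(weightParser{}.Parse(md))
	}
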
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 4062c02..755fdeb 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -25,38 +25,51 @@
 	"os"
 
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
 )
 
-// Logger is the global binary logger. It can be used to get binary logger for
-// each method.
+var grpclogLogger = grpclog.Component("binarylog")
+
+// Logger provides MethodLoggers for method names; the returned MethodLogger
+// exposes a Log call that takes a context.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
 type Logger interface {
-	getMethodLogger(methodName string) *MethodLogger
+	GetMethodLogger(methodName string) MethodLogger
 }
 
 // binLogger is the global binary logger for the binary. One of this should be
 // built at init time from the configuration (environment variable or flags).
 //
-// It is used to get a methodLogger for each individual method.
+// It is used to get a MethodLogger for each individual method.
 var binLogger Logger
 
-// SetLogger sets the binarg logger.
+// SetLogger sets the binary logger.
 //
 // Only call this at init time.
 func SetLogger(l Logger) {
 	binLogger = l
 }
 
-// GetMethodLogger returns the methodLogger for the given methodName.
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+	return binLogger
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
 //
 // methodName should be in the format of "/service/method".
 //
-// Each methodLogger returned by this method is a new instance. This is to
+// Each MethodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func GetMethodLogger(methodName string) *MethodLogger {
+func GetMethodLogger(methodName string) MethodLogger {
 	if binLogger == nil {
 		return nil
 	}
-	return binLogger.getMethodLogger(methodName)
+	return binLogger.GetMethodLogger(methodName)
 }
 
 func init() {
@@ -65,17 +78,29 @@
 	binLogger = NewLoggerFromConfigString(configStr)
 }
 
-type methodLoggerConfig struct {
+// MethodLoggerConfig contains the settings for the logging behavior of a
+// method logger. Currently, it holds the max length of the header and message.
+type MethodLoggerConfig struct {
 	// Max length of header and message.
-	hdr, msg uint64
+	Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+	All      *MethodLoggerConfig
+	Services map[string]*MethodLoggerConfig
+	Methods  map[string]*MethodLoggerConfig
+
+	Blacklist map[string]struct{}
 }
 
 type logger struct {
-	all      *methodLoggerConfig
-	services map[string]*methodLoggerConfig
-	methods  map[string]*methodLoggerConfig
+	config LoggerConfig
+}
 
-	blacklist map[string]struct{}
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+	return &logger{config: config}
 }
 
 // newEmptyLogger creates an empty logger. The map fields need to be filled in
@@ -85,83 +110,83 @@
 }
 
 // Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
-	if l.all != nil {
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+	if l.config.All != nil {
 		return fmt.Errorf("conflicting global rules found")
 	}
-	l.all = ml
+	l.config.All = ml
 	return nil
 }
 
 // Set method logger for "service/*".
 //
-// New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
-	if _, ok := l.services[service]; ok {
-		return fmt.Errorf("conflicting rules for service %v found", service)
+// New MethodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Services[service]; ok {
+		return fmt.Errorf("conflicting service rules for service %v found", service)
 	}
-	if l.services == nil {
-		l.services = make(map[string]*methodLoggerConfig)
+	if l.config.Services == nil {
+		l.config.Services = make(map[string]*MethodLoggerConfig)
 	}
-	l.services[service] = ml
+	l.config.Services[service] = ml
 	return nil
 }
 
 // Set method logger for "service/method".
 //
-// New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
-	if _, ok := l.blacklist[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+// New MethodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
-	if _, ok := l.methods[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
-	if l.methods == nil {
-		l.methods = make(map[string]*methodLoggerConfig)
+	if l.config.Methods == nil {
+		l.config.Methods = make(map[string]*MethodLoggerConfig)
 	}
-	l.methods[method] = ml
+	l.config.Methods[method] = ml
 	return nil
 }
 
 // Set blacklist method for "-service/method".
 func (l *logger) setBlacklist(method string) error {
-	if _, ok := l.blacklist[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+	if _, ok := l.config.Blacklist[method]; ok {
+		return fmt.Errorf("conflicting blacklist rules for method %v found", method)
 	}
-	if _, ok := l.methods[method]; ok {
-		return fmt.Errorf("conflicting rules for method %v found", method)
+	if _, ok := l.config.Methods[method]; ok {
+		return fmt.Errorf("conflicting method rules for method %v found", method)
 	}
-	if l.blacklist == nil {
-		l.blacklist = make(map[string]struct{})
+	if l.config.Blacklist == nil {
+		l.config.Blacklist = make(map[string]struct{})
 	}
-	l.blacklist[method] = struct{}{}
+	l.config.Blacklist[method] = struct{}{}
 	return nil
 }
 
-// getMethodLogger returns the methodLogger for the given methodName.
+// getMethodLogger returns the MethodLogger for the given methodName.
 //
 // methodName should be in the format of "/service/method".
 //
-// Each methodLogger returned by this method is a new instance. This is to
+// Each MethodLogger returned by this method is a new instance. This is to
 // generate sequence id within the call.
-func (l *logger) getMethodLogger(methodName string) *MethodLogger {
-	s, m, err := parseMethodName(methodName)
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+	s, m, err := grpcutil.ParseMethod(methodName)
 	if err != nil {
-		grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+		grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
 		return nil
 	}
-	if ml, ok := l.methods[s+"/"+m]; ok {
-		return newMethodLogger(ml.hdr, ml.msg)
+	if ml, ok := l.config.Methods[s+"/"+m]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
 	}
-	if _, ok := l.blacklist[s+"/"+m]; ok {
+	if _, ok := l.config.Blacklist[s+"/"+m]; ok {
 		return nil
 	}
-	if ml, ok := l.services[s]; ok {
-		return newMethodLogger(ml.hdr, ml.msg)
+	if ml, ok := l.config.Services[s]; ok {
+		return NewTruncatingMethodLogger(ml.Header, ml.Message)
 	}
-	if l.all == nil {
+	if l.config.All == nil {
 		return nil
 	}
-	return newMethodLogger(l.all.hdr, l.all.msg)
+	return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
 }
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index be30d0e..f9e80e2 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -24,23 +24,21 @@
 	"regexp"
 	"strconv"
 	"strings"
-
-	"google.golang.org/grpc/grpclog"
 )
 
 // NewLoggerFromConfigString reads the string and build a logger. It can be used
 // to build a new logger and assign it to binarylog.Logger.
 //
 // Example filter config strings:
-//  - "" Nothing will be logged
-//  - "*" All headers and messages will be fully logged.
-//  - "*{h}" Only headers will be logged.
-//  - "*{m:256}" Only the first 256 bytes of each message will be logged.
-//  - "Foo/*" Logs every method in service Foo
-//  - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
-//  - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
-//    /Foo/Bar, logs all headers and messages in every other method in service
-//    Foo.
+//   - "" Nothing will be logged
+//   - "*" All headers and messages will be fully logged.
+//   - "*{h}" Only headers will be logged.
+//   - "*{m:256}" Only the first 256 bytes of each message will be logged.
+//   - "Foo/*" Logs every method in service Foo
+//   - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+//   - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+//     /Foo/Bar, logs all headers and messages in every other method in service
+//     Foo.
 //
 // If two configs exist for one certain method or service, the one specified
 // later overrides the previous config.
@@ -52,14 +50,14 @@
 	methods := strings.Split(s, ",")
 	for _, method := range methods {
 		if err := l.fillMethodLoggerWithConfigString(method); err != nil {
-			grpclog.Warningf("failed to parse binary log config: %v", err)
+			grpclogLogger.Warningf("failed to parse binary log config: %v", err)
 			return nil
 		}
 	}
 	return l
 }
 
-// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
+// fillMethodLoggerWithConfigString parses config, creates a TruncatingMethodLogger, and adds
 // it to the right map in the logger.
 func (l *logger) fillMethodLoggerWithConfigString(config string) error {
 	// "" is invalid.
@@ -91,7 +89,7 @@
 		if err != nil {
 			return fmt.Errorf("invalid config: %q, %v", config, err)
 		}
-		if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 		return nil
@@ -106,11 +104,11 @@
 		return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
 	}
 	if m == "*" {
-		if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 	} else {
-		if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+		if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
 			return fmt.Errorf("invalid config: %v", err)
 		}
 	}
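
These filter strings reach NewLoggerFromConfigString from an environment variable at init time (GRPC_BINARY_LOG_FILTER in upstream grpc-go), as a comma-separated rule list where later rules override earlier ones. A small sketch of the outer splitting step only; the per-rule {h}/{m:N} suffix parsing is handled by fillMethodLoggerWithConfigString:

	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	func main() {
		// Illustrative filter: log all of service Foo, skip Foo/Bar, and
		// cap message payloads for Foo/Baz at 256 bytes.
		os.Setenv("GRPC_BINARY_LOG_FILTER", "Foo/*,-Foo/Bar,Foo/Baz{m:256}")
		for _, rule := range strings.Split(os.Getenv("GRPC_BINARY_LOG_FILTER"), ",") {
			fmt.Println("rule:", rule)
		}
	}
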
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 160f6e8..9669328 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -19,17 +19,18 @@
 package binarylog
 
 import (
+	"context"
 	"net"
 	"strings"
 	"sync/atomic"
 	"time"
 
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
-	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-	"google.golang.org/grpc/grpclog"
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/types/known/durationpb"
+	"google.golang.org/protobuf/types/known/timestamppb"
 )
 
 type callIDGenerator struct {
@@ -49,48 +50,67 @@
 var idGen callIDGenerator
 
 // MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type MethodLogger interface {
+	Log(context.Context, LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
 	headerMaxLen, messageMaxLen uint64
 
 	callID          uint64
 	idWithinCallGen *callIDGenerator
 
-	sink Sink // TODO(blog): make this plugable.
+	sink Sink // TODO(blog): make this pluggable.
 }
 
-func newMethodLogger(h, m uint64) *MethodLogger {
-	return &MethodLogger{
+// NewTruncatingMethodLogger returns a new truncating method logger.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+	return &TruncatingMethodLogger{
 		headerMaxLen:  h,
 		messageMaxLen: m,
 
 		callID:          idGen.next(),
 		idWithinCallGen: &callIDGenerator{},
 
-		sink: defaultSink, // TODO(blog): make it plugable.
+		sink: DefaultSink, // TODO(blog): make it pluggable.
 	}
 }
 
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal-only method for building the proto message out of the
+// input event. It's made public to enable other libraries to reuse as much
+// logic in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
 	m := c.toProto()
-	timestamp, _ := ptypes.TimestampProto(time.Now())
+	timestamp := timestamppb.Now()
 	m.Timestamp = timestamp
 	m.CallId = ml.callID
 	m.SequenceIdWithinCall = ml.idWithinCallGen.next()
 
 	switch pay := m.Payload.(type) {
-	case *pb.GrpcLogEntry_ClientHeader:
+	case *binlogpb.GrpcLogEntry_ClientHeader:
 		m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
-	case *pb.GrpcLogEntry_ServerHeader:
+	case *binlogpb.GrpcLogEntry_ServerHeader:
 		m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
-	case *pb.GrpcLogEntry_Message:
+	case *binlogpb.GrpcLogEntry_Message:
 		m.PayloadTruncated = ml.truncateMessage(pay.Message)
 	}
-
-	ml.sink.Write(m)
+	return m
 }
 
-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
+	ml.sink.Write(ml.Build(c))
+}
+
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
 	if ml.headerMaxLen == maxUInt {
 		return false
 	}
@@ -109,7 +129,7 @@
 			// but not counted towards the size limit.
 			continue
 		}
-		currentEntryLen := uint64(len(entry.Value))
+		currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
 		if currentEntryLen > bytesLimit {
 			break
 		}
@@ -120,7 +140,7 @@
 	return truncated
 }
 
-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
 	if ml.messageMaxLen == maxUInt {
 		return false
 	}
@@ -132,8 +152,11 @@
 }
 
 // LogEntryConfig represents the configuration for binary log entry.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
 type LogEntryConfig interface {
-	toProto() *pb.GrpcLogEntry
+	toProto() *binlogpb.GrpcLogEntry
 }
 
 // ClientHeader configs the binary log entry to be a ClientHeader entry.
@@ -147,27 +170,27 @@
 	PeerAddr net.Addr
 }
 
-func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
 	// This function doesn't need to set all the fields (e.g. seq ID). The Log
 	// function will set the fields when necessary.
-	clientHeader := &pb.ClientHeader{
+	clientHeader := &binlogpb.ClientHeader{
 		Metadata:   mdToMetadataProto(c.Header),
 		MethodName: c.MethodName,
 		Authority:  c.Authority,
 	}
 	if c.Timeout > 0 {
-		clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+		clientHeader.Timeout = durationpb.New(c.Timeout)
 	}
-	ret := &pb.GrpcLogEntry{
-		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
-		Payload: &pb.GrpcLogEntry_ClientHeader{
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
 			ClientHeader: clientHeader,
 		},
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	if c.PeerAddr != nil {
 		ret.Peer = addrToProto(c.PeerAddr)
@@ -183,19 +206,19 @@
 	PeerAddr net.Addr
 }
 
-func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
-	ret := &pb.GrpcLogEntry{
-		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
-		Payload: &pb.GrpcLogEntry_ServerHeader{
-			ServerHeader: &pb.ServerHeader{
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+		Payload: &binlogpb.GrpcLogEntry_ServerHeader{
+			ServerHeader: &binlogpb.ServerHeader{
 				Metadata: mdToMetadataProto(c.Header),
 			},
 		},
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	if c.PeerAddr != nil {
 		ret.Peer = addrToProto(c.PeerAddr)
@@ -208,10 +231,10 @@
 	OnClientSide bool
 	// Message can be a proto.Message or []byte. Other messages formats are not
 	// supported.
-	Message interface{}
+	Message any
 }
 
-func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
 	var (
 		data []byte
 		err  error
@@ -219,26 +242,26 @@
 	if m, ok := c.Message.(proto.Message); ok {
 		data, err = proto.Marshal(m)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
 		}
 	} else if b, ok := c.Message.([]byte); ok {
 		data = b
 	} else {
-		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
 	}
-	ret := &pb.GrpcLogEntry{
-		Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
-		Payload: &pb.GrpcLogEntry_Message{
-			Message: &pb.Message{
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
 				Length: uint32(len(data)),
 				Data:   data,
 			},
 		},
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	return ret
 }
@@ -248,10 +271,10 @@
 	OnClientSide bool
 	// Message can be a proto.Message or []byte. Other messages formats are not
 	// supported.
-	Message interface{}
+	Message any
 }
 
-func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
 	var (
 		data []byte
 		err  error
@@ -259,26 +282,26 @@
 	if m, ok := c.Message.(proto.Message); ok {
 		data, err = proto.Marshal(m)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
 		}
 	} else if b, ok := c.Message.([]byte); ok {
 		data = b
 	} else {
-		grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+		grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
 	}
-	ret := &pb.GrpcLogEntry{
-		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
-		Payload: &pb.GrpcLogEntry_Message{
-			Message: &pb.Message{
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+		Payload: &binlogpb.GrpcLogEntry_Message{
+			Message: &binlogpb.Message{
 				Length: uint32(len(data)),
 				Data:   data,
 			},
 		},
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	return ret
 }
@@ -288,15 +311,15 @@
 	OnClientSide bool
 }
 
-func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
-	ret := &pb.GrpcLogEntry{
-		Type:    pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
 		Payload: nil, // No payload here.
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	return ret
 }
@@ -312,10 +335,10 @@
 	PeerAddr net.Addr
 }
 
-func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
 	st, ok := status.FromError(c.Err)
 	if !ok {
-		grpclog.Info("binarylogging: error in trailer is not a status error")
+		grpclogLogger.Info("binarylogging: error in trailer is not a status error")
 	}
 	var (
 		detailsBytes []byte
@@ -325,13 +348,13 @@
 	if stProto != nil && len(stProto.Details) != 0 {
 		detailsBytes, err = proto.Marshal(stProto)
 		if err != nil {
-			grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+			grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
 		}
 	}
-	ret := &pb.GrpcLogEntry{
-		Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
-		Payload: &pb.GrpcLogEntry_Trailer{
-			Trailer: &pb.Trailer{
+	ret := &binlogpb.GrpcLogEntry{
+		Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+		Payload: &binlogpb.GrpcLogEntry_Trailer{
+			Trailer: &binlogpb.Trailer{
 				Metadata:      mdToMetadataProto(c.Trailer),
 				StatusCode:    uint32(st.Code()),
 				StatusMessage: st.Message(),
@@ -340,9 +363,9 @@
 		},
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	if c.PeerAddr != nil {
 		ret.Peer = addrToProto(c.PeerAddr)
@@ -355,15 +378,15 @@
 	OnClientSide bool
 }
 
-func (c *Cancel) toProto() *pb.GrpcLogEntry {
-	ret := &pb.GrpcLogEntry{
-		Type:    pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
+	ret := &binlogpb.GrpcLogEntry{
+		Type:    binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
 		Payload: nil,
 	}
 	if c.OnClientSide {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
 	} else {
-		ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+		ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
 	}
 	return ret
 }
@@ -374,21 +397,21 @@
 	switch key {
 	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
 		return true
-	case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
 		return false
 	}
 	return strings.HasPrefix(key, "grpc-")
 }
 
-func mdToMetadataProto(md metadata.MD) *pb.Metadata {
-	ret := &pb.Metadata{}
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
+	ret := &binlogpb.Metadata{}
 	for k, vv := range md {
 		if metadataKeyOmit(k) {
 			continue
 		}
 		for _, v := range vv {
 			ret.Entry = append(ret.Entry,
-				&pb.MetadataEntry{
+				&binlogpb.MetadataEntry{
 					Key:   k,
 					Value: []byte(v),
 				},
@@ -398,26 +421,26 @@
 	return ret
 }
 
-func addrToProto(addr net.Addr) *pb.Address {
-	ret := &pb.Address{}
+func addrToProto(addr net.Addr) *binlogpb.Address {
+	ret := &binlogpb.Address{}
 	switch a := addr.(type) {
 	case *net.TCPAddr:
 		if a.IP.To4() != nil {
-			ret.Type = pb.Address_TYPE_IPV4
+			ret.Type = binlogpb.Address_TYPE_IPV4
 		} else if a.IP.To16() != nil {
-			ret.Type = pb.Address_TYPE_IPV6
+			ret.Type = binlogpb.Address_TYPE_IPV6
 		} else {
-			ret.Type = pb.Address_TYPE_UNKNOWN
+			ret.Type = binlogpb.Address_TYPE_UNKNOWN
 			// Do not set address and port fields.
 			break
 		}
 		ret.Address = a.IP.String()
 		ret.IpPort = uint32(a.Port)
 	case *net.UnixAddr:
-		ret.Type = pb.Address_TYPE_UNIX
+		ret.Type = binlogpb.Address_TYPE_UNIX
 		ret.Address = a.String()
 	default:
-		ret.Type = pb.Address_TYPE_UNKNOWN
+		ret.Type = binlogpb.Address_TYPE_UNKNOWN
 	}
 	return ret
 }
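
Editor's note: the addrToProto switch above is the only place where the binary logger classifies peer addresses. Below is a minimal standalone sketch of the same classification logic; classifyAddr and the sample addresses are illustrative, not part of the gRPC API.

package main

import (
	"fmt"
	"net"
)

// classifyAddr mirrors addrToProto's type switch, returning the proto enum
// name instead of a binlogpb.Address value.
func classifyAddr(addr net.Addr) string {
	switch a := addr.(type) {
	case *net.TCPAddr:
		if a.IP.To4() != nil {
			return "TYPE_IPV4"
		} else if a.IP.To16() != nil {
			return "TYPE_IPV6"
		}
		return "TYPE_UNKNOWN" // address and port are left unset in this case
	case *net.UnixAddr:
		return "TYPE_UNIX"
	default:
		return "TYPE_UNKNOWN"
	}
}

func main() {
	fmt.Println(classifyAddr(&net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: 50051})) // TYPE_IPV4
	fmt.Println(classifyAddr(&net.TCPAddr{IP: net.ParseIP("::1"), Port: 50051}))       // TYPE_IPV6
	fmt.Println(classifyAddr(&net.UnixAddr{Name: "/tmp/grpc.sock", Net: "unix"}))      // TYPE_UNIX
}
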
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
deleted file mode 100644
index 113d40c..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
-  rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/binarylog/grpc_binarylog_v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
-popd
-rm -f ./grpc_binarylog_v1/*.pb.go
-cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
-
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index a2e7c34..9ea598b 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -21,45 +21,36 @@
 import (
 	"bufio"
 	"encoding/binary"
-	"fmt"
 	"io"
-	"io/ioutil"
 	"sync"
 	"time"
 
-	"github.com/golang/protobuf/proto"
-	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
-	"google.golang.org/grpc/grpclog"
+	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+	"google.golang.org/protobuf/proto"
 )
 
 var (
-	defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+	// DefaultSink is the sink to which binary logs are written. It's exported
+	// for the binarylog package to update.
+	DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
 )
 
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
-	if defaultSink != nil {
-		defaultSink.Close()
-	}
-	defaultSink = s
-}
-
 // Sink writes log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid a circular dependency.
 type Sink interface {
 	// Write will be called to write the log entry into the sink.
 	//
 	// It should be thread-safe so it can be called in parallel.
-	Write(*pb.GrpcLogEntry) error
+	Write(*binlogpb.GrpcLogEntry) error
 	// Close will be called when the Sink is replaced by a new Sink.
 	Close() error
 }
 
 type noopSink struct{}
 
-func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
-func (ns *noopSink) Close() error                 { return nil }
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error                       { return nil }
 
 // newWriterSink creates a binary log sink with the given writer.
 //
@@ -67,7 +58,7 @@
 // message is prefixed with a 4-byte big-endian unsigned integer as the length.
 //
 // No buffering is done; Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
 	return &writerSink{out: w}
 }
 
@@ -75,10 +66,11 @@
 	out io.Writer
 }
 
-func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
 	b, err := proto.Marshal(e)
 	if err != nil {
-		grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+		grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+		return err
 	}
 	hdr := make([]byte, 4)
 	binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@@ -93,25 +85,28 @@
 
 func (ws *writerSink) Close() error { return nil }
 
-type bufWriteCloserSink struct {
-	mu     sync.Mutex
-	closer io.Closer
-	out    *writerSink   // out is built on buf.
-	buf    *bufio.Writer // buf is kept for flush.
+type bufferedSink struct {
+	mu             sync.Mutex
+	closer         io.Closer
+	out            Sink          // out is built on buf.
+	buf            *bufio.Writer // buf is kept for flush.
+	flusherStarted bool
 
-	writeStartOnce sync.Once
-	writeTicker    *time.Ticker
+	writeTicker *time.Ticker
+	done        chan struct{}
 }
 
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
-	// Start the write loop when Write is called.
-	fs.writeStartOnce.Do(fs.startFlushGoroutine)
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
 	fs.mu.Lock()
+	defer fs.mu.Unlock()
+	if !fs.flusherStarted {
+		// Start the write loop when Write is called.
+		fs.startFlushGoroutine()
+		fs.flusherStarted = true
+	}
 	if err := fs.out.Write(e); err != nil {
-		fs.mu.Unlock()
 		return err
 	}
-	fs.mu.Unlock()
 	return nil
 }
 
@@ -119,44 +114,57 @@
 	bufFlushDuration = 60 * time.Second
 )
 
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
 	fs.writeTicker = time.NewTicker(bufFlushDuration)
 	go func() {
-		for range fs.writeTicker.C {
+		for {
+			select {
+			case <-fs.done:
+				return
+			case <-fs.writeTicker.C:
+			}
 			fs.mu.Lock()
-			fs.buf.Flush()
+			if err := fs.buf.Flush(); err != nil {
+				grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+			}
 			fs.mu.Unlock()
 		}
 	}()
 }
 
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
+	fs.mu.Lock()
+	defer fs.mu.Unlock()
 	if fs.writeTicker != nil {
 		fs.writeTicker.Stop()
 	}
-	fs.mu.Lock()
-	fs.buf.Flush()
-	fs.closer.Close()
-	fs.out.Close()
-	fs.mu.Unlock()
+	close(fs.done)
+	if err := fs.buf.Flush(); err != nil {
+		grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+	}
+	if err := fs.closer.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+	}
+	if err := fs.out.Close(); err != nil {
+		grpclogLogger.Warningf("failed to close the Sink: %v", err)
+	}
 	return nil
 }
 
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with a 4-byte big-endian unsigned integer as the length.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
 	bufW := bufio.NewWriter(o)
-	return &bufWriteCloserSink{
+	return &bufferedSink{
 		closer: o,
 		out:    newWriterSink(bufW),
 		buf:    bufW,
+		done:   make(chan struct{}),
 	}
 }
-
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
-	tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
-	if err != nil {
-		return nil, fmt.Errorf("failed to create temp file: %v", err)
-	}
-	return newBufWriteCloserSink(tempFile), nil
-}
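
Editor's note: writerSink frames each marshaled GrpcLogEntry with a 4-byte big-endian length prefix. A minimal reader sketch for that framing follows; the dump file path is hypothetical, and readEntries is not part of the gRPC API.

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"os"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
)

// readEntries decodes the length-prefixed stream produced by writerSink: a
// 4-byte big-endian length followed by that many bytes of GrpcLogEntry.
func readEntries(r io.Reader) ([]*binlogpb.GrpcLogEntry, error) {
	var entries []*binlogpb.GrpcLogEntry
	hdr := make([]byte, 4)
	for {
		if _, err := io.ReadFull(r, hdr); err == io.EOF {
			return entries, nil // clean end of stream
		} else if err != nil {
			return nil, err
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(r, buf); err != nil {
			return nil, err
		}
		entry := &binlogpb.GrpcLogEntry{}
		if err := proto.Unmarshal(buf, entry); err != nil {
			return nil, err
		}
		entries = append(entries, entry)
	}
}

func main() {
	f, err := os.Open("/tmp/grpcgo_binarylog.bin") // hypothetical dump file
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	entries, err := readEntries(f)
	fmt.Printf("read %d entries, err=%v\n", len(entries), err)
}
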
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
deleted file mode 100644
index 15dc780..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
-	"errors"
-	"strings"
-)
-
-// parseMethodName splits service and method from the input. It expects format
-// "/service/method".
-//
-// TODO: move to internal/grpcutil.
-func parseMethodName(methodName string) (service, method string, _ error) {
-	if !strings.HasPrefix(methodName, "/") {
-		return "", "", errors.New("invalid method name: should start with /")
-	}
-	methodName = methodName[1:]
-
-	pos := strings.LastIndex(methodName, "/")
-	if pos < 0 {
-		return "", "", errors.New("invalid method name: suffix /method is missing")
-	}
-	return methodName[:pos], methodName[pos+1:], nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 2cb3109..467392b 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -18,7 +18,10 @@
 // Package buffer provides an implementation of an unbounded buffer.
 package buffer
 
-import "sync"
+import (
+	"errors"
+	"sync"
+)
 
 // Unbounded is an implementation of an unbounded buffer which does not use
 // extra goroutines. This is typically used for passing updates from one entity
@@ -26,37 +29,52 @@
 //
 // All methods on this type are thread-safe and don't block on anything except
 // the underlying mutex used for synchronization.
+//
+// Unbounded supports storing values of any type by using a channel of `any`.
+// This means that a call to Put() incurs an extra memory allocation, and that
+// users need a type assertion while reading. For performance-critical code
+// paths, using Unbounded is strongly discouraged; defining a new type-specific
+// implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
 type Unbounded struct {
-	c       chan interface{}
+	c       chan any
+	closed  bool
+	closing bool
 	mu      sync.Mutex
-	backlog []interface{}
+	backlog []any
 }
 
 // NewUnbounded returns a new instance of Unbounded.
 func NewUnbounded() *Unbounded {
-	return &Unbounded{c: make(chan interface{}, 1)}
+	return &Unbounded{c: make(chan any, 1)}
 }
 
+var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
+
 // Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t interface{}) {
+func (b *Unbounded) Put(t any) error {
 	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.closing {
+		return errBufferClosed
+	}
 	if len(b.backlog) == 0 {
 		select {
 		case b.c <- t:
-			b.mu.Unlock()
-			return
+			return nil
 		default:
 		}
 	}
 	b.backlog = append(b.backlog, t)
-	b.mu.Unlock()
+	return nil
 }
 
-// Load sends the earliest buffered data, if any, onto the read channel
-// returned by Get(). Users are expected to call this every time they read a
+// Load sends the earliest buffered data, if any, onto the read channel returned
+// by Get(). Users are expected to call this every time they successfully read a
 // value from the read channel.
 func (b *Unbounded) Load() {
 	b.mu.Lock()
+	defer b.mu.Unlock()
 	if len(b.backlog) > 0 {
 		select {
 		case b.c <- b.backlog[0]:
@@ -64,8 +82,10 @@
 			b.backlog = b.backlog[1:]
 		default:
 		}
+	} else if b.closing && !b.closed {
+		b.closed = true
+		close(b.c)
 	}
-	b.mu.Unlock()
 }
 
 // Get returns a read channel on which values added to the buffer, via Put(),
@@ -73,6 +93,25 @@
 //
 // Upon reading a value from this channel, users are expected to call Load() to
 // send the next buffered value onto the channel if there is any.
-func (b *Unbounded) Get() <-chan interface{} {
+//
+// If the unbounded buffer is closed, the read channel returned by this method
+// is closed after all data is drained.
+func (b *Unbounded) Get() <-chan any {
 	return b.c
 }
+
+// Close closes the unbounded buffer. No subsequent data may be Put(), and the
+// channel returned from Get() will be closed after all the data is read and
+// Load() is called for the final time.
+func (b *Unbounded) Close() {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+	if b.closing {
+		return
+	}
+	b.closing = true
+	if len(b.backlog) == 0 {
+		b.closed = true
+		close(b.c)
+	}
+}
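
Editor's note: the Put/Load/Close contract above is easy to misuse, so here is a minimal consumer sketch. It assumes the caller lives inside the grpc module (internal/buffer is not importable from outside), and workItem is a hypothetical payload type.

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer" // importable only from within the grpc module
)

type workItem struct{ id int }

// drain shows the intended read pattern: receive a value, then call Load() to
// release the next backlogged value. Once Close() is called and the backlog
// empties, the channel returned by Get() is closed and the loop ends.
func drain(b *buffer.Unbounded, done chan<- struct{}) {
	for v := range b.Get() {
		b.Load()                            // must follow every successful read
		fmt.Println("got", v.(workItem).id) // values are `any`; assert back
	}
	close(done)
}

func main() {
	b := buffer.NewUnbounded()
	done := make(chan struct{})
	go drain(b, done)
	for i := 0; i < 3; i++ {
		_ = b.Put(workItem{id: i}) // Put only errors after Close()
	}
	b.Close()
	<-done
}
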
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
new file mode 100644
index 0000000..3ec6627
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+
+	"google.golang.org/grpc/connectivity"
+)
+
+// Channel represents a channel within channelz, which includes metrics and
+// internal channelz data, such as channelz id, child list, etc.
+type Channel struct {
+	Entity
+	// ID is the channelz id of this channel.
+	ID int64
+	// RefName is the human readable reference string of this channel.
+	RefName string
+
+	closeCalled bool
+	nestedChans map[int64]string
+	subChans    map[int64]string
+	Parent      *Channel
+	trace       *ChannelTrace
+	// traceRefCount is the number of trace events that reference this channel.
+	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
+	traceRefCount int32
+
+	// ChannelMetrics holds connectivity state, target and call metrics for the
+	// channel within channelz.
+	ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+// String returns a string representation of the Channel, including its parent
+// entity and ID.
+func (c *Channel) String() string {
+	if c.Parent == nil {
+		return fmt.Sprintf("Channel #%d", c.ID)
+	}
+	return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+	return c.ID
+}
+
+// SubChans returns a copy of the map of sub-channels associated with the
+// Channel.
+func (c *Channel) SubChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.subChans)
+}
+
+// NestedChans returns a copy of the map of nested channels associated with the
+// Channel.
+func (c *Channel) NestedChans() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(c.nestedChans)
+}
+
+// Trace returns a copy of the Channel's trace data.
+func (c *Channel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return c.trace.copy()
+}
+
+// ChannelMetrics holds connectivity state, target and call metrics for the
+// channel within channelz.
+type ChannelMetrics struct {
+	// The current connectivity state of the channel.
+	State atomic.Pointer[connectivity.State]
+	// The target this channel originally tried to connect to. May be absent.
+	Target atomic.Pointer[string]
+	// The number of calls started on the channel.
+	CallsStarted atomic.Int64
+	// The number of calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the channel.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c.  For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+	c.State.Store(o.State.Load())
+	c.Target.Store(o.Target.Load())
+	c.CallsStarted.Store(o.CallsStarted.Load())
+	c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	c.CallsFailed.Store(o.CallsFailed.Load())
+	c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool {
+	oc, ok := o.(*ChannelMetrics)
+	if !ok {
+		return false
+	}
+	if (c.State.Load() == nil) != (oc.State.Load() == nil) {
+		return false
+	}
+	if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
+		return false
+	}
+	if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
+		return false
+	}
+	if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
+		return false
+	}
+	return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
+		c.CallsFailed.Load() == oc.CallsFailed.Load() &&
+		c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
+		c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
+}
+
+func strFromPointer(s *string) string {
+	if s == nil {
+		return ""
+	}
+	return *s
+}
+
+// String returns a string representation of the ChannelMetrics, including its
+// state, target, and call metrics.
+func (c *ChannelMetrics) String() string {
+	return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
+		c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
+	)
+}
+
+// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
+// specified initial values for testing purposes.
+func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
+	c := &ChannelMetrics{}
+	c.State.Store(&state)
+	c.Target.Store(&target)
+	c.CallsStarted.Store(started)
+	c.CallsSucceeded.Store(succeeded)
+	c.CallsFailed.Store(failed)
+	c.LastCallStartedTimestamp.Store(timestamp)
+	return c
+}
+
+func (c *Channel) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *SubChannel:
+		c.subChans[id] = v.RefName
+	case *Channel:
+		c.nestedChans[id] = v.RefName
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+	}
+}
+
+func (c *Channel) deleteChild(id int64) {
+	delete(c.subChans, id)
+	delete(c.nestedChans, id)
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) triggerDelete() {
+	c.closeCalled = true
+	c.deleteSelfIfReady()
+}
+
+func (c *Channel) getParentID() int64 {
+	if c.Parent == nil {
+		return -1
+	}
+	return c.Parent.ID
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry
+// relation tree, i.e. to remove the channel reference from its parent's child
+// list.
+//
+// A channel can only be deleted from the tree when removal of the
+// corresponding gRPC object has been invoked and the channel has no children
+// left.
+//
+// The returned boolean indicates whether the channel was successfully deleted
+// from the tree.
+func (c *Channel) deleteSelfFromTree() (deleted bool) {
+	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+		return false
+	}
+	// not top channel
+	if c.Parent != nil {
+		c.Parent.deleteChild(c.ID)
+	}
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the
+// map, which means deleting the channel from channelz's tracking entirely.
+// Users can no longer use its id to query the channel, and its memory will be
+// garbage collected.
+//
+// The channel's trace reference count must be 0 for it to be deleted from the
+// map. The channel tracing gRFC specifies that as long as some other trace
+// still references an entity, the trace of the referenced entity must not be
+// deleted. To release the resources allocated by grpc, the reference to the
+// grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from the map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+	return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+//     parent's child list.
+//  2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+//     will return entry not found error.
+func (c *Channel) deleteSelfIfReady() {
+	if !c.deleteSelfFromTree() {
+		return
+	}
+	if !c.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(c.ID)
+	c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+	return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+	atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&c.traceRefCount)
+	return int(i)
+}
+
+func (c *Channel) getRefName() string {
+	return c.RefName
+}
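
Editor's note: ChannelMetrics keeps every field atomic so hot RPC paths never take a lock. A small sketch of the same pattern (requires Go 1.19+ for atomic.Int64 and atomic.Pointer; callMetrics and the target string are hypothetical):

package main

import (
	"fmt"
	"sync/atomic"
)

// callMetrics mirrors the lock-free counter pattern used by ChannelMetrics:
// plain atomic fields, updated without a mutex, read back with Load().
type callMetrics struct {
	started   atomic.Int64
	succeeded atomic.Int64
	target    atomic.Pointer[string]
}

func main() {
	m := &callMetrics{}
	t := "dns:///example.com:443" // hypothetical target
	m.target.Store(&t)
	m.started.Add(1)
	m.succeeded.Add(1)
	fmt.Printf("target=%s started=%d succeeded=%d\n",
		*m.target.Load(), m.started.Load(), m.succeeded.Load())
}
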
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 0000000..64c7919
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,395 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sort"
+	"sync"
+	"time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+	// addChild adds a child e, whose channelz id is id, to the child list.
+	addChild(id int64, e entry)
+	// deleteChild deletes the child with channelz id id from the child list.
+	deleteChild(id int64)
+	// triggerDelete tries to delete self from the channelz database. However,
+	// if the child list is not empty, deletion from the database is on hold
+	// until the last child is deleted.
+	triggerDelete()
+	// deleteSelfIfReady checks whether triggerDelete() has been called before
+	// and whether the child list is now empty. If both conditions are met, it
+	// deletes the entry from the database.
+	deleteSelfIfReady()
+	// getParentID returns the parent ID of the entry. A parent ID of 0 means
+	// no parent.
+	getParentID() int64
+	Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+//
+// Methods of the second type must always be called from within a method of
+// the first type.
+type channelMap struct {
+	mu               sync.RWMutex
+	topLevelChannels map[int64]struct{}
+	channels         map[int64]*Channel
+	subChannels      map[int64]*SubChannel
+	sockets          map[int64]*Socket
+	servers          map[int64]*Server
+}
+
+func newChannelMap() *channelMap {
+	return &channelMap{
+		topLevelChannels: make(map[int64]struct{}),
+		channels:         make(map[int64]*Channel),
+		subChannels:      make(map[int64]*SubChannel),
+		sockets:          make(map[int64]*Socket),
+		servers:          make(map[int64]*Server),
+	}
+}
+
+func (c *channelMap) addServer(id int64, s *Server) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.servers[id] = s
+}
+
+func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	cn.trace.cm = c
+	c.channels[id] = cn
+	if isTopChannel {
+		c.topLevelChannels[id] = struct{}{}
+	} else if p := c.channels[pid]; p != nil {
+		p.addChild(id, cn)
+	} else {
+		logger.Infof("channel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	sc.trace.cm = c
+	c.subChannels[id] = sc
+	if p := c.channels[pid]; p != nil {
+		p.addChild(id, sc)
+	} else {
+		logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
+	}
+}
+
+func (c *channelMap) addSocket(s *Socket) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	s.cm = c
+	c.sockets[s.ID] = s
+	if s.Parent == nil {
+		logger.Infof("normal socket %d has no parent", s.ID)
+	}
+	s.Parent.(entry).addChild(s.ID, s)
+}
+
+// removeEntry triggers the removal of an entry, which may not actually delete
+// the entry if it has to wait on the deletion of its children, or until no
+// other entity's channel trace references it. It may lead to a chain of entry
+// deletions. For example, deleting the last socket of a gracefully shutting
+// down server will lead to the server also being deleted.
+func (c *channelMap) removeEntry(id int64) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.findEntry(id).triggerDelete()
+}
+
+// tracedChannel represents tracing operations which are present on both
+// channels and subChannels.
+type tracedChannel interface {
+	getChannelTrace() *ChannelTrace
+	incrTraceRefCount()
+	decrTraceRefCount()
+	getRefName() string
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) decrTraceRefCount(id int64) {
+	e := c.findEntry(id)
+	if v, ok := e.(tracedChannel); ok {
+		v.decrTraceRefCount()
+		e.deleteSelfIfReady()
+	}
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+	if v, ok := c.channels[id]; ok {
+		return v
+	}
+	if v, ok := c.subChannels[id]; ok {
+		return v
+	}
+	if v, ok := c.servers[id]; ok {
+		return v
+	}
+	if v, ok := c.sockets[id]; ok {
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller.
+//
+// deleteEntry deletes an entry from the channelMap. Before calling this
+// method, the caller must check that this entry is ready to be deleted, i.e.
+// removeEntry() has been called on it and no children still exist.
+func (c *channelMap) deleteEntry(id int64) entry {
+	if v, ok := c.sockets[id]; ok {
+		delete(c.sockets, id)
+		return v
+	}
+	if v, ok := c.subChannels[id]; ok {
+		delete(c.subChannels, id)
+		return v
+	}
+	if v, ok := c.channels[id]; ok {
+		delete(c.channels, id)
+		delete(c.topLevelChannels, id)
+		return v
+	}
+	if v, ok := c.servers[id]; ok {
+		delete(c.servers, id)
+		return v
+	}
+	return &dummyEntry{idNotFound: id}
+}
+
+func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	child := c.findEntry(id)
+	childTC, ok := child.(tracedChannel)
+	if !ok {
+		return
+	}
+	childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+	if desc.Parent != nil {
+		parent := c.findEntry(child.getParentID())
+		var chanType RefChannelType
+		switch child.(type) {
+		case *Channel:
+			chanType = RefChannel
+		case *SubChannel:
+			chanType = RefSubChannel
+		}
+		if parentTC, ok := parent.(tracedChannel); ok {
+			parentTC.getChannelTrace().append(&traceEvent{
+				Desc:      desc.Parent.Desc,
+				Severity:  desc.Parent.Severity,
+				Timestamp: time.Now(),
+				RefID:     id,
+				RefName:   childTC.getRefName(),
+				RefType:   chanType,
+			})
+			childTC.incrTraceRefCount()
+		}
+	}
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int           { return len(s) }
+func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+	n := make(map[int64]string)
+	for k, v := range m {
+		n[k] = v
+	}
+	return n
+}
+
+func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+	if maxResults <= 0 {
+		maxResults = EntriesPerPage
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	l := int64(len(c.topLevelChannels))
+	ids := make([]int64, 0, l)
+
+	for k := range c.topLevelChannels {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	end := true
+	var t []*Channel
+	for _, v := range ids[idx:] {
+		if len(t) == maxResults {
+			end = false
+			break
+		}
+		if cn, ok := c.channels[v]; ok {
+			t = append(t, cn)
+		}
+	}
+	return t, end
+}
+
+func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
+	if maxResults <= 0 {
+		maxResults = EntriesPerPage
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	ids := make([]int64, 0, len(c.servers))
+	for k := range c.servers {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+	end := true
+	var s []*Server
+	for _, v := range ids[idx:] {
+		if len(s) == maxResults {
+			end = false
+			break
+		}
+		if svr, ok := c.servers[v]; ok {
+			s = append(s, svr)
+		}
+	}
+	return s, end
+}
+
+func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+	if maxResults <= 0 {
+		maxResults = EntriesPerPage
+	}
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	svr, ok := c.servers[id]
+	if !ok {
+		// server with id doesn't exist.
+		return nil, true
+	}
+	svrskts := svr.sockets
+	ids := make([]int64, 0, len(svrskts))
+	sks := make([]*Socket, 0, min(len(svrskts), maxResults))
+	for k := range svrskts {
+		ids = append(ids, k)
+	}
+	sort.Sort(int64Slice(ids))
+	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+	end := true
+	for _, v := range ids[idx:] {
+		if len(sks) == maxResults {
+			end = false
+			break
+		}
+		if ns, ok := c.sockets[v]; ok {
+			sks = append(sks, ns)
+		}
+	}
+	return sks, end
+}
+
+func (c *channelMap) getChannel(id int64) *Channel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.channels[id]
+}
+
+func (c *channelMap) getSubChannel(id int64) *SubChannel {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.subChannels[id]
+}
+
+func (c *channelMap) getSocket(id int64) *Socket {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.sockets[id]
+}
+
+func (c *channelMap) getServer(id int64) *Server {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+	return c.servers[id]
+}
+
+type dummyEntry struct {
+	// dummyEntry is a fake entry to handle entry not found case.
+	idNotFound int64
+	Entity
+}
+
+func (d *dummyEntry) String() string {
+	return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
+}
+
+func (d *dummyEntry) ID() int64 { return d.idNotFound }
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+	// Note: It is possible for a normal program to reach here under a race
+	// condition. For example, there could be a race between ClientConn.Close()
+	// info being propagated to addrConn and http2Client. ClientConn.Close()
+	// cancels the context, causing http2Client to error out, and the error is
+	// caught by the transport monitor before addrConn.tearDown() is called
+	// inside ClientConn.Close(). The addrConn will therefore create a new
+	// transport, and when the new transport is registered in channelz, its
+	// parent addrConn may already have been torn down and deleted from
+	// channelz tracking, thus reaching the code here.
+	logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+	// It is possible for a normal program to reach here under race condition.
+	// Refer to the example described in addChild().
+	logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+	logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+	return 0
+}
+
+// Entity is implemented by all channelz types.
+type Entity interface {
+	isEntity()
+	fmt.Stringer
+	id() int64
+}
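
Editor's note: getTopChannels, getServers, and getServerSockets share one pagination idiom: sort the ids, binary-search for the start id, cap the page at maxResults, and report whether the end of the id space was reached. A standalone sketch of that idiom (paginate is illustrative):

package main

import (
	"fmt"
	"sort"
)

// paginate mirrors the channelMap query pattern above: sort the ids, find the
// first id >= startID with a binary search, then take up to maxResults
// entries. end reports whether the page reached the end of the id space.
func paginate(ids []int64, startID int64, maxResults int) (page []int64, end bool) {
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
	end = true
	for _, v := range ids[idx:] {
		if len(page) == maxResults {
			end = false // more ids remain past this page
			break
		}
		page = append(page, v)
	}
	return page, end
}

func main() {
	ids := []int64{7, 3, 12, 5, 9}
	fmt.Println(paginate(ids, 5, 2)) // [5 7] false: more ids remain
	fmt.Println(paginate(ids, 9, 2)) // [9 12] true: reached the end
}
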
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index f0744f9..078bb81 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -16,135 +16,54 @@
  *
  */
 
-// Package channelz defines APIs for enabling channelz service, entry
+// Package channelz defines internal APIs for enabling channelz service, entry
 // registration/deletion, and accessing channelz data. It also defines channelz
 // metric struct formats.
-//
-// All APIs in this package are experimental.
 package channelz
 
 import (
-	"fmt"
-	"sort"
-	"sync"
 	"sync/atomic"
 	"time"
 
-	"google.golang.org/grpc/grpclog"
-)
-
-const (
-	defaultMaxTraceEntry int32 = 30
+	"google.golang.org/grpc/internal"
 )
 
 var (
-	db    dbWrapper
-	idGen idGenerator
-	// EntryPerPage defines the number of channelz entries to be shown on a web page.
-	EntryPerPage  = int64(50)
-	curState      int32
-	maxTraceEntry = defaultMaxTraceEntry
+	// IDGen is the global channelz entity ID generator.  It should not be used
+	// outside this package except by tests.
+	IDGen IDGenerator
+
+	db = newChannelMap()
+	// EntriesPerPage defines the number of channelz entries to be shown on a web page.
+	EntriesPerPage = 50
+	curState       int32
 )
 
 // TurnOn turns on channelz data collection.
 func TurnOn() {
-	if !IsOn() {
-		NewChannelzStorage()
-		atomic.StoreInt32(&curState, 1)
+	atomic.StoreInt32(&curState, 1)
+}
+
+func init() {
+	internal.ChannelzTurnOffForTesting = func() {
+		atomic.StoreInt32(&curState, 0)
 	}
 }
 
 // IsOn returns whether channelz data collection is on.
 func IsOn() bool {
-	return atomic.CompareAndSwapInt32(&curState, 1, 1)
-}
-
-// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
-// Setting it to 0 will disable channel tracing.
-func SetMaxTraceEntry(i int32) {
-	atomic.StoreInt32(&maxTraceEntry, i)
-}
-
-// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
-func ResetMaxTraceEntryToDefault() {
-	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
-}
-
-func getMaxTraceEntry() int {
-	i := atomic.LoadInt32(&maxTraceEntry)
-	return int(i)
-}
-
-// dbWarpper wraps around a reference to internal channelz data storage, and
-// provide synchronized functionality to set and get the reference.
-type dbWrapper struct {
-	mu sync.RWMutex
-	DB *channelMap
-}
-
-func (d *dbWrapper) set(db *channelMap) {
-	d.mu.Lock()
-	d.DB = db
-	d.mu.Unlock()
-}
-
-func (d *dbWrapper) get() *channelMap {
-	d.mu.RLock()
-	defer d.mu.RUnlock()
-	return d.DB
-}
-
-// NewChannelzStorage initializes channelz data storage and id generator.
-//
-// This function returns a cleanup function to wait for all channelz state to be reset by the
-// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
-// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
-// to remove some entity just register by the new test, since the id space is the same.
-//
-// Note: This function is exported for testing purpose only. User should not call
-// it in most cases.
-func NewChannelzStorage() (cleanup func() error) {
-	db.set(&channelMap{
-		topLevelChannels: make(map[int64]struct{}),
-		channels:         make(map[int64]*channel),
-		listenSockets:    make(map[int64]*listenSocket),
-		normalSockets:    make(map[int64]*normalSocket),
-		servers:          make(map[int64]*server),
-		subChannels:      make(map[int64]*subChannel),
-	})
-	idGen.reset()
-	return func() error {
-		var err error
-		cm := db.get()
-		if cm == nil {
-			return nil
-		}
-		for i := 0; i < 1000; i++ {
-			cm.mu.Lock()
-			if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
-				cm.mu.Unlock()
-				// all things stored in the channelz map have been cleared.
-				return nil
-			}
-			cm.mu.Unlock()
-			time.Sleep(10 * time.Millisecond)
-		}
-
-		cm.mu.Lock()
-		err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
-		cm.mu.Unlock()
-		return err
-	}
+	return atomic.LoadInt32(&curState) == 1
 }
 
 // GetTopChannels returns a slice of top channel's ChannelMetric, along with a
 // boolean indicating whether there's more top channels to be queried for.
 //
-// The arg id specifies that only top channel with id at or above it will be included
-// in the result. The returned slice is up to a length of the arg maxResults or
-// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
-	return db.get().GetTopChannels(id, maxResults)
+// The arg id specifies that only top channels with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+	return db.getTopChannels(id, maxResults)
 }
 
 // GetServers returns a slice of server's ServerMetric, along with a
@@ -152,576 +71,160 @@
 //
 // The arg id specifies that only server with id at or above it will be included
 // in the result. The returned slice is up to a length of the arg maxResults or
-// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
-	return db.get().GetServers(id, maxResults)
+// EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServers(id int64, maxResults int) ([]*Server, bool) {
+	return db.getServers(id, maxResults)
 }
 
 // GetServerSockets returns a slice of server's (identified by id) normal socket's
-// SocketMetric, along with a boolean indicating whether there's more sockets to
+// SocketMetrics, along with a boolean indicating whether there's more sockets to
 // be queried for.
 //
 // The arg startID specifies that only sockets with id at or above it will be
 // included in the result. The returned slice is up to a length of the arg maxResults
-// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
-	return db.get().GetServerSockets(id, startID, maxResults)
+// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+	return db.getServerSockets(id, startID, maxResults)
 }
 
-// GetChannel returns the ChannelMetric for the channel (identified by id).
-func GetChannel(id int64) *ChannelMetric {
-	return db.get().GetChannel(id)
+// GetChannel returns the Channel for the channel (identified by id).
+func GetChannel(id int64) *Channel {
+	return db.getChannel(id)
 }
 
-// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
-func GetSubChannel(id int64) *SubChannelMetric {
-	return db.get().GetSubChannel(id)
+// GetSubChannel returns the SubChannel for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannel {
+	return db.getSubChannel(id)
 }
 
-// GetSocket returns the SocketInternalMetric for the socket (identified by id).
-func GetSocket(id int64) *SocketMetric {
-	return db.get().GetSocket(id)
+// GetSocket returns the Socket for the socket (identified by id).
+func GetSocket(id int64) *Socket {
+	return db.getSocket(id)
 }
 
 // GetServer returns the ServerMetric for the server (identified by id).
-func GetServer(id int64) *ServerMetric {
-	return db.get().GetServer(id)
+func GetServer(id int64) *Server {
+	return db.getServer(id)
 }
 
-// RegisterChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
-	id := idGen.genID()
-	cn := &channel{
-		refName:     ref,
-		c:           c,
-		subChans:    make(map[int64]string),
+// RegisterChannel registers a new channel in the channelz database with
+// target as its target and reference name, and adds it to the child list of its
+// parent.  parent == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(parent *Channel, target string) *Channel {
+	id := IDGen.genID()
+
+	if !IsOn() {
+		return &Channel{ID: id}
+	}
+
+	isTopChannel := parent == nil
+
+	cn := &Channel{
+		ID:          id,
+		RefName:     target,
 		nestedChans: make(map[int64]string),
-		id:          id,
-		pid:         pid,
-		trace:       &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+		subChans:    make(map[int64]string),
+		Parent:      parent,
+		trace:       &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
 	}
-	if pid == 0 {
-		db.get().addChannel(id, cn, true, pid, ref)
-	} else {
-		db.get().addChannel(id, cn, false, pid, ref)
-	}
-	return id
+	cn.ChannelMetrics.Target.Store(&target)
+	db.addChannel(id, cn, isTopChannel, cn.getParentID())
+	return cn
 }
 
-// RegisterSubChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
-	if pid == 0 {
-		grpclog.Error("a SubChannel's parent id cannot be 0")
-		return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
+	id := IDGen.genID()
+	sc := &SubChannel{
+		ID:      id,
+		RefName: ref,
+		parent:  parent,
 	}
-	id := idGen.genID()
-	sc := &subChannel{
-		refName: ref,
-		c:       c,
-		sockets: make(map[int64]string),
-		id:      id,
-		pid:     pid,
-		trace:   &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+
+	if !IsOn() {
+		return sc
 	}
-	db.get().addSubChannel(id, sc, pid, ref)
-	return id
+
+	sc.sockets = make(map[int64]string)
+	sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
+	db.addSubChannel(id, sc, parent.ID)
+	return sc
 }
 
 // RegisterServer registers the given server s in channelz database. It returns
 // the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
-	id := idGen.genID()
-	svr := &server{
-		refName:       ref,
-		s:             s,
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(ref string) *Server {
+	id := IDGen.genID()
+	if !IsOn() {
+		return &Server{ID: id}
+	}
+
+	svr := &Server{
+		RefName:       ref,
 		sockets:       make(map[int64]string),
 		listenSockets: make(map[int64]string),
-		id:            id,
+		ID:            id,
 	}
-	db.get().addServer(id, svr)
-	return id
+	db.addServer(id, svr)
+	return svr
 }
 
-// RegisterListenSocket registers the given listen socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		grpclog.Error("a ListenSocket's parent id cannot be 0")
-		return 0
+// RegisterSocket registers the given normal socket skt in the channelz
+// database and adds it to the child list of its parent (identified by
+// skt.Parent, which must be set). It returns skt with its unique channelz
+// tracking id assigned.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSocket(skt *Socket) *Socket {
+	skt.ID = IDGen.genID()
+	if IsOn() {
+		db.addSocket(skt)
 	}
-	id := idGen.genID()
-	ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addListenSocket(id, ls, pid, ref)
-	return id
+	return skt
 }
 
-// RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
-	if pid == 0 {
-		grpclog.Error("a NormalSocket's parent id cannot be 0")
-		return 0
-	}
-	id := idGen.genID()
-	ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
-	db.get().addNormalSocket(id, ns, pid, ref)
-	return id
-}
-
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes the entry with the given unique channelz tracking id from
 // the channelz database.
+//
+// If channelz is not turned ON, this function is a no-op.
 func RemoveEntry(id int64) {
-	db.get().removeEntry(id)
-}
-
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
-type TraceEventDesc struct {
-	Desc     string
-	Severity Severity
-	Parent   *TraceEventDesc
-}
-
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, desc *TraceEventDesc) {
-	if getMaxTraceEntry() == 0 {
+	if !IsOn() {
 		return
 	}
-	db.get().traceEvent(id, desc)
+	db.removeEntry(id)
 }
 
-// channelMap is the storage data structure for channelz.
-// Methods of channelMap can be divided in two two categories with respect to locking.
-// 1. Methods acquire the global lock.
-// 2. Methods that can only be called when global lock is held.
-// A second type of method need always to be called inside a first type of method.
-type channelMap struct {
-	mu               sync.RWMutex
-	topLevelChannels map[int64]struct{}
-	servers          map[int64]*server
-	channels         map[int64]*channel
-	subChannels      map[int64]*subChannel
-	listenSockets    map[int64]*listenSocket
-	normalSockets    map[int64]*normalSocket
-}
-
-func (c *channelMap) addServer(id int64, s *server) {
-	c.mu.Lock()
-	s.cm = c
-	c.servers[id] = s
-	c.mu.Unlock()
-}
-
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
-	c.mu.Lock()
-	cn.cm = c
-	cn.trace.cm = c
-	c.channels[id] = cn
-	if isTopChannel {
-		c.topLevelChannels[id] = struct{}{}
-	} else {
-		c.findEntry(pid).addChild(id, cn)
-	}
-	c.mu.Unlock()
-}
-
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
-	c.mu.Lock()
-	sc.cm = c
-	sc.trace.cm = c
-	c.subChannels[id] = sc
-	c.findEntry(pid).addChild(id, sc)
-	c.mu.Unlock()
-}
-
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
-	c.mu.Lock()
-	ls.cm = c
-	c.listenSockets[id] = ls
-	c.findEntry(pid).addChild(id, ls)
-	c.mu.Unlock()
-}
-
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
-	c.mu.Lock()
-	ns.cm = c
-	c.normalSockets[id] = ns
-	c.findEntry(pid).addChild(id, ns)
-	c.mu.Unlock()
-}
-
-// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
-// wait on the deletion of its children and until no other entity's channel trace references it.
-// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
-// shutting down server will lead to the server being also deleted.
-func (c *channelMap) removeEntry(id int64) {
-	c.mu.Lock()
-	c.findEntry(id).triggerDelete()
-	c.mu.Unlock()
-}
-
-// c.mu must be held by the caller
-func (c *channelMap) decrTraceRefCount(id int64) {
-	e := c.findEntry(id)
-	if v, ok := e.(tracedChannel); ok {
-		v.decrTraceRefCount()
-		e.deleteSelfIfReady()
-	}
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) findEntry(id int64) entry {
-	var v entry
-	var ok bool
-	if v, ok = c.channels[id]; ok {
-		return v
-	}
-	if v, ok = c.subChannels[id]; ok {
-		return v
-	}
-	if v, ok = c.servers[id]; ok {
-		return v
-	}
-	if v, ok = c.listenSockets[id]; ok {
-		return v
-	}
-	if v, ok = c.normalSockets[id]; ok {
-		return v
-	}
-	return &dummyEntry{idNotFound: id}
-}
-
-// c.mu must be held by the caller
-// deleteEntry simply deletes an entry from the channelMap. Before calling this
-// method, caller must check this entry is ready to be deleted, i.e removeEntry()
-// has been called on it, and no children still exist.
-// Conditionals are ordered by the expected frequency of deletion of each entity
-// type, in order to optimize performance.
-func (c *channelMap) deleteEntry(id int64) {
-	var ok bool
-	if _, ok = c.normalSockets[id]; ok {
-		delete(c.normalSockets, id)
-		return
-	}
-	if _, ok = c.subChannels[id]; ok {
-		delete(c.subChannels, id)
-		return
-	}
-	if _, ok = c.channels[id]; ok {
-		delete(c.channels, id)
-		delete(c.topLevelChannels, id)
-		return
-	}
-	if _, ok = c.listenSockets[id]; ok {
-		delete(c.listenSockets, id)
-		return
-	}
-	if _, ok = c.servers[id]; ok {
-		delete(c.servers, id)
-		return
-	}
-}
-
-func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
-	c.mu.Lock()
-	child := c.findEntry(id)
-	childTC, ok := child.(tracedChannel)
-	if !ok {
-		c.mu.Unlock()
-		return
-	}
-	childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
-	if desc.Parent != nil {
-		parent := c.findEntry(child.getParentID())
-		var chanType RefChannelType
-		switch child.(type) {
-		case *channel:
-			chanType = RefChannel
-		case *subChannel:
-			chanType = RefSubChannel
-		}
-		if parentTC, ok := parent.(tracedChannel); ok {
-			parentTC.getChannelTrace().append(&TraceEvent{
-				Desc:      desc.Parent.Desc,
-				Severity:  desc.Parent.Severity,
-				Timestamp: time.Now(),
-				RefID:     id,
-				RefName:   childTC.getRefName(),
-				RefType:   chanType,
-			})
-			childTC.incrTraceRefCount()
-		}
-	}
-	c.mu.Unlock()
-}
-
-type int64Slice []int64
-
-func (s int64Slice) Len() int           { return len(s) }
-func (s int64Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-
-func copyMap(m map[int64]string) map[int64]string {
-	n := make(map[int64]string)
-	for k, v := range m {
-		n[k] = v
-	}
-	return n
-}
-
-func min(a, b int64) int64 {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
-	if maxResults <= 0 {
-		maxResults = EntryPerPage
-	}
-	c.mu.RLock()
-	l := int64(len(c.topLevelChannels))
-	ids := make([]int64, 0, l)
-	cns := make([]*channel, 0, min(l, maxResults))
-
-	for k := range c.topLevelChannels {
-		ids = append(ids, k)
-	}
-	sort.Sort(int64Slice(ids))
-	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
-	count := int64(0)
-	var end bool
-	var t []*ChannelMetric
-	for i, v := range ids[idx:] {
-		if count == maxResults {
-			break
-		}
-		if cn, ok := c.channels[v]; ok {
-			cns = append(cns, cn)
-			t = append(t, &ChannelMetric{
-				NestedChans: copyMap(cn.nestedChans),
-				SubChans:    copyMap(cn.subChans),
-			})
-			count++
-		}
-		if i == len(ids[idx:])-1 {
-			end = true
-			break
-		}
-	}
-	c.mu.RUnlock()
-	if count == 0 {
-		end = true
-	}
-
-	for i, cn := range cns {
-		t[i].ChannelData = cn.c.ChannelzMetric()
-		t[i].ID = cn.id
-		t[i].RefName = cn.refName
-		t[i].Trace = cn.trace.dumpData()
-	}
-	return t, end
-}
-
-func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
-	if maxResults <= 0 {
-		maxResults = EntryPerPage
-	}
-	c.mu.RLock()
-	l := int64(len(c.servers))
-	ids := make([]int64, 0, l)
-	ss := make([]*server, 0, min(l, maxResults))
-	for k := range c.servers {
-		ids = append(ids, k)
-	}
-	sort.Sort(int64Slice(ids))
-	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
-	count := int64(0)
-	var end bool
-	var s []*ServerMetric
-	for i, v := range ids[idx:] {
-		if count == maxResults {
-			break
-		}
-		if svr, ok := c.servers[v]; ok {
-			ss = append(ss, svr)
-			s = append(s, &ServerMetric{
-				ListenSockets: copyMap(svr.listenSockets),
-			})
-			count++
-		}
-		if i == len(ids[idx:])-1 {
-			end = true
-			break
-		}
-	}
-	c.mu.RUnlock()
-	if count == 0 {
-		end = true
-	}
-
-	for i, svr := range ss {
-		s[i].ServerData = svr.s.ChannelzMetric()
-		s[i].ID = svr.id
-		s[i].RefName = svr.refName
-	}
-	return s, end
-}
-
-func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
-	if maxResults <= 0 {
-		maxResults = EntryPerPage
-	}
-	var svr *server
-	var ok bool
-	c.mu.RLock()
-	if svr, ok = c.servers[id]; !ok {
-		// server with id doesn't exist.
-		c.mu.RUnlock()
-		return nil, true
-	}
-	svrskts := svr.sockets
-	l := int64(len(svrskts))
-	ids := make([]int64, 0, l)
-	sks := make([]*normalSocket, 0, min(l, maxResults))
-	for k := range svrskts {
-		ids = append(ids, k)
-	}
-	sort.Sort(int64Slice(ids))
-	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
-	count := int64(0)
-	var end bool
-	for i, v := range ids[idx:] {
-		if count == maxResults {
-			break
-		}
-		if ns, ok := c.normalSockets[v]; ok {
-			sks = append(sks, ns)
-			count++
-		}
-		if i == len(ids[idx:])-1 {
-			end = true
-			break
-		}
-	}
-	c.mu.RUnlock()
-	if count == 0 {
-		end = true
-	}
-	var s []*SocketMetric
-	for _, ns := range sks {
-		sm := &SocketMetric{}
-		sm.SocketData = ns.s.ChannelzMetric()
-		sm.ID = ns.id
-		sm.RefName = ns.refName
-		s = append(s, sm)
-	}
-	return s, end
-}
-
-func (c *channelMap) GetChannel(id int64) *ChannelMetric {
-	cm := &ChannelMetric{}
-	var cn *channel
-	var ok bool
-	c.mu.RLock()
-	if cn, ok = c.channels[id]; !ok {
-		// channel with id doesn't exist.
-		c.mu.RUnlock()
-		return nil
-	}
-	cm.NestedChans = copyMap(cn.nestedChans)
-	cm.SubChans = copyMap(cn.subChans)
-	// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
-	// holding the lock to prevent potential data race.
-	chanCopy := cn.c
-	c.mu.RUnlock()
-	cm.ChannelData = chanCopy.ChannelzMetric()
-	cm.ID = cn.id
-	cm.RefName = cn.refName
-	cm.Trace = cn.trace.dumpData()
-	return cm
-}
-
-func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
-	cm := &SubChannelMetric{}
-	var sc *subChannel
-	var ok bool
-	c.mu.RLock()
-	if sc, ok = c.subChannels[id]; !ok {
-		// subchannel with id doesn't exist.
-		c.mu.RUnlock()
-		return nil
-	}
-	cm.Sockets = copyMap(sc.sockets)
-	// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
-	// holding the lock to prevent potential data race.
-	chanCopy := sc.c
-	c.mu.RUnlock()
-	cm.ChannelData = chanCopy.ChannelzMetric()
-	cm.ID = sc.id
-	cm.RefName = sc.refName
-	cm.Trace = sc.trace.dumpData()
-	return cm
-}
-
-func (c *channelMap) GetSocket(id int64) *SocketMetric {
-	sm := &SocketMetric{}
-	c.mu.RLock()
-	if ls, ok := c.listenSockets[id]; ok {
-		c.mu.RUnlock()
-		sm.SocketData = ls.s.ChannelzMetric()
-		sm.ID = ls.id
-		sm.RefName = ls.refName
-		return sm
-	}
-	if ns, ok := c.normalSockets[id]; ok {
-		c.mu.RUnlock()
-		sm.SocketData = ns.s.ChannelzMetric()
-		sm.ID = ns.id
-		sm.RefName = ns.refName
-		return sm
-	}
-	c.mu.RUnlock()
-	return nil
-}
-
-func (c *channelMap) GetServer(id int64) *ServerMetric {
-	sm := &ServerMetric{}
-	var svr *server
-	var ok bool
-	c.mu.RLock()
-	if svr, ok = c.servers[id]; !ok {
-		c.mu.RUnlock()
-		return nil
-	}
-	sm.ListenSockets = copyMap(svr.listenSockets)
-	c.mu.RUnlock()
-	sm.ID = svr.id
-	sm.RefName = svr.refName
-	sm.ServerData = svr.s.ChannelzMetric()
-	return sm
-}
-
-type idGenerator struct {
+// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
+type IDGenerator struct {
 	id int64
 }
 
-func (i *idGenerator) reset() {
+// Reset resets the generated ID back to zero.  Should only be used at
+// initialization or by tests sensitive to the ID number.
+func (i *IDGenerator) Reset() {
 	atomic.StoreInt64(&i.id, 0)
 }
 
-func (i *idGenerator) genID() int64 {
+func (i *IDGenerator) genID() int64 {
 	return atomic.AddInt64(&i.id, 1)
 }
+
+// Identifier is an opaque channelz identifier used to expose channelz symbols
+// outside of grpc.  Currently only implemented by Channel since no other
+// types require exposure outside grpc.
+type Identifier interface {
+	Entity
+	channelzIdentifier()
+}
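The IDGenerator above is a bare atomic counter: genID hands out IDs starting at 1, so a parent ID of 0 can safely mean "no parent" elsewhere in channelz. A minimal standalone sketch of the same pattern (the names here are illustrative, not part of the gRPC API):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// idGen mirrors the IDGenerator pattern: an atomically incremented
// counter whose first issued ID is 1, reserving 0 for "no parent".
type idGen struct{ id int64 }

func (g *idGen) reset()       { atomic.StoreInt64(&g.id, 0) }
func (g *idGen) genID() int64 { return atomic.AddInt64(&g.id, 1) }

func main() {
	var g idGen
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = g.genID() // safe from any goroutine; no mutex needed
		}()
	}
	wg.Wait()
	fmt.Println(g.genID()) // 101: IDs are unique and dense across goroutines
}
```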
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
new file mode 100644
index 0000000..ee4d721
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("channelz")
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtInfo,
+	})
+}
+
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtInfo,
+	})
+}
+
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtWarning,
+	})
+}
+
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtWarning,
+	})
+}
+
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprint(args...),
+		Severity: CtError,
+	})
+}
+
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+	AddTraceEvent(l, e, 1, &TraceEvent{
+		Desc:     fmt.Sprintf(format, args...),
+		Severity: CtError,
+	})
+}
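All six helpers follow one pattern: format the description, then hand it to AddTraceEvent, which always logs through the supplied DepthLoggerV2 and records a channelz trace event only when channelz is enabled. A rough standalone sketch of that pattern with hypothetical names (the real gate is IsOn() and the real sink is the channelz database):

```go
package main

import (
	"fmt"
	"log"
)

type severity int

const (
	ctInfo severity = iota + 1
	ctWarning
	ctError
)

// logAndMaybeTrace mirrors the helpers above: every call is logged,
// but a trace event is recorded only while tracing is enabled, so the
// hot path stays cheap when channelz is off.
func logAndMaybeTrace(tracingOn bool, sev severity, format string, args ...any) {
	msg := fmt.Sprintf(format, args...)
	log.Print(msg)
	if tracingOn {
		// db.traceEvent(entityID, ...) in the real implementation.
		fmt.Printf("trace event (severity %d): %s\n", sev, msg)
	}
}

func main() {
	logAndMaybeTrace(true, ctWarning, "subchannel %d: %s", 7, "TRANSIENT_FAILURE")
}
```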
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
new file mode 100644
index 0000000..b5a8249
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+// Server is the channelz representation of a server.
+type Server struct {
+	Entity
+	ID      int64
+	RefName string
+
+	ServerMetrics ServerMetrics
+
+	closeCalled   bool
+	sockets       map[int64]string
+	listenSockets map[int64]string
+	cm            *channelMap
+}
+
+// ServerMetrics defines a struct containing metrics for servers.
+type ServerMetrics struct {
+	// The number of incoming calls started on the server.
+	CallsStarted atomic.Int64
+	// The number of incoming calls that have completed with an OK status.
+	CallsSucceeded atomic.Int64
+	// The number of incoming calls that have completed with a non-OK status.
+	CallsFailed atomic.Int64
+	// The last time a call was started on the server.
+	LastCallStartedTimestamp atomic.Int64
+}
+
+// NewServerMetricsForTesting returns an initialized ServerMetrics.
+func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
+	sm := &ServerMetrics{}
+	sm.CallsStarted.Store(started)
+	sm.CallsSucceeded.Store(succeeded)
+	sm.CallsFailed.Store(failed)
+	sm.LastCallStartedTimestamp.Store(timestamp)
+	return sm
+}
+
+// CopyFrom copies the metrics data from the provided ServerMetrics
+// instance into the current instance.
+func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
+	sm.CallsStarted.Store(o.CallsStarted.Load())
+	sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
+	sm.CallsFailed.Store(o.CallsFailed.Load())
+	sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// ListenSockets returns the listening sockets for s.
+func (s *Server) ListenSockets() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(s.listenSockets)
+}
+
+// String returns a printable description of s.
+func (s *Server) String() string {
+	return fmt.Sprintf("Server #%d", s.ID)
+}
+
+func (s *Server) id() int64 {
+	return s.ID
+}
+
+func (s *Server) addChild(id int64, e entry) {
+	switch v := e.(type) {
+	case *Socket:
+		switch v.SocketType {
+		case SocketTypeNormal:
+			s.sockets[id] = v.RefName
+		case SocketTypeListen:
+			s.listenSockets[id] = v.RefName
+		}
+	default:
+		logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+	}
+}
+
+func (s *Server) deleteChild(id int64) {
+	delete(s.sockets, id)
+	delete(s.listenSockets, id)
+	s.deleteSelfIfReady()
+}
+
+func (s *Server) triggerDelete() {
+	s.closeCalled = true
+	s.deleteSelfIfReady()
+}
+
+func (s *Server) deleteSelfIfReady() {
+	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+		return
+	}
+	s.cm.deleteEntry(s.ID)
+}
+
+func (s *Server) getParentID() int64 {
+	return 0
+}
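ServerMetrics stores its counters as atomic.Int64 (Go 1.19+), so RPC hot paths can bump them without a mutex, and CopyFrom takes a field-by-field snapshot rather than a globally consistent one. A small sketch of that trade-off, with illustrative names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// metrics mirrors the ServerMetrics layout above: lock-free counters
// that hot paths bump and readers snapshot field by field.
type metrics struct {
	callsStarted   atomic.Int64
	callsSucceeded atomic.Int64
}

func (m *metrics) copyFrom(o *metrics) {
	// Each field is copied atomically, but the snapshot as a whole is
	// not taken under one lock, so fields may come from slightly
	// different instants. That is acceptable for monitoring data.
	m.callsStarted.Store(o.callsStarted.Load())
	m.callsSucceeded.Store(o.callsSucceeded.Load())
}

func main() {
	var live, snap metrics
	live.callsStarted.Add(3)
	live.callsSucceeded.Add(2)
	snap.copyFrom(&live)
	fmt.Println(snap.callsStarted.Load(), snap.callsSucceeded.Load()) // 3 2
}
```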
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
new file mode 100644
index 0000000..9010384
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -0,0 +1,137 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"net"
+	"sync/atomic"
+
+	"google.golang.org/grpc/credentials"
+)
+
+// SocketMetrics defines the metrics collected for a Socket. All
+// fields are updated atomically and may be read concurrently.
+type SocketMetrics struct {
+	// The number of streams that have been started.
+	StreamsStarted atomic.Int64
+	// The number of streams that have ended successfully:
+	// On client side, receiving frame with eos bit set.
+	// On server side, sending frame with eos bit set.
+	StreamsSucceeded atomic.Int64
+	// The number of streams that have ended unsuccessfully:
+	// On client side, termination without receiving frame with eos bit set.
+	// On server side, termination without sending frame with eos bit set.
+	StreamsFailed atomic.Int64
+	// The number of messages successfully sent on this socket.
+	MessagesSent     atomic.Int64
+	MessagesReceived atomic.Int64
+	// The number of keep alives sent.  This is typically implemented with HTTP/2
+	// ping messages.
+	KeepAlivesSent atomic.Int64
+	// The last time a stream was created by this endpoint.  Usually unset for
+	// servers.
+	LastLocalStreamCreatedTimestamp atomic.Int64
+	// The last time a stream was created by the remote endpoint.  Usually unset
+	// for clients.
+	LastRemoteStreamCreatedTimestamp atomic.Int64
+	// The last time a message was sent by this endpoint.
+	LastMessageSentTimestamp atomic.Int64
+	// The last time a message was received by this endpoint.
+	LastMessageReceivedTimestamp atomic.Int64
+}
+
+// EphemeralSocketMetrics are metrics that change rapidly and are tracked
+// outside of channelz.
+type EphemeralSocketMetrics struct {
+	// The amount of window, granted to the local endpoint by the remote endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	LocalFlowControlWindow int64
+	// The amount of window, granted to the remote endpoint by the local endpoint.
+	// This may be slightly out of date due to network latency.  This does NOT
+	// include stream level or TCP level flow control info.
+	RemoteFlowControlWindow int64
+}
+
+// SocketType represents the type of socket.
+type SocketType string
+
+// SocketType can be one of these.
+const (
+	SocketTypeNormal = "NormalSocket"
+	SocketTypeListen = "ListenSocket"
+)
+
+// Socket represents a socket within channelz which includes socket
+// metrics and data related to socket activity and provides methods
+// for managing and interacting with sockets.
+type Socket struct {
+	Entity
+	SocketType       SocketType
+	ID               int64
+	Parent           Entity
+	cm               *channelMap
+	SocketMetrics    SocketMetrics
+	EphemeralMetrics func() *EphemeralSocketMetrics
+
+	RefName string
+	// The locally bound address.  Immutable.
+	LocalAddr net.Addr
+	// The remote bound address.  May be absent.  Immutable.
+	RemoteAddr net.Addr
+	// Optional, represents the name of the remote endpoint, if different than
+	// the original target name.  Immutable.
+	RemoteName string
+	// Immutable.
+	SocketOptions *SocketOptionData
+	// Immutable.
+	Security credentials.ChannelzSecurityValue
+}
+
+// String returns a string representation of the Socket, including its parent
+// entity, socket type, and ID.
+func (ls *Socket) String() string {
+	return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
+}
+
+func (ls *Socket) id() int64 {
+	return ls.ID
+}
+
+func (ls *Socket) addChild(id int64, e entry) {
+	logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *Socket) deleteChild(id int64) {
+	logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *Socket) triggerDelete() {
+	ls.cm.deleteEntry(ls.ID)
+	ls.Parent.(entry).deleteChild(ls.ID)
+}
+
+func (ls *Socket) deleteSelfIfReady() {
+	logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *Socket) getParentID() int64 {
+	return ls.Parent.id()
+}
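Note the split between SocketMetrics, which channelz tracks directly, and EphemeralMetrics, which is a callback: flow-control windows change too quickly to mirror into channelz, so they are fetched from the transport only when a socket is actually queried. A hypothetical sketch of that design:

```go
package main

import "fmt"

// ephemeral mirrors the EphemeralMetrics idea above: values that
// change too rapidly to mirror into channelz are fetched on demand.
type ephemeral struct {
	LocalFlowControlWindow  int64
	RemoteFlowControlWindow int64
}

type socket struct {
	// Set by the transport; invoked only when someone queries the socket.
	ephemeralMetrics func() *ephemeral
}

func main() {
	window := int64(65535)
	s := socket{ephemeralMetrics: func() *ephemeral {
		// A real transport would read its live flow-control state here.
		return &ephemeral{LocalFlowControlWindow: window, RemoteFlowControlWindow: window}
	}}
	fmt.Println(s.ephemeralMetrics().LocalFlowControlWindow) // 65535
}
```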
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
new file mode 100644
index 0000000..b20802e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync/atomic"
+)
+
+// SubChannel is the channelz representation of a subchannel.
+type SubChannel struct {
+	Entity
+	// ID is the channelz id of this subchannel.
+	ID int64
+	// RefName is the human readable reference string of this subchannel.
+	RefName       string
+	closeCalled   bool
+	sockets       map[int64]string
+	parent        *Channel
+	trace         *ChannelTrace
+	traceRefCount int32
+
+	ChannelMetrics ChannelMetrics
+}
+
+func (sc *SubChannel) String() string {
+	return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
+}
+
+func (sc *SubChannel) id() int64 {
+	return sc.ID
+}
+
+// Sockets returns a copy of the sockets map associated with the SubChannel.
+func (sc *SubChannel) Sockets() map[int64]string {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return copyMap(sc.sockets)
+}
+
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
+func (sc *SubChannel) Trace() *ChannelTrace {
+	db.mu.RLock()
+	defer db.mu.RUnlock()
+	return sc.trace.copy()
+}
+
+func (sc *SubChannel) addChild(id int64, e entry) {
+	if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
+		sc.sockets[id] = v.RefName
+	} else {
+		logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+	}
+}
+
+func (sc *SubChannel) deleteChild(id int64) {
+	delete(sc.sockets, id)
+	sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) triggerDelete() {
+	sc.closeCalled = true
+	sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) getParentID() int64 {
+	return sc.parent.ID
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet two criteria: removal of
+// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
+//
+// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
+func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
+	if !sc.closeCalled || len(sc.sockets) != 0 {
+		return false
+	}
+	sc.parent.deleteChild(sc.ID)
+	return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 for it to be deleted from the map. This is
+// specified in the channel tracing gRFC: as long as some other trace holds a reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resources allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
+func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
+	return sc.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+//  1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+//     its parent's child list.
+//  2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
+//     by id will return entry not found error.
+func (sc *SubChannel) deleteSelfIfReady() {
+	if !sc.deleteSelfFromTree() {
+		return
+	}
+	if !sc.deleteSelfFromMap() {
+		return
+	}
+	db.deleteEntry(sc.ID)
+	sc.trace.clear()
+}
+
+func (sc *SubChannel) getChannelTrace() *ChannelTrace {
+	return sc.trace
+}
+
+func (sc *SubChannel) incrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *SubChannel) decrTraceRefCount() {
+	atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *SubChannel) getTraceRefCount() int {
+	i := atomic.LoadInt32(&sc.traceRefCount)
+	return int(i)
+}
+
+func (sc *SubChannel) getRefName() string {
+	return sc.RefName
+}
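The teardown here is deliberately two-phase: deleteSelfFromTree detaches the subchannel from its parent once Close has been called and no sockets remain, while deleteSelfFromMap keeps the entry queryable until no other entity's trace references it. A toy model of the same gating, with invented names:

```go
package main

import "fmt"

// node sketches the two-step teardown used by SubChannel above: an
// entity leaves the relation tree once Close was called and it has no
// children, but leaves the lookup map only when no trace references it.
type node struct {
	closeCalled bool
	children    int
	traceRefs   int
	inTree      bool
	inMap       bool
}

func (n *node) deleteSelfIfReady() {
	if !n.closeCalled || n.children != 0 {
		return // still wired into the tree
	}
	n.inTree = false
	if n.traceRefs != 0 {
		return // some other entity's trace still points at us
	}
	n.inMap = false
}

func main() {
	n := &node{closeCalled: true, traceRefs: 1, inTree: true, inMap: true}
	n.deleteSelfIfReady()
	fmt.Println(n.inTree, n.inMap) // false true: out of the tree, queryable until the trace ref drops
	n.traceRefs = 0
	n.deleteSelfIfReady()
	fmt.Println(n.inTree, n.inMap) // false false
}
```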
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
new file mode 100644
index 0000000..5ac73ff
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"syscall"
+
+	"golang.org/x/sys/unix"
+)
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+type SocketOptionData struct {
+	Linger      *unix.Linger
+	RecvTimeout *unix.Timeval
+	SendTimeout *unix.Timeval
+	TCPInfo     *unix.TCPInfo
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
+		s.Linger = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
+		s.RecvTimeout = v
+	}
+	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
+		s.SendTimeout = v
+	}
+	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
+		s.TCPInfo = v
+	}
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket any) *SocketOptionData {
+	c, ok := socket.(syscall.Conn)
+	if !ok {
+		return nil
+	}
+	data := &SocketOptionData{}
+	if rawConn, err := c.SyscallConn(); err == nil {
+		rawConn.Control(data.Getsockopt)
+		return data
+	}
+	return nil
+}
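GetSocketOption works by type-asserting the connection to syscall.Conn and running Getsockopt inside rawConn.Control, which guarantees the file descriptor stays valid for the duration of the callback. A Linux-only standalone example of the same Control pattern, using a stdlib getsockopt call rather than the x/sys helpers above:

```go
package main

import (
	"fmt"
	"net"
	"syscall"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// *net.TCPConn implements syscall.Conn, just like the sockets
	// channelz inspects.
	raw, err := conn.(syscall.Conn).SyscallConn()
	if err != nil {
		panic(err)
	}
	var nodelay int
	if err := raw.Control(func(fd uintptr) {
		// The fd is only guaranteed valid inside this callback.
		nodelay, _ = syscall.GetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_NODELAY)
	}); err != nil {
		panic(err)
	}
	fmt.Println("TCP_NODELAY:", nodelay)
}
```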
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
new file mode 100644
index 0000000..0e6e18e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -0,0 +1,47 @@
+//go:build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(uintptr) {
+	once.Do(func() {
+		logger.Warning("Channelz: socket options are not supported on non-linux environments")
+	})
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(any) *SocketOptionData {
+	return nil
+}
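The non-linux stub uses sync.Once so the "not supported" warning is emitted a single time no matter how many sockets get queried. The same warn-once idiom in isolation (names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

var warnOnce sync.Once

// getSocketOptions is a stub in the spirit of the fallback above: it
// warns exactly once, however often it is called.
func getSocketOptions() {
	warnOnce.Do(func() {
		fmt.Println("socket options are not supported on this platform")
	})
}

func main() {
	for i := 0; i < 3; i++ {
		getSocketOptions() // the warning prints only on the first call
	}
}
```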
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
new file mode 100644
index 0000000..3b7ba59
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -0,0 +1,213 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+const (
+	defaultMaxTraceEntry int32 = 30
+)
+
+var maxTraceEntry = defaultMaxTraceEntry
+
+// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e.
+// channel/subchannel).  Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+	atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to default.
+func ResetMaxTraceEntryToDefault() {
+	atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+	i := atomic.LoadInt32(&maxTraceEntry)
+	return int(i)
+}
+
+// traceEvent is an internal representation of a single trace event.
+type traceEvent struct {
+	// Desc is a simple description of the trace event.
+	Desc string
+	// Severity states the severity of this trace event.
+	Severity Severity
+	// Timestamp is the event time.
+	Timestamp time.Time
+	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+	// involved in this event.
+	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+	RefID int64
+	// RefName is the reference name for the entity that gets referenced in the event.
+	RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+	RefType RefChannelType
+}
+
+// TraceEvent is what the caller of AddTraceEvent should provide to describe the
+// event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEvent struct {
+	Desc     string
+	Severity Severity
+	Parent   *TraceEvent
+}
+
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
+type ChannelTrace struct {
+	cm          *channelMap
+	clearCalled bool
+	// The time when the trace was created.
+	CreationTime time.Time
+	// A counter for the number of events recorded in the
+	// trace.
+	EventNum int64
+	mu       sync.Mutex
+	// A slice of traceEvent pointers representing the events recorded for
+	// this channel.
+	Events []*traceEvent
+}
+
+func (c *ChannelTrace) copy() *ChannelTrace {
+	return &ChannelTrace{
+		CreationTime: c.CreationTime,
+		EventNum:     c.EventNum,
+		Events:       append(([]*traceEvent)(nil), c.Events...),
+	}
+}
+
+func (c *ChannelTrace) append(e *traceEvent) {
+	c.mu.Lock()
+	if len(c.Events) == getMaxTraceEntry() {
+		del := c.Events[0]
+		c.Events = c.Events[1:]
+		if del.RefID != 0 {
+			// start recursive cleanup in a goroutine to avoid blocking the call originating from grpc.
+			go func() {
+				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
+				c.cm.mu.Lock()
+				c.cm.decrTraceRefCount(del.RefID)
+				c.cm.mu.Unlock()
+			}()
+		}
+	}
+	e.Timestamp = time.Now()
+	c.Events = append(c.Events, e)
+	c.EventNum++
+	c.mu.Unlock()
+}
+
+func (c *ChannelTrace) clear() {
+	if c.clearCalled {
+		return
+	}
+	c.clearCalled = true
+	c.mu.Lock()
+	for _, e := range c.Events {
+		if e.RefID != 0 {
+			// caller should have already held the c.cm.mu lock.
+			c.cm.decrTraceRefCount(e.RefID)
+		}
+	}
+	c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+	// CtUnknown indicates unknown severity of a trace event.
+	CtUnknown Severity = iota
+	// CtInfo indicates info level severity of a trace event.
+	CtInfo
+	// CtWarning indicates warning level severity of a trace event.
+	CtWarning
+	// CtError indicates error level severity of a trace event.
+	CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+	// RefUnknown indicates an unknown entity type, the zero value for this type.
+	RefUnknown RefChannelType = iota
+	// RefChannel indicates the referenced entity is a Channel.
+	RefChannel
+	// RefSubChannel indicates the referenced entity is a SubChannel.
+	RefSubChannel
+	// RefServer indicates the referenced entity is a Server.
+	RefServer
+	// RefListenSocket indicates the referenced entity is a ListenSocket.
+	RefListenSocket
+	// RefNormalSocket indicates the referenced entity is a NormalSocket.
+	RefNormalSocket
+)
+
+var refChannelTypeToString = map[RefChannelType]string{
+	RefUnknown:      "Unknown",
+	RefChannel:      "Channel",
+	RefSubChannel:   "SubChannel",
+	RefServer:       "Server",
+	RefListenSocket: "ListenSocket",
+	RefNormalSocket: "NormalSocket",
+}
+
+// String returns a string representation of the RefChannelType
+func (r RefChannelType) String() string {
+	return refChannelTypeToString[r]
+}
+
+// AddTraceEvent adds trace related to the entity with specified id, using the
+// provided TraceEventDesc.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
+	// Log only the trace description associated with the bottom-most entity.
+	d := fmt.Sprintf("[%s] %s", e, desc.Desc)
+	switch desc.Severity {
+	case CtUnknown, CtInfo:
+		l.InfoDepth(depth+1, d)
+	case CtWarning:
+		l.WarningDepth(depth+1, d)
+	case CtError:
+		l.ErrorDepth(depth+1, d)
+	}
+
+	if getMaxTraceEntry() == 0 {
+		return
+	}
+	if IsOn() {
+		db.traceEvent(e.id(), desc)
+	}
+}
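ChannelTrace.append is a bounded buffer: once getMaxTraceEntry() events are stored, the oldest event is evicted, and if that event referenced another entity its trace refcount is released asynchronously so the referenced entity can eventually be garbage collected. A minimal sketch of the eviction bookkeeping, with illustrative names:

```go
package main

import "fmt"

// boundedTrace sketches the eviction in ChannelTrace.append above: a
// capped slice that drops its oldest entry when full while counting
// every event that was ever recorded.
type boundedTrace struct {
	max      int
	eventNum int64
	events   []string
}

func (t *boundedTrace) append(e string) {
	if len(t.events) == t.max {
		// The real code also releases the evicted event's reference to
		// any other channelz entity here (decrTraceRefCount).
		t.events = t.events[1:]
	}
	t.events = append(t.events, e)
	t.eventNum++
}

func main() {
	t := &boundedTrace{max: 3}
	for _, e := range []string{"created", "connecting", "ready", "idle"} {
		t.append(e)
	}
	fmt.Println(t.eventNum, t.events) // 4 [connecting ready idle]
}
```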
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
deleted file mode 100644
index 17c2274..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ /dev/null
@@ -1,702 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
-	"net"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
-	// addChild adds a child e, whose channelz id is id to child list
-	addChild(id int64, e entry)
-	// deleteChild deletes a child with channelz id to be id from child list
-	deleteChild(id int64)
-	// triggerDelete tries to delete self from channelz database. However, if child
-	// list is not empty, then deletion from the database is on hold until the last
-	// child is deleted from database.
-	triggerDelete()
-	// deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
-	// list is now empty. If both conditions are met, then delete self from database.
-	deleteSelfIfReady()
-	// getParentID returns parent ID of the entry. 0 value parent ID means no parent.
-	getParentID() int64
-}
-
-// dummyEntry is a fake entry to handle entry not found case.
-type dummyEntry struct {
-	idNotFound int64
-}
-
-func (d *dummyEntry) addChild(id int64, e entry) {
-	// Note: It is possible for a normal program to reach here under race condition.
-	// For example, there could be a race between ClientConn.Close() info being propagated
-	// to addrConn and http2Client. ClientConn.Close() cancel the context and result
-	// in http2Client to error. The error info is then caught by transport monitor
-	// and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
-	// the addrConn will create a new transport. And when registering the new transport in
-	// channelz, its parent addrConn could have already been torn down and deleted
-	// from channelz tracking, and thus reach the code here.
-	grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
-	// It is possible for a normal program to reach here under race condition.
-	// Refer to the example described in addChild().
-	grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
-	grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
-	// code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-func (*dummyEntry) getParentID() int64 {
-	return 0
-}
-
-// ChannelMetric defines the info channelz provides for a specific Channel, which
-// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ChannelMetric struct {
-	// ID is the channelz id of this channel.
-	ID int64
-	// RefName is the human readable reference string of this channel.
-	RefName string
-	// ChannelData contains channel internal metric reported by the channel through
-	// ChannelzMetric().
-	ChannelData *ChannelInternalMetric
-	// NestedChans tracks the nested channel type children of this channel in the format of
-	// a map from nested channel channelz id to corresponding reference string.
-	NestedChans map[int64]string
-	// SubChans tracks the subchannel type children of this channel in the format of a
-	// map from subchannel channelz id to corresponding reference string.
-	SubChans map[int64]string
-	// Sockets tracks the socket type children of this channel in the format of a map
-	// from socket channelz id to corresponding reference string.
-	// Note current grpc implementation doesn't allow channel having sockets directly,
-	// therefore, this is field is unused.
-	Sockets map[int64]string
-	// Trace contains the most recent traced events.
-	Trace *ChannelTrace
-}
-
-// SubChannelMetric defines the info channelz provides for a specific SubChannel,
-// which includes ChannelInternalMetric and channelz-specific data, such as
-// channelz id, child list, etc.
-type SubChannelMetric struct {
-	// ID is the channelz id of this subchannel.
-	ID int64
-	// RefName is the human readable reference string of this subchannel.
-	RefName string
-	// ChannelData contains subchannel internal metric reported by the subchannel
-	// through ChannelzMetric().
-	ChannelData *ChannelInternalMetric
-	// NestedChans tracks the nested channel type children of this subchannel in the format of
-	// a map from nested channel channelz id to corresponding reference string.
-	// Note current grpc implementation doesn't allow subchannel to have nested channels
-	// as children, therefore, this field is unused.
-	NestedChans map[int64]string
-	// SubChans tracks the subchannel type children of this subchannel in the format of a
-	// map from subchannel channelz id to corresponding reference string.
-	// Note current grpc implementation doesn't allow subchannel to have subchannels
-	// as children, therefore, this field is unused.
-	SubChans map[int64]string
-	// Sockets tracks the socket type children of this subchannel in the format of a map
-	// from socket channelz id to corresponding reference string.
-	Sockets map[int64]string
-	// Trace contains the most recent traced events.
-	Trace *ChannelTrace
-}
-
-// ChannelInternalMetric defines the struct that the implementor of Channel interface
-// should return from ChannelzMetric().
-type ChannelInternalMetric struct {
-	// current connectivity state of the channel.
-	State connectivity.State
-	// The target this channel originally tried to connect to.  May be absent
-	Target string
-	// The number of calls started on the channel.
-	CallsStarted int64
-	// The number of calls that have completed with an OK status.
-	CallsSucceeded int64
-	// The number of calls that have a completed with a non-OK status.
-	CallsFailed int64
-	// The last time a call was started on the channel.
-	LastCallStartedTimestamp time.Time
-}
-
-// ChannelTrace stores traced events on a channel/subchannel and related info.
-type ChannelTrace struct {
-	// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
-	EventNum int64
-	// CreationTime is the creation time of the trace.
-	CreationTime time.Time
-	// Events stores the most recent trace events (up to $maxTraceEntry, newer event will overwrite the
-	// oldest one)
-	Events []*TraceEvent
-}
-
-// TraceEvent represent a single trace event
-type TraceEvent struct {
-	// Desc is a simple description of the trace event.
-	Desc string
-	// Severity states the severity of this trace event.
-	Severity Severity
-	// Timestamp is the event time.
-	Timestamp time.Time
-	// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
-	// involved in this event.
-	// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
-	RefID int64
-	// RefName is the reference name for the entity that gets referenced in the event.
-	RefName string
-	// RefType indicates the referenced entity type, i.e Channel or SubChannel.
-	RefType RefChannelType
-}
-
-// Channel is the interface that should be satisfied in order to be tracked by
-// channelz as Channel or SubChannel.
-type Channel interface {
-	ChannelzMetric() *ChannelInternalMetric
-}
-
-type dummyChannel struct{}
-
-func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
-	return &ChannelInternalMetric{}
-}
-
-type channel struct {
-	refName     string
-	c           Channel
-	closeCalled bool
-	nestedChans map[int64]string
-	subChans    map[int64]string
-	id          int64
-	pid         int64
-	cm          *channelMap
-	trace       *channelTrace
-	// traceRefCount is the number of trace events that reference this channel.
-	// Non-zero traceRefCount means the trace of this channel cannot be deleted.
-	traceRefCount int32
-}
-
-func (c *channel) addChild(id int64, e entry) {
-	switch v := e.(type) {
-	case *subChannel:
-		c.subChans[id] = v.refName
-	case *channel:
-		c.nestedChans[id] = v.refName
-	default:
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
-	}
-}
-
-func (c *channel) deleteChild(id int64) {
-	delete(c.subChans, id)
-	delete(c.nestedChans, id)
-	c.deleteSelfIfReady()
-}
-
-func (c *channel) triggerDelete() {
-	c.closeCalled = true
-	c.deleteSelfIfReady()
-}
-
-func (c *channel) getParentID() int64 {
-	return c.pid
-}
-
-// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
-// deleting the channel reference from its parent's child list.
-//
-// In order for a channel to be deleted from the tree, it must meet the criteria that, removal of the
-// corresponding grpc object has been invoked, and the channel does not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from tree.
-func (c *channel) deleteSelfFromTree() (deleted bool) {
-	if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
-		return false
-	}
-	// not top channel
-	if c.pid != 0 {
-		c.cm.findEntry(c.pid).deleteChild(c.id)
-	}
-	return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
-// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
-// channel, and its memory will be garbage collected.
-//
-// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
-// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resource allocated
-// by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from map.
-func (c *channel) deleteSelfFromMap() (delete bool) {
-	if c.getTraceRefCount() != 0 {
-		c.c = &dummyChannel{}
-		return false
-	}
-	return true
-}
-
-// deleteSelfIfReady tries to delete the channel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
-//    parent's child list.
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
-//    will return entry not found error.
-func (c *channel) deleteSelfIfReady() {
-	if !c.deleteSelfFromTree() {
-		return
-	}
-	if !c.deleteSelfFromMap() {
-		return
-	}
-	c.cm.deleteEntry(c.id)
-	c.trace.clear()
-}
-
-func (c *channel) getChannelTrace() *channelTrace {
-	return c.trace
-}
-
-func (c *channel) incrTraceRefCount() {
-	atomic.AddInt32(&c.traceRefCount, 1)
-}
-
-func (c *channel) decrTraceRefCount() {
-	atomic.AddInt32(&c.traceRefCount, -1)
-}
-
-func (c *channel) getTraceRefCount() int {
-	i := atomic.LoadInt32(&c.traceRefCount)
-	return int(i)
-}
-
-func (c *channel) getRefName() string {
-	return c.refName
-}
-
-type subChannel struct {
-	refName       string
-	c             Channel
-	closeCalled   bool
-	sockets       map[int64]string
-	id            int64
-	pid           int64
-	cm            *channelMap
-	trace         *channelTrace
-	traceRefCount int32
-}
-
-func (sc *subChannel) addChild(id int64, e entry) {
-	if v, ok := e.(*normalSocket); ok {
-		sc.sockets[id] = v.refName
-	} else {
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
-	}
-}
-
-func (sc *subChannel) deleteChild(id int64) {
-	delete(sc.sockets, id)
-	sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) triggerDelete() {
-	sc.closeCalled = true
-	sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) getParentID() int64 {
-	return sc.pid
-}
-
-// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
-// means deleting the subchannel reference from its parent's child list.
-//
-// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
-// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from tree.
-func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
-	if !sc.closeCalled || len(sc.sockets) != 0 {
-		return false
-	}
-	sc.cm.findEntry(sc.pid).deleteChild(sc.id)
-	return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
-// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
-// the subchannel, and its memory will be garbage collected.
-//
-// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
-// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
-// the trace of the referenced entity must not be deleted. In order to release the resource allocated
-// by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from map.
-func (sc *subChannel) deleteSelfFromMap() (delete bool) {
-	if sc.getTraceRefCount() != 0 {
-		// free the grpc struct (i.e. addrConn)
-		sc.c = &dummyChannel{}
-		return false
-	}
-	return true
-}
-
-// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
-//    its parent's child list.
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
-//    by id will return entry not found error.
-func (sc *subChannel) deleteSelfIfReady() {
-	if !sc.deleteSelfFromTree() {
-		return
-	}
-	if !sc.deleteSelfFromMap() {
-		return
-	}
-	sc.cm.deleteEntry(sc.id)
-	sc.trace.clear()
-}
-
-func (sc *subChannel) getChannelTrace() *channelTrace {
-	return sc.trace
-}
-
-func (sc *subChannel) incrTraceRefCount() {
-	atomic.AddInt32(&sc.traceRefCount, 1)
-}
-
-func (sc *subChannel) decrTraceRefCount() {
-	atomic.AddInt32(&sc.traceRefCount, -1)
-}
-
-func (sc *subChannel) getTraceRefCount() int {
-	i := atomic.LoadInt32(&sc.traceRefCount)
-	return int(i)
-}
-
-func (sc *subChannel) getRefName() string {
-	return sc.refName
-}
-
-// SocketMetric defines the info channelz provides for a specific Socket, which
-// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
-type SocketMetric struct {
-	// ID is the channelz id of this socket.
-	ID int64
-	// RefName is the human readable reference string of this socket.
-	RefName string
-	// SocketData contains socket internal metric reported by the socket through
-	// ChannelzMetric().
-	SocketData *SocketInternalMetric
-}
-
-// SocketInternalMetric defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketInternalMetric struct {
-	// The number of streams that have been started.
-	StreamsStarted int64
-	// The number of streams that have ended successfully:
-	// On client side, receiving frame with eos bit set.
-	// On server side, sending frame with eos bit set.
-	StreamsSucceeded int64
-	// The number of streams that have ended unsuccessfully:
-	// On client side, termination without receiving frame with eos bit set.
-	// On server side, termination without sending frame with eos bit set.
-	StreamsFailed int64
-	// The number of messages successfully sent on this socket.
-	MessagesSent     int64
-	MessagesReceived int64
-	// The number of keep alives sent.  This is typically implemented with HTTP/2
-	// ping messages.
-	KeepAlivesSent int64
-	// The last time a stream was created by this endpoint.  Usually unset for
-	// servers.
-	LastLocalStreamCreatedTimestamp time.Time
-	// The last time a stream was created by the remote endpoint.  Usually unset
-	// for clients.
-	LastRemoteStreamCreatedTimestamp time.Time
-	// The last time a message was sent by this endpoint.
-	LastMessageSentTimestamp time.Time
-	// The last time a message was received by this endpoint.
-	LastMessageReceivedTimestamp time.Time
-	// The amount of window, granted to the local endpoint by the remote endpoint.
-	// This may be slightly out of date due to network latency.  This does NOT
-	// include stream level or TCP level flow control info.
-	LocalFlowControlWindow int64
-	// The amount of window, granted to the remote endpoint by the local endpoint.
-	// This may be slightly out of date due to network latency.  This does NOT
-	// include stream level or TCP level flow control info.
-	RemoteFlowControlWindow int64
-	// The locally bound address.
-	LocalAddr net.Addr
-	// The remote bound address.  May be absent.
-	RemoteAddr net.Addr
-	// Optional, represents the name of the remote endpoint, if different than
-	// the original target name.
-	RemoteName    string
-	SocketOptions *SocketOptionData
-	Security      credentials.ChannelzSecurityValue
-}
-
-// Socket is the interface that should be satisfied in order to be tracked by
-// channelz as Socket.
-type Socket interface {
-	ChannelzMetric() *SocketInternalMetric
-}
-
-type listenSocket struct {
-	refName string
-	s       Socket
-	id      int64
-	pid     int64
-	cm      *channelMap
-}
-
-func (ls *listenSocket) addChild(id int64, e entry) {
-	grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *listenSocket) deleteChild(id int64) {
-	grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *listenSocket) triggerDelete() {
-	ls.cm.deleteEntry(ls.id)
-	ls.cm.findEntry(ls.pid).deleteChild(ls.id)
-}
-
-func (ls *listenSocket) deleteSelfIfReady() {
-	grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-func (ls *listenSocket) getParentID() int64 {
-	return ls.pid
-}
-
-type normalSocket struct {
-	refName string
-	s       Socket
-	id      int64
-	pid     int64
-	cm      *channelMap
-}
-
-func (ns *normalSocket) addChild(id int64, e entry) {
-	grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
-}
-
-func (ns *normalSocket) deleteChild(id int64) {
-	grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
-}
-
-func (ns *normalSocket) triggerDelete() {
-	ns.cm.deleteEntry(ns.id)
-	ns.cm.findEntry(ns.pid).deleteChild(ns.id)
-}
-
-func (ns *normalSocket) deleteSelfIfReady() {
-	grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
-}
-
-func (ns *normalSocket) getParentID() int64 {
-	return ns.pid
-}
-
-// ServerMetric defines the info channelz provides for a specific Server, which
-// includes ServerInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ServerMetric struct {
-	// ID is the channelz id of this server.
-	ID int64
-	// RefName is the human readable reference string of this server.
-	RefName string
-	// ServerData contains server internal metric reported by the server through
-	// ChannelzMetric().
-	ServerData *ServerInternalMetric
-	// ListenSockets tracks the listener socket type children of this server in the
-	// format of a map from socket channelz id to corresponding reference string.
-	ListenSockets map[int64]string
-}
-
-// ServerInternalMetric defines the struct that the implementor of Server interface
-// should return from ChannelzMetric().
-type ServerInternalMetric struct {
-	// The number of incoming calls started on the server.
-	CallsStarted int64
-	// The number of incoming calls that have completed with an OK status.
-	CallsSucceeded int64
-	// The number of incoming calls that have a completed with a non-OK status.
-	CallsFailed int64
-	// The last time a call was started on the server.
-	LastCallStartedTimestamp time.Time
-}
-
-// Server is the interface to be satisfied in order to be tracked by channelz as
-// Server.
-type Server interface {
-	ChannelzMetric() *ServerInternalMetric
-}
-
-type server struct {
-	refName       string
-	s             Server
-	closeCalled   bool
-	sockets       map[int64]string
-	listenSockets map[int64]string
-	id            int64
-	cm            *channelMap
-}
-
-func (s *server) addChild(id int64, e entry) {
-	switch v := e.(type) {
-	case *normalSocket:
-		s.sockets[id] = v.refName
-	case *listenSocket:
-		s.listenSockets[id] = v.refName
-	default:
-		grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
-	}
-}
-
-func (s *server) deleteChild(id int64) {
-	delete(s.sockets, id)
-	delete(s.listenSockets, id)
-	s.deleteSelfIfReady()
-}
-
-func (s *server) triggerDelete() {
-	s.closeCalled = true
-	s.deleteSelfIfReady()
-}
-
-func (s *server) deleteSelfIfReady() {
-	if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
-		return
-	}
-	s.cm.deleteEntry(s.id)
-}
-
-func (s *server) getParentID() int64 {
-	return 0
-}
-
-type tracedChannel interface {
-	getChannelTrace() *channelTrace
-	incrTraceRefCount()
-	decrTraceRefCount()
-	getRefName() string
-}
-
-type channelTrace struct {
-	cm          *channelMap
-	createdTime time.Time
-	eventCount  int64
-	mu          sync.Mutex
-	events      []*TraceEvent
-}
-
-func (c *channelTrace) append(e *TraceEvent) {
-	c.mu.Lock()
-	if len(c.events) == getMaxTraceEntry() {
-		del := c.events[0]
-		c.events = c.events[1:]
-		if del.RefID != 0 {
-			// start recursive cleanup in a goroutine to not block the call originated from grpc.
-			go func() {
-				// need to acquire c.cm.mu lock to call the unlocked attemptCleanup func.
-				c.cm.mu.Lock()
-				c.cm.decrTraceRefCount(del.RefID)
-				c.cm.mu.Unlock()
-			}()
-		}
-	}
-	e.Timestamp = time.Now()
-	c.events = append(c.events, e)
-	c.eventCount++
-	c.mu.Unlock()
-}
-
-func (c *channelTrace) clear() {
-	c.mu.Lock()
-	for _, e := range c.events {
-		if e.RefID != 0 {
-			// caller should have already held the c.cm.mu lock.
-			c.cm.decrTraceRefCount(e.RefID)
-		}
-	}
-	c.mu.Unlock()
-}
-
-// Severity is the severity level of a trace event.
-// The canonical enumeration of all valid values is here:
-// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
-type Severity int
-
-const (
-	// CtUNKNOWN indicates unknown severity of a trace event.
-	CtUNKNOWN Severity = iota
-	// CtINFO indicates info level severity of a trace event.
-	CtINFO
-	// CtWarning indicates warning level severity of a trace event.
-	CtWarning
-	// CtError indicates error level severity of a trace event.
-	CtError
-)
-
-// RefChannelType is the type of the entity being referenced in a trace event.
-type RefChannelType int
-
-const (
-	// RefChannel indicates the referenced entity is a Channel.
-	RefChannel RefChannelType = iota
-	// RefSubChannel indicates the referenced entity is a SubChannel.
-	RefSubChannel
-)
-
-func (c *channelTrace) dumpData() *ChannelTrace {
-	c.mu.Lock()
-	ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
-	ct.Events = c.events[:len(c.events)]
-	c.mu.Unlock()
-	return ct
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
deleted file mode 100644
index 692dd61..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// +build !appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
-	"syscall"
-
-	"golang.org/x/sys/unix"
-)
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-type SocketOptionData struct {
-	Linger      *unix.Linger
-	RecvTimeout *unix.Timeval
-	SendTimeout *unix.Timeval
-	TCPInfo     *unix.TCPInfo
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
-	if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
-		s.Linger = v
-	}
-	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
-		s.RecvTimeout = v
-	}
-	if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
-		s.SendTimeout = v
-	}
-	if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
-		s.TCPInfo = v
-	}
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
deleted file mode 100644
index 79edbef..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build !linux appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
-	"sync"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-var once sync.Once
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-// Windows OS doesn't support Socket Option
-type SocketOptionData struct {
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-// Windows OS doesn't support Socket Option
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
-	once.Do(func() {
-		grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
-	})
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
deleted file mode 100644
index fdf409d..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// +build linux,!appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
-	"syscall"
-)
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket interface{}) *SocketOptionData {
-	c, ok := socket.(syscall.Conn)
-	if !ok {
-		return nil
-	}
-	data := &SocketOptionData{}
-	if rawConn, err := c.SyscallConn(); err == nil {
-		rawConn.Control(data.Getsockopt)
-		return data
-	}
-	return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
deleted file mode 100644
index 8864a08..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !linux appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c interface{}) *SocketOptionData {
-	return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000..48b22d9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+	"context"
+)
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) any {
+	return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext creates a context with chi.
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
+	return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
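
The unexported struct key above is the standard Go pattern for collision-proof context values. A minimal standalone sketch of the same pattern (the names below are illustrative, not part of this change):

package main

import (
	"context"
	"fmt"
)

// handshakeKey is an unexported struct type; a value of this type can only
// be created inside this package, so no other package's context key can
// collide with it.
type handshakeKey struct{}

func withInfo(ctx context.Context, info any) context.Context {
	return context.WithValue(ctx, handshakeKey{}, info)
}

func infoFrom(ctx context.Context) any {
	return ctx.Value(handshakeKey{})
}

func main() {
	ctx := withInfo(context.Background(), "handshake info")
	fmt.Println(infoFrom(ctx)) // handshake info
}
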
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 0000000..25ade62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"net/url"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+	if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+		return nil
+	}
+	return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+	if cert == nil || cert.URIs == nil {
+		return nil
+	}
+	var spiffeID *url.URL
+	for _, uri := range cert.URIs {
+		if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+			continue
+		}
+		// From this point, we assume the uri is intended for a SPIFFE ID.
+		if len(uri.String()) > 2048 {
+			logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+			return nil
+		}
+		if len(uri.Host) == 0 || len(uri.Path) == 0 {
+			logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+			return nil
+		}
+		if len(uri.Host) > 255 {
+			logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+			return nil
+		}
+		// A valid SPIFFE certificate can only have exactly one URI SAN field.
+		if len(cert.URIs) > 1 {
+			logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+			return nil
+		}
+		spiffeID = uri
+	}
+	return spiffeID
+}
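
As a rough standalone illustration, the per-URI rules above translate to the following check; isPlausibleSPIFFEID is a hypothetical helper and deliberately omits the multiple-URI-SAN rule, which requires the whole certificate:

package main

import (
	"fmt"
	"net/url"
)

// isPlausibleSPIFFEID mirrors the per-URI rules above: "spiffe" scheme, no
// opaque part or user info, non-empty trust domain (host) and workload ID
// (path), total length <= 2048 bytes and domain length <= 255 characters.
func isPlausibleSPIFFEID(raw string) bool {
	u, err := url.Parse(raw)
	if err != nil {
		return false
	}
	if u.Scheme != "spiffe" || u.Opaque != "" || (u.User != nil && u.User.Username() != "") {
		return false
	}
	if len(u.String()) > 2048 || len(u.Host) == 0 || len(u.Path) == 0 || len(u.Host) > 255 {
		return false
	}
	return true
}

func main() {
	fmt.Println(isPlausibleSPIFFEID("spiffe://example.org/workload/db")) // true
	fmt.Println(isPlausibleSPIFFEID("https://example.org/workload/db"))  // false: wrong scheme
	fmt.Println(isPlausibleSPIFFEID("spiffe://example.org"))             // false: empty workload ID
}
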
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
new file mode 100644
index 0000000..2919632
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"net"
+	"syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps a reference to rawConn to support syscall.Conn for
+// channelz. SyscallConn() (the method in interface syscall.Conn) is
+// explicitly implemented on this type.
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
+// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
+// help here).
+type syscallConn struct {
+	net.Conn
+	// sysConn is a type alias of syscall.Conn. It's necessary because the name
+	// `Conn` collides with `net.Conn`.
+	sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+	sysConn, ok := rawConn.(syscall.Conn)
+	if !ok {
+		return newConn
+	}
+	return &syscallConn{
+		Conn:    newConn,
+		sysConn: sysConn,
+	}
+}
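
A standalone sketch of the same wrapping trick; the type alias is what makes the double embedding legal, since both interfaces would otherwise contribute a field named Conn:

package main

import (
	"fmt"
	"net"
	"syscall"
)

// sysConn aliases syscall.Conn so it can be embedded next to net.Conn
// without a field-name clash (both would otherwise be named "Conn").
type sysConn = syscall.Conn

type wrappedConn struct {
	net.Conn // reads and writes go to the wrapping conn
	sysConn  // SyscallConn() is served by the raw conn
}

// wrap mirrors WrapSyscallConn above.
func wrap(rawConn, newConn net.Conn) net.Conn {
	sc, ok := rawConn.(syscall.Conn)
	if !ok {
		return newConn
	}
	return &wrappedConn{Conn: newConn, sysConn: sc}
}

func main() {
	// net.Pipe conns do not implement syscall.Conn, so wrap simply
	// returns newConn; a *net.TCPConn would take the wrapping path.
	c1, c2 := net.Pipe()
	defer c1.Close()
	defer c2.Close()
	_, ok := wrap(c1, c1).(syscall.Conn)
	fmt.Println(ok) // false for a pipe
}
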
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 0000000..f792fd2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+	"crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends h2 to next protos.
+func AppendH2ToNextProtos(ps []string) []string {
+	for _, p := range ps {
+		if p == alpnProtoStrH2 {
+			return ps
+		}
+	}
+	ret := make([]string, 0, len(ps)+1)
+	ret = append(ret, ps...)
+	return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+	if cfg == nil {
+		return &tls.Config{}
+	}
+
+	return cfg.Clone()
+}
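
A standalone copy of AppendH2ToNextProtos illustrating its copy-on-append behavior (the demo values are illustrative):

package main

import "fmt"

const alpnProtoStrH2 = "h2"

// appendH2 mirrors AppendH2ToNextProtos above: it returns ps unchanged if
// "h2" is already present, otherwise it returns a fresh slice with "h2"
// appended, leaving the caller's slice untouched.
func appendH2(ps []string) []string {
	for _, p := range ps {
		if p == alpnProtoStrH2 {
			return ps
		}
	}
	ret := make([]string, 0, len(ps)+1)
	ret = append(ret, ps...)
	return append(ret, alpnProtoStrH2)
}

func main() {
	fmt.Println(appendH2(nil))                  // [h2]
	fmt.Println(appendH2([]string{"http/1.1"})) // [http/1.1 h2]
	fmt.Println(appendH2([]string{"h2"}))       // [h2]
}
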
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 3ee8740..7e060f5 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -21,15 +21,81 @@
 
 import (
 	"os"
+	"strconv"
 	"strings"
 )
 
-const (
-	prefix   = "GRPC_GO_"
-	retryStr = prefix + "RETRY"
+var (
+	// EnableTXTServiceConfig is set if the DNS resolver should perform TXT
+	// lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not
+	// "false").
+	EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true)
+
+	// TXTErrIgnore is set if TXT errors should be ignored
+	// ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+	TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
+
+	// RingHashCap indicates the maximum ring size which defaults to 4096
+	// entries but may be overridden by setting the environment variable
+	// "GRPC_RING_HASH_CAP".  This does not override the default bounds
+	// checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
+	RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
+
+	// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
+	// handshakes that can be performed.
+	ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+
+	// EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+	// should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+	// option is present for backward compatibility. This option may be overridden
+	// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+	// or "false".
+	EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+
+	// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+	// instead of the existing pickfirst implementation. This can be disabled by
+	// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+	// to "false".
+	NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
+
+	// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
+	// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
+	// setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
+	// implementation of A76 is stable, we will flip the default value to false
+	// in a subsequent release. A final release will remove this environment
+	// variable, enabling the new behavior unconditionally.
+	XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true)
+
+	// RingHashSetRequestHashKey is set if the ring hash balancer can get the
+	// request hash header by setting the "requestHashHeader" field, according
+	// to gRFC A76. It can be enabled by setting the environment variable
+	// "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true".
+	RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false)
+
+	// ALTSHandshakerKeepaliveParams is set if we should add the
+	// KeepaliveParams when dialing the ALTS handshaker service.
+	ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
 )
 
-var (
-	// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
-	Retry = strings.EqualFold(os.Getenv(retryStr), "on")
-)
+func boolFromEnv(envVar string, def bool) bool {
+	if def {
+		// The default is true; return true unless the variable is "false".
+		return !strings.EqualFold(os.Getenv(envVar), "false")
+	}
+	// The default is false; return false unless the variable is "true".
+	return strings.EqualFold(os.Getenv(envVar), "true")
+}
+
+func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
+	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
+	if err != nil {
+		return def
+	}
+	if v < min {
+		return min
+	}
+	if v > max {
+		return max
+	}
+	return v
+}
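
The two helpers are unexported, so as a rough standalone illustration of their semantics (bodies copied from above, env values illustrative):

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// boolFromEnv: with a true default, anything except an explicit "false"
// keeps the flag on; with a false default, only an explicit "true" turns
// it on. Comparison is case-insensitive.
func boolFromEnv(envVar string, def bool) bool {
	if def {
		return !strings.EqualFold(os.Getenv(envVar), "false")
	}
	return strings.EqualFold(os.Getenv(envVar), "true")
}

// uint64FromEnv: unparsable values fall back to the default; parsed values
// are clamped to [min, max].
func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
	v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
	if err != nil {
		return def
	}
	if v < min {
		return min
	}
	if v > max {
		return max
	}
	return v
}

func main() {
	os.Setenv("GRPC_RING_HASH_CAP", "99999999")
	fmt.Println(uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)) // 8388608 (clamped to max)
	os.Setenv("GRPC_ENFORCE_ALPN_ENABLED", "FALSE")
	fmt.Println(boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)) // false (case-insensitive)
}
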
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
new file mode 100644
index 0000000..dd314cf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import "os"
+
+const (
+	envObservabilityConfig     = "GRPC_GCP_OBSERVABILITY_CONFIG"
+	envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
+)
+
+var (
+	// ObservabilityConfig is the json configuration for the gcp/observability
+	// package specified directly in the envObservabilityConfig env var.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ObservabilityConfig = os.Getenv(envObservabilityConfig)
+	// ObservabilityConfigFile is the json configuration for the
+	// gcp/observability specified in a file with the location specified in
+	// envObservabilityConfigFile env var.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
+)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 0000000..b1f883b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+	"os"
+)
+
+const (
+	// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+	// Do not read this env variable directly; its value is read once and kept
+	// in the variable XDSBootstrapFileName.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+	// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+	// content. Do not read this env variable directly; its value is read once
+	// and kept in the variable XDSBootstrapFileContent.
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+)
+
+var (
+	// XDSBootstrapFileName holds the name of the file which contains xDS
+	// bootstrap configuration. Users can specify the location of the bootstrap
+	// file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+	// XDSBootstrapFileContent holds the content of the xDS bootstrap
+	// configuration. Users can specify the bootstrap config by setting the
+	// environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+	//
+	// When both bootstrap FileName and FileContent are set, FileName is used.
+	XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+
+	// C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+	C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+
+	// XDSDualstackEndpointsEnabled is true if gRPC should read the
+	// "additional addresses" in the xDS endpoint resource.
+	XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true)
+
+	// XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use
+	// the system's default root certificates for TLS certificate validation.
+	// For more details, see:
+	// https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
+	XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
+
+	// XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of
+	// trust.  For more details, see:
+	// https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
+	XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
+
+	// XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
+	// configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
+	// For more details, see:
+	// https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
+	XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
+)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
new file mode 100644
index 0000000..7617be2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+var (
+	// WithBufferPool is implemented by the grpc package and returns a dial
+	// option to configure a shared buffer pool for a grpc.ClientConn.
+	WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+
+	// BufferPool is implemented by the grpc package and returns a server
+	// option to configure a shared buffer pool for a grpc.Server.
+	BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
new file mode 100644
index 0000000..092ad18
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
+package grpclog
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods on a nil *PrefixLogger log without any prefix.
+type PrefixLogger struct {
+	logger grpclog.DepthLoggerV2
+	prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...any) {
+	if pl != nil {
+		// Handle nil, so the tests can pass in a nil logger.
+		format = pl.prefix + format
+		pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...any) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...any) {
+	if pl != nil {
+		format = pl.prefix + format
+		pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+		return
+	}
+	grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (pl *PrefixLogger) V(l int) bool {
+	if pl != nil {
+		return pl.logger.V(l)
+	}
+	return true
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
+	return &PrefixLogger{logger: logger, prefix: prefix}
+}
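
A hypothetical in-tree usage sketch (the package is internal to gRPC, so only gRPC's own packages can import it; the somepkg name is illustrative):

package somepkg // illustrative package name

import (
	"google.golang.org/grpc/grpclog"
	internalgrpclog "google.golang.org/grpc/internal/grpclog"
)

func demo() {
	// Every line gets the prefix, which callers typically use to tag a
	// channel or component.
	logger := internalgrpclog.NewPrefixLogger(grpclog.Component("demo"), "[channel #7] ")
	logger.Infof("state changed to %q", "READY")

	// A nil *PrefixLogger is safe: it logs via the global logger with no
	// prefix, which the tests rely on.
	var nilLogger *internalgrpclog.PrefixLogger
	nilLogger.Infof("no prefix here")
}
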
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index 200b115..0000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
-	"math/rand"
-	"sync"
-	"time"
-)
-
-var (
-	r  = rand.New(rand.NewSource(time.Now().UnixNano()))
-	mu sync.Mutex
-)
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
-	mu.Lock()
-	res := r.Int63n(n)
-	mu.Unlock()
-	return res
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
-	mu.Lock()
-	res := r.Intn(n)
-	mu.Unlock()
-	return res
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
-	mu.Lock()
-	res := r.Float64()
-	mu.Unlock()
-	return res
-}
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
new file mode 100644
index 0000000..9b6d8a1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+
+	"google.golang.org/grpc/internal/buffer"
+)
+
+// CallbackSerializer provides a mechanism to schedule callbacks in a
+// synchronized manner. It provides a FIFO guarantee on the order of execution
+// of scheduled callbacks. New callbacks can be scheduled by invoking the
+// Schedule() method.
+//
+// This type is safe for concurrent access.
+type CallbackSerializer struct {
+	// done is closed once the serializer is shut down completely, i.e all
+	// scheduled callbacks are executed and the serializer has deallocated all
+	// its resources.
+	done chan struct{}
+
+	callbacks *buffer.Unbounded
+}
+
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
+// context will be passed to the scheduled callbacks. Users should cancel the
+// provided context to shut down the CallbackSerializer. It is guaranteed that no
+// callbacks will be added once this context is canceled, and any pending un-run
+// callbacks will be executed before the serializer is shut down.
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
+	cs := &CallbackSerializer{
+		done:      make(chan struct{}),
+		callbacks: buffer.NewUnbounded(),
+	}
+	go cs.run(ctx)
+	return cs
+}
+
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+	cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+	if cs.callbacks.Put(f) != nil {
+		onFailure()
+	}
+}
+
+func (cs *CallbackSerializer) run(ctx context.Context) {
+	defer close(cs.done)
+
+	// Close the buffer when the context is canceled
+	// to prevent new callbacks from being added.
+	context.AfterFunc(ctx, cs.callbacks.Close)
+
+	// Run all callbacks.
+	for cb := range cs.callbacks.Get() {
+		cs.callbacks.Load()
+		cb.(func(context.Context))(ctx)
+	}
+}
+
+// Done returns a channel that is closed after the context passed to
+// NewCallbackSerializer is canceled and all callbacks have been executed.
+func (cs *CallbackSerializer) Done() <-chan struct{} {
+	return cs.done
+}
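
A hypothetical in-tree usage sketch (internal package; somepkg is an illustrative name) showing the FIFO guarantee and the shutdown behavior:

package somepkg // illustrative package name

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo() {
	ctx, cancel := context.WithCancel(context.Background())
	cs := grpcsync.NewCallbackSerializer(ctx)
	for i := 0; i < 3; i++ {
		i := i // capture per-iteration value
		cs.TrySchedule(func(context.Context) { fmt.Println("callback", i) })
	}
	// ScheduleOr runs onFailure inline if the serializer has been shut down.
	cs.ScheduleOr(func(context.Context) { fmt.Println("ran") }, func() { fmt.Println("rejected") })
	cancel()    // stop accepting new callbacks; pending ones still run
	<-cs.Done() // callbacks ran one at a time, in submission order
}
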
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
index fbe697c..d788c24 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -21,28 +21,25 @@
 package grpcsync
 
 import (
-	"sync"
 	"sync/atomic"
 )
 
 // Event represents a one-time event that may occur in the future.
 type Event struct {
-	fired int32
+	fired atomic.Bool
 	c     chan struct{}
-	o     sync.Once
 }
 
 // Fire causes e to complete.  It is safe to call multiple times, and
 // concurrently.  It returns true iff this call to Fire caused the signaling
-// channel returned by Done to close.
+// channel returned by Done to close. If Fire returns false, it is possible
+// the Done channel has not been closed yet.
 func (e *Event) Fire() bool {
-	ret := false
-	e.o.Do(func() {
-		atomic.StoreInt32(&e.fired, 1)
+	if e.fired.CompareAndSwap(false, true) {
 		close(e.c)
-		ret = true
-	})
-	return ret
+		return true
+	}
+	return false
 }
 
 // Done returns a channel that will be closed when Fire is called.
@@ -52,7 +49,7 @@
 
 // HasFired returns true if Fire has been called.
 func (e *Event) HasFired() bool {
-	return atomic.LoadInt32(&e.fired) == 1
+	return e.fired.Load()
 }
 
 // NewEvent returns a new, ready-to-use Event.
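
A hypothetical in-tree sketch of the one-shot semantics: exactly one Fire call returns true, and Done unblocks every waiter (somepkg is an illustrative name):

package somepkg // illustrative package name

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

func demo() {
	e := grpcsync.NewEvent()
	waiter := make(chan struct{})
	go func() {
		<-e.Done() // blocks until some goroutine fires the event
		fmt.Println("observed fire; HasFired:", e.HasFired())
		close(waiter)
	}()
	fmt.Println(e.Fire()) // true: this call closed the Done channel
	fmt.Println(e.Fire()) // false: the event had already fired
	<-waiter
}
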
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
new file mode 100644
index 0000000..6d8c2f5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+	"context"
+	"sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+	// OnMessage is invoked when a new message is published. Implementations
+	// must not block in this method.
+	OnMessage(msg any)
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, but any pending
+// published messages will be delivered to the subscribers.  Done may be used
+// to determine when all published messages have been delivered.
+type PubSub struct {
+	cs *CallbackSerializer
+
+	// Access to the fields below is guarded by this mutex.
+	mu          sync.Mutex
+	msg         any
+	subscribers map[Subscriber]bool
+}
+
+// NewPubSub returns a new PubSub instance.  Users should cancel the
+// provided context to shut down the PubSub.
+func NewPubSub(ctx context.Context) *PubSub {
+	return &PubSub{
+		cs:          NewCallbackSerializer(ctx),
+		subscribers: map[Subscriber]bool{},
+	}
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	ps.subscribers[sub] = true
+
+	if ps.msg != nil {
+		msg := ps.msg
+		ps.cs.TrySchedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[sub] {
+				return
+			}
+			sub.OnMessage(msg)
+		})
+	}
+
+	return func() {
+		ps.mu.Lock()
+		defer ps.mu.Unlock()
+		delete(ps.subscribers, sub)
+	}
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg any) {
+	ps.mu.Lock()
+	defer ps.mu.Unlock()
+
+	ps.msg = msg
+	for sub := range ps.subscribers {
+		s := sub
+		ps.cs.TrySchedule(func(context.Context) {
+			ps.mu.Lock()
+			defer ps.mu.Unlock()
+			if !ps.subscribers[s] {
+				return
+			}
+			s.OnMessage(msg)
+		})
+	}
+}
+
+// Done returns a channel that is closed after the context passed to NewPubSub
+// is canceled and all updates have been sent to subscribers.
+func (ps *PubSub) Done() <-chan struct{} {
+	return ps.cs.Done()
+}
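
A hypothetical in-tree usage sketch (somepkg is an illustrative name); note that a new subscriber is asynchronously delivered the most recently published message before any later ones:

package somepkg // illustrative package name

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

type printSubscriber struct{ name string }

// OnMessage must not block; printing is fine for a sketch.
func (p printSubscriber) OnMessage(msg any) { fmt.Println(p.name, "got:", msg) }

func demo() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	ps.Publish("READY")
	// The subscriber is scheduled to receive the retained "READY" first,
	// then any subsequently published messages.
	unsub := ps.Subscribe(printSubscriber{name: "sub"})
	ps.Publish("IDLE")

	unsub() // stop receiving further messages
	cancel()
	<-ps.Done() // pending deliveries have drained
}
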
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
new file mode 100644
index 0000000..e8d8669
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strings"
+)
+
+// RegisteredCompressorNames holds names of the registered compressors.
+var RegisteredCompressorNames []string
+
+// IsCompressorNameRegistered returns true when name is available in registry.
+func IsCompressorNameRegistered(name string) bool {
+	for _, compressor := range RegisteredCompressorNames {
+		if compressor == name {
+			return true
+		}
+	}
+	return false
+}
+
+// RegisteredCompressors returns a string of registered compressor names
+// separated by commas.
+func RegisteredCompressors() string {
+	return strings.Join(RegisteredCompressorNames, ",")
+}
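
A hypothetical in-tree sketch (the registry is normally populated as compressors are registered via the encoding package; the names below are illustrative):

package somepkg // illustrative package name

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func demo() {
	// Illustrative registry contents.
	grpcutil.RegisteredCompressorNames = []string{"gzip", "snappy"}
	fmt.Println(grpcutil.IsCompressorNameRegistered("gzip")) // true
	fmt.Println(grpcutil.IsCompressorNameRegistered("zstd")) // false
	fmt.Println(grpcutil.RegisteredCompressors())            // gzip,snappy
}
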
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 0000000..b25b0ba
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"strconv"
+	"time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but has less chance to overflow.
+func div(d, r time.Duration) int64 {
+	if d%r > 0 {
+		return int64(d/r + 1)
+	}
+	return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+	// TODO: This is simplistic and not bandwidth efficient. Improve it.
+	if t <= 0 {
+		return "0n"
+	}
+	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "n"
+	}
+	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "u"
+	}
+	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "m"
+	}
+	if d := div(t, time.Second); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "S"
+	}
+	if d := div(t, time.Minute); d <= maxTimeoutValue {
+		return strconv.FormatInt(d, 10) + "M"
+	}
+	// Note that maxTimeoutValue * time.Hour > MaxInt64.
+	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
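
A hypothetical in-tree sketch of the unit selection: the encoder picks the finest unit whose value still fits in the eight-digit limit (somepkg is an illustrative name):

package somepkg // illustrative package name

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

func demo() {
	fmt.Println(grpcutil.EncodeDuration(-time.Second))           // "0n" (non-positive clamps to zero)
	fmt.Println(grpcutil.EncodeDuration(3 * time.Millisecond))   // "3000000n" (fits in 8 digits of ns)
	fmt.Println(grpcutil.EncodeDuration(200 * time.Millisecond)) // "200000u" (ns would need 9 digits)
	fmt.Println(grpcutil.EncodeDuration(time.Hour))              // "3600000m"
}
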
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
new file mode 100644
index 0000000..e2f948e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -0,0 +1,20 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 0000000..6f22bd8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"context"
+
+	"google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+	return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists.  The
+// returned MD should not be modified. Writing to it may cause races.
+// Modifications should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+	md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+	return
+}
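
A hypothetical in-tree round-trip sketch of the two helpers (somepkg and the metadata key are illustrative):

package somepkg // illustrative package name

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
	"google.golang.org/grpc/metadata"
)

func demo() {
	md := metadata.Pairs("x-demo-key", "value")
	ctx := grpcutil.WithExtraMetadata(context.Background(), md)
	got, ok := grpcutil.ExtraMetadata(ctx)
	fmt.Println(ok, got.Get("x-demo-key")) // true [value]
}
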
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 0000000..683d195
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+	"errors"
+	"strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+func ParseMethod(methodName string) (service, method string, _ error) {
+	if !strings.HasPrefix(methodName, "/") {
+		return "", "", errors.New("invalid method name: should start with /")
+	}
+	methodName = methodName[1:]
+
+	pos := strings.LastIndex(methodName, "/")
+	if pos < 0 {
+		return "", "", errors.New("invalid method name: suffix /method is missing")
+	}
+	return methodName[:pos], methodName[pos+1:], nil
+}
+
+// baseContentType is the base content-type for gRPC.  This is a valid
+// content-type on its own, but can also include a content-subtype such as
+// "proto" as a suffix after "+" or ";".  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+// for more details.
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type.  The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+	if contentType == baseContentType {
+		return "", true
+	}
+	if !strings.HasPrefix(contentType, baseContentType) {
+		return "", false
+	}
+	// guaranteed since != baseContentType and has baseContentType prefix
+	switch contentType[len(baseContentType)] {
+	case '+', ';':
+		// this will return true for "application/grpc+" or "application/grpc;"
+		// which the previous validContentType function tested to be valid, so we
+		// just say that no content-subtype is specified in this case
+		return contentType[len(baseContentType)+1:], true
+	default:
+		return "", false
+	}
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase
+func ContentType(contentSubtype string) string {
+	if contentSubtype == "" {
+		return baseContentType
+	}
+	return baseContentType + "+" + contentSubtype
+}
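
A hypothetical in-tree sketch of the three helpers (method and subtype values are illustrative):

package somepkg // illustrative package name

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func demo() {
	fmt.Println(grpcutil.ContentSubtype("application/grpc"))       // "" true
	fmt.Println(grpcutil.ContentSubtype("application/grpc+proto")) // "proto" true
	fmt.Println(grpcutil.ContentSubtype("application/json"))       // "" false
	fmt.Println(grpcutil.ContentType("proto"))                     // "application/grpc+proto"
	fmt.Println(grpcutil.ParseMethod("/helloworld.Greeter/SayHello"))
	// "helloworld.Greeter" "SayHello" <nil>
}
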
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
new file mode 100644
index 0000000..7a092b2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import "regexp"
+
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+	if len(text) == 0 {
+		return re.MatchString(text)
+	}
+	re.Longest()
+	rem := re.FindString(text)
+	return len(rem) == len(text)
+}
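
A hypothetical in-tree sketch contrasting full matching with Go's default leftmost, possibly partial, matching (somepkg is an illustrative name):

package somepkg // illustrative package name

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func demo() {
	re := regexp.MustCompile("ab+")
	fmt.Println(re.MatchString("xabby"))                  // true: substring match suffices
	fmt.Println(grpcutil.FullMatchWithRegex(re, "xabby")) // false: match must cover the whole text
	fmt.Println(grpcutil.FullMatchWithRegex(re, "abbb"))  // true
}
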
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
new file mode 100644
index 0000000..2c13ee9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -0,0 +1,280 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package idle contains a component for managing idleness (entering and exiting)
+// based on RPC activity.
+package idle
+
+import (
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// For overriding in unit tests.
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
+	return time.AfterFunc(d, f)
+}
+
+// Enforcer is the functionality provided by grpc.ClientConn to enter
+// and exit from idle mode.
+type Enforcer interface {
+	ExitIdleMode() error
+	EnterIdleMode()
+}
+
+// Manager implements idleness detection and calls the configured Enforcer to
+// enter/exit idle mode when appropriate.  Must be created by NewManager.
+type Manager struct {
+	// State accessed atomically.
+	lastCallEndTime           int64 // Unix timestamp in nanos; time when the most recent RPC completed.
+	activeCallsCount          int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
+	activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
+	closed                    int32 // Boolean; True when the manager is closed.
+
+	// Can be accessed without atomics or mutex since these are set at creation
+	// time and read-only after that.
+	enforcer Enforcer // Functionality provided by grpc.ClientConn.
+	timeout  time.Duration
+
+	// idleMu is used to guarantee mutual exclusion in two scenarios:
+	// - Opposing intentions:
+	//   - a: Idle timeout has fired and handleIdleTimeout() is trying to put
+	//     the channel in idle mode because the channel has been inactive.
+	//   - b: At the same time an RPC is made on the channel, and OnCallBegin()
+	//     is trying to prevent the channel from going idle.
+	// - Competing intentions:
+	//   - The channel is in idle mode and there are multiple RPCs starting at
+	//     the same time, all trying to move the channel out of idle. Only one
+	//     of them should succeed in doing so, while the other RPCs should
+	//     piggyback on the first one and be successfully handled.
+	idleMu       sync.RWMutex
+	actuallyIdle bool
+	timer        *time.Timer
+}
+
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout.  It begins in idle mode.
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+	return &Manager{
+		enforcer:         enforcer,
+		timeout:          timeout,
+		actuallyIdle:     true,
+		activeCallsCount: -math.MaxInt32,
+	}
+}
+
+// resetIdleTimerLocked resets the idle timer to the given duration.  Called
+// when exiting idle mode or when the timer fires and we need to reset it.
+func (m *Manager) resetIdleTimerLocked(d time.Duration) {
+	if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
+		return
+	}
+
+	// It is safe to ignore the return value from Stop() here because a fresh
+	// timer is always created below; this method is only ever called from the
+	// timer callback or when exiting idle mode.
+	if m.timer != nil {
+		m.timer.Stop()
+	}
+	m.timer = timeAfterFunc(d, m.handleIdleTimeout)
+}
+
+func (m *Manager) resetIdleTimer(d time.Duration) {
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+	m.resetIdleTimerLocked(d)
+}
+
+// handleIdleTimeout is the timer callback that is invoked upon expiry of the
+// configured idle timeout. The channel is considered inactive if there are no
+// ongoing calls and no RPC activity since the last time the timer fired.
+func (m *Manager) handleIdleTimeout() {
+	if m.isClosed() {
+		return
+	}
+
+	if atomic.LoadInt32(&m.activeCallsCount) > 0 {
+		m.resetIdleTimer(m.timeout)
+		return
+	}
+
+	// There has been activity on the channel since we last got here. Reset the
+	// timer and return.
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// Set the timer to fire after a duration of idle timeout, calculated
+		// from the time the most recent RPC completed.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
+		m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
+		return
+	}
+
+	// Now that we've checked that there has been no activity, attempt to enter
+	// idle mode, which is very likely to succeed.
+	if m.tryEnterIdleMode() {
+		// Successfully entered idle mode. No timer needed until we exit idle.
+		return
+	}
+
+	// Failed to enter idle mode due to a concurrent RPC that kept the channel
+	// active, or because of an error from the channel. Undo the attempt to
+	// enter idle, and reset the timer to try again later.
+	m.resetIdleTimer(m.timeout)
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (m *Manager) tryEnterIdleMode() bool {
+	// Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
+	// that the channel is either in idle mode or is trying to get there.
+	if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+		// This CAS operation can fail if an RPC started after we checked for
+		// activity in the timer handler, or one was ongoing from before the
+		// last time the timer fired, or if a test is attempting to enter idle
+		// mode without checking.  In all cases, abort going into idle mode.
+		return false
+	}
+	// N.B. if we fail to enter idle mode after this, we must re-add
+	// math.MaxInt32 to m.activeCallsCount.
+
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
+		// We raced and lost to a new RPC. Very rare, but stop entering idle.
+		atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+		return false
+	}
+	if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+		// A very short RPC could have come in (and also finished) after we
+		// checked for calls count and activity in handleIdleTimeout(), but
+		// before the CAS operation. So, we need to check for activity again.
+		atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+		return false
+	}
+
+	// No new RPCs have come in since we set the active calls count value to
+	// -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
+	// unconditionally now.
+	m.enforcer.EnterIdleMode()
+	m.actuallyIdle = true
+	return true
+}
+
+// EnterIdleModeForTesting instructs the channel to enter idle mode.
+func (m *Manager) EnterIdleModeForTesting() {
+	m.tryEnterIdleMode()
+}
+
+// OnCallBegin is invoked at the start of every RPC.
+func (m *Manager) OnCallBegin() error {
+	if m.isClosed() {
+		return nil
+	}
+
+	if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
+		// Channel is not idle now. Set the activity bit and allow the call.
+		atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+		return nil
+	}
+
+	// Channel is either in idle mode or is in the process of moving to idle
+	// mode. Attempt to exit idle mode to allow this RPC.
+	if err := m.ExitIdleMode(); err != nil {
+		// Undo the increment to calls count, and return an error causing the
+		// RPC to fail.
+		atomic.AddInt32(&m.activeCallsCount, -1)
+		return err
+	}
+
+	atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+	return nil
+}
+
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+	// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+	m.idleMu.Lock()
+	defer m.idleMu.Unlock()
+
+	if m.isClosed() || !m.actuallyIdle {
+		// This can happen in three scenarios:
+		// - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+		//   tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+		//   came in and OnCallBegin() noticed that the calls count is negative.
+		// - Channel is in idle mode, and multiple new RPCs come in at the same
+		//   time, all of them notice a negative calls count in OnCallBegin and get
+		//   here. The first one to get the lock would get the channel to exit idle.
+		// - Channel is not in idle mode, and the user calls Connect which calls
+		//   m.ExitIdleMode.
+		//
+		// In any case, there is nothing to do here.
+		return nil
+	}
+
+	if err := m.enforcer.ExitIdleMode(); err != nil {
+		return fmt.Errorf("failed to exit idle mode: %w", err)
+	}
+
+	// Undo the idle entry process. This also respects any new RPC attempts.
+	atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+	m.actuallyIdle = false
+
+	// Start a new timer to fire after the configured idle timeout.
+	m.resetIdleTimerLocked(m.timeout)
+	return nil
+}
+
+// OnCallEnd is invoked at the end of every RPC.
+func (m *Manager) OnCallEnd() {
+	if m.isClosed() {
+		return
+	}
+
+	// Record the time at which the most recent call finished.
+	atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
+
+	// Decrement the active calls count. This count can temporarily go negative
+	// when the timer callback is in the process of moving the channel to idle
+	// mode, but one or more RPCs come in and complete before the timer callback
+	// can get done with the process of moving to idle mode.
+	atomic.AddInt32(&m.activeCallsCount, -1)
+}
+
+func (m *Manager) isClosed() bool {
+	return atomic.LoadInt32(&m.closed) == 1
+}
+
+// Close stops the timer associated with the Manager, if it exists.
+func (m *Manager) Close() {
+	atomic.StoreInt32(&m.closed, 1)
+
+	m.idleMu.Lock()
+	if m.timer != nil {
+		m.timer.Stop()
+		m.timer = nil
+	}
+	m.idleMu.Unlock()
+}
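
A hypothetical in-tree sketch wiring a stub Enforcer into the Manager; in production the Enforcer is the grpc.ClientConn (somepkg and stubEnforcer are illustrative names):

package somepkg // illustrative package name

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle"
)

type stubEnforcer struct{}

func (stubEnforcer) ExitIdleMode() error { fmt.Println("exiting idle"); return nil }
func (stubEnforcer) EnterIdleMode()      { fmt.Println("entering idle") }

func demo() {
	// The manager starts idle, so the first RPC triggers ExitIdleMode and
	// arms the idle timer.
	m := idle.NewManager(stubEnforcer{}, 5*time.Minute)
	if err := m.OnCallBegin(); err != nil {
		panic(err)
	}
	m.OnCallEnd()
	// With no further RPCs, the timer would eventually fire and the
	// manager would call EnterIdleMode; Close stops the timer.
	m.Close()
}
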
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index b96b359..2699223 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -25,42 +25,237 @@
 	"time"
 
 	"google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/serviceconfig"
 )
 
 var (
-	// WithResolverBuilder is set by dialoptions.go
-	WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
-	// WithHealthCheckFunc is set by dialoptions.go
-	WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
 	// HealthCheckFunc is used to provide client-side LB channel health checking
 	HealthCheckFunc HealthChecker
+	// RegisterClientHealthCheckListener is used to provide a listener for
+	// updates from the client-side health checking service. It returns a
+	// function that can be called to stop the health producer.
+	RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
 	// BalancerUnregister is exported by package balancer to unregister a balancer.
 	BalancerUnregister func(name string)
 	// KeepaliveMinPingTime is the minimum ping interval.  This must be 10s by
 	// default, but tests may wish to set it lower for convenience.
 	KeepaliveMinPingTime = 10 * time.Second
-	// StatusRawProto is exported by status/status.go. This func returns a
-	// pointer to the wrapped Status proto for a given status.Status without a
-	// call to proto.Clone(). The returned Status proto should not be mutated by
-	// the caller.
-	StatusRawProto interface{} // func (*status.Status) *spb.Status
-	// NewRequestInfoContext creates a new context based on the argument context attaching
-	// the passed in RequestInfo to the new context.
-	NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
-	// ParseServiceConfigForTesting is for creating a fake
-	// ClientConn for resolver testing only
-	ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+	// KeepaliveMinServerPingTime is the minimum ping interval for servers.
+	// This must be 1s by default, but tests may wish to set it lower for
+	// convenience.
+	KeepaliveMinServerPingTime = time.Second
+	// ParseServiceConfig parses a JSON representation of the service config.
+	ParseServiceConfig any // func(string) *serviceconfig.ParseResult
+	// EqualServiceConfigForTesting is for testing service config generation and
+	// parsing. Both a and b should be returned by ParseServiceConfig.
+	// This function compares the configs without rawJSON stripped, in case
+	// there is a difference in white space.
+	EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+	// GetCertificateProviderBuilder returns the registered builder for the
+	// given name. This is set by package certprovider for use from xDS
+	// bootstrap code while parsing certificate provider configs in the
+	// bootstrap file.
+	GetCertificateProviderBuilder any // func(string) certprovider.Builder
+	// GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+	// stored in the passed in attributes. This is set by
+	// credentials/xds/xds.go.
+	GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
+	// GetServerCredentials returns the transport credentials configured on a
+	// gRPC server. An xDS-enabled server needs to know what type of credentials
+	// is configured on the underlying gRPC server. This is set by server.go.
+	GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+	// MetricsRecorderForServer returns the MetricsRecorderList derived from a
+	// server's stats handlers.
+	MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder
+	// CanonicalString returns the canonical string of the code defined here:
+	// https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	CanonicalString any // func (codes.Code) string
+	// IsRegisteredMethod returns whether the passed in method is registered as
+	// a method on the server.
+	IsRegisteredMethod any // func(*grpc.Server, string) bool
+	// ServerFromContext returns the server from the context.
+	ServerFromContext any // func(context.Context) *grpc.Server
+	// AddGlobalServerOptions adds an array of ServerOption that will be
+	// effective globally for newly created servers. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	AddGlobalServerOptions any // func(opt ...ServerOption)
+	// ClearGlobalServerOptions clears the array of extra ServerOption. This
+	// method is useful in testing and benchmarking.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ClearGlobalServerOptions func()
+	// AddGlobalDialOptions adds an array of DialOption that will be effective
+	// globally for newly created client channels. The priority will be: 1.
+	// user-provided; 2. this method; 3. default values.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	AddGlobalDialOptions any // func(opt ...DialOption)
+	// DisableGlobalDialOptions returns a DialOption that prevents the
+	// ClientConn from applying the global DialOptions (set via
+	// AddGlobalDialOptions).
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	DisableGlobalDialOptions any // func() grpc.DialOption
+	// ClearGlobalDialOptions clears the array of extra DialOption. This
+	// method is useful in testing and benchmarking.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	ClearGlobalDialOptions func()
+
+	// AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
+	// configured for newly created ClientConns.
+	AddGlobalPerTargetDialOptions any // func (opt any)
+	// ClearGlobalPerTargetDialOptions clears the slice of global per-target
+	// dial options.
+	ClearGlobalPerTargetDialOptions func()
+
+	// JoinDialOptions combines the dial options passed as arguments into a
+	// single dial option.
+	JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
+	// JoinServerOptions combines the server options passed as arguments into a
+	// single server option.
+	JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
+
+	// WithBinaryLogger returns a DialOption that specifies the binary logger
+	// for a ClientConn.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
+	// BinaryLogger returns a ServerOption that can set the binary logger for a
+	// server.
+	//
+	// This is used in the 1.0 release of gcp/observability, and thus must not be
+	// deleted or changed.
+	BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
+
+	// SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
+	// provided grpc.ClientConn.
+	SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
+
+	// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
+	// the provided xds bootstrap config instead of the global configuration from
+	// the supported environment variables.  The resolver.Builder is meant to be
+	// used in conjunction with the grpc.WithResolvers DialOption.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
+
+	// NewXDSResolverWithPoolForTesting creates a new xDS resolver builder
+	// using the provided xDS pool instead of creating a new one using the
+	// bootstrap configuration specified by the supported environment variables.
+	// The resolver.Builder is meant to be used in conjunction with the
+	// grpc.WithResolvers DialOption. The resolver.Builder does not take
+	// ownership of the provided xDS client and it is the responsibility of the
+	// caller to close the client when no longer required.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error)
+
+	// NewXDSResolverWithClientForTesting creates a new xDS resolver builder
+	// using the provided xDS client instead of creating a new one using the
+	// bootstrap configuration specified by the supported environment variables.
+	// The resolver.Builder is meant to be used in conjunction with the
+	// grpc.WithResolvers DialOption. The resolver.Builder does not take
+	// ownership of the provided xDS client and it is the responsibility of the
+	// caller to close the client when no longer required.
+	//
+	// Testing Only
+	//
+	// This function should ONLY be used for testing and may not work with some
+	// other features, including the CSDS service.
+	NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
+
+	// ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
+	ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
+
+	// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
+	// metadata to RPCs.
+	GRPCResolverSchemeExtraMetadata = "xds"
+
+	// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+	EnterIdleModeForTesting any // func(*grpc.ClientConn)
+
+	// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+	ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+
+	// ChannelzTurnOffForTesting disables the Channelz service for testing
+	// purposes.
+	ChannelzTurnOffForTesting func()
+
+	// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+	// invoke resource-not-found error for the given resource type and name.
+	TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
+
+	// FromOutgoingContextRaw returns the un-merged, intermediary contents of
+	// metadata.rawMD.
+	FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
+
+	// UserSetDefaultScheme is set to true if the user has overridden the
+	// default resolver scheme.
+	UserSetDefaultScheme = false
+
+	// ConnectedAddress returns the connected address for a SubConnState. The
+	// address is only valid if the state is READY.
+	ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+	// SetConnectedAddress sets the connected address for a SubConnState.
+	SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+	// SnapshotMetricRegistryForTesting snapshots the global data of the metric
+	// registry. Returns a cleanup function that sets the metric registry to its
+	// original state. Only called in testing functions.
+	SnapshotMetricRegistryForTesting func() func()
+
+	// SetDefaultBufferPoolForTesting updates the default buffer pool, for
+	// testing purposes.
+	SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+	// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+	// testing purposes.
+	SetBufferPoolingThresholdForTesting any // func(int)
+
+	// TimeAfterFunc is used to create timers. During tests the function is
+	// replaced to track allocated timers and fail the test if a timer isn't
+	// cancelled.
+	TimeAfterFunc = func(d time.Duration, f func()) Timer {
+		return time.AfterFunc(d, f)
+	}
+
+	// NewStreamWaitingForResolver is a test hook that is triggered when a
+	// new stream blocks while waiting for name resolution. This can be
+	// used in tests to synchronize resolver updates and avoid race conditions.
+	// When set, the function will be called before the stream enters
+	// the blocking state.
+	NewStreamWaitingForResolver = func() {}
 )
 
-// HealthChecker defines the signature of the client-side LB channel health checking function.
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
 //
 // The implementation is expected to create a health checking RPC stream by
 // calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
 //
 // The health checking protocol is defined at:
 // https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
 
 const (
 	// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
@@ -72,3 +267,27 @@
 	// that supports backend returned by grpclb balancer.
 	CredsBundleModeBackendFromBalancer = "backend-from-balancer"
 )
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
+
+// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
+// embedding.
+type EnforceSubConnEmbedding interface {
+	enforceSubConnEmbedding()
+}
+
+// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation
+// embedding.
+type EnforceClientConnEmbedding interface {
+	enforceClientConnEmbedding()
+}
+
+// Timer is an interface to allow injecting different time.Timer implementations
+// during tests.
+type Timer interface {
+	Stop() bool
+}
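
A pattern worth noting in internal.go: hooks are declared as `any` and assigned by other packages at init time to break import cycles, with the intended signature recorded in a trailing comment; callers type-assert back to that signature. A minimal sketch of the pattern with hypothetical names:

package main

import "fmt"

// CanonicalString mimics the internal hook style: stored as any, with the
// intended signature documented in a trailing comment.
var CanonicalString any // func(code int) string

func init() {
	// Another package would perform this assignment in its init().
	CanonicalString = func(code int) string { return fmt.Sprintf("CODE_%d", code) }
}

func main() {
	// The caller type-asserts to the documented signature before use.
	f := CanonicalString.(func(int) string)
	fmt.Println(f(3)) // CODE_3
}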
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 0000000..c4055bc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+	"fmt"
+	"strings"
+
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
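+// Equal reports whether o is an mdValue with exactly the same keys and, for
+// each key, the same values in the same order. It lets the attributes
+// package compare metadata attached to addresses.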
+func (m mdValue) Equal(o any) bool {
+	om, ok := o.(mdValue)
+	if !ok {
+		return false
+	}
+	if len(m) != len(om) {
+		return false
+	}
+	for k, v := range m {
+		ov := om[k]
+		if len(ov) != len(v) {
+			return false
+		}
+		for i, ve := range v {
+			if ov[i] != ve {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+	attrs := addr.Attributes
+	if attrs == nil {
+		return nil
+	}
+	md, _ := attrs.Value(mdKey).(mdValue)
+	return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+	return addr
+}
+
+// Validate validates every pair in md with ValidatePair.
+func Validate(md metadata.MD) error {
+	for k, vals := range md {
+		if err := ValidatePair(k, vals...); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters outside the range %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+	// Iterate by index to avoid the rune conversion a for-range loop performs.
+	for i := 0; i < len(msg); i++ {
+		if msg[i] < 0x20 || msg[i] > 0x7E {
+			return true
+		}
+	}
+	return false
+}
+
+// ValidateKey validates a key with the following rules (pseudo-headers are
+// skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+func ValidateKey(key string) error {
+	// key should not be empty
+	if key == "" {
+		return fmt.Errorf("there is an empty key in the header")
+	}
+	// pseudo-header will be ignored
+	if key[0] == ':' {
+		return nil
+	}
+	// Check the key, iterating by index to avoid a rune conversion.
+	for i := 0; i < len(key); i++ {
+		r := key[i]
+		if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+			return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+		}
+	}
+	return nil
+}
+
+// ValidatePair validates a key-value pair with the following rules
+// (pseudo-headers are skipped):
+//   - the key must contain one or more characters.
+//   - the characters in the key must be in [0-9 a-z _ - .].
+//   - if the key ends with a "-bin" suffix, no validation of the corresponding
+//     value is performed.
+//   - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+	if err := ValidateKey(key); err != nil {
+		return err
+	}
+	if strings.HasSuffix(key, "-bin") {
+		return nil
+	}
+	// check value
+	for _, val := range vals {
+		if hasNotPrintable(val) {
+			return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+		}
+	}
+	return nil
+}
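
A short usage sketch for this package (illustrative only; internal packages are importable only from within the grpc module, and the address and key here are hypothetical):

package main

import (
	"fmt"

	imetadata "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/resolver"
)

func main() {
	md := metadata.Pairs("x-backend-hint", "replica-1")
	if err := imetadata.Validate(md); err != nil { // keys must match [0-9a-z-_.]
		panic(err)
	}
	addr := imetadata.Set(resolver.Address{Addr: "10.0.0.1:443"}, md)
	fmt.Println(imetadata.Get(addr)) // map[x-backend-hint:[replica-1]]
}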
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 0000000..dbee7a6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/protojson"
+	"google.golang.org/protobuf/protoadapt"
+)
+
+const jsonIndent = "  "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshaling fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e any) string {
+	if ee, ok := e.(protoadapt.MessageV1); ok {
+		e = protoadapt.MessageV2Of(ee)
+	}
+
+	if ee, ok := e.(protoadapt.MessageV2); ok {
+		mm := protojson.MarshalOptions{
+			Indent:    jsonIndent,
+			Multiline: true,
+		}
+		ret, err := mm.Marshal(ee)
+		if err != nil {
+			// This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+			// messages are not imported, and this will fail because the message
+			// is not found.
+			return fmt.Sprintf("%+v", ee)
+		}
+		return string(ret)
+	}
+
+	ret, err := json.MarshalIndent(e, "", jsonIndent)
+	if err != nil {
+		return fmt.Sprintf("%+v", e)
+	}
+	return string(ret)
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as a string.
+func FormatJSON(b []byte) string {
+	var out bytes.Buffer
+	err := json.Indent(&out, b, "", jsonIndent)
+	if err != nil {
+		return string(b)
+	}
+	return out.String()
+}
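
A usage sketch (illustrative; the internal package is only importable from within the grpc module):

package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

func main() {
	// Non-proto values go through encoding/json with two-space indentation.
	fmt.Println(pretty.ToJSON(map[string]any{"service": "echo", "timeout": "1s"}))

	// Already-serialized JSON is re-indented; invalid input is returned as-is.
	fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":[2,3]}`)))
}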
diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
new file mode 100644
index 0000000..1f61f1a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proxyattributes contains functions for getting and setting proxy
+// attributes like the CONNECT address and user info.
+package proxyattributes
+
+import (
+	"net/url"
+
+	"google.golang.org/grpc/resolver"
+)
+
+type keyType string
+
+const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions")
+
+// Options holds the proxy connection details needed during the CONNECT
+// handshake.
+type Options struct {
+	User        *url.Userinfo
+	ConnectAddr string
+}
+
+// Set returns a copy of addr with opts set in its attributes.
+func Set(addr resolver.Address, opts Options) resolver.Address {
+	addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
+	return addr
+}
+
+// Get returns the Options for the proxy [resolver.Address] and a boolean
+// indicating whether the attribute is present. The returned data should not
+// be mutated.
+func Get(addr resolver.Address) (Options, bool) {
+	if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
+		return a.(Options), true
+	}
+	return Options{}, false
+}
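
A sketch of how a resolver might attach these attributes to a proxy address (host names are hypothetical):

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/internal/proxyattributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	proxy := resolver.Address{Addr: "proxy.example.com:3128"}
	proxy = proxyattributes.Set(proxy, proxyattributes.Options{
		User:        url.User("alice"),
		ConnectAddr: "backend.example.com:443",
	})
	if opts, ok := proxyattributes.Get(proxy); ok {
		fmt.Println(opts.ConnectAddr) // backend.example.com:443
	}
}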
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 0000000..f060387
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+	"context"
+	"sync"
+
+	"google.golang.org/grpc/internal/serviceconfig"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+	// Selects the configuration for the RPC, or terminates it using the error.
+	// This error will be converted by the gRPC library to a status error with
+	// code UNKNOWN if it is not returned as a status error.
+	SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+	// Context is the user's context for the RPC and contains headers and
+	// application timeout.  It is passed for interception purposes and for
+	// efficiency reasons.  SelectConfig should not be blocking.
+	Context context.Context
+	Method  string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+	// The context to use for the remainder of the RPC; can pass info to LB
+	// policy or affect timeout or metadata.
+	Context      context.Context
+	MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+	OnCommitted  func()                     // Called when the RPC has been committed (retries no longer possible)
+	Interceptor  ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but defined here for circular
+// dependency reasons.
+type ClientStream interface {
+	// Header returns the header metadata received from the server if there
+	// is any. It blocks if the metadata is not ready to read.
+	Header() (metadata.MD, error)
+	// Trailer returns the trailer metadata from the server, if there is any.
+	// It must only be called after stream.CloseAndRecv has returned, or
+	// stream.Recv has returned a non-nil error (including io.EOF).
+	Trailer() metadata.MD
+	// CloseSend closes the send direction of the stream. It closes the stream
+	// when a non-nil error is encountered. It is also not safe to call CloseSend
+	// concurrently with SendMsg.
+	CloseSend() error
+	// Context returns the context for this stream.
+	//
+	// It should not be called until after Header or RecvMsg has returned. Once
+	// called, subsequent client-side retries are disabled.
+	Context() context.Context
+	// SendMsg is generally called by generated code. On error, SendMsg aborts
+	// the stream. If the error was generated by the client, the status is
+	// returned directly; otherwise, io.EOF is returned and the status of
+	// the stream may be discovered using RecvMsg.
+	//
+	// SendMsg blocks until:
+	//   - There is sufficient flow control to schedule m with the transport, or
+	//   - The stream is done, or
+	//   - The stream breaks.
+	//
+	// SendMsg does not wait until the message is received by the server. An
+	// untimely stream closure may result in lost messages. To ensure delivery,
+	// users should ensure the RPC completed successfully using RecvMsg.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not safe
+	// to call SendMsg on the same stream in different goroutines. It is also
+	// not safe to call CloseSend concurrently with SendMsg.
+	SendMsg(m any) error
+	// RecvMsg blocks until it receives a message into m or the stream is
+	// done. It returns io.EOF when the stream completes successfully. On
+	// any other error, the stream is aborted and the error contains the RPC
+	// status.
+	//
+	// It is safe to have a goroutine calling SendMsg and another goroutine
+	// calling RecvMsg on the same stream at the same time, but it is not
+	// safe to call RecvMsg on the same stream in different goroutines.
+	RecvMsg(m any) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+	// NewStream produces a ClientStream for an RPC which may optionally use
+	// the provided function to produce a stream for delegation.  Note:
+	// RPCInfo.Context should not be used (will be nil).
+	//
+	// done is invoked when the RPC is finished using its connection, or could
+	// not be assigned a connection.  RPC operations may still occur on
+	// ClientStream after done is called, since the interceptor is invoked by
+	// application-layer operations.  done must never be nil when called.
+	NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+	// AllowRPC checks if an incoming RPC is allowed to proceed based on
+	// information about the connection the RPC was received on and on the
+	// HTTP headers. This information will be piped into the context.
+	AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+	state.Attributes = state.Attributes.WithValue(csKey, cs)
+	return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+	cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+	return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+	mu sync.RWMutex
+	cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+	scs.mu.Lock()
+	defer scs.mu.Unlock()
+	scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+	scs.mu.RLock()
+	defer scs.mu.RUnlock()
+	return scs.cs.SelectConfig(r)
+}
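
SafeConfigSelector's guarantee comes entirely from the RWMutex: every SelectConfig holds the read lock, so UpdateConfigSelector's write lock cannot be acquired until all in-flight selections finish. The same pattern in a minimal standalone form:

package main

import (
	"fmt"
	"sync"
)

type safeValue struct {
	mu sync.RWMutex
	v  string
}

func (s *safeValue) Update(v string) {
	s.mu.Lock() // blocks until all readers release RLock
	defer s.mu.Unlock()
	s.v = v
}

func (s *safeValue) Use() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.v
}

func main() {
	s := &safeValue{v: "old"}
	s.Update("new") // returns only once no reader can still observe "old"
	fmt.Println(s.Use())
}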
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
new file mode 100644
index 0000000..20b8fb0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -0,0 +1,427 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package delegatingresolver implements a resolver capable of resolving both
+// target URIs and proxy addresses.
+package delegatingresolver
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"sync"
+
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal/proxyattributes"
+	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	logger = grpclog.Component("delegating-resolver")
+	// HTTPSProxyFromEnvironment will be overwritten in the tests
+	HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// delegatingResolver manages both target URI and proxy address resolution by
+// delegating these tasks to separate child resolvers. Essentially, it acts as
+// an intermediary between the gRPC ClientConn and the child resolvers.
+//
+// It implements the [resolver.Resolver] interface.
+type delegatingResolver struct {
+	target   resolver.Target     // parsed target URI to be resolved
+	cc       resolver.ClientConn // gRPC ClientConn
+	proxyURL *url.URL            // proxy URL, derived from proxy environment and target
+
+	// We do not hold both mu and childMu in the same goroutine. Avoid holding
+	// both locks when calling into the child, as the child resolver may
+	// synchronously callback into the channel.
+	mu                  sync.Mutex         // protects all the fields below
+	targetResolverState *resolver.State    // state of the target resolver
+	proxyAddrs          []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+
+	// childMu serializes calls into child resolvers. It also protects access to
+	// the following fields.
+	childMu        sync.Mutex
+	targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
+	proxyResolver  resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
+}
+
+// nopResolver is a resolver that does nothing.
+type nopResolver struct{}
+
+func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (nopResolver) Close() {}
+
+// proxyURLForTarget determines the proxy URL for the given address based on the
+// environment. It can return the following:
+//   - nil URL, nil error: No proxy is configured or the address is excluded
+//     using the `NO_PROXY` environment variable or if req.URL.Host is
+//     "localhost" (with or without // a port number)
+//   - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
+//   - non-nil URL, nil error: A proxy is configured, and the proxy URL was
+//     retrieved successfully without any errors.
+func proxyURLForTarget(address string) (*url.URL, error) {
+	req := &http.Request{URL: &url.URL{
+		Scheme: "https",
+		Host:   address,
+	}}
+	return HTTPSProxyFromEnvironment(req)
+}
+
+// New creates a new delegating resolver that can create up to two child
+// resolvers:
+//   - one to resolve the proxy address specified using the supported
+//     environment variables. This uses the registered resolver for the "dns"
+//     scheme. It is lazily built when a target resolver update contains at least
+//     one TCP address.
+//   - one to resolve the target URI using the resolver specified by the scheme
+//     in the target URI or specified by the user using the WithResolvers dial
+//     option. As a special case, if the target URI's scheme is "dns" and a
+//     proxy is specified using the supported environment variables, the target
+//     URI's path portion is used as the resolved address unless target
+//     resolution is enabled using the dial option.
+func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
+	r := &delegatingResolver{
+		target:         target,
+		cc:             cc,
+		proxyResolver:  nopResolver{},
+		targetResolver: nopResolver{},
+	}
+
+	var err error
+	r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+	if err != nil {
+		return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+	}
+
+	// proxy is not configured or proxy address excluded using `NO_PROXY` env
+	// var, so only target resolver is used.
+	if r.proxyURL == nil {
+		return targetResolverBuilder.Build(target, cc, opts)
+	}
+
+	if logger.V(2) {
+		logger.Infof("Proxy URL detected : %s", r.proxyURL)
+	}
+
+	// Resolver updates from one child may trigger calls into the other. Block
+	// updates until the children are initialized.
+	r.childMu.Lock()
+	defer r.childMu.Unlock()
+	// When the scheme is 'dns' and target resolution on client is not enabled,
+	// resolution should be handled by the proxy, not the client. Therefore, we
+	// bypass the target resolver and store the unresolved target address.
+	if target.URL.Scheme == "dns" && !targetResolutionEnabled {
+		r.targetResolverState = &resolver.State{
+			Addresses: []resolver.Address{{Addr: target.Endpoint()}},
+			Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+		}
+		r.updateTargetResolverState(*r.targetResolverState)
+		return r, nil
+	}
+	wcc := &wrappingClientConn{
+		stateListener: r.updateTargetResolverState,
+		parent:        r,
+	}
+	if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
+		return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
+	}
+	return r, nil
+}
+
+// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns"
+// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds
+// a resolver with a wrappingClientConn to capture resolved addresses.
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
+	proxyBuilder := resolver.Get("dns")
+	if proxyBuilder == nil {
+		panic("delegating_resolver: resolver for proxy not found for scheme dns")
+	}
+	url := *r.proxyURL
+	url.Scheme = "dns"
+	url.Path = "/" + r.proxyURL.Host
+	url.Host = "" // Clear the Host field to conform to the "dns:///" format
+
+	proxyTarget := resolver.Target{URL: url}
+	wcc := &wrappingClientConn{
+		stateListener: r.updateProxyResolverState,
+		parent:        r,
+	}
+	return proxyBuilder.Build(proxyTarget, wcc, opts)
+}
+
+func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+	r.childMu.Lock()
+	defer r.childMu.Unlock()
+	r.targetResolver.ResolveNow(o)
+	r.proxyResolver.ResolveNow(o)
+}
+
+func (r *delegatingResolver) Close() {
+	r.childMu.Lock()
+	defer r.childMu.Unlock()
+	r.targetResolver.Close()
+	r.targetResolver = nil
+
+	r.proxyResolver.Close()
+	r.proxyResolver = nil
+}
+
+func needsProxyResolver(state *resolver.State) bool {
+	for _, addr := range state.Addresses {
+		if !skipProxy(addr) {
+			return true
+		}
+	}
+	for _, endpoint := range state.Endpoints {
+		for _, addr := range endpoint.Addresses {
+			if !skipProxy(addr) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func skipProxy(address resolver.Address) bool {
+	// Avoid proxy when network is not tcp.
+	networkType, ok := networktype.Get(address)
+	if !ok {
+		networkType, _ = transport.ParseDialTarget(address.Addr)
+	}
+	if networkType != "tcp" {
+		return true
+	}
+
+	req := &http.Request{URL: &url.URL{
+		Scheme: "https",
+		Host:   address.Addr,
+	}}
+	// Avoid proxy when address included in `NO_PROXY` environment variable or
+	// fails to get the proxy address.
+	url, err := HTTPSProxyFromEnvironment(req)
+	if err != nil || url == nil {
+		return true
+	}
+	return false
+}
+
+// updateClientConnStateLocked constructs a combined list of addresses by
+// pairing each proxy address with every target address of type TCP. For each
+// pair, it creates a new [resolver.Address] using the proxy address and
+// attaches the corresponding target address and user info as attributes. Target
+// addresses that are not of type TCP are appended to the list as-is. The
+// function returns nil if either resolver has not yet provided an update, and
+// returns the result of ClientConn.UpdateState once both resolvers have
+// provided at least one update.
+func (r *delegatingResolver) updateClientConnStateLocked() error {
+	if r.targetResolverState == nil || r.proxyAddrs == nil {
+		return nil
+	}
+
+	// If multiple resolved proxy addresses are present, we send only the
+	// unresolved proxy host and let net.Dial handle the proxy host name
+	// resolution when creating the transport. Sending all resolved addresses
+	// would increase the number of addresses passed to the ClientConn and
+	// subsequently to load balancing (LB) policies like Round Robin, leading
+	// to additional TCP connections. However, if there's only one resolved
+	// proxy address, we send it directly, as it doesn't affect the address
+	// count returned by the target resolver and the address count sent to the
+	// ClientConn.
+	var proxyAddr resolver.Address
+	if len(r.proxyAddrs) == 1 {
+		proxyAddr = r.proxyAddrs[0]
+	} else {
+		proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
+	}
+	var addresses []resolver.Address
+	for _, targetAddr := range (*r.targetResolverState).Addresses {
+		if skipProxy(targetAddr) {
+			addresses = append(addresses, targetAddr)
+			continue
+		}
+		addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+			User:        r.proxyURL.User,
+			ConnectAddr: targetAddr.Addr,
+		}))
+	}
+
+	// For each target endpoint, construct a new [resolver.Endpoint] that
+	// includes all addresses from all proxy endpoints and the addresses from
+	// that target endpoint, preserving the number of target endpoints.
+	var endpoints []resolver.Endpoint
+	for _, endpt := range (*r.targetResolverState).Endpoints {
+		var addrs []resolver.Address
+		for _, targetAddr := range endpt.Addresses {
+			// Avoid proxy when network is not tcp.
+			if skipProxy(targetAddr) {
+				addrs = append(addrs, targetAddr)
+				continue
+			}
+			for _, proxyAddr := range r.proxyAddrs {
+				addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+					User:        r.proxyURL.User,
+					ConnectAddr: targetAddr.Addr,
+				}))
+			}
+		}
+		endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs})
+	}
+	// Use the targetResolverState for its service config and attributes
+	// contents. The state update is only sent after both the target and proxy
+	// resolvers have sent their updates, and curState has been updated with the
+	// combined addresses.
+	curState := *r.targetResolverState
+	curState.Addresses = addresses
+	curState.Endpoints = endpoints
+	return r.cc.UpdateState(curState)
+}
+
+// updateProxyResolverState updates the proxy resolver state by storing proxy
+// addresses and endpoints, marking the resolver as ready, and triggering a
+// state update if both proxy and target resolvers are ready. If the ClientConn
+// returns a non-nil error, it calls `ResolveNow()` on the target resolver.  It
+// is a StateListener function of wrappingClientConn passed to the proxy
+// resolver.
+func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if logger.V(2) {
+		logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
+	}
+	if len(state.Endpoints) > 0 {
+		// We expect exactly one address per endpoint because the proxy resolver
+		// uses "dns" resolution.
+		r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
+		for _, endpoint := range state.Endpoints {
+			r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
+		}
+	} else if state.Addresses != nil {
+		r.proxyAddrs = state.Addresses
+	} else {
+		r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received
+	}
+	err := r.updateClientConnStateLocked()
+	// An alternative approach would be to block until updates are received
+	// from both resolvers. That is not done here because calling `New()`
+	// triggers `Build()` for the first resolver, which calls `UpdateState()`
+	// before the second resolver has sent an update, so `New()` would block
+	// indefinitely.
+	if err != nil {
+		go func() {
+			r.childMu.Lock()
+			defer r.childMu.Unlock()
+			if r.targetResolver != nil {
+				r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
+			}
+		}()
+	}
+	return err
+}
+
+// updateTargetResolverState is the StateListener function provided to the
+// target resolver via wrappingClientConn. It updates the resolver state and
+// marks the target resolver as ready. If the update includes at least one TCP
+// address and the proxy resolver has not yet been constructed, it initializes
+// the proxy resolver. A combined state update is triggered once both resolvers
+// are ready. If all addresses are non-TCP, it proceeds without waiting for the
+// proxy resolver. If ClientConn.UpdateState returns a non-nil error,
+// ResolveNow() is called on the proxy resolver.
+func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	if logger.V(2) {
+		logger.Infof("Addresses received from target resolver: %v", state.Addresses)
+	}
+	r.targetResolverState = &state
+	// If all addresses returned by the target resolver have a non-TCP network
+	// type, or are listed in the `NO_PROXY` environment variable, do not wait
+	// for proxy update.
+	if !needsProxyResolver(r.targetResolverState) {
+		return r.cc.UpdateState(*r.targetResolverState)
+	}
+
+	// The proxy resolver may be rebuilt multiple times: if building it fails,
+	// another attempt is made on each subsequent update from the target
+	// resolver.
+	if len(r.proxyAddrs) == 0 {
+		go func() {
+			r.childMu.Lock()
+			defer r.childMu.Unlock()
+			if _, ok := r.proxyResolver.(nopResolver); !ok {
+				return
+			}
+			proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{})
+			if err != nil {
+				r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err))
+				return
+			}
+			r.proxyResolver = proxyResolver
+		}()
+	}
+
+	err := r.updateClientConnStateLocked()
+	if err != nil {
+		go func() {
+			r.childMu.Lock()
+			defer r.childMu.Unlock()
+			if r.proxyResolver != nil {
+				r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
+			}
+		}()
+	}
+	return nil
+}
+
+// wrappingClientConn serves as an intermediary between the parent ClientConn
+// and the child resolvers created here. It implements the resolver.ClientConn
+// interface and is passed in that capacity to the child resolvers.
+type wrappingClientConn struct {
+	// Callback to deliver resolver state updates
+	stateListener func(state resolver.State) error
+	parent        *delegatingResolver
+}
+
+// UpdateState receives resolver state updates and forwards them to the
+// appropriate listener function (either for the proxy or target resolver).
+func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
+	return wcc.stateListener(state)
+}
+
+// ReportError intercepts errors from the child resolvers and passes them to
+// ClientConn.
+func (wcc *wrappingClientConn) ReportError(err error) {
+	wcc.parent.cc.ReportError(err)
+}
+
+// NewAddress intercepts the new resolved address from the child resolvers and
+// passes them to ClientConn.
+func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
+	wcc.UpdateState(resolver.State{Addresses: addrs})
+}
+
+// ParseServiceConfig parses the provided service config and returns an object
+// that provides the parsed config.
+func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
+	return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
+}
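
The proxy decision in skipProxy and proxyURLForTarget ultimately defers to http.ProxyFromEnvironment. A standalone sketch of how HTTPS_PROXY and NO_PROXY drive that call (values are hypothetical):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// http.ProxyFromEnvironment reads these once, on first use.
	os.Setenv("HTTPS_PROXY", "http://proxy.example.com:3128")
	os.Setenv("NO_PROXY", "internal.example.com")

	for _, host := range []string{"backend.example.com:443", "internal.example.com:443"} {
		req := &http.Request{URL: &url.URL{Scheme: "https", Host: host}}
		u, err := http.ProxyFromEnvironment(req)
		fmt.Println(host, "->", u, err) // the second host yields a nil proxy URL (excluded by NO_PROXY)
	}
}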
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index abc0f92..ada5251 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -23,63 +23,77 @@
 import (
 	"context"
 	"encoding/json"
-	"errors"
 	"fmt"
+	rand "math/rand/v2"
 	"net"
+	"net/netip"
 	"os"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"google.golang.org/grpc/backoff"
+	grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
 	"google.golang.org/grpc/grpclog"
-	internalbackoff "google.golang.org/grpc/internal/backoff"
-	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/backoff"
+	"google.golang.org/grpc/internal/envconfig"
+	"google.golang.org/grpc/internal/resolver/dns/internal"
 	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+var (
+	// EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+	// addresses from SRV records.  Must not be changed after init time.
+	EnableSRVLookups = false
+
+	// MinResolutionInterval is the minimum interval at which re-resolutions are
+	// allowed. This helps to prevent excessive re-resolution.
+	MinResolutionInterval = 30 * time.Second
+
+	// ResolvingTimeout specifies the maximum duration for a DNS resolution request.
+	// If the timeout expires before a response is received, the request will be canceled.
+	//
+	// It is recommended to set this value at application startup. Avoid modifying this variable
+	// after initialization as it's not thread-safe for concurrent modification.
+	ResolvingTimeout = 30 * time.Second
+
+	logger = grpclog.Component("dns")
 )
 
 func init() {
 	resolver.Register(NewBuilder())
+	internal.TimeAfterFunc = time.After
+	internal.TimeNowFunc = time.Now
+	internal.TimeUntilFunc = time.Until
+	internal.NewNetResolver = newNetResolver
+	internal.AddressDialer = addressDialer
 }
 
 const (
 	defaultPort       = "443"
-	defaultFreq       = time.Minute * 30
 	defaultDNSSvrPort = "53"
 	golang            = "GO"
-	// txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+	// txtPrefix is the prefix string to be prepended to the host name for txt
+	// record lookup.
 	txtPrefix = "_grpc_config."
 	// In DNS, service config is encoded in a TXT record via the mechanism
 	// described in RFC-1464 using the attribute name grpc_config.
 	txtAttribute = "grpc_config="
 )
 
-var (
-	errMissingAddr = errors.New("dns resolver: missing address")
-
-	// Addresses ending with a colon that is supposed to be the separator
-	// between host and port is not allowed.  E.g. "::" is a valid address as
-	// it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
-	// a colon as the host and port separator
-	errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-var (
-	defaultResolver netResolver = net.DefaultResolver
-	// To prevent excessive re-resolution, we enforce a rate limit on DNS
-	// resolution requests.
-	minDNSResRate = 30 * time.Second
-)
-
-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
-	return func(ctx context.Context, network, address string) (net.Conn, error) {
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
 		var dialer net.Dialer
-		return dialer.DialContext(ctx, network, authority)
+		return dialer.DialContext(ctx, network, address)
 	}
 }
 
-var customAuthorityResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (internal.NetResolver, error) {
+	if authority == "" {
+		return net.DefaultResolver, nil
+	}
+
 	host, port, err := parseTarget(authority, defaultDNSSvrPort)
 	if err != nil {
 		return nil, err
@@ -89,66 +103,47 @@
 
 	return &net.Resolver{
 		PreferGo: true,
-		Dial:     customAuthorityDialler(authorityWithPort),
+		Dial:     internal.AddressDialer(authorityWithPort),
 	}, nil
 }
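
For reference, this is the standard pattern for pointing Go's resolver at a specific DNS authority; a standalone sketch under the assumption that 8.8.8.8:53 stands in for the configured authority:

package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	r := &net.Resolver{
		PreferGo: true, // use Go's DNS client so Dial below is honored
		Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
			var d net.Dialer
			// Send every query to the chosen authority instead of the
			// system-configured server.
			return d.DialContext(ctx, network, "8.8.8.8:53")
		},
	}
	addrs, err := r.LookupHost(context.Background(), "grpc.io")
	fmt.Println(addrs, err)
}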
 
 // NewBuilder creates a dnsBuilder, which is used to create DNS resolvers.
 func NewBuilder() resolver.Builder {
-	return &dnsBuilder{minFreq: defaultFreq}
+	return &dnsBuilder{}
 }
 
-type dnsBuilder struct {
-	// minimum frequency of polling the DNS server.
-	minFreq time.Duration
-}
+type dnsBuilder struct{}
 
-// Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
-	host, port, err := parseTarget(target.Endpoint, defaultPort)
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	host, port, err := parseTarget(target.Endpoint(), defaultPort)
 	if err != nil {
 		return nil, err
 	}
 
 	// IP address.
-	if net.ParseIP(host) != nil {
-		host, _ = formatIP(host)
-		addr := []resolver.Address{{Addr: host + ":" + port}}
-		i := &ipResolver{
-			cc: cc,
-			ip: addr,
-			rn: make(chan struct{}, 1),
-			q:  make(chan struct{}),
-		}
-		cc.NewAddress(addr)
-		go i.watcher()
-		return i, nil
+	if ipAddr, err := formatIP(host); err == nil {
+		addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+		cc.UpdateState(resolver.State{Addresses: addr})
+		return deadResolver{}, nil
 	}
 
 	// DNS address (non-IP).
 	ctx, cancel := context.WithCancel(context.Background())
-	bc := backoff.DefaultConfig
-	bc.MaxDelay = b.minFreq
 	d := &dnsResolver{
-		freq:                 b.minFreq,
-		backoff:              internalbackoff.Exponential{Config: bc},
-		host:                 host,
-		port:                 port,
-		ctx:                  ctx,
-		cancel:               cancel,
-		cc:                   cc,
-		t:                    time.NewTimer(0),
-		rn:                   make(chan struct{}, 1),
-		disableServiceConfig: opts.DisableServiceConfig,
+		host:                host,
+		port:                port,
+		ctx:                 ctx,
+		cancel:              cancel,
+		cc:                  cc,
+		rn:                  make(chan struct{}, 1),
+		enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig,
 	}
 
-	if target.Authority == "" {
-		d.resolver = defaultResolver
-	} else {
-		d.resolver, err = customAuthorityResolver(target.Authority)
-		if err != nil {
-			return nil, err
-		}
+	d.resolver, err = internal.NewNetResolver(target.URL.Host)
+	if err != nil {
+		return nil, err
 	}
 
 	d.wg.Add(1)
@@ -161,71 +156,38 @@
 	return "dns"
 }
 
-type netResolver interface {
-	LookupHost(ctx context.Context, host string) (addrs []string, err error)
-	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
-	LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
 
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
-	cc resolver.ClientConn
-	ip []resolver.Address
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
-	rn chan struct{}
-	q  chan struct{}
-}
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
 
-// ResolveNow resend the address it stores, no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
-	select {
-	case i.rn <- struct{}{}:
-	default:
-	}
-}
-
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
-	close(i.q)
-}
-
-func (i *ipResolver) watcher() {
-	for {
-		select {
-		case <-i.rn:
-			i.cc.NewAddress(i.ip)
-		case <-i.q:
-			return
-		}
-	}
-}
+func (deadResolver) Close() {}
 
 // dnsResolver watches for the name resolution update for a non-IP target.
 type dnsResolver struct {
-	freq       time.Duration
-	backoff    internalbackoff.Exponential
-	retryCount int
-	host       string
-	port       string
-	resolver   netResolver
-	ctx        context.Context
-	cancel     context.CancelFunc
-	cc         resolver.ClientConn
-	// rn channel is used by ResolveNow() to force an immediate resolution of the target.
+	host     string
+	port     string
+	resolver internal.NetResolver
+	ctx      context.Context
+	cancel   context.CancelFunc
+	cc       resolver.ClientConn
+	// rn channel is used by ResolveNow() to force an immediate resolution of the
+	// target.
 	rn chan struct{}
-	t  *time.Timer
-	// wg is used to enforce Close() to return after the watcher() goroutine has finished.
-	// Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
-	// replace the real lookup functions with mocked ones to facilitate testing.
-	// If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
-	// will warns lookup (READ the lookup function pointers) inside watcher() goroutine
-	// has data race with replaceNetFunc (WRITE the lookup function pointers).
-	wg                   sync.WaitGroup
-	disableServiceConfig bool
+	// wg is used to make Close() block until the watcher() goroutine has
+	// finished; otherwise a data race is possible. Example race: in
+	// dns_resolver_test we replace the real lookup functions with mocked ones
+	// to facilitate testing. If Close() doesn't wait for the watcher()
+	// goroutine to finish, the race detector sometimes warns that the lookup
+	// inside watcher() (which READs the lookup function pointers) races with
+	// replaceNetFunc (which WRITEs the lookup function pointers).
+	wg                  sync.WaitGroup
+	enableServiceConfig bool
 }
 
-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
 	select {
 	case d.rn <- struct{}{}:
 	default:
@@ -236,142 +198,179 @@
 func (d *dnsResolver) Close() {
 	d.cancel()
 	d.wg.Wait()
-	d.t.Stop()
 }
 
 func (d *dnsResolver) watcher() {
 	defer d.wg.Done()
+	backoffIndex := 1
 	for {
-		select {
-		case <-d.ctx.Done():
-			return
-		case <-d.t.C:
-		case <-d.rn:
-			if !d.t.Stop() {
-				// Before resetting a timer, it should be stopped to prevent racing with
-				// reads on it's channel.
-				<-d.t.C
-			}
-		}
-
-		result, sc := d.lookup()
-		// Next lookup should happen within an interval defined by d.freq. It may be
-		// more often due to exponential retry on empty address list.
-		if len(result) == 0 {
-			d.retryCount++
-			d.t.Reset(d.backoff.Backoff(d.retryCount))
+		state, err := d.lookup()
+		if err != nil {
+			// Report error to the underlying grpc.ClientConn.
+			d.cc.ReportError(err)
 		} else {
-			d.retryCount = 0
-			d.t.Reset(d.freq)
+			err = d.cc.UpdateState(*state)
 		}
-		d.cc.NewServiceConfig(sc)
-		d.cc.NewAddress(result)
 
-		// Sleep to prevent excessive re-resolutions. Incoming resolution requests
-		// will be queued in d.rn.
-		t := time.NewTimer(minDNSResRate)
+		var nextResolutionTime time.Time
+		if err == nil {
+			// Resolution succeeded; wait for the next ResolveNow. However, also wait
+			// at least MinResolutionInterval to prevent constantly re-resolving.
+			backoffIndex = 1
+			nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
+			select {
+			case <-d.ctx.Done():
+				return
+			case <-d.rn:
+			}
+		} else {
+			// Poll on an error found in DNS Resolver or an error received from
+			// ClientConn.
+			nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
+			backoffIndex++
+		}
 		select {
-		case <-t.C:
 		case <-d.ctx.Done():
-			t.Stop()
 			return
+		case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
 		}
 	}
 }
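The loop above is a poll with exponential backoff on failure. A minimal standalone sketch of the same control flow, using time.After/time.Until directly instead of the overridable internal.Time*Func hooks, with a hypothetical resolveOnce callback standing in for lookup() plus the UpdateState/ReportError handling:

package main

import "time"

// pollWithBackoff mirrors dnsResolver.watcher: resolve, then either wait for
// a forced re-resolution (success) or back off and retry (failure).
func pollWithBackoff(done, resolveNow <-chan struct{}, resolveOnce func() error,
	backoff func(int) time.Duration, minInterval time.Duration) {
	backoffIndex := 1
	for {
		err := resolveOnce() // hypothetical stand-in for d.lookup()
		var next time.Time
		if err == nil {
			// Success: wait for a ResolveNow signal, but never re-resolve
			// more often than minInterval.
			backoffIndex = 1
			next = time.Now().Add(minInterval)
			select {
			case <-done:
				return
			case <-resolveNow:
			}
		} else {
			next = time.Now().Add(backoff(backoffIndex))
			backoffIndex++
		}
		select {
		case <-done:
			return
		case <-time.After(time.Until(next)):
		}
	}
}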
 
-func (d *dnsResolver) lookupSRV() []resolver.Address {
+func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
+	// Skip this particular host to avoid timeouts with some versions of
+	// systemd-resolved.
+	if !EnableSRVLookups || d.host == "metadata.google.internal." {
+		return nil, nil
+	}
 	var newAddrs []resolver.Address
-	_, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
+	_, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
 	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
+		err = handleDNSError(err, "SRV") // may become nil
+		return nil, err
 	}
 	for _, s := range srvs {
-		lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
+		lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
 		if err != nil {
-			grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
-		}
-		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
-			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+			err = handleDNSError(err, "A") // may become nil
+			if err == nil {
+				// If there are other SRV records, look them up and ignore this
+				// one that does not exist.
 				continue
 			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
-			newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+			return nil, err
+		}
+		for _, a := range lbAddrs {
+			ip, err := formatIP(a)
+			if err != nil {
+				return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
+			}
+			addr := ip + ":" + strconv.Itoa(int(s.Port))
+			newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
 		}
 	}
-	return newAddrs
+	return newAddrs, nil
 }
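For orientation, the SRV path is standard RFC 2782 resolution: LookupSRV("grpclb", "tcp", host) queries the DNS name _grpclb._tcp.<host>, and each SRV target is then resolved to addresses. A standalone sketch with net.DefaultResolver (printBalancerAddrs is a hypothetical helper, not part of this package):

package dnsexample

import (
	"context"
	"fmt"
	"net"
	"strconv"
)

// printBalancerAddrs prints every balancer address advertised via SRV records.
func printBalancerAddrs(ctx context.Context, host string) error {
	_, srvs, err := net.DefaultResolver.LookupSRV(ctx, "grpclb", "tcp", host)
	if err != nil {
		return err
	}
	for _, s := range srvs {
		ips, err := net.DefaultResolver.LookupHost(ctx, s.Target)
		if err != nil {
			return err
		}
		for _, ip := range ips {
			// JoinHostPort brackets IPv6 literals, like formatIP above.
			fmt.Println(net.JoinHostPort(ip, strconv.Itoa(int(s.Port))))
		}
	}
	return nil
}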
 
-func (d *dnsResolver) lookupTXT() string {
-	ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
+func handleDNSError(err error, lookupType string) error {
+	dnsErr, ok := err.(*net.DNSError)
+	if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+		// Timeouts and temporary errors should be communicated to gRPC to
+		// attempt another DNS query (with backoff).  Other errors should be
+		// suppressed (they may represent the absence of a TXT record).
+		return nil
+	}
 	if err != nil {
-		grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
-		return ""
+		err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+		logger.Info(err)
+	}
+	return err
+}
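Equivalently: only timeouts and temporary failures are surfaced, so that gRPC retries with backoff; other DNS errors (e.g. NXDOMAIN for an absent TXT record) are swallowed. A sketch of the same predicate using errors.As rather than the type assertion above (isRetryableDNSError is a hypothetical helper):

package dnsexample

import (
	"errors"
	"net"
)

// isRetryableDNSError reports whether handleDNSError would surface err.
func isRetryableDNSError(err error) bool {
	var dnsErr *net.DNSError
	if errors.As(err, &dnsErr) {
		return dnsErr.IsTimeout || dnsErr.IsTemporary
	}
	return err != nil // non-DNS errors are also surfaced
}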
+
+func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
+	ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
+	if err != nil {
+		if envconfig.TXTErrIgnore {
+			return nil
+		}
+		if err = handleDNSError(err, "TXT"); err != nil {
+			return &serviceconfig.ParseResult{Err: err}
+		}
+		return nil
 	}
 	var res string
 	for _, s := range ss {
 		res += s
 	}
 
-	// TXT record must have "grpc_config=" attribute in order to be used as service config.
+	// TXT record must have "grpc_config=" attribute in order to be used as
+	// service config.
 	if !strings.HasPrefix(res, txtAttribute) {
-		grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
-		return ""
-	}
-	return strings.TrimPrefix(res, txtAttribute)
-}
-
-func (d *dnsResolver) lookupHost() []resolver.Address {
-	var newAddrs []resolver.Address
-	addrs, err := d.resolver.LookupHost(d.ctx, d.host)
-	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+		logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+		// This is not an error; it is the equivalent of not having a service
+		// config.
 		return nil
 	}
+	sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+	return d.cc.ParseServiceConfig(sc)
+}
+
+func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
+	addrs, err := d.resolver.LookupHost(ctx, d.host)
+	if err != nil {
+		err = handleDNSError(err, "A")
+		return nil, err
+	}
+	newAddrs := make([]resolver.Address, 0, len(addrs))
 	for _, a := range addrs {
-		a, ok := formatIP(a)
-		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
+		ip, err := formatIP(a)
+		if err != nil {
+			return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
 		}
-		addr := a + ":" + d.port
+		addr := ip + ":" + d.port
 		newAddrs = append(newAddrs, resolver.Address{Addr: addr})
 	}
-	return newAddrs
+	return newAddrs, nil
 }
 
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
-	newAddrs := d.lookupSRV()
-	// Support fallback to non-balancer address.
-	newAddrs = append(newAddrs, d.lookupHost()...)
-	if d.disableServiceConfig {
-		return newAddrs, ""
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+	ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
+	defer cancel()
+	srv, srvErr := d.lookupSRV(ctx)
+	addrs, hostErr := d.lookupHost(ctx)
+	if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+		return nil, hostErr
 	}
-	sc := d.lookupTXT()
-	return newAddrs, canaryingSC(sc)
+
+	state := resolver.State{Addresses: addrs}
+	if len(srv) > 0 {
+		state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
+	}
+	if d.enableServiceConfig {
+		state.ServiceConfig = d.lookupTXT(ctx)
+	}
+	return &state, nil
 }
 
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
+// formatIP returns an error if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, return the addr and error = nil.
+// If addr is an IPv6 address, return the addr enclosed in square brackets and
+// error = nil.
+func formatIP(addr string) (string, error) {
+	ip, err := netip.ParseAddr(addr)
+	if err != nil {
+		return "", err
 	}
-	if ip.To4() != nil {
-		return addr, true
+	if ip.Is4() {
+		return addr, nil
 	}
-	return "[" + addr + "]", true
+	return "[" + addr + "]", nil
 }
 
-// parseTarget takes the user input target string and default port, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
+// parseTarget takes the user input target string and default port, returns
+// formatted host and port info. If target doesn't specify a port, set the port
+// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
+// in square brackets, brackets are stripped when setting the host.
 // examples:
 // target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
 // target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
@@ -379,20 +378,22 @@
 // target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
 func parseTarget(target, defaultPort string) (host, port string, err error) {
 	if target == "" {
-		return "", "", errMissingAddr
+		return "", "", internal.ErrMissingAddr
 	}
-	if ip := net.ParseIP(target); ip != nil {
+	if _, err := netip.ParseAddr(target); err == nil {
 		// target is an IPv4 or IPv6(without brackets) address
 		return target, defaultPort, nil
 	}
 	if host, port, err = net.SplitHostPort(target); err == nil {
 		if port == "" {
-			// If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
-			return "", "", errEndsWithColon
+			// If the port field is empty (target ends with colon), e.g. "[::1]:",
+			// this is an error.
+			return "", "", internal.ErrEndsWithColon
 		}
 		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
 		if host == "" {
-			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+			// Keep consistent with net.Dial(): If the host is empty, as in ":80",
+			// the local system is assumed.
 			host = "localhost"
 		}
 		return host, port, nil
@@ -427,7 +428,7 @@
 	if a == nil {
 		return true
 	}
-	return grpcrand.Intn(100)+1 <= *a
+	return rand.IntN(100)+1 <= *a
 }
 
 func canaryingSC(js string) string {
@@ -437,12 +438,12 @@
 	var rcs []rawChoice
 	err := json.Unmarshal([]byte(js), &rcs)
 	if err != nil {
-		grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+		logger.Warningf("dns: error parsing service config json: %v", err)
 		return ""
 	}
 	cliHostname, err := os.Hostname()
 	if err != nil {
-		grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+		logger.Warningf("dns: error getting client hostname: %v", err)
 		return ""
 	}
 	var sc string
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
new file mode 100644
index 0000000..c0eae4f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+	"context"
+	"errors"
+	"net"
+	"time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+	LookupHost(ctx context.Context, host string) (addrs []string, err error)
+	LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+	LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+	// ErrMissingAddr is the error returned when building a DNS resolver when
+	// the provided target name is empty.
+	ErrMissingAddr = errors.New("dns resolver: missing address")
+
+	// ErrEndsWithColon is the error returned when building a DNS resolver when
+	// the provided target name ends with a colon that is supposed to be the
+	// separator between host and port.  E.g. "::" is a valid address as it is
+	// an IPv6 address (host only) and "[::]:" is invalid as it ends with a
+	// colon as the host and port separator.
+	ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+	// TimeAfterFunc is used by the DNS resolver to wait for the given duration
+	// to elapse. In non-test code, this is implemented by time.After. In test
+	// code, this can be used to control the amount of time the resolver is
+	// blocked waiting for the duration to elapse.
+	TimeAfterFunc func(time.Duration) <-chan time.Time
+
+	// TimeNowFunc is used by the DNS resolver to get the current time.
+	// In non-test code, this is implemented by time.Now. In test code,
+	// this can be used to control the current time for the resolver.
+	TimeNowFunc func() time.Time
+
+	// TimeUntilFunc is used by the DNS resolver to calculate the remaining
+	// wait time for re-resolution. In non-test code, this is implemented by
+	// time.Until. In test code, this can be used to control the remaining
+	// time for resolver to wait for re-resolution.
+	TimeUntilFunc func(time.Time) time.Duration
+
+	// NewNetResolver returns the net.Resolver instance for the given target.
+	NewNetResolver func(string) (NetResolver, error)
+
+	// AddressDialer is the dialer used to dial the DNS server. It accepts the
+	// Host portion of the URL corresponding to the user's dial target and
+	// returns a dial function.
+	AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
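A hedged sketch of how a test could pin these hooks (overrideTimeHooksForTest is hypothetical, written as if inside this package and assuming the time import):

func overrideTimeHooksForTest() (fire chan time.Time, restore func()) {
	oldAfter, oldNow, oldUntil := TimeAfterFunc, TimeNowFunc, TimeUntilFunc
	now := time.Now()
	fire = make(chan time.Time) // the test sends on fire to "elapse" a wait
	TimeNowFunc = func() time.Time { return now }
	TimeUntilFunc = func(t time.Time) time.Duration { return t.Sub(now) }
	TimeAfterFunc = func(time.Duration) <-chan time.Time { return fire }
	restore = func() {
		TimeAfterFunc, TimeNowFunc, TimeUntilFunc = oldAfter, oldNow, oldUntil
	}
	return fire, restore
}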
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index 893d5d1..b901c7b 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -20,13 +20,20 @@
 // name without scheme back to gRPC as resolved address.
 package passthrough
 
-import "google.golang.org/grpc/resolver"
+import (
+	"errors"
+
+	"google.golang.org/grpc/resolver"
+)
 
 const scheme = "passthrough"
 
 type passthroughBuilder struct{}
 
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.Endpoint() == "" && opts.Dialer == nil {
+		return nil, errors.New("passthrough: received empty target in Build()")
+	}
 	r := &passthroughResolver{
 		target: target,
 		cc:     cc,
@@ -45,10 +52,10 @@
 }
 
 func (r *passthroughResolver) start() {
-	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
+	r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
 }
 
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
 
 func (*passthroughResolver) Close() {}
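A hedged usage sketch: with the passthrough scheme the endpoint is handed to the transport verbatim, with no client-side name resolution. The endpoint address is arbitrary, and this assumes a grpc-go version that provides grpc.NewClient (older code would use grpc.Dial):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Everything after "passthrough:///" is passed to the dialer as-is.
	conn, err := grpc.NewClient("passthrough:///10.1.2.3:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}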
 
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 0000000..27cd81a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/internal/transport/networktype"
+	"google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+	scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+	if target.URL.Host != "" {
+		return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
+	}
+
+	// gRPC was parsing the dial target manually before PR #4817, and we
+	// switched to using url.Parse() in that PR. To avoid breaking existing
+	// resolver implementations we ended up stripping the leading "/" from the
+	// endpoint. This obviously does not work for the "unix" scheme. Hence we
+	// end up using the parsed URL instead.
+	endpoint := target.URL.Path
+	if endpoint == "" {
+		endpoint = target.URL.Opaque
+	}
+	addr := resolver.Address{Addr: endpoint}
+	if b.scheme == unixAbstractScheme {
+		// We cannot prepend \0 as C++ gRPC does, because in Go '@' is used to
+		// signify that we do not want a trailing \0 in the address.
+		addr.Addr = "@" + addr.Addr
+	}
+	cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+	return &nopResolver{}, nil
+}
+
+func (b *builder) Scheme() string {
+	return b.scheme
+}
+
+func (b *builder) OverrideAuthority(resolver.Target) string {
+	return "localhost"
+}
+
+type nopResolver struct {
+}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+	resolver.Register(&builder{scheme: unixScheme})
+	resolver.Register(&builder{scheme: unixAbstractScheme})
+}
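A hedged usage sketch of both schemes (socket paths are arbitrary; assumes a grpc-go version that provides grpc.NewClient):

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Filesystem socket: resolves to the address "/tmp/app.sock".
	conn, err := grpc.NewClient("unix:///tmp/app.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Abstract socket (Linux): resolves to "@app-socket" per the '@'
	// convention described above.
	conn2, err := grpc.NewClient("unix-abstract:app-socket",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn2.Close()
}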
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
new file mode 100644
index 0000000..11d82af
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Duration defines JSON marshal and unmarshal methods to conform to the
+// protobuf JSON spec defined [here].
+//
+// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
+type Duration time.Duration
+
+func (d Duration) String() string {
+	return fmt.Sprint(time.Duration(d))
+}
+
+// MarshalJSON converts from d to a JSON string output.
+func (d Duration) MarshalJSON() ([]byte, error) {
+	ns := time.Duration(d).Nanoseconds()
+	sec := ns / int64(time.Second)
+	ns = ns % int64(time.Second)
+
+	var sign string
+	if sec < 0 || ns < 0 {
+		sign, sec, ns = "-", -1*sec, -1*ns
+	}
+
+	// Generated output always contains 0, 3, 6, or 9 fractional digits,
+	// depending on required precision.
+	str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
+	str = strings.TrimSuffix(str, "000")
+	str = strings.TrimSuffix(str, "000")
+	str = strings.TrimSuffix(str, ".000")
+	return []byte(fmt.Sprintf("\"%ss\"", str)), nil
+}
+
+// UnmarshalJSON unmarshals b as a duration JSON string into d.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !strings.HasSuffix(s, "s") {
+		return fmt.Errorf("malformed duration %q: missing seconds unit", s)
+	}
+	neg := false
+	if s[0] == '-' {
+		neg = true
+		s = s[1:]
+	}
+	ss := strings.SplitN(s[:len(s)-1], ".", 3)
+	if len(ss) > 2 {
+		return fmt.Errorf("malformed duration %q: too many decimals", s)
+	}
+	// hasDigits is set if either the whole or fractional part of the number is
+	// present, since both are optional but one is required.
+	hasDigits := false
+	var sec, ns int64
+	if len(ss[0]) > 0 {
+		var err error
+		if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
+			return fmt.Errorf("malformed duration %q: %v", s, err)
+		}
+		// Maximum seconds value per the durationpb spec.
+		const maxProtoSeconds = 315_576_000_000
+		if sec > maxProtoSeconds {
+			return fmt.Errorf("out of range: %q", s)
+		}
+		hasDigits = true
+	}
+	if len(ss) == 2 && len(ss[1]) > 0 {
+		if len(ss[1]) > 9 {
+			return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
+		}
+		var err error
+		if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
+			return fmt.Errorf("malformed duration %q: %v", s, err)
+		}
+		for i := 9; i > len(ss[1]); i-- {
+			ns *= 10
+		}
+		hasDigits = true
+	}
+	if !hasDigits {
+		return fmt.Errorf("malformed duration %q: contains no numbers", s)
+	}
+
+	if neg {
+		sec *= -1
+		ns *= -1
+	}
+
+	// Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
+	const maxSeconds = math.MaxInt64 / int64(time.Second)
+	const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
+	const minSeconds = math.MinInt64 / int64(time.Second)
+	const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
+
+	if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
+		*d = Duration(math.MaxInt64)
+	} else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
+		*d = Duration(math.MinInt64)
+	} else {
+		*d = Duration(sec*int64(time.Second) + ns)
+	}
+	return nil
+}
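To make the wire format concrete, a sketch written as if inside this package; the printed forms follow from the trimming rules above:

package serviceconfig

import (
	"fmt"
	"time"
)

func ExampleDuration() {
	d := Duration(1500 * time.Millisecond)
	b, _ := d.MarshalJSON()
	fmt.Println(string(b)) // "1.500s": always 0, 3, 6, or 9 fractional digits

	var parsed Duration
	_ = parsed.UnmarshalJSON([]byte(`"-3.000000001s"`))
	fmt.Println(time.Duration(parsed)) // -3.000000001s
}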
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000..51e733e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+	"encoding/json"
+	"fmt"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+	Name   string
+	Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+	if bc.Config == nil {
+		// If config is nil, return empty config `{}`.
+		return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+	}
+	c, err := json.Marshal(bc.Config)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+//   - If the config for the first supported policy is invalid, the whole service
+//     config is invalid.
+//   - If the list doesn't contain any supported policy, the whole service config
+//     is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+	var ir intermediateBalancerConfig
+	err := json.Unmarshal(b, &ir)
+	if err != nil {
+		return err
+	}
+
+	var names []string
+	for i, lbcfg := range ir {
+		if len(lbcfg) != 1 {
+			return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+		}
+
+		var (
+			name    string
+			jsonCfg json.RawMessage
+		)
+		// Get the key:value pair from the map. We have already made sure that
+		// the map contains a single entry.
+		for name, jsonCfg = range lbcfg {
+		}
+
+		names = append(names, name)
+		builder := balancer.Get(name)
+		if builder == nil {
+			// If the balancer is not registered, move on to the next config.
+			// This is not an error.
+			continue
+		}
+		bc.Name = name
+
+		parser, ok := builder.(balancer.ConfigParser)
+		if !ok {
+			if string(jsonCfg) != "{}" {
+				logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+			}
+			// Stop here even though the builder doesn't support parsing config.
+			return nil
+		}
+
+		cfg, err := parser.ParseConfig(jsonCfg)
+		if err != nil {
+			return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+		}
+		bc.Config = cfg
+		return nil
+	}
+	// This is reached when the for loop iterates over all entries, but didn't
+	// return. This means we had a loadBalancingConfig slice but did not
+	// encounter a registered policy. The config is considered invalid in this
+	// case.
+	return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
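A hedged sketch of the selection rule, written as if inside this package; it assumes "round_robin" is registered (it is in a default grpc-go build) while the first entry's policy name is not:

func exampleBalancerConfigSelection() {
	js := `[{"totally_unknown_policy": {}}, {"round_robin": {}}]`
	var bc BalancerConfig
	if err := bc.UnmarshalJSON([]byte(js)); err != nil {
		// Returned only if no entry names a registered policy, or if the
		// first supported entry's config fails to parse.
		panic(err)
	}
	// bc.Name == "round_robin": the first supported entry wins; unknown
	// policies earlier in the list are skipped without error.
	_ = bc.Name
}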
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+	// WaitForReady indicates whether RPCs sent to this method should wait until
+	// the connection is ready by default (!failfast). The value specified via the
+	// gRPC client API will override the value set here.
+	WaitForReady *bool
+	// Timeout is the default timeout for RPCs sent to this method. The actual
+	// deadline used will be the minimum of the value specified here and the value
+	// set by the application via the gRPC client API.  If either one is not set,
+	// then the other will be used.  If neither is set, then the RPC has no deadline.
+	Timeout *time.Duration
+	// MaxReqSize is the maximum allowed payload size for an individual request in a
+	// stream (client->server) in bytes. The size which is measured is the serialized
+	// payload after per-message compression (but before stream compression) in bytes.
+	// The actual value used is the minimum of the value specified here and the value set
+	// by the application via the gRPC client API. If either one is not set, then the other
+	// will be used.  If neither is set, then the built-in default is used.
+	MaxReqSize *int
+	// MaxRespSize is the maximum allowed payload size for an individual response in a
+	// stream (server->client) in bytes.
+	MaxRespSize *int
+	// RetryPolicy configures retry options for the method.
+	RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+	// MaxAttempts is the maximum number of attempts, including the original RPC.
+	//
+	// This field is required and must be two or greater.
+	MaxAttempts int
+
+	// Exponential backoff parameters. The initial retry attempt will occur at
+	// random(0, initialBackoff). In general, the nth attempt will occur at
+	// random(0,
+	//   min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+	//
+	// These fields are required and must be greater than zero.
+	InitialBackoff    time.Duration
+	MaxBackoff        time.Duration
+	BackoffMultiplier float64
+
+	// The set of status codes which may be retried.
+	//
+	// Status codes are specified as strings, e.g., "UNAVAILABLE".
+	//
+	// This field is required and must be non-empty.
+	// Note: a set is used to store this for easy lookup.
+	RetryableStatusCodes map[codes.Code]bool
+}
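For reference, a service config fragment in the JSON form this struct is parsed from (field names per gRFC A6; the service name and values are illustrative):

const retryServiceConfig = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`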
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 0000000..fd33af5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+	// TelemetryLabels are the telemetry labels to record.
+	TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+	labels, _ := ctx.Value(labelsKey{}).(*Labels)
+	return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+	// could also append
+	return context.WithValue(ctx, labelsKey{}, labels)
+}
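A usage sketch, written as if inside this package (the label key and value are illustrative):

func exampleLabels() {
	ctx := SetLabels(context.Background(), &Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-central1-a"},
	})
	if labels := GetLabels(ctx); labels != nil {
		_ = labels.TelemetryLabels["grpc.lb.locality"]
	}
}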
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000..7904465
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+	"fmt"
+
+	estats "google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics on any record call where the number of label values provided
+// does not match the number of label keys (see verifyLabels).
+type MetricsRecorderList struct {
+	// metricsRecorders are the metrics recorders this list will forward to.
+	metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metrics recorder list from the provided
+// stats handlers that implement the MetricsRecorder interface. If none of the
+// provided stats handlers implement the MetricsRecorder interface, the
+// returned MetricsRecorderList is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+	var mrs []estats.MetricsRecorder
+	for _, sh := range shs {
+		if mr, ok := sh.(estats.MetricsRecorder); ok {
+			mrs = append(mrs, mr)
+		}
+	}
+	return &MetricsRecorderList{
+		metricsRecorders: mrs,
+	}
+}
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+	if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+		panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+	}
+}
+
+// RecordInt64Count records the measurement alongside labels on the int
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Count(handle, incr, labels...)
+	}
+}
+
+// RecordFloat64Count records the measurement alongside labels on the float
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Count(handle, incr, labels...)
+	}
+}
+
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Histo(handle, incr, labels...)
+	}
+}
+
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+	}
+}
+
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+	verifyLabels(handle.Descriptor(), labels...)
+
+	for _, metricRecorder := range l.metricsRecorders {
+		metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
new file mode 100644
index 0000000..aad171c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -0,0 +1,246 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC.  These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto.  gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+	"errors"
+	"fmt"
+
+	spb "google.golang.org/genproto/googleapis/rpc/status"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/protoadapt"
+	"google.golang.org/protobuf/types/known/anypb"
+)
+
+// Status represents an RPC status code, message, and details.  It is immutable
+// and should be created with New, Newf, or FromProto.
+type Status struct {
+	s *spb.Status
+}
+
+// NewWithProto returns a new status including details from statusProto.  This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+	if len(statusProto) != 1 {
+		// No grpc-status-details-bin header, or multiple; just ignore.
+		return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+	}
+	st := &spb.Status{}
+	if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+		// Probably not a google.rpc.Status proto; do not provide details.
+		return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+	}
+	if st.Code == int32(code) {
+		// The codes match between the grpc-status header and the
+		// grpc-status-details-bin header; use the full details proto.
+		return &Status{s: st}
+	}
+	return &Status{
+		s: &spb.Status{
+			Code: int32(codes.Internal),
+			Message: fmt.Sprintf(
+				"grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+				code, message, st,
+			),
+		},
+	}
+}
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+	return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...any) *Status {
+	return New(c, fmt.Sprintf(format, a...))
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+	return &Status{s: proto.Clone(s).(*spb.Status)}
+}
+
+// Err returns an error representing c and msg.  If c is OK, returns nil.
+func Err(c codes.Code, msg string) error {
+	return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...any) error {
+	return Err(c, fmt.Sprintf(format, a...))
+}
+
+// Code returns the status code contained in s.
+func (s *Status) Code() codes.Code {
+	if s == nil || s.s == nil {
+		return codes.OK
+	}
+	return codes.Code(s.s.Code)
+}
+
+// Message returns the message contained in s.
+func (s *Status) Message() string {
+	if s == nil || s.s == nil {
+		return ""
+	}
+	return s.s.Message
+}
+
+// Proto returns s's status as an spb.Status proto message.
+func (s *Status) Proto() *spb.Status {
+	if s == nil {
+		return nil
+	}
+	return proto.Clone(s.s).(*spb.Status)
+}
+
+// Err returns an immutable error representing s; returns nil if s.Code() is OK.
+func (s *Status) Err() error {
+	if s.Code() == codes.OK {
+		return nil
+	}
+	return &Error{s: s}
+}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
+	if s.Code() == codes.OK {
+		return nil, errors.New("no error details for status with code OK")
+	}
+	// s.Code() != OK implies that s.Proto() != nil.
+	p := s.Proto()
+	for _, detail := range details {
+		m, err := anypb.New(protoadapt.MessageV2Of(detail))
+		if err != nil {
+			return nil, err
+		}
+		p.Details = append(p.Details, m)
+	}
+	return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
+func (s *Status) Details() []any {
+	if s == nil || s.s == nil {
+		return nil
+	}
+	details := make([]any, 0, len(s.s.Details))
+	for _, any := range s.s.Details {
+		detail, err := any.UnmarshalNew()
+		if err != nil {
+			details = append(details, err)
+			continue
+		}
+		// The call to MessageV1Of is required to unwrap the proto message if
+		// it implemented only the MessageV1 API. The proto message would have
+		// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+		// added to a global registry used by any.UnmarshalNew().
+		// MessageV1Of has the following behaviour:
+		// 1. If the given message is a wrapped MessageV1, it returns the
+		//   unwrapped value.
+		// 2. If the given message already implements MessageV1, it returns it
+		//   as is.
+		// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+		//
+		// Since the Status.WithDetails() API only accepts MessageV1, calling
+		// MessageV1Of ensures we return the same type that was given to
+		// WithDetails:
+		// * If the given type implemented only MessageV1, the unwrapping from
+		//   point 1 above will restore the type.
+		// * If the given type implemented both MessageV1 and MessageV2, point 2
+		//   above will ensure no wrapping is performed.
+		// * If the given type implemented only MessageV2 and was wrapped using
+		//   MessageV1Of before passing to WithDetails(), it would be unwrapped
+		//   in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+		//   that the type is wrapped in a MessageV1 wrapper again before
+		//   returning. Note that protoc-gen-go doesn't generate code which
+		//   implements ONLY MessageV2 at the time of writing.
+		//
+		// NOTE: Status details can also be added using the FromProto method.
+		// This could theoretically allow passing a Detail message that only
+		// implements the V2 API. In such a case the message will be wrapped in
+		// a MessageV1 wrapper when fetched using Details().
+		// Since protoc-gen-go generates only code that implements both V1 and
+		// V2 APIs for backward compatibility, this is not a concern.
+		details = append(details, protoadapt.MessageV1Of(detail))
+	}
+	return details
+}
+
+func (s *Status) String() string {
+	return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
+// Error wraps a pointer of a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+	s *Status
+}
+
+func (e *Error) Error() string {
+	return e.s.String()
+}
+
+// GRPCStatus returns the Status represented by e.
+func (e *Error) GRPCStatus() *Status {
+	return e.s
+}
+
+// Is implements errors.Is functionality.
+// An Error is equivalent if the code and message are identical.
+func (e *Error) Is(target error) bool {
+	tse, ok := target.(*Error)
+	if !ok {
+		return false
+	}
+	return proto.Equal(e.s.s, tse.s.s)
+}
+
+// IsRestrictedControlPlaneCode returns whether the status includes a code
+// restricted for control plane usage as defined by gRFC A54.
+func IsRestrictedControlPlaneCode(s *Status) bool {
+	switch s.Code() {
+	case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
+		return true
+	}
+	return false
+}
+
+// RawStatusProto returns the internal protobuf message for use by gRPC itself.
+func RawStatusProto(s *Status) *spb.Status {
+	if s == nil {
+		return nil
+	}
+	return s.s
+}
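For callers outside gRPC, the same machinery is exposed via the public google.golang.org/grpc/status package. A small, self-contained example of attaching and reading details (the BadRequest detail type from genproto's errdetails is one common choice; values are illustrative):

package main

import (
	"fmt"

	epb "google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.InvalidArgument, "bad request")
	st, err := st.WithDetails(&epb.BadRequest{
		FieldViolations: []*epb.BadRequest_FieldViolation{
			{Field: "name", Description: "must not be empty"},
		},
	})
	if err != nil {
		panic(err) // WithDetails fails for OK statuses or marshal errors
	}
	for _, d := range st.Details() {
		fmt.Printf("%T: %v\n", d, d)
	}
}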
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 43281a3..b3a7227 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
 /*
  *
  * Copyright 2018 gRPC authors.
@@ -32,35 +30,35 @@
 	"google.golang.org/grpc/grpclog"
 )
 
+var logger = grpclog.Component("core")
+
 // GetCPUTime returns how much CPU time has passed since the start of this process.
 func GetCPUTime() int64 {
 	var ts unix.Timespec
 	if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
-		grpclog.Fatal(err)
+		logger.Fatal(err)
 	}
 	return ts.Nano()
 }
 
-// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
-type Rusage syscall.Rusage
+// Rusage is an alias for syscall.Rusage under the linux environment.
+type Rusage = syscall.Rusage
 
 // GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
-	rusage = new(Rusage)
-	syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
-	return
+func GetRusage() *Rusage {
+	rusage := new(Rusage)
+	syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+	return rusage
 }
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
 // between two Rusage structs.
 func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
-	f := (*syscall.Rusage)(first)
-	l := (*syscall.Rusage)(latest)
 	var (
-		utimeDiffs  = l.Utime.Sec - f.Utime.Sec
-		utimeDiffus = l.Utime.Usec - f.Utime.Usec
-		stimeDiffs  = l.Stime.Sec - f.Stime.Sec
-		stimeDiffus = l.Stime.Usec - f.Stime.Usec
+		utimeDiffs  = latest.Utime.Sec - first.Utime.Sec
+		utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+		stimeDiffs  = latest.Stime.Sec - first.Stime.Sec
+		stimeDiffus = latest.Stime.Usec - first.Stime.Usec
 	)
 
 	uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
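A hedged usage sketch, written as if inside this package (doWork is hypothetical; internal packages are not importable from outside grpc-go):

// measureCPU returns the user and system CPU seconds consumed by doWork.
func measureCPU(doWork func()) (userSec, sysSec float64) {
	before := GetRusage()
	doWork()
	after := GetRusage()
	return CPUTimeDiff(before, after)
}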
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index d3fd9da..54c24c2 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
 
 /*
  *
@@ -18,6 +19,8 @@
  *
  */
 
+// Package syscall provides functionality that grpc uses to get low-level
+// operating system stats/info.
 package syscall
 
 import (
@@ -29,45 +32,46 @@
 )
 
 var once sync.Once
+var logger = grpclog.Component("core")
 
 func log() {
 	once.Do(func() {
-		grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+		logger.Info("CPU time info is unavailable on non-linux environments.")
 	})
 }
 
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
 func GetCPUTime() int64 {
 	log()
 	return 0
 }
 
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
 type Rusage struct{}
 
-// GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
 	log()
 	return nil
 }
 
 // CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It a no-op function for non-linux or appengine environment.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+// between two Rusage structs. It is a no-op function for non-linux environments.
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
 	log()
 	return 0, 0
 }
 
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
 	log()
 	return nil
 }
 
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(net.Conn) (int, error) {
 	log()
 	return -1, nil
 }
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
new file mode 100644
index 0000000..4f347ed
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -0,0 +1,29 @@
+//go:build !unix && !windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+)
+
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on platforms that
+// are neither unix nor windows.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 0000000..7e7aaa5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
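A hedged sketch of wiring a custom dialer through the public API. It mirrors only the negative-KeepAlive part above and omits the platform-specific Control callback; the target and credentials are illustrative, and grpc-go installs NetDialerWithTCPKeepalive by default, so this exists purely to show the hook:

package main

import (
	"context"
	"log"
	"net"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Negative KeepAlive stops the stdlib from managing keepalives itself,
	// leaving the parameters at OS defaults.
	d := &net.Dialer{KeepAlive: time.Duration(-1)}
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return d.DialContext(ctx, "tcp", addr)
		}),
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}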
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 0000000..d5c1085
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+	return &net.Dialer{
+		// Setting a negative value here prevents the Go stdlib from overriding
+		// the values of TCP keepalive time and interval. It also prevents the
+		// Go stdlib from enabling TCP keepalives by default.
+		KeepAlive: time.Duration(-1),
+		// This method is called after the underlying network socket is created,
+		// but before dialing the socket (or calling its connect() method). The
+		// combination of unconditionally enabling TCP keepalives here, and
+		// disabling the overriding of TCP keepalive parameters by setting the
+		// KeepAlive field to a negative value above, results in OS defaults for
+		// the TCP keepalive interval and time parameters.
+		Control: func(_, _ string, c syscall.RawConn) error {
+			return c.Control(func(fd uintptr) {
+				windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+			})
+		},
+	}
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
new file mode 100644
index 0000000..ccc0e01
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"sync/atomic"
+
+	"golang.org/x/net/http2"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+	*Stream // Embed for common stream functionality.
+
+	ct       *http2Client
+	done     chan struct{} // closed at the end of stream to unblock writers.
+	doneFunc func()        // invoked at the end of stream.
+
+	headerChan       chan struct{} // closed to indicate the end of header metadata.
+	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+	// headerValid indicates whether a valid header was received.  Only
+	// meaningful after headerChan is closed (always call waitOnHeader() before
+	// reading its value).
+	headerValid bool
+	header      metadata.MD // the received header metadata
+	noHeaders   bool        // set if the client never received headers (set only after the stream is done).
+
+	bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+	unprocessed   atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+	status *status.Status // the status error received from the server
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.ct.incrMsgRecv()
+	}
+	return b, err
+}
+
+// Close closes the stream and propagates err to any readers.
+func (s *ClientStream) Close(err error) {
+	var (
+		rst     bool
+		rstCode http2.ErrCode
+	)
+	if err != nil {
+		rst = true
+		rstCode = http2.ErrCodeCancel
+	}
+	s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool {
+	return s.bytesReceived.Load()
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *ClientStream) Unprocessed() bool {
+	return s.unprocessed.Load()
+}
+
+func (s *ClientStream) waitOnHeader() {
+	select {
+	case <-s.ctx.Done():
+		// Close the stream to prevent headers/trailers from changing after
+		// this function returns.
+		s.Close(ContextErr(s.ctx.Err()))
+		// headerChan could possibly not be closed yet if closeStream raced
+		// with operateHeaders; wait until it is closed explicitly here.
+		<-s.headerChan
+	case <-s.headerChan:
+	}
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *ClientStream) RecvCompress() string {
+	s.waitOnHeader()
+	return s.recvCompress
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *ClientStream) Done() <-chan struct{} {
+	return s.done
+}
+
+// Header returns the header metadata of the stream. Acquires the key-value
+// pairs of header metadata once it is available. It blocks until i) the
+// metadata is ready or ii) there is no header metadata or iii) the stream is
+// canceled/expired.
+func (s *ClientStream) Header() (metadata.MD, error) {
+	s.waitOnHeader()
+
+	if !s.headerValid || s.noHeaders {
+		return nil, s.status.Err()
+	}
+
+	return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only.  If the stream ends
+// before headers are received, it returns true.
+func (s *ClientStream) TrailersOnly() bool {
+	s.waitOnHeader()
+	return s.noHeaders
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status {
+	return s.status
+}
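The headerChanClosed field above exists so headerChan can be closed safely from more than one code path (closeStream racing with operateHeaders, as waitOnHeader notes). A reduced sketch of that close-once idiom, using only the standard library; the type and method names here are illustrative:

package main

import (
	"fmt"
	"sync/atomic"
)

type stream struct {
	headerChan       chan struct{}
	headerChanClosed uint32
}

// closeHeaderChan closes headerChan exactly once, even when called
// concurrently: the CompareAndSwap admits only the first caller.
func (s *stream) closeHeaderChan() {
	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
		close(s.headerChan)
	}
}

func main() {
	s := &stream{headerChan: make(chan struct{})}
	go s.closeHeaderChan()
	s.closeHeaderChan()
	<-s.headerChan
	fmt.Println("header channel closed exactly once")
}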
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index ddee20b..a2831e5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -20,21 +20,35 @@
 
 import (
 	"bytes"
+	"errors"
 	"fmt"
+	"net"
 	"runtime"
+	"strconv"
 	"sync"
 	"sync/atomic"
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/status"
 )
 
 var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
 	e.SetMaxDynamicTableSizeLimit(v)
 }
 
+// itemNodePool is used to reduce heap allocations.
+var itemNodePool = sync.Pool{
+	New: func() any {
+		return &itemNode{}
+	},
+}
+
 type itemNode struct {
-	it   interface{}
+	it   any
 	next *itemNode
 }
 
@@ -43,8 +57,10 @@
 	tail *itemNode
 }
 
-func (il *itemList) enqueue(i interface{}) {
-	n := &itemNode{it: i}
+func (il *itemList) enqueue(i any) {
+	n := itemNodePool.Get().(*itemNode)
+	n.next = nil
+	n.it = i
 	if il.tail == nil {
 		il.head, il.tail = n, n
 		return
@@ -55,16 +71,18 @@
 
 // peek returns the first item in the list without removing it from the
 // list.
-func (il *itemList) peek() interface{} {
+func (il *itemList) peek() any {
 	return il.head.it
 }
 
-func (il *itemList) dequeue() interface{} {
+func (il *itemList) dequeue() any {
 	if il.head == nil {
 		return nil
 	}
 	i := il.head.it
+	temp := il.head
 	il.head = il.head.next
+	itemNodePool.Put(temp)
 	if il.head == nil {
 		il.tail = nil
 	}
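Pulled out of the diff context, the pool-backed enqueue/dequeue above amounts to the following standalone pattern, which avoids one heap allocation per queued frame. Names are illustrative, not the package's; clearing the recycled node's value is extra hygiene so the pool does not pin freed items.

package main

import (
	"fmt"
	"sync"
)

type node struct {
	val  any
	next *node
}

var nodePool = sync.Pool{New: func() any { return &node{} }}

type queue struct{ head, tail *node }

func (q *queue) enqueue(v any) {
	n := nodePool.Get().(*node)
	n.val, n.next = v, nil
	if q.tail == nil {
		q.head, q.tail = n, n
		return
	}
	q.tail.next = n
	q.tail = n
}

func (q *queue) dequeue() any {
	if q.head == nil {
		return nil
	}
	n := q.head
	v := n.val
	q.head = n.next
	if q.head == nil {
		q.tail = nil
	}
	n.val = nil // drop the reference before recycling the node
	nodePool.Put(n)
	return v
}

func main() {
	q := &queue{}
	q.enqueue("a")
	q.enqueue("b")
	fmt.Println(q.dequeue(), q.dequeue()) // a b
}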
@@ -128,13 +146,24 @@
 
 func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
 
+type earlyAbortStream struct {
+	httpStatus     uint32
+	streamID       uint32
+	contentSubtype string
+	status         *status.Status
+	rst            bool
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
 type dataFrame struct {
-	streamID  uint32
-	endStream bool
-	h         []byte
-	d         []byte
+	streamID   uint32
+	endStream  bool
+	h          []byte
+	data       mem.BufferSlice
+	processing bool
 	// onEachWrite is called every time
-	// a part of d is written out.
+	// a part of data is written out.
 	onEachWrite func()
 }
 
@@ -177,7 +206,7 @@
 	code      http2.ErrCode
 	debugData []byte
 	headsUp   bool
-	closeConn bool
+	closeConn error // if set, loopyWriter will exit with this error
 }
 
 func (*goAway) isTransportResponseFrame() bool { return false }
@@ -195,6 +224,14 @@
 
 func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
 
+// closeConnection is an instruction to tell the loopy writer to flush the
+// framer and exit, which will cause the transport's connection to be closed
+// (by the client or server).  The transport itself will close after the reader
+// encounters the EOF caused by the connection closure.
+type closeConnection struct{}
+
+func (closeConnection) isTransportResponseFrame() bool { return false }
+
 type outStreamState int
 
 const (
@@ -209,6 +246,7 @@
 	itl              *itemList
 	bytesOutStanding int
 	wq               *writeQuota
+	reader           mem.Reader
 
 	next *outStream
 	prev *outStream
@@ -265,18 +303,22 @@
 }
 
 // controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
 type controlBuffer struct {
-	ch              chan struct{}
-	done            <-chan struct{}
+	wakeupCh chan struct{}   // Unblocks readers waiting for something to read.
+	done     <-chan struct{} // Closed when the transport is done.
+
+	// Mutex guards all the fields below, except trfChan which can be read
+	// atomically without holding mu.
 	mu              sync.Mutex
-	consumerWaiting bool
-	list            *itemList
-	err             error
+	consumerWaiting bool      // True when readers are blocked waiting for new data.
+	closed          bool      // True when the controlbuf is finished.
+	list            *itemList // List of queued control frames.
 
 	// transportResponseFrames counts the number of queued items that represent
 	// the response of an action initiated by the peer.  trfChan is created
@@ -284,47 +326,59 @@
 	// closed and nilled when transportResponseFrames drops below the
 	// threshold.  Both fields are protected by mu.
 	transportResponseFrames int
-	trfChan                 atomic.Value // *chan struct{}
+	trfChan                 atomic.Pointer[chan struct{}]
 }
 
 func newControlBuffer(done <-chan struct{}) *controlBuffer {
 	return &controlBuffer{
-		ch:   make(chan struct{}, 1),
-		list: &itemList{},
-		done: done,
+		wakeupCh: make(chan struct{}, 1),
+		list:     &itemList{},
+		done:     done,
 	}
 }
 
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings, cleanupStreams, etc.
 func (c *controlBuffer) throttle() {
-	ch, _ := c.trfChan.Load().(*chan struct{})
-	if ch != nil {
+	if ch := c.trfChan.Load(); ch != nil {
 		select {
-		case <-*ch:
+		case <-(*ch):
 		case <-c.done:
 		}
 	}
 }
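The throttle/finish pair relies on publishing an optional channel through an atomic.Pointer so blocked readers never contend on mu; Swap(nil) both unpublishes the channel and hands it back to be closed, which is how finish() unblocks an in-flight throttle(). A minimal self-contained sketch of that pattern (Go 1.19+ for generic atomics):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type throttler struct {
	ch atomic.Pointer[chan struct{}]
}

// block waits until the published channel, if any, is closed.
func (t *throttler) block() {
	if ch := t.ch.Load(); ch != nil {
		<-(*ch)
	}
}

func main() {
	t := &throttler{}
	ch := make(chan struct{})
	t.ch.Store(&ch)

	go func() {
		time.Sleep(10 * time.Millisecond)
		// Unpublish and close in one step, exactly once.
		if old := t.ch.Swap(nil); old != nil {
			close(*old)
		}
	}()

	t.block()
	fmt.Println("unthrottled")
}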
 
+// put adds an item to the controlbuf.
 func (c *controlBuffer) put(it cbItem) error {
 	_, err := c.executeAndPut(nil, it)
 	return err
 }
 
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
-	var wakeUp bool
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
+func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
 	c.mu.Lock()
-	if c.err != nil {
-		c.mu.Unlock()
-		return false, c.err
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return false, ErrConnClosing
 	}
 	if f != nil {
-		if !f(it) { // f wasn't successful
-			c.mu.Unlock()
+		if !f() { // f wasn't successful
 			return false, nil
 		}
 	}
+	if it == nil {
+		return true, nil
+	}
+
+	var wakeUp bool
 	if c.consumerWaiting {
 		wakeUp = true
 		c.consumerWaiting = false
@@ -339,88 +393,100 @@
 			c.trfChan.Store(&ch)
 		}
 	}
-	c.mu.Unlock()
 	if wakeUp {
 		select {
-		case c.ch <- struct{}{}:
+		case c.wakeupCh <- struct{}{}:
 		default:
 		}
 	}
 	return true, nil
 }
 
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
-	c.mu.Lock()
-	if c.err != nil {
-		c.mu.Unlock()
-		return false, c.err
-	}
-	if !f(it) { // f wasn't successful
-		c.mu.Unlock()
-		return false, nil
-	}
-	c.mu.Unlock()
-	return true, nil
-}
-
-func (c *controlBuffer) get(block bool) (interface{}, error) {
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
+func (c *controlBuffer) get(block bool) (any, error) {
 	for {
 		c.mu.Lock()
-		if c.err != nil {
+		frame, err := c.getOnceLocked()
+		if frame != nil || err != nil || !block {
+			// If we read a frame or an error, we can return to the caller. The
+			// call to getOnceLocked() returns a nil frame and a nil error if
+			// there is nothing to read, and in that case, if the caller asked
+			// us not to block, we can return now as well.
 			c.mu.Unlock()
-			return nil, c.err
-		}
-		if !c.list.isEmpty() {
-			h := c.list.dequeue().(cbItem)
-			if h.isTransportResponseFrame() {
-				if c.transportResponseFrames == maxQueuedTransportResponseFrames {
-					// We are removing the frame that put us over the
-					// threshold; close and clear the throttling channel.
-					ch := c.trfChan.Load().(*chan struct{})
-					close(*ch)
-					c.trfChan.Store((*chan struct{})(nil))
-				}
-				c.transportResponseFrames--
-			}
-			c.mu.Unlock()
-			return h, nil
-		}
-		if !block {
-			c.mu.Unlock()
-			return nil, nil
+			return frame, err
 		}
 		c.consumerWaiting = true
 		c.mu.Unlock()
+
+		// Release the lock above and wait to be woken up.
 		select {
-		case <-c.ch:
+		case <-c.wakeupCh:
 		case <-c.done:
-			c.finish()
-			return nil, ErrConnClosing
+			return nil, errors.New("transport closed by client")
 		}
 	}
 }
 
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+	if c.closed {
+		return nil, ErrConnClosing
+	}
+	if c.list.isEmpty() {
+		return nil, nil
+	}
+	h := c.list.dequeue().(cbItem)
+	if h.isTransportResponseFrame() {
+		if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+			// We are removing the frame that put us over the
+			// threshold; close and clear the throttling channel.
+			ch := c.trfChan.Swap(nil)
+			close(*ch)
+		}
+		c.transportResponseFrames--
+	}
+	return h, nil
+}
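Underneath get() and executeAndPut() is a classic consumer-waiting/wakeup-channel handoff: the consumer announces it is blocked under the mutex, the producer signals only when a consumer is waiting, and the single-slot channel coalesces redundant wakeups. The sketch below models just that handoff, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

type buf struct {
	mu       sync.Mutex
	items    []int
	waiting  bool
	wakeupCh chan struct{}
}

func newBuf() *buf { return &buf{wakeupCh: make(chan struct{}, 1)} }

func (b *buf) put(v int) {
	b.mu.Lock()
	b.items = append(b.items, v)
	wake := b.waiting
	b.waiting = false
	b.mu.Unlock()
	if wake {
		select {
		case b.wakeupCh <- struct{}{}: // non-blocking: slot may already be full
		default:
		}
	}
}

func (b *buf) get() int {
	for {
		b.mu.Lock()
		if len(b.items) > 0 {
			v := b.items[0]
			b.items = b.items[1:]
			b.mu.Unlock()
			return v
		}
		// The emptiness check and the waiting flag are set in the same
		// critical section, so a concurrent put cannot be missed.
		b.waiting = true
		b.mu.Unlock()
		<-b.wakeupCh
	}
}

func main() {
	b := newBuf()
	go b.put(42)
	fmt.Println(b.get())
}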
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
 func (c *controlBuffer) finish() {
 	c.mu.Lock()
-	if c.err != nil {
-		c.mu.Unlock()
+	defer c.mu.Unlock()
+
+	if c.closed {
 		return
 	}
-	c.err = ErrConnClosing
+	c.closed = true
 	// There may be headers for streams in the control buffer.
 	// These streams need to be cleaned out since the transport
 	// is still not aware of these yet.
 	for head := c.list.dequeueAll(); head != nil; head = head.next {
-		hdr, ok := head.it.(*headerFrame)
-		if !ok {
-			continue
-		}
-		if hdr.onOrphaned != nil { // It will be nil on the server-side.
-			hdr.onOrphaned(ErrConnClosing)
+		switch v := head.it.(type) {
+		case *headerFrame:
+			if v.onOrphaned != nil { // It will be nil on the server-side.
+				v.onOrphaned(ErrConnClosing)
+			}
+		case *dataFrame:
+			if !v.processing {
+				v.data.Free()
+			}
 		}
 	}
-	c.mu.Unlock()
+
+	// In case throttle() is currently in flight, it needs to be unblocked.
+	// Otherwise, the transport may not close, since the transport is closed by
+	// the reader encountering the connection error.
+	ch := c.trfChan.Swap(nil)
+	if ch != nil {
+		close(*ch)
+	}
 }
 
 type side int
@@ -436,7 +502,7 @@
 // stream maintains a queue of data frames; as loopy receives data frames
 // it gets added to the queue of the relevant stream.
 // Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
 // processing a stream, loopy writes out data bytes from this stream capped by the min
 // of http2MaxFrameLen, connection-level flow control and stream-level flow control.
 type loopyWriter struct {
@@ -458,24 +524,31 @@
 	hEnc          *hpack.Encoder // HPACK encoder.
 	bdpEst        *bdpEstimator
 	draining      bool
+	conn          net.Conn
+	logger        *grpclog.PrefixLogger
+	bufferPool    mem.BufferPool
 
 	// Side-specific handlers
 	ssGoAwayHandler func(*goAway) (bool, error)
 }
 
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
 	var buf bytes.Buffer
 	l := &loopyWriter{
-		side:          s,
-		cbuf:          cbuf,
-		sendQuota:     defaultWindowSize,
-		oiws:          defaultWindowSize,
-		estdStreams:   make(map[uint32]*outStream),
-		activeStreams: newOutStreamList(),
-		framer:        fr,
-		hBuf:          &buf,
-		hEnc:          hpack.NewEncoder(&buf),
-		bdpEst:        bdpEst,
+		side:            s,
+		cbuf:            cbuf,
+		sendQuota:       defaultWindowSize,
+		oiws:            defaultWindowSize,
+		estdStreams:     make(map[uint32]*outStream),
+		activeStreams:   newOutStreamList(),
+		framer:          fr,
+		hBuf:            &buf,
+		hEnc:            hpack.NewEncoder(&buf),
+		bdpEst:          bdpEst,
+		conn:            conn,
+		logger:          logger,
+		ssGoAwayHandler: goAwayHandler,
+		bufferPool:      bufferPool,
 	}
 	return l
 }
@@ -493,21 +566,25 @@
 // 2. Stream level flow control quota available.
 //
 // In each iteration of run loop, other than processing the incoming control
-// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
-// This results in writing of HTTP2 frames into an underlying write buffer.
-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
-// if the batch size is too low to give stream goroutines a chance to fill it up.
+// frame, loopy calls processData, which processes one node from the
+// activeStreams linked-list.  This results in writing of HTTP2 frames into an
+// underlying write buffer.  When there's no more control frames to read from
+// controlBuf, loopy flushes the write buffer.  As an optimization, to increase
+// the batch size for each flush, loopy yields the processor, once if the batch
+// size is too low to give stream goroutines a chance to fill it up.
+//
+// Upon exiting, if the error causing the exit is not an I/O error, run()
+// flushes the underlying connection.  The connection is always left open to
+// allow different closing behavior on the client and server.
 func (l *loopyWriter) run() (err error) {
 	defer func() {
-		if err == ErrConnClosing {
-			// Don't log ErrConnClosing as error since it happens
-			// 1. When the connection is closed by some other known issue.
-			// 2. User closed the connection.
-			// 3. A graceful close of connection.
-			infof("transport: loopyWriter.run returning. %v", err)
-			err = nil
+		if l.logger.V(logLevel) {
+			l.logger.Infof("loopyWriter exiting with error: %v", err)
 		}
+		if !isIOError(err) {
+			l.framer.writer.Flush()
+		}
+		l.cbuf.finish()
 	}()
 	for {
 		it, err := l.cbuf.get(true)
@@ -552,7 +629,6 @@
 			}
 			l.framer.writer.Flush()
 			break hasdata
-
 		}
 	}
 }
@@ -561,11 +637,11 @@
 	return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
 }
 
-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
 	// Otherwise update the quota.
 	if w.streamID == 0 {
 		l.sendQuota += w.increment
-		return nil
+		return
 	}
 	// Find the stream and update it.
 	if str, ok := l.estdStreams[w.streamID]; ok {
@@ -573,10 +649,9 @@
 		if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
 			str.state = active
 			l.activeStreams.enqueue(str)
-			return nil
+			return
 		}
 	}
-	return nil
 }
 
 func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
@@ -584,28 +659,28 @@
 }
 
 func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
-	if err := l.applySettings(s.ss); err != nil {
-		return err
-	}
+	l.applySettings(s.ss)
 	return l.framer.fr.WriteSettingsAck()
 }
 
-func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
+func (l *loopyWriter) registerStreamHandler(h *registerStream) {
 	str := &outStream{
-		id:    h.streamID,
-		state: empty,
-		itl:   &itemList{},
-		wq:    h.wq,
+		id:     h.streamID,
+		state:  empty,
+		itl:    &itemList{},
+		wq:     h.wq,
+		reader: mem.BufferSlice{}.Reader(),
 	}
 	l.estdStreams[h.streamID] = str
-	return nil
 }
 
 func (l *loopyWriter) headerHandler(h *headerFrame) error {
 	if l.side == serverSide {
 		str, ok := l.estdStreams[h.streamID]
 		if !ok {
-			warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+			if l.logger.V(logLevel) {
+				l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
+			}
 			return nil
 		}
 		// Case 1.A: Server is responding back with headers.
@@ -626,24 +701,26 @@
 	}
 	// Case 2: Client wants to originate stream.
 	str := &outStream{
-		id:    h.streamID,
-		state: empty,
-		itl:   &itemList{},
-		wq:    h.wq,
+		id:     h.streamID,
+		state:  empty,
+		itl:    &itemList{},
+		wq:     h.wq,
+		reader: mem.BufferSlice{}.Reader(),
 	}
-	str.itl.enqueue(h)
-	return l.originateStream(str)
+	return l.originateStream(str, h)
 }
 
-func (l *loopyWriter) originateStream(str *outStream) error {
-	hdr := str.itl.dequeue().(*headerFrame)
-	if err := hdr.initStream(str.id); err != nil {
-		if err == ErrConnClosing {
-			return err
-		}
-		// Other errors(errStreamDrain) need not close transport.
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
+	// l.draining is set when handling GoAway, in which case we want to avoid
+	// creating new streams.
+	if l.draining {
+		// TODO: provide a better error with the reason we are in draining.
+		hdr.onOrphaned(errStreamDrain)
 		return nil
 	}
+	if err := hdr.initStream(str.id); err != nil {
+		return err
+	}
 	if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
 		return err
 	}
@@ -658,7 +735,9 @@
 	l.hBuf.Reset()
 	for _, f := range hf {
 		if err := l.hEnc.WriteField(f); err != nil {
-			warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
+			if l.logger.V(logLevel) {
+				l.logger.Warningf("Encountered error while encoding headers: %v", err)
+			}
 		}
 	}
 	var (
@@ -695,10 +774,10 @@
 	return nil
 }
 
-func (l *loopyWriter) preprocessData(df *dataFrame) error {
+func (l *loopyWriter) preprocessData(df *dataFrame) {
 	str, ok := l.estdStreams[df.streamID]
 	if !ok {
-		return nil
+		return
 	}
 	// If we got data for a stream it means that
 	// stream was originated and the headers were sent out.
@@ -707,7 +786,6 @@
 		str.state = active
 		l.activeStreams.enqueue(str)
 	}
-	return nil
 }
 
 func (l *loopyWriter) pingHandler(p *ping) error {
@@ -718,9 +796,8 @@
 
 }
 
-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
 	o.resp <- l.sendQuota
-	return nil
 }
 
 func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
@@ -730,15 +807,50 @@
 		// a RST_STREAM before stream initialization thus the stream might
 		// not be established yet.
 		delete(l.estdStreams, c.streamID)
+		str.reader.Close()
 		str.deleteSelf()
+		for head := str.itl.dequeueAll(); head != nil; head = head.next {
+			if df, ok := head.it.(*dataFrame); ok {
+				if !df.processing {
+					df.data.Free()
+				}
+			}
+		}
 	}
 	if c.rst { // If RST_STREAM needs to be sent.
 		if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
 			return err
 		}
 	}
-	if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
-		return ErrConnClosing
+	if l.draining && len(l.estdStreams) == 0 {
+		// Flush and close the connection; we are done with it.
+		return errors.New("finished processing active streams while in draining mode")
+	}
+	return nil
+}
+
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+	if l.side == clientSide {
+		return errors.New("earlyAbortStream not handled on client")
+	}
+	// In case the caller forgets to set the http status, default to 200.
+	if eas.httpStatus == 0 {
+		eas.httpStatus = 200
+	}
+	headerFields := []hpack.HeaderField{
+		{Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+		{Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+		{Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+		{Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+	}
+
+	if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+		return err
+	}
+	if eas.rst {
+		if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+			return err
+		}
 	}
 	return nil
 }
@@ -747,7 +859,8 @@
 	if l.side == clientSide {
 		l.draining = true
 		if len(l.estdStreams) == 0 {
-			return ErrConnClosing
+			// Flush and close the connection; we are done with it.
+			return errors.New("received GOAWAY with no active streams")
 		}
 	}
 	return nil
@@ -765,10 +878,10 @@
 	return nil
 }
 
-func (l *loopyWriter) handle(i interface{}) error {
+func (l *loopyWriter) handle(i any) error {
 	switch i := i.(type) {
 	case *incomingWindowUpdate:
-		return l.incomingWindowUpdateHandler(i)
+		l.incomingWindowUpdateHandler(i)
 	case *outgoingWindowUpdate:
 		return l.outgoingWindowUpdateHandler(i)
 	case *incomingSettings:
@@ -778,25 +891,32 @@
 	case *headerFrame:
 		return l.headerHandler(i)
 	case *registerStream:
-		return l.registerStreamHandler(i)
+		l.registerStreamHandler(i)
 	case *cleanupStream:
 		return l.cleanupStreamHandler(i)
+	case *earlyAbortStream:
+		return l.earlyAbortStreamHandler(i)
 	case *incomingGoAway:
 		return l.incomingGoAwayHandler(i)
 	case *dataFrame:
-		return l.preprocessData(i)
+		l.preprocessData(i)
 	case *ping:
 		return l.pingHandler(i)
 	case *goAway:
 		return l.goAwayHandler(i)
 	case *outFlowControlSizeRequest:
-		return l.outFlowControlSizeRequestHandler(i)
+		l.outFlowControlSizeRequestHandler(i)
+	case closeConnection:
+		// Just return a non-I/O error and run() will flush and close the
+		// connection.
+		return ErrConnClosing
 	default:
 		return fmt.Errorf("transport: unknown control message type %T", i)
 	}
+	return nil
 }
 
-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
 	for _, s := range ss {
 		switch s.ID {
 		case http2.SettingInitialWindowSize:
@@ -815,7 +935,6 @@
 			updateHeaderTblSize(l.hEnc, s.Val)
 		}
 	}
-	return nil
 }
 
 // processData removes the first stream from active streams, writes out at most 16KB
@@ -829,19 +948,27 @@
 	if str == nil {
 		return true, nil
 	}
+	reader := str.reader
 	dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
+	if !dataItem.processing {
+		dataItem.processing = true
+		str.reader.Reset(dataItem.data)
+		dataItem.data.Free()
+	}
 	// A data item is represented by a dataFrame, since it later translates into
 	// multiple HTTP2 data frames.
-	// Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
-	// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
-	// maximum possilbe HTTP2 frame size.
+	// Every dataFrame has two buffers: h, which holds the grpc-message header,
+	// and data, which holds the actual message. As an optimization to keep wire
+	// traffic low, bytes from data are copied into h so that each outgoing
+	// HTTP2 frame is as close as possible to the maximum frame size.
 
-	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+	if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
 		// Client sends out empty data frame with endStream = true
 		if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
 			return false, err
 		}
 		str.itl.dequeue() // remove the empty data item from stream
+		_ = reader.Close()
 		if str.itl.isEmpty() {
 			str.state = empty
 		} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -849,63 +976,71 @@
 				return false, err
 			}
 			if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
-				return false, nil
+				return false, err
 			}
 		} else {
 			l.activeStreams.enqueue(str)
 		}
 		return false, nil
 	}
-	var (
-		idx int
-		buf []byte
-	)
-	if len(dataItem.h) != 0 { // data header has not been written out yet.
-		buf = dataItem.h
-	} else {
-		idx = 1
-		buf = dataItem.d
-	}
-	size := http2MaxFrameLen
-	if len(buf) < size {
-		size = len(buf)
-	}
+
+	// Figure out the maximum size we can send
+	maxSize := http2MaxFrameLen
 	if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
 		str.state = waitingOnStreamQuota
 		return false, nil
-	} else if strQuota < size {
-		size = strQuota
+	} else if maxSize > strQuota {
+		maxSize = strQuota
+	}
+	if maxSize > int(l.sendQuota) { // connection-level flow control.
+		maxSize = int(l.sendQuota)
+	}
+	// Compute how much of the header and data we can send within quota and max frame length
+	hSize := min(maxSize, len(dataItem.h))
+	dSize := min(maxSize-hSize, reader.Remaining())
+	remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
+	size := hSize + dSize
+
+	var buf *[]byte
+
+	if hSize != 0 && dSize == 0 {
+		buf = &dataItem.h
+	} else {
+		// Note: this is only necessary because the http2.Framer does not support
+		// partially writing a frame, so the sequence must be materialized into a buffer.
+		// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+		pool := l.bufferPool
+		if pool == nil {
+			// Note that this is only supposed to be nil in tests. Otherwise, stream is
+			// always initialized with a BufferPool.
+			pool = mem.DefaultBufferPool()
+		}
+		buf = pool.Get(size)
+		defer pool.Put(buf)
+
+		copy((*buf)[:hSize], dataItem.h)
+		_, _ = reader.Read((*buf)[hSize:])
 	}
 
-	if l.sendQuota < uint32(size) { // connection-level flow control.
-		size = int(l.sendQuota)
-	}
 	// Now that outgoing flow controls are checked we can replenish str's write quota
 	str.wq.replenish(size)
 	var endStream bool
 	// If this is the last data message on this stream and all of it can be written in this iteration.
-	if dataItem.endStream && size == len(buf) {
-		// buf contains either data or it contains header but data is empty.
-		if idx == 1 || len(dataItem.d) == 0 {
-			endStream = true
-		}
+	if dataItem.endStream && remainingBytes == 0 {
+		endStream = true
 	}
 	if dataItem.onEachWrite != nil {
 		dataItem.onEachWrite()
 	}
-	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+	if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
 		return false, err
 	}
-	buf = buf[size:]
 	str.bytesOutStanding += size
 	l.sendQuota -= uint32(size)
-	if idx == 0 {
-		dataItem.h = buf
-	} else {
-		dataItem.d = buf
-	}
+	dataItem.h = dataItem.h[hSize:]
 
-	if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+	if remainingBytes == 0 { // All the data from that message was written out.
+		_ = reader.Close()
 		str.itl.dequeue()
 	}
 	if str.itl.isEmpty() {
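The hSize/dSize computation above caps each write by the frame length and by both the stream-level and connection-level flow-control quotas. A worked numeric sketch of the same arithmetic, with values chosen purely for illustration (uses the built-in min, Go 1.21+):

package main

import "fmt"

// frameSizes mirrors the quota arithmetic: clamp to the smallest of the
// frame limit and the two quotas, fill header bytes first, then data.
func frameSizes(maxFrameLen, streamQuota, connQuota, hLen, dRemaining int) (hSize, dSize, remaining int) {
	maxSize := maxFrameLen
	if maxSize > streamQuota {
		maxSize = streamQuota
	}
	if maxSize > connQuota {
		maxSize = connQuota
	}
	hSize = min(maxSize, hLen)
	dSize = min(maxSize-hSize, dRemaining)
	remaining = hLen + dRemaining - hSize - dSize
	return hSize, dSize, remaining
}

func main() {
	// 16 KiB frames, 8 KiB stream quota, ample connection quota,
	// a 5-byte gRPC message header and a 12000-byte payload.
	h, d, rem := frameSizes(16384, 8192, 1<<20, 5, 12000)
	fmt.Println(h, d, rem) // 5 8187 3813: one 8192-byte frame, 3813 bytes left over
}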
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
index 9fa306b..bc8ee07 100644
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -47,3 +47,9 @@
 	defaultClientMaxHeaderListSize = uint32(16 << 20)
 	defaultServerMaxHeaderListSize = uint32(16 << 20)
 )
+
+// MaxStreamID is the upper bound for the stream ID before the current
+// transport gracefully closes and a new transport is created for subsequent RPCs.
+// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
+// integer. It's exported so that tests can override it.
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index f262edd..dfc0f22 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,14 +92,11 @@
 
 func (f *trInFlow) onData(n uint32) uint32 {
 	f.unacked += n
-	if f.unacked >= f.limit/4 {
-		w := f.unacked
-		f.unacked = 0
+	if f.unacked < f.limit/4 {
 		f.updateEffectiveWindowSize()
-		return w
+		return 0
 	}
-	f.updateEffectiveWindowSize()
-	return 0
+	return f.reset()
 }
 
 func (f *trInFlow) reset() uint32 {
@@ -136,12 +133,10 @@
 
 // newLimit updates the inflow window to a new value n.
 // It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
 	f.mu.Lock()
-	d := n - f.limit
 	f.limit = n
 	f.mu.Unlock()
-	return d
 }
 
 func (f *inFlow) maybeAdjust(n uint32) uint32 {
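The reworked trInFlow.onData above batches WINDOW_UPDATE frames: nothing is sent until at least a quarter of the window is unacknowledged, at which point everything outstanding is acknowledged at once. A toy model of that policy, stripped of the real type's locking and connection plumbing:

package main

import "fmt"

type inFlow struct {
	limit   uint32
	unacked uint32
}

// onData returns the window update to send; 0 means defer and batch.
func (f *inFlow) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked < f.limit/4 {
		return 0 // below the threshold: coalesce into a later update
	}
	w := f.unacked
	f.unacked = 0
	return w
}

func main() {
	f := &inFlow{limit: 64 * 1024}
	fmt.Println(f.onData(4096))  // 0: below the 16 KiB threshold
	fmt.Println(f.onData(16384)) // 20480: threshold crossed, ack everything
}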
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 78f9ddc..d954a64 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
 package transport
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -35,50 +34,80 @@
 	"sync"
 	"time"
 
-	"github.com/golang/protobuf/proto"
 	"golang.org/x/net/http2"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
 )
 
-// NewServerHandlerTransport returns a ServerTransport handling gRPC
-// from inside an http.Handler. It requires that the http Server
-// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
-	if r.ProtoMajor != 2 {
-		return nil, errors.New("gRPC requires HTTP/2")
-	}
-	if r.Method != "POST" {
-		return nil, errors.New("invalid gRPC request method")
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
+// It requires that the http Server supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+	if r.Method != http.MethodPost {
+		w.Header().Set("Allow", http.MethodPost)
+		msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
+		http.Error(w, msg, http.StatusMethodNotAllowed)
+		return nil, errors.New(msg)
 	}
 	contentType := r.Header.Get("Content-Type")
 	// TODO: do we assume contentType is lowercase? we did before
-	contentSubtype, validContentType := contentSubtype(contentType)
+	contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
 	if !validContentType {
-		return nil, errors.New("invalid gRPC request content-type")
+		msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
+		http.Error(w, msg, http.StatusUnsupportedMediaType)
+		return nil, errors.New(msg)
+	}
+	if r.ProtoMajor != 2 {
+		msg := "gRPC requires HTTP/2"
+		http.Error(w, msg, http.StatusHTTPVersionNotSupported)
+		return nil, errors.New(msg)
 	}
 	if _, ok := w.(http.Flusher); !ok {
-		return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
+		msg := "gRPC requires a ResponseWriter supporting http.Flusher"
+		http.Error(w, msg, http.StatusInternalServerError)
+		return nil, errors.New(msg)
 	}
 
+	var localAddr net.Addr
+	if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
+		localAddr, _ = la.(net.Addr)
+	}
+	var authInfo credentials.AuthInfo
+	if r.TLS != nil {
+		authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+	}
+	p := peer.Peer{
+		Addr:      strAddr(r.RemoteAddr),
+		LocalAddr: localAddr,
+		AuthInfo:  authInfo,
+	}
 	st := &serverHandlerTransport{
 		rw:             w,
 		req:            r,
 		closedCh:       make(chan struct{}),
 		writes:         make(chan func()),
+		peer:           p,
 		contentType:    contentType,
 		contentSubtype: contentSubtype,
 		stats:          stats,
+		bufferPool:     bufferPool,
 	}
+	st.logger = prefixLoggerForServerHandlerTransport(st)
 
 	if v := r.Header.Get("grpc-timeout"); v != "" {
 		to, err := decodeTimeout(v)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
+			msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
+			http.Error(w, msg, http.StatusBadRequest)
+			return nil, status.Error(codes.Internal, msg)
 		}
 		st.timeoutSet = true
 		st.timeout = to
@@ -96,7 +125,9 @@
 		for _, v := range vv {
 			v, err := decodeMetadataHeader(k, v)
 			if err != nil {
-				return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
+				msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
+				http.Error(w, msg, http.StatusBadRequest)
+				return nil, status.Error(codes.Internal, msg)
 			}
 			metakv = append(metakv, k, v)
 		}
@@ -112,14 +143,15 @@
 // at this point to be speaking over HTTP/2, so it's able to speak valid
 // gRPC.
 type serverHandlerTransport struct {
-	rw               http.ResponseWriter
-	req              *http.Request
-	timeoutSet       bool
-	timeout          time.Duration
-	didCommonHeaders bool
+	rw         http.ResponseWriter
+	req        *http.Request
+	timeoutSet bool
+	timeout    time.Duration
 
 	headerMD metadata.MD
 
+	peer peer.Peer
+
 	closeOnce sync.Once
 	closedCh  chan struct{} // closed on Close
 
@@ -138,17 +170,28 @@
 	// TODO make sure this is consistent across handler_server and http2_server
 	contentSubtype string
 
-	stats stats.Handler
+	stats  []stats.Handler
+	logger *grpclog.PrefixLogger
+
+	bufferPool mem.BufferPool
 }
 
-func (ht *serverHandlerTransport) Close() error {
-	ht.closeOnce.Do(ht.closeCloseChanOnce)
-	return nil
+func (ht *serverHandlerTransport) Close(err error) {
+	ht.closeOnce.Do(func() {
+		if ht.logger.V(logLevel) {
+			ht.logger.Infof("Closing: %v", err)
+		}
+		close(ht.closedCh)
+	})
 }
 
-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
-
-func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+func (ht *serverHandlerTransport) Peer() *peer.Peer {
+	return &peer.Peer{
+		Addr:      ht.peer.Addr,
+		LocalAddr: ht.peer.LocalAddr,
+		AuthInfo:  ht.peer.AuthInfo,
+	}
+}
 
 // strAddr is a net.Addr backed by either a TCP "ip:port" string, or
 // the empty string if unknown.
@@ -182,12 +225,15 @@
 	}
 }
 
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
 	ht.writeStatusMu.Lock()
 	defer ht.writeStatusMu.Unlock()
 
+	headersWritten := s.updateHeaderSent()
 	err := ht.do(func() {
-		ht.writeCommonHeaders(s)
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
 
 		// And flush, in case no header or body has been sent yet.
 		// This forces a separation of headers and trailers if this is the
@@ -200,18 +246,21 @@
 			h.Set("Grpc-Message", encodeGrpcMessage(m))
 		}
 
+		s.hdrMu.Lock()
+		defer s.hdrMu.Unlock()
 		if p := st.Proto(); p != nil && len(p.Details) > 0 {
+			delete(s.trailer, grpcStatusDetailsBinHeader)
 			stBytes, err := proto.Marshal(p)
 			if err != nil {
 				// TODO: return error instead, when callers are able to handle it.
 				panic(err)
 			}
 
-			h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+			h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
 		}
 
-		if md := s.Trailer(); len(md) > 0 {
-			for k, vv := range md {
+		if len(s.trailer) > 0 {
+			for k, vv := range s.trailer {
 				// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
 				if isReservedHeader(k) {
 					continue
@@ -226,22 +275,30 @@
 	})
 
 	if err == nil { // transport has not been closed
-		if ht.stats != nil {
-			ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		s.hdrMu.Lock()
+		for _, sh := range ht.stats {
+			sh.HandleRPC(s.Context(), &stats.OutTrailer{
+				Trailer: s.trailer.Copy(),
+			})
 		}
+		s.hdrMu.Unlock()
 	}
-	ht.Close()
+	ht.Close(errors.New("finished writing status"))
 	return err
 }
 
+// writePendingHeaders sets common and custom headers on the first
+// write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
+	ht.writeCommonHeaders(s)
+	ht.writeCustomHeaders(s)
+}
+
 // writeCommonHeaders sets common headers on the first write
 // call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
-	if ht.didCommonHeaders {
-		return
-	}
-	ht.didCommonHeaders = true
-
+func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
 	h := ht.rw.Header()
 	h["Date"] = nil // suppress Date to make tests happy; TODO: restore
 	h.Set("Content-Type", ht.contentType)
@@ -260,45 +317,78 @@
 	}
 }
 
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
-	return ht.do(func() {
-		ht.writeCommonHeaders(s)
-		ht.rw.Write(hdr)
-		ht.rw.Write(data)
-		ht.rw.(http.Flusher).Flush()
-	})
+// writeCustomHeaders sets custom headers set on the stream via SetHeader
+// on the first write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
+	h := ht.rw.Header()
+
+	s.hdrMu.Lock()
+	for k, vv := range s.header {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			h.Add(k, encodeMetadataHeader(k, v))
+		}
+	}
+
+	s.hdrMu.Unlock()
 }
 
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
+	// Always take a reference because otherwise there is no guarantee the data will
+	// be available after this function returns. This is what callers to Write
+	// expect.
+	data.Ref()
+	headersWritten := s.updateHeaderSent()
 	err := ht.do(func() {
-		ht.writeCommonHeaders(s)
-		h := ht.rw.Header()
-		for k, vv := range md {
-			// Clients don't tolerate reading restricted headers after some non restricted ones were sent.
-			if isReservedHeader(k) {
-				continue
-			}
-			for _, v := range vv {
-				v = encodeMetadataHeader(k, v)
-				h.Add(k, v)
-			}
+		defer data.Free()
+		if !headersWritten {
+			ht.writePendingHeaders(s)
 		}
+		ht.rw.Write(hdr)
+		for _, b := range data {
+			_, _ = ht.rw.Write(b.ReadOnlyData())
+		}
+		ht.rw.(http.Flusher).Flush()
+	})
+	if err != nil {
+		data.Free()
+		return err
+	}
+	return nil
+}
+
+func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
+	if err := s.SetHeader(md); err != nil {
+		return err
+	}
+
+	headersWritten := s.updateHeaderSent()
+	err := ht.do(func() {
+		if !headersWritten {
+			ht.writePendingHeaders(s)
+		}
+
 		ht.rw.WriteHeader(200)
 		ht.rw.(http.Flusher).Flush()
 	})
 
 	if err == nil {
-		if ht.stats != nil {
-			ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+		for _, sh := range ht.stats {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			sh.HandleRPC(s.Context(), &stats.OutHeader{
+				Header:      md.Copy(),
+				Compression: s.sendCompress,
+			})
 		}
 	}
 	return err
 }
 
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
 	// With this transport type there will be exactly 1 stream: this HTTP request.
-
-	ctx := ht.req.Context()
 	var cancel context.CancelFunc
 	if ht.timeoutSet {
 		ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -315,40 +405,27 @@
 		case <-ht.req.Context().Done():
 		}
 		cancel()
-		ht.Close()
+		ht.Close(errors.New("request is done processing"))
 	}()
 
-	req := ht.req
-
-	s := &Stream{
-		id:             0, // irrelevant
-		requestRead:    func(int) {},
-		cancel:         cancel,
-		buf:            newRecvBuffer(),
-		st:             ht,
-		method:         req.URL.Path,
-		recvCompress:   req.Header.Get("grpc-encoding"),
-		contentSubtype: ht.contentSubtype,
-	}
-	pr := &peer.Peer{
-		Addr: ht.RemoteAddr(),
-	}
-	if req.TLS != nil {
-		pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
-	}
 	ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
-	s.ctx = peer.NewContext(ctx, pr)
-	if ht.stats != nil {
-		s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
-		inHeader := &stats.InHeader{
-			FullMethod:  s.method,
-			RemoteAddr:  ht.RemoteAddr(),
-			Compression: s.recvCompress,
-		}
-		ht.stats.HandleRPC(s.ctx, inHeader)
+	req := ht.req
+	s := &ServerStream{
+		Stream: &Stream{
+			id:             0, // irrelevant
+			ctx:            ctx,
+			requestRead:    func(int) {},
+			buf:            newRecvBuffer(),
+			method:         req.URL.Path,
+			recvCompress:   req.Header.Get("grpc-encoding"),
+			contentSubtype: ht.contentSubtype,
+		},
+		cancel:           cancel,
+		st:               ht,
+		headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
 	}
 	s.trReader = &transportReader{
-		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+		reader:        &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
 		windowHandler: func(int) {},
 	}
 
@@ -357,21 +434,19 @@
 	go func() {
 		defer close(readerDone)
 
-		// TODO: minimize garbage, optimize recvBuffer code/ownership
-		const readSize = 8196
-		for buf := make([]byte, readSize); ; {
-			n, err := req.Body.Read(buf)
+		for {
+			buf := ht.bufferPool.Get(http2MaxFrameLen)
+			n, err := req.Body.Read(*buf)
 			if n > 0 {
-				s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
-				buf = buf[n:]
+				*buf = (*buf)[:n]
+				s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+			} else {
+				ht.bufferPool.Put(buf)
 			}
 			if err != nil {
 				s.buf.put(recvMsg{err: mapRecvMsgError(err)})
 				return
 			}
-			if len(buf) == 0 {
-				buf = make([]byte, readSize)
-			}
 		}
 	}()
 
@@ -400,21 +475,19 @@
 	}
 }
 
-func (ht *serverHandlerTransport) IncrMsgSent() {}
+func (ht *serverHandlerTransport) incrMsgRecv() {}
 
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
-
-func (ht *serverHandlerTransport) Drain() {
+func (ht *serverHandlerTransport) Drain(string) {
 	panic("Drain() is not implemented")
 }
 
 // mapRecvMsgError maps the non-nil err into the appropriate
 // error value as expected by callers of *grpc.parser.recvMsg.
 // In particular, it can only be:
-//   * io.EOF
-//   * io.ErrUnexpectedEOF
-//   * of type transport.ConnectionError
-//   * an error from the status package
+//   - io.EOF
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
 func mapRecvMsgError(err error) error {
 	if err == io.EOF || err == io.ErrUnexpectedEOF {
 		return err
@@ -427,5 +500,5 @@
 	if strings.Contains(err.Error(), "body closed by handler") {
 		return status.Error(codes.Canceled, err.Error())
 	}
-	return connectionErrorf(true, err, err.Error())
+	return connectionErrorf(true, err, "%s", err.Error())
 }
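For orientation, this transport is what backs serving gRPC through a stock net/http server: *grpc.Server implements http.Handler, and requests arriving via ServeHTTP are driven by serverHandlerTransport rather than the native HTTP/2 transport. A hedged sketch of that wiring; the certificate paths are placeholders, and HTTP/2 (hence TLS here) is required:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// Register services on gs as usual before serving.
	// ListenAndServeTLS enables HTTP/2 on the standard library server,
	// which NewServerHandlerTransport requires (r.ProtoMajor == 2).
	log.Fatal(http.ListenAndServeTLS(":8443", "server.crt", "server.key", gs))
}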
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 294661a..7cb2387 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -24,6 +24,8 @@
 	"io"
 	"math"
 	"net"
+	"net/http"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"sync"
@@ -32,27 +34,49 @@
 
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
-
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/syscall"
+	icredentials "google.golang.org/grpc/internal/credentials"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	"google.golang.org/grpc/internal/proxyattributes"
+	istatus "google.golang.org/grpc/internal/status"
+	isyscall "google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/internal/transport/networktype"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
+	"google.golang.org/grpc/resolver"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 )
 
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
 // http2Client implements the ClientTransport interface with HTTP2.
 type http2Client struct {
-	lastRead   int64 // keep this field 64-bit aligned
-	ctx        context.Context
-	cancel     context.CancelFunc
-	ctxDone    <-chan struct{} // Cache the ctx.Done() chan.
-	userAgent  string
-	md         interface{}
+	lastRead  int64 // Keep this field 64-bit aligned. Accessed atomically.
+	ctx       context.Context
+	cancel    context.CancelFunc
+	ctxDone   <-chan struct{} // Cache the ctx.Done() chan.
+	userAgent string
+	// address contains the resolver returned address for this transport.
+	// If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
+	// passed to `NewStream`, when determining the :authority header.
+	address    resolver.Address
+	md         metadata.MD
 	conn       net.Conn // underlying communication channel
 	loopy      *loopyWriter
 	remoteAddr net.Addr
@@ -63,11 +87,12 @@
 	writerDone chan struct{} // sync point to enable testing.
 	// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
 	// that the server sent GoAway on this transport.
-	goAway chan struct{}
-
-	framer *framer
+	goAway        chan struct{}
+	keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
+	framer        *framer
 	// controlBuf delivers all the control related tasks (e.g., window
 	// updates, reset streams, and various settings) to the controller.
+	// Do not access controlBuf with mu held.
 	controlBuf *controlBuffer
 	fc         *trInFlow
 	// The scheme used: https if TLS is on, http otherwise.
@@ -80,7 +105,7 @@
 	kp               keepalive.ClientParameters
 	keepaliveEnabled bool
 
-	statsHandler stats.Handler
+	statsHandlers []stats.Handler
 
 	initialWindowSize int32
 
@@ -88,25 +113,26 @@
 	maxSendHeaderListSize *uint32
 
 	bdpEst *bdpEstimator
-	// onPrefaceReceipt is a callback that client transport calls upon
-	// receiving server preface to signal that a succefull HTTP2
-	// connection was established.
-	onPrefaceReceipt func()
 
 	maxConcurrentStreams  uint32
 	streamQuota           int64
 	streamsQuotaAvailable chan struct{}
 	waitingStreams        uint32
-	nextID                uint32
+	registeredCompressors string
 
+	// Do not access controlBuf with mu held.
 	mu            sync.Mutex // guard the following variables
+	nextID        uint32
 	state         transportState
-	activeStreams map[uint32]*Stream
+	activeStreams map[uint32]*ClientStream
 	// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
 	prevGoAwayID uint32
 	// goAwayReason records the http2.ErrCode and debug data received with the
 	// GoAway frame.
 	goAwayReason GoAwayReason
+	// goAwayDebugMessage contains a detailed human-readable string about a
+	// GoAway frame, useful for error messages.
+	goAwayDebugMessage string
 	// A condition variable used to signal when the keepalive goroutine should
 	// go dormant. The condition for dormancy is based on the number of active
 	// streams and the `PermitWithoutStream` keepalive client parameter. And
@@ -118,21 +144,44 @@
 	// variable.
 	kpDormant bool
 
-	// Fields below are for channelz metric collection.
-	channelzID int64 // channelz unique identification number
-	czData     *channelzData
+	channelz *channelz.Socket
 
-	onGoAway func(GoAwayReason)
-	onClose  func()
+	onClose func(GoAwayReason)
 
-	bufferPool *bufferPool
+	bufferPool mem.BufferPool
+
+	connectionID uint64
+	logger       *grpclog.PrefixLogger
 }
 
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) {
+	address := addr.Addr
+	networkType, ok := networktype.Get(addr)
 	if fn != nil {
-		return fn(ctx, addr)
+		// Special handling for unix scheme with custom dialer. Back in the day,
+		// we did not have a unix resolver and therefore targets with a unix
+		// scheme would end up using the passthrough resolver. So, user's used a
+		// custom dialer in this case and expected the original dial target to
+		// be passed to the custom dialer. Now, we have a unix resolver. But if
+		// a custom dialer is specified, we want to retain the old behavior in
+		// terms of the address being passed to the custom dialer.
+		if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+			// Supported unix targets are either "unix://absolute-path" or
+			// "unix:relative-path".
+			if filepath.IsAbs(address) {
+				return fn(ctx, "unix://"+address)
+			}
+			return fn(ctx, "unix:"+address)
+		}
+		return fn(ctx, address)
 	}
-	return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+	if !ok {
+		networkType, address = ParseDialTarget(address)
+	}
+	if opts, present := proxyattributes.Get(addr); present {
+		return proxyDial(ctx, addr, grpcUA, opts)
+	}
+	return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
 }
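The unix-scheme branch above reconstructs the original dial target before handing it to a custom dialer. The rewriting rule in isolation, as a small runnable sketch:

package main

import (
	"fmt"
	"path/filepath"
)

// rewriteUnixTarget mirrors the rule above: absolute socket paths become
// "unix://path", relative ones become "unix:path".
func rewriteUnixTarget(address string) string {
	if filepath.IsAbs(address) {
		return "unix://" + address
	}
	return "unix:" + address
}

func main() {
	fmt.Println(rewriteUnixTarget("/var/run/app.sock")) // unix:///var/run/app.sock
	fmt.Println(rewriteUnixTarget("run/app.sock"))      // unix:run/app.sock
}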
 
 func isTemporary(err error) bool {
@@ -151,10 +200,10 @@
 	return true
 }
 
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
 // and starts to receive messages on it. Non-nil error returns if construction
 // fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
+func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
 	scheme := "http"
 	ctx, cancel := context.WithCancel(ctx)
 	defer func() {
@@ -163,19 +212,51 @@
 		}
 	}()
 
-	conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
+	// gRPC, resolver, balancer etc. can specify arbitrary data in the
+	// Attributes field of resolver.Address, which is shoved into connectCtx
+	// and passed to the dialer and credential handshaker. This makes it possible for
+	// address specific arbitrary data to reach custom dialers and credential handshakers.
+	connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+	conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent)
 	if err != nil {
 		if opts.FailOnNonTempDialError {
 			return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
 		}
-		return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
+		return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
 	}
+
 	// Any further errors will close the underlying connection
 	defer func(conn net.Conn) {
 		if err != nil {
 			conn.Close()
 		}
 	}(conn)
+
+	// The following defer and goroutine monitor the connectCtx for cancellation
+	// and deadline.  On context expiration, the connection is hard closed and
+	// this function will naturally fail as a result.  Otherwise, the defer
+	// waits for the goroutine to exit to prevent the context from being
+	// monitored (and to prevent the connection from ever being closed) after
+	// returning from this function.
+	ctxMonitorDone := grpcsync.NewEvent()
+	newClientCtx, newClientDone := context.WithCancel(connectCtx)
+	defer func() {
+		newClientDone()         // Awaken the goroutine below if connectCtx hasn't expired.
+		<-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
+	}()
+	go func(conn net.Conn) {
+		defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
+		<-newClientCtx.Done()       // Block until connectCtx expires or the defer above executes.
+		if err := connectCtx.Err(); err != nil {
+			// connectCtx expired before exiting the function.  Hard close the connection.
+			if logger.V(logLevel) {
+				logger.Infof("Aborting due to connect deadline expiring: %v", err)
+			}
+			conn.Close()
+		}
+	}(conn)
+
 	kp := opts.KeepaliveParams
 	// Validate keepalive parameters.
 	if kp.Time == 0 {
@@ -186,7 +267,7 @@
 	}
 	keepaliveEnabled := false
 	if kp.Time != infinity {
-		if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+		if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
 			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
 		}
 		keepaliveEnabled = true
@@ -207,18 +288,30 @@
 		}
 	}
 	if transportCreds != nil {
-		scheme = "https"
-		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+		conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
 		if err != nil {
 			return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
 		}
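+		// Refuse to send per-RPC credentials that require transport security
+		// over a connection whose security level is known to be below
+		// PrivacyAndIntegrity.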
+		for _, cd := range perRPCCreds {
+			if cd.RequireTransportSecurity() {
+				if ci, ok := authInfo.(interface {
+					GetCommonAuthInfo() credentials.CommonAuthInfo
+				}); ok {
+					secLevel := ci.GetCommonAuthInfo().SecurityLevel
+					if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+						return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+					}
+				}
+			}
+		}
 		isSecure = true
+		if transportCreds.Info().SecurityProtocol == "tls" {
+			scheme = "https"
+		}
 	}
-	dynamicWindow := true
 	icwz := int32(initialWindowSize)
 	if opts.InitialConnWindowSize >= defaultWindowSize {
 		icwz = opts.InitialConnWindowSize
-		dynamicWindow = false
 	}
 	writeBufSize := opts.WriteBufferSize
 	readBufSize := opts.ReadBufferSize
@@ -226,12 +319,14 @@
 	if opts.MaxHeaderListSize != nil {
 		maxHeaderListSize = *opts.MaxHeaderListSize
 	}
+
 	t := &http2Client{
 		ctx:                   ctx,
 		ctxDone:               ctx.Done(), // Cache Done chan.
 		cancel:                cancel,
 		userAgent:             opts.UserAgent,
-		md:                    addr.Metadata,
+		registeredCompressors: grpcutil.RegisteredCompressors(),
+		address:               addr,
 		conn:                  conn,
 		remoteAddr:            conn.RemoteAddr(),
 		localAddr:             conn.LocalAddr(),
@@ -239,68 +334,99 @@
 		readerDone:            make(chan struct{}),
 		writerDone:            make(chan struct{}),
 		goAway:                make(chan struct{}),
-		framer:                newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
+		keepaliveDone:         make(chan struct{}),
+		framer:                newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
 		fc:                    &trInFlow{limit: uint32(icwz)},
 		scheme:                scheme,
-		activeStreams:         make(map[uint32]*Stream),
+		activeStreams:         make(map[uint32]*ClientStream),
 		isSecure:              isSecure,
 		perRPCCreds:           perRPCCreds,
 		kp:                    kp,
-		statsHandler:          opts.StatsHandler,
+		statsHandlers:         opts.StatsHandlers,
 		initialWindowSize:     initialWindowSize,
-		onPrefaceReceipt:      onPrefaceReceipt,
 		nextID:                1,
 		maxConcurrentStreams:  defaultMaxStreamsClient,
 		streamQuota:           defaultMaxStreamsClient,
 		streamsQuotaAvailable: make(chan struct{}, 1),
-		czData:                new(channelzData),
-		onGoAway:              onGoAway,
-		onClose:               onClose,
 		keepaliveEnabled:      keepaliveEnabled,
-		bufferPool:            newBufferPool(),
+		bufferPool:            opts.BufferPool,
+		onClose:               onClose,
+	}
+	var czSecurity credentials.ChannelzSecurityValue
+	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+		czSecurity = au.GetSecurityValue()
+	}
+	t.channelz = channelz.RegisterSocket(
+		&channelz.Socket{
+			SocketType:       channelz.SocketTypeNormal,
+			Parent:           opts.ChannelzParent,
+			SocketMetrics:    channelz.SocketMetrics{},
+			EphemeralMetrics: t.socketMetrics,
+			LocalAddr:        t.localAddr,
+			RemoteAddr:       t.remoteAddr,
+			SocketOptions:    channelz.GetSocketOption(t.conn),
+			Security:         czSecurity,
+		})
+	t.logger = prefixLoggerForClientTransport(t)
+	// Add peer information to the http2client context.
+	t.ctx = peer.NewContext(t.ctx, t.getPeer())
+
+	if md, ok := addr.Metadata.(*metadata.MD); ok {
+		t.md = *md
+	} else if md := imetadata.Get(addr); md != nil {
+		t.md = md
 	}
 	t.controlBuf = newControlBuffer(t.ctxDone)
 	if opts.InitialWindowSize >= defaultWindowSize {
 		t.initialWindowSize = opts.InitialWindowSize
-		dynamicWindow = false
 	}
-	if dynamicWindow {
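+	// Unless a static window size was requested, use a BDP estimator to
+	// dynamically resize the flow control window.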
+	if !opts.StaticWindowSize {
 		t.bdpEst = &bdpEstimator{
 			bdp:               initialWindowSize,
 			updateFlowControl: t.updateFlowControl,
 		}
 	}
-	if t.statsHandler != nil {
-		t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
+	for _, sh := range t.statsHandlers {
+		t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
 			RemoteAddr: t.remoteAddr,
 			LocalAddr:  t.localAddr,
 		})
 		connBegin := &stats.ConnBegin{
 			Client: true,
 		}
-		t.statsHandler.HandleConn(t.ctx, connBegin)
-	}
-	if channelz.IsOn() {
-		t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+		sh.HandleConn(t.ctx, connBegin)
 	}
 	if t.keepaliveEnabled {
 		t.kpDormancyCond = sync.NewCond(&t.mu)
 		go t.keepalive()
 	}
-	// Start the reader goroutine for incoming message. Each transport has
-	// a dedicated goroutine which reads HTTP2 frame from network. Then it
-	// dispatches the frame to the corresponding stream entity.
-	go t.reader()
+
+	// Start the reader goroutine for incoming messages. Each transport has a
+	// dedicated goroutine which reads HTTP2 frames from the network. Then it
+	// dispatches the frame to the corresponding stream entity.  When the
+	// server preface is received, readerErrCh is closed.  If an error occurs
+	// first, an error is pushed to the channel.  This must be checked before
+	// returning from this function.
+	readerErrCh := make(chan error, 1)
+	go t.reader(readerErrCh)
+	defer func() {
+		if err != nil {
+			// writerDone should be closed since the loopy goroutine
+			// wouldn't have started if this function returns an error.
+			close(t.writerDone)
+			t.Close(err)
+		}
+	}()
 
 	// Send connection preface to server.
 	n, err := t.conn.Write(clientPreface)
 	if err != nil {
-		t.Close()
-		return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+		err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+		return nil, err
 	}
 	if n != len(clientPreface) {
-		t.Close()
-		return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+		err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+		return nil, err
 	}
 	var ss []http2.Setting
 
@@ -318,29 +444,33 @@
 	}
 	err = t.framer.fr.WriteSettings(ss...)
 	if err != nil {
-		t.Close()
-		return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+		err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+		return nil, err
 	}
 	// Adjust the connection flow control window if needed.
 	if delta := uint32(icwz - defaultWindowSize); delta > 0 {
 		if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
-			t.Close()
-			return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+			err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+			return nil, err
 		}
 	}
 
+	t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
 	if err := t.framer.writer.Flush(); err != nil {
 		return nil, err
 	}
+	// Block until the server preface is received successfully or an error occurs.
+	if err = <-readerErrCh; err != nil {
+		return nil, err
+	}
 	go func() {
-		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
-		err := t.loopy.run()
-		if err != nil {
-			errorf("transport: loopyWriter.run returning. Err: %v", err)
-		}
-		// If it's a connection error, let reader goroutine handle it
-		// since there might be data in the buffers.
-		if _, ok := err.(net.Error); !ok {
+		t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+		if err := t.loopy.run(); !isIOError(err) {
+			// Immediately close the connection, as the loopy writer returns
+			// when there are no more active streams and we were draining (the
+			// server sent a GOAWAY).  For I/O errors, the reader will hit it
+			// after draining any remaining incoming data.
 			t.conn.Close()
 		}
 		close(t.writerDone)
@@ -348,16 +478,19 @@
 	return t, nil
 }
 
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
 	// TODO(zhaoq): Handle uint32 overflow of Stream.id.
-	s := &Stream{
-		ct:             t,
-		done:           make(chan struct{}),
-		method:         callHdr.Method,
-		sendCompress:   callHdr.SendCompress,
-		buf:            newRecvBuffer(),
-		headerChan:     make(chan struct{}),
-		contentSubtype: callHdr.ContentSubtype,
+	s := &ClientStream{
+		Stream: &Stream{
+			method:         callHdr.Method,
+			sendCompress:   callHdr.SendCompress,
+			buf:            newRecvBuffer(),
+			contentSubtype: callHdr.ContentSubtype,
+		},
+		ct:         t,
+		done:       make(chan struct{}),
+		headerChan: make(chan struct{}),
+		doneFunc:   callHdr.DoneFunc,
 	}
 	s.wq = newWriteQuota(defaultWriteQuota, s.done)
 	s.requestRead = func(n int) {
@@ -373,9 +506,8 @@
 			ctxDone: s.ctx.Done(),
 			recv:    s.buf,
 			closeStream: func(err error) {
-				t.CloseStream(s, err)
+				s.Close(err)
 			},
-			freeBuffer: t.bufferPool.put,
 		},
 		windowHandler: func(n int) {
 			t.updateWindow(s, uint32(n))
@@ -386,17 +518,31 @@
 
 func (t *http2Client) getPeer() *peer.Peer {
 	return &peer.Peer{
-		Addr:     t.remoteAddr,
-		AuthInfo: t.authInfo,
+		Addr:      t.remoteAddr,
+		AuthInfo:  t.authInfo, // Can be nil
+		LocalAddr: t.localAddr,
 	}
 }
 
+// outgoingGoAwayHandler writes a GOAWAY to the connection.  Always returns (false, err) as we want the GOAWAY
+// to be the last frame loopy writes to the transport.
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
+	t.mu.Lock()
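+	// t.nextID is the ID the next stream would be assigned, so the highest
+	// stream ID handed out so far is t.nextID - 2 (client stream IDs are odd).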
+	maxStreamID := t.nextID - 2
+	t.mu.Unlock()
+	if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
+		return false, err
+	}
+	return false, g.closeConn
+}
+
 func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
 	aud := t.createAudience(callHdr)
 	ri := credentials.RequestInfo{
-		Method: callHdr.Method,
+		Method:   callHdr.Method,
+		AuthInfo: t.authInfo,
 	}
-	ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+	ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri)
 	authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
 	if err != nil {
 		return nil, err
@@ -410,12 +556,25 @@
 	// Make the slice a predictable size to reduce allocations made by append.
 	hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
 	hfLen += len(authData) + len(callAuthData)
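+	// Work on a copy of the registered compressors, as it may be extended
+	// below with the call's own compressor.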
+	registeredCompressors := t.registeredCompressors
+	if callHdr.PreviousAttempts > 0 {
+		hfLen++
+	}
+	if callHdr.SendCompress != "" {
+		hfLen++
+	}
+	if registeredCompressors != "" {
+		hfLen++
+	}
+	if _, ok := ctx.Deadline(); ok {
+		hfLen++
+	}
 	headerFields := make([]hpack.HeaderField, 0, hfLen)
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
 	if callHdr.PreviousAttempts > 0 {
@@ -424,12 +583,28 @@
 
 	if callHdr.SendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+		// Include the outgoing compressor name when compressor is not registered
+		// via encoding.RegisterCompressor. This is possible when client uses
+		// WithCompressor dial option.
+		if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
+			if registeredCompressors != "" {
+				registeredCompressors += ","
+			}
+			registeredCompressors += callHdr.SendCompress
+		}
+	}
+
+	if registeredCompressors != "" {
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
 	}
 	if dl, ok := ctx.Deadline(); ok {
 		// Send out the timeout regardless of its value. The server can detect a timed-out context by itself.
 		// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
 		timeout := time.Until(dl)
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+		if timeout <= 0 {
+			return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
+		}
+		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
 	}
 	for k, v := range authData {
 		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
@@ -437,14 +612,8 @@
 	for k, v := range callAuthData {
 		headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 	}
-	if b := stats.OutgoingTags(ctx); b != nil {
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
-	}
-	if b := stats.OutgoingTrace(ctx); b != nil {
-		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
-	}
 
-	if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
 		var k string
 		for k, vv := range md {
 			// HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
@@ -458,25 +627,23 @@
 		for _, vv := range added {
 			for i, v := range vv {
 				if i%2 == 0 {
-					k = v
+					k = strings.ToLower(v)
 					continue
 				}
 				// HTTP doesn't allow you to set pseudoheaders after non-pseudoheaders were set.
 				if isReservedHeader(k) {
 					continue
 				}
-				headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 			}
 		}
 	}
-	if md, ok := t.md.(*metadata.MD); ok {
-		for k, vv := range *md {
-			if isReservedHeader(k) {
-				continue
-			}
-			for _, v := range vv {
-				headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
-			}
+	for k, vv := range t.md {
+		if isReservedHeader(k) {
+			continue
+		}
+		for _, v := range vv {
+			headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
 		}
 	}
 	return headerFields, nil
@@ -505,11 +672,15 @@
 	for _, c := range t.perRPCCreds {
 		data, err := c.GetRequestMetadata(ctx, audience)
 		if err != nil {
-			if _, ok := status.FromError(err); ok {
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
 				return nil, err
 			}
 
-			return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
+			return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
 		}
 		for k, v := range data {
 			// Capital header names are illegal in HTTP/2.
@@ -526,12 +697,22 @@
 	// Note: if these credentials are provided both via dial options and call
 	// options, then both sets of credentials will be applied.
 	if callCreds := callHdr.Creds; callCreds != nil {
-		if !t.isSecure && callCreds.RequireTransportSecurity() {
-			return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+		if callCreds.RequireTransportSecurity() {
+			ri, _ := credentials.RequestInfoFromContext(ctx)
+			if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+				return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+			}
 		}
 		data, err := callCreds.GetRequestMetadata(ctx, audience)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, "transport: %v", err)
+			if st, ok := status.FromError(err); ok {
+				// Restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+				}
+				return nil, err
+			}
+			return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
 		}
 		callAuthData = make(map[string]string, len(data))
 		for k, v := range data {
@@ -543,13 +724,65 @@
 	return callAuthData, nil
 }
 
+// NewStreamError wraps an error and reports additional information.  Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire.  However, there are two notable exceptions:
+//
+//  1. If the stream headers violate the max header list size allowed by the
+//     server.  It's possible this could succeed on another transport, even if
+//     it's unlikely, but do not transparently retry.
+//  2. If the credentials errored when requesting their headers.  In this case,
+//     it's possible a retry can fix the problem, but indefinitely transparently
+//     retrying is not appropriate as it is likely the credentials, if they can
+//     eventually succeed, would need I/O to do so.
+type NewStreamError struct {
+	Err error
+
+	AllowTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+	return e.Err.Error()
+}
+
 // NewStream creates a stream and registers it into the transport as "active"
-// streams.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+// streams.  All non-nil errors returned will be *NewStreamError.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
 	ctx = peer.NewContext(ctx, t.getPeer())
+
+	// ServerName field of the resolver returned address takes precedence over
+	// Host field of CallHdr to determine the :authority header. This is because,
+	// the ServerName field takes precedence for server authentication during
+	// TLS handshake, and the :authority header should match the value used
+	// for server authentication.
+	if t.address.ServerName != "" {
+		newCallHdr := *callHdr
+		newCallHdr.Host = t.address.ServerName
+		callHdr = &newCallHdr
+	}
+
+	// The authority specified via the `CallAuthority` CallOption takes the
+	// highest precedence when determining the `:authority` header. It overrides
+	// any value present in the Host field of CallHdr. Before applying this
+	// override, the authority string is validated. If the credentials do not
+	// implement the AuthorityValidator interface, or if validation fails, the
+	// RPC is failed with a status code of `UNAVAILABLE`.
+	if callHdr.Authority != "" {
+		auth, ok := t.authInfo.(credentials.AuthorityValidator)
+		if !ok {
+			return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())}
+		}
+		if err := auth.ValidateAuthority(callHdr.Authority); err != nil {
+			return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)}
+		}
+		newCallHdr := *callHdr
+		newCallHdr.Host = callHdr.Authority
+		callHdr = &newCallHdr
+	}
+
 	headerFields, err := t.createHeaderFields(ctx, callHdr)
 	if err != nil {
-		return nil, err
+		return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
 	}
 	s := t.newStream(ctx, callHdr)
 	cleanup := func(err error) {
@@ -558,7 +791,7 @@
 			return
 		}
 		// The stream was unprocessed by the server.
-		atomic.StoreUint32(&s.unprocessed, 1)
+		s.unprocessed.Store(true)
 		s.write(recvMsg{err: err})
 		close(s.done)
 		// If headerChan isn't closed, then close it.
@@ -569,22 +802,18 @@
 	hdr := &headerFrame{
 		hf:        headerFields,
 		endStream: false,
-		initStream: func(id uint32) error {
+		initStream: func(uint32) error {
 			t.mu.Lock()
-			if state := t.state; state != reachable {
+			// TODO: handle transport closure in loopy instead and remove this.
+			// initStream is never called when the transport is draining.
+			if t.state == closing {
 				t.mu.Unlock()
-				// Do a quick cleanup.
-				err := error(errStreamDrain)
-				if state == closing {
-					err = ErrConnClosing
-				}
-				cleanup(err)
-				return err
+				cleanup(ErrConnClosing)
+				return ErrConnClosing
 			}
-			t.activeStreams[id] = s
 			if channelz.IsOn() {
-				atomic.AddInt64(&t.czData.streamsStarted, 1)
-				atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+				t.channelz.SocketMetrics.StreamsStarted.Add(1)
+				t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
 			}
 			// If the keepalive goroutine has gone dormant, wake it up.
 			if t.kpDormant {
@@ -598,7 +827,8 @@
 	}
 	firstTry := true
 	var ch chan struct{}
-	checkForStreamQuota := func(it interface{}) bool {
+	transportDrainRequired := false
+	checkForStreamQuota := func() bool {
 		if t.streamQuota <= 0 { // Can go negative if server decreases it.
 			if firstTry {
 				t.waitingStreams++
@@ -610,11 +840,24 @@
 			t.waitingStreams--
 		}
 		t.streamQuota--
-		h := it.(*headerFrame)
-		h.streamID = t.nextID
+
+		t.mu.Lock()
+		if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
+			t.mu.Unlock()
+			return false // Don't create a stream if the transport is already closed.
+		}
+
+		hdr.streamID = t.nextID
 		t.nextID += 2
-		s.id = h.streamID
+		// Drain the client transport if nextID > MaxStreamID, which signals gRPC that
+		// the connection is closed and a new one must be created for subsequent RPCs.
+		transportDrainRequired = t.nextID > MaxStreamID
+
+		s.id = hdr.streamID
 		s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+		t.activeStreams[s.id] = s
+		t.mu.Unlock()
+
 		if t.streamQuota > 0 && t.waitingStreams > 0 {
 			select {
 			case t.streamsQuotaAvailable <- struct{}{}:
@@ -624,13 +867,12 @@
 		return true
 	}
 	var hdrListSizeErr error
-	checkForHeaderListSize := func(it interface{}) bool {
+	checkForHeaderListSize := func() bool {
 		if t.maxSendHeaderListSize == nil {
 			return true
 		}
-		hdrFrame := it.(*headerFrame)
 		var sz int64
-		for _, f := range hdrFrame.hf {
+		for _, f := range hdr.hf {
 			if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
 				hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
 				return false
@@ -639,63 +881,62 @@
 		return true
 	}
 	for {
-		success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
-			if !checkForStreamQuota(it) {
-				return false
-			}
-			if !checkForHeaderListSize(it) {
-				return false
-			}
-			return true
+		success, err := t.controlBuf.executeAndPut(func() bool {
+			return checkForHeaderListSize() && checkForStreamQuota()
 		}, hdr)
 		if err != nil {
-			return nil, err
+			// Connection closed.
+			return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
 		}
 		if success {
 			break
 		}
 		if hdrListSizeErr != nil {
-			return nil, hdrListSizeErr
+			return nil, &NewStreamError{Err: hdrListSizeErr}
 		}
 		firstTry = false
 		select {
 		case <-ch:
-		case <-s.ctx.Done():
-			return nil, ContextErr(s.ctx.Err())
+		case <-ctx.Done():
+			return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
 		case <-t.goAway:
-			return nil, errStreamDrain
+			return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
 		case <-t.ctx.Done():
-			return nil, ErrConnClosing
+			return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
 		}
 	}
-	if t.statsHandler != nil {
-		outHeader := &stats.OutHeader{
-			Client:      true,
-			FullMethod:  callHdr.Method,
-			RemoteAddr:  t.remoteAddr,
-			LocalAddr:   t.localAddr,
-			Compression: callHdr.SendCompress,
+	if len(t.statsHandlers) != 0 {
+		header, ok := metadata.FromOutgoingContext(ctx)
+		if ok {
+			header.Set("user-agent", t.userAgent)
+		} else {
+			header = metadata.Pairs("user-agent", t.userAgent)
 		}
-		t.statsHandler.HandleRPC(s.ctx, outHeader)
+		for _, sh := range t.statsHandlers {
+			// Note: The header fields are compressed with hpack after this call returns.
+			// No WireLength field is set here.
+			// Note: Creating a new stats object to prevent pollution.
+			outHeader := &stats.OutHeader{
+				Client:      true,
+				FullMethod:  callHdr.Method,
+				RemoteAddr:  t.remoteAddr,
+				LocalAddr:   t.localAddr,
+				Compression: callHdr.SendCompress,
+				Header:      header,
+			}
+			sh.HandleRPC(s.ctx, outHeader)
+		}
+	}
+	if transportDrainRequired {
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
+		}
+		t.GracefulClose()
 	}
 	return s, nil
 }
 
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
-	var (
-		rst     bool
-		rstCode http2.ErrCode
-	)
-	if err != nil {
-		rst = true
-		rstCode = http2.ErrCodeCancel
-	}
-	t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
 	// Set stream status to done.
 	if s.swapState(streamDone) == streamDone {
 		// If it was already done, return.  If multiple closeStream calls
@@ -729,16 +970,16 @@
 			t.mu.Unlock()
 			if channelz.IsOn() {
 				if eosReceived {
-					atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+					t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
 				} else {
-					atomic.AddInt64(&t.czData.streamsFailed, 1)
+					t.channelz.SocketMetrics.StreamsFailed.Add(1)
 				}
 			}
 		},
 		rst:     rst,
 		rstCode: rstCode,
 	}
-	addBackStreamQuota := func(interface{}) bool {
+	addBackStreamQuota := func() bool {
 		t.streamQuota++
 		if t.streamQuota > 0 && t.waitingStreams > 0 {
 			select {
@@ -751,25 +992,30 @@
 	t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
 	// This will unblock write.
 	close(s.done)
+	if s.doneFunc != nil {
+		s.doneFunc()
+	}
 }
 
 // Close kicks off the shutdown process of the transport. This should be called
 // only once on a transport. Once it is called, the transport should not be
-// accessed any more.
-//
-// This method blocks until the addrConn that initiated this transport is
-// re-connected. This happens because t.onClose() begins reconnect logic at the
-// addrConn level and blocks until the addrConn is successfully connected.
-func (t *http2Client) Close() error {
+// accessed anymore.
+func (t *http2Client) Close(err error) {
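+	// Bound any pending writes (including the GOAWAY below) so that Close
+	// cannot block indefinitely on a blackholed connection.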
+	t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
 	t.mu.Lock()
-	// Make sure we only Close once.
+	// Make sure we only close once.
 	if t.state == closing {
 		t.mu.Unlock()
-		return nil
+		return
 	}
-	// Call t.onClose before setting the state to closing to prevent the client
-	// from attempting to create new streams ASAP.
-	t.onClose()
+	if t.logger.V(logLevel) {
+		t.logger.Infof("Closing: %v", err)
+	}
+	// Call t.onClose ASAP to prevent the client from attempting to create new
+	// streams.
+	if t.state != draining {
+		t.onClose(GoAwayInvalid)
+	}
 	t.state = closing
 	streams := t.activeStreams
 	t.activeStreams = nil
@@ -778,24 +1024,51 @@
 		// should unblock it so that the goroutine eventually exits.
 		t.kpDormancyCond.Signal()
 	}
+	// Append info about previous goaways if there were any, since this may be important
+	// for understanding the root cause of this connection being closed.
+	goAwayDebugMessage := t.goAwayDebugMessage
 	t.mu.Unlock()
-	t.controlBuf.finish()
-	t.cancel()
-	err := t.conn.Close()
-	if channelz.IsOn() {
-		channelz.RemoveEntry(t.channelzID)
+
+	// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
+	// connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+	// also waits for loopyWriter to be closed, with a timer to avoid
+	// blocking for long in case the connection is blackholed, i.e. TCP is
+	// just stuck.
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
+	timer := time.NewTimer(goAwayLoopyWriterTimeout)
+	defer timer.Stop()
+	select {
+	case <-t.writerDone: // success
+	case <-timer.C:
+		t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
 	}
+	t.cancel()
+	t.conn.Close()
+	// Wait for the reader and keepalive goroutines to exit to ensure all
+	// resources are cleaned up before Close returns.
+	<-t.readerDone
+	if t.keepaliveEnabled {
+		<-t.keepaliveDone
+	}
+	channelz.RemoveEntry(t.channelz.ID)
+	var st *status.Status
+	if len(goAwayDebugMessage) > 0 {
+		st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+		err = st.Err()
+	} else {
+		st = status.New(codes.Unavailable, err.Error())
+	}
+
 	// Notify all active streams.
 	for _, s := range streams {
-		t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
+		t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
 	}
-	if t.statsHandler != nil {
+	for _, sh := range t.statsHandlers {
 		connEnd := &stats.ConnEnd{
 			Client: true,
 		}
-		t.statsHandler.HandleConn(t.ctx, connEnd)
+		sh.HandleConn(t.ctx, connEnd)
 	}
-	return err
 }
 
 // GracefulClose sets the state to draining, which prevents new streams from
@@ -810,11 +1083,15 @@
 		t.mu.Unlock()
 		return
 	}
+	if t.logger.V(logLevel) {
+		t.logger.Infof("GracefulClose called")
+	}
+	t.onClose(GoAwayInvalid)
 	t.state = draining
 	active := len(t.activeStreams)
 	t.mu.Unlock()
 	if active == 0 {
-		t.Close()
+		t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
 		return
 	}
 	t.controlBuf.put(&incomingGoAway{})
@@ -822,7 +1099,7 @@
 
 // Write formats the data into HTTP2 data frame(s) and sends it out. The caller
 // should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
 	if opts.Last {
 		// If it's the last message, update stream state.
 		if !s.compareAndSwapState(streamActive, streamWriteDone) {
@@ -834,26 +1111,25 @@
 	df := &dataFrame{
 		streamID:  s.id,
 		endStream: opts.Last,
+		h:         hdr,
+		data:      data,
 	}
-	if hdr != nil || data != nil { // If it's not an empty data frame.
-		// Add some data to grpc message header so that we can equally
-		// distribute bytes across frames.
-		emptyLen := http2MaxFrameLen - len(hdr)
-		if emptyLen > len(data) {
-			emptyLen = len(data)
-		}
-		hdr = append(hdr, data[:emptyLen]...)
-		data = data[emptyLen:]
-		df.h, df.d = hdr, data
-		// TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
-		if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+	dataLen := data.Len()
+	if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota.
+		if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
 			return err
 		}
 	}
-	return t.controlBuf.put(df)
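+	// Take an extra reference on the data for loopy; release it if the frame
+	// cannot be queued.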
+	data.Ref()
+	if err := t.controlBuf.put(df); err != nil {
+		data.Free()
+		return err
+	}
+	t.incrMsgSent()
+	return nil
 }
 
-func (t *http2Client) getStream(f http2.Frame) *Stream {
+func (t *http2Client) getStream(f http2.Frame) *ClientStream {
 	t.mu.Lock()
 	s := t.activeStreams[f.Header().StreamID]
 	t.mu.Unlock()
@@ -863,7 +1139,7 @@
 // adjustWindow sends out extra window update over the initial window size
 // of stream if the application is requesting data larger in size than
 // the window.
-func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
 	if w := s.fc.maybeAdjust(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -872,7 +1148,7 @@
 // updateWindow adjusts the inbound quota for the stream.
 // Window updates will be sent out when the cumulative quota
 // exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *Stream, n uint32) {
+func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
 	if w := s.fc.onRead(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -882,13 +1158,13 @@
 // for the transport and the stream based on the current bdp
 // estimation.
 func (t *http2Client) updateFlowControl(n uint32) {
-	t.mu.Lock()
-	for _, s := range t.activeStreams {
-		s.fc.newLimit(n)
-	}
-	t.mu.Unlock()
-	updateIWS := func(interface{}) bool {
+	updateIWS := func() bool {
 		t.initialWindowSize = int32(n)
+		t.mu.Lock()
+		for _, s := range t.activeStreams {
+			s.fc.newLimit(n)
+		}
+		t.mu.Unlock()
 		return true
 	}
 	t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
@@ -955,15 +1231,18 @@
 		// guarantee f.Data() is consumed before the arrival of next frame.
 		// Can this copy be eliminated?
 		if len(f.Data()) > 0 {
-			buffer := t.bufferPool.get()
-			buffer.Reset()
-			buffer.Write(f.Data())
-			s.write(recvMsg{buffer: buffer})
+			pool := t.bufferPool
+			if pool == nil {
+				// Note that this is only supposed to be nil in tests. Otherwise, stream is
+				// always initialized with a BufferPool.
+				pool = mem.DefaultBufferPool()
+			}
+			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
 		}
 	}
 	// The server has closed the stream without sending trailers.  Record that
 	// the read direction is closed, and set the status appropriately.
-	if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+	if f.StreamEnded() {
 		t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
 	}
 }
@@ -975,21 +1254,24 @@
 	}
 	if f.ErrCode == http2.ErrCodeRefusedStream {
 		// The stream was unprocessed by the server.
-		atomic.StoreUint32(&s.unprocessed, 1)
+		s.unprocessed.Store(true)
 	}
 	statusCode, ok := http2ErrConvTab[f.ErrCode]
 	if !ok {
-		warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
+		}
 		statusCode = codes.Unknown
 	}
 	if statusCode == codes.Canceled {
 		if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
 			// Our deadline was already exceeded, and that was likely the cause
-			// of this cancelation.  Alter the status code accordingly.
+			// of this cancellation.  Alter the status code accordingly.
 			statusCode = codes.DeadlineExceeded
 		}
 	}
-	t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+	st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)
+	t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false)
 }
 
 func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
@@ -1033,7 +1315,7 @@
 		}
 		updateFuncs = append(updateFuncs, updateStreamQuota)
 	}
-	t.controlBuf.executeAndPut(func(interface{}) bool {
+	t.controlBuf.executeAndPut(func() bool {
 		for _, f := range updateFuncs {
 			f()
 		}
@@ -1054,20 +1336,23 @@
 	t.controlBuf.put(pingAck)
 }
 
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
 	t.mu.Lock()
 	if t.state == closing {
 		t.mu.Unlock()
-		return
+		return nil
 	}
-	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
-		infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
+		// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
+		// data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
+		// enabled by default and double the configured KEEPALIVE_TIME used for new connections
+		// on that channel.
+		logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
 	}
 	id := f.LastStreamID
-	if id > 0 && id%2 != 1 {
+	if id > 0 && id%2 == 0 {
 		t.mu.Unlock()
-		t.Close()
-		return
+		return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
 	}
 	// A client can receive multiple GoAways from the server (see
 	// https://github.com/grpc/grpc-go/issues/1387).  The idea is that the first
@@ -1084,18 +1369,19 @@
 		// If there are multiple GoAways the first one should always have an ID greater than the following ones.
 		if id > t.prevGoAwayID {
 			t.mu.Unlock()
-			t.Close()
-			return
+			return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
 		}
 	default:
 		t.setGoAwayReason(f)
 		close(t.goAway)
-		t.controlBuf.put(&incomingGoAway{})
+		defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
 		// Notify the clientconn about the GOAWAY before we set the state to
 		// draining, to allow the client to stop attempting to create streams
 		// before disallowing new streams on this connection.
-		t.onGoAway(t.goAwayReason)
-		t.state = draining
+		if t.state != draining {
+			t.onClose(t.goAwayReason)
+			t.state = draining
+		}
 	}
 	// All streams with IDs greater than the GoAwayId
 	// and smaller than the previous GoAway ID should be killed.
@@ -1103,39 +1389,52 @@
 	if upperLimit == 0 { // This is the first GoAway Frame.
 		upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
 	}
+
+	t.prevGoAwayID = id
+	if len(t.activeStreams) == 0 {
+		t.mu.Unlock()
+		return connectionErrorf(true, nil, "received goaway and there are no active streams")
+	}
+
+	streamsToClose := make([]*ClientStream, 0)
 	for streamID, stream := range t.activeStreams {
 		if streamID > id && streamID <= upperLimit {
 			// The stream was unprocessed by the server.
-			atomic.StoreUint32(&stream.unprocessed, 1)
-			t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+			stream.unprocessed.Store(true)
+			streamsToClose = append(streamsToClose, stream)
 		}
 	}
-	t.prevGoAwayID = id
-	active := len(t.activeStreams)
 	t.mu.Unlock()
-	if active == 0 {
-		t.Close()
+	// Called outside t.mu because closeStream can take controlBuf's mu, which
+	// could induce deadlock and is not allowed.
+	for _, stream := range streamsToClose {
+		t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
 	}
+	return nil
 }
 
 // setGoAwayReason sets the value of t.goAwayReason based
 // on the GoAway frame received.
-// It expects a lock on transport's mutext to be held by
+// It expects a lock on transport's mutex to be held by
 // the caller.
 func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
 	t.goAwayReason = GoAwayNoReason
-	switch f.ErrCode {
-	case http2.ErrCodeEnhanceYourCalm:
+	if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
 		if string(f.DebugData()) == "too_many_pings" {
 			t.goAwayReason = GoAwayTooManyPings
 		}
 	}
+	if len(f.DebugData()) == 0 {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+	} else {
+		t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+	}
 }
 
-func (t *http2Client) GetGoAwayReason() GoAwayReason {
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	return t.goAwayReason
+	return t.goAwayReason, t.goAwayDebugMessage
 }
 
 func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -1152,7 +1451,7 @@
 		return
 	}
 	endStream := frame.StreamEnded()
-	atomic.StoreUint32(&s.bytesReceived, 1)
+	s.bytesReceived.Store(true)
 	initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
 
 	if !initialHeader && !endStream {
@@ -1162,87 +1461,211 @@
 		return
 	}
 
-	state := &decodeState{}
-	// Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
-	state.data.isGRPC = !initialHeader
-	if err := state.decodeHeader(frame); err != nil {
-		t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
+	// frame.Truncated is set to true when the framer detects that the current
+	// header list size hits the MaxHeaderListSize limit.
+	if frame.Truncated {
+		se := status.New(codes.Internal, "peer header list size exceeded limit")
+		t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
 		return
 	}
 
-	isHeader := false
-	defer func() {
-		if t.statsHandler != nil {
-			if isHeader {
-				inHeader := &stats.InHeader{
-					Client:     true,
-					WireLength: int(frame.Header().Length),
+	var (
+		// If a gRPC Response-Headers has already been received, then it means
+		// that the peer is speaking gRPC and we are in gRPC mode.
+		isGRPC         = !initialHeader
+		mdata          = make(map[string][]string)
+		contentTypeErr = "malformed header: missing HTTP content-type"
+		grpcMessage    string
+		recvCompress   string
+		httpStatusCode *int
+		httpStatusErr  string
+		rawStatusCode  = codes.Unknown
+		// headerError is set if an error is encountered while parsing the headers
+		headerError string
+	)
+
+	if initialHeader {
+		httpStatusErr = "malformed header: missing HTTP status"
+	}
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+				contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+				break
+			}
+			contentTypeErr = ""
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			isGRPC = true
+		case "grpc-encoding":
+			recvCompress = hf.Value
+		case "grpc-status":
+			code, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			rawStatusCode = codes.Code(uint32(code))
+		case "grpc-message":
+			grpcMessage = decodeGrpcMessage(hf.Value)
+		case ":status":
+			c, err := strconv.ParseInt(hf.Value, 10, 32)
+			if err != nil {
+				se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+				t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+				return
+			}
+			statusCode := int(c)
+			if statusCode >= 100 && statusCode < 200 {
+				if endStream {
+					se := status.New(codes.Internal, fmt.Sprintf(
+						"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+					t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
 				}
-				t.statsHandler.HandleRPC(s.ctx, inHeader)
-			} else {
-				inTrailer := &stats.InTrailer{
-					Client:     true,
-					WireLength: int(frame.Header().Length),
-				}
-				t.statsHandler.HandleRPC(s.ctx, inTrailer)
+				return
+			}
+			httpStatusCode = &statusCode
+			if statusCode == 200 {
+				httpStatusErr = ""
+				break
+			}
+
+			httpStatusErr = fmt.Sprintf(
+				"unexpected HTTP status code received from server: %d (%s)",
+				statusCode,
+				http.StatusText(statusCode),
+			)
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+				logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	if !isGRPC || httpStatusErr != "" {
+		var code = codes.Internal // when the header does not include an HTTP status, return INTERNAL
+
+		if httpStatusCode != nil {
+			var ok bool
+			code, ok = HTTPStatusConvTab[*httpStatusCode]
+			if !ok {
+				code = codes.Unknown
 			}
 		}
-	}()
+		var errs []string
+		if httpStatusErr != "" {
+			errs = append(errs, httpStatusErr)
+		}
+		if contentTypeErr != "" {
+			errs = append(errs, contentTypeErr)
+		}
+		// The response is not a valid gRPC response; fail the RPC accordingly.
+		se := status.New(code, strings.Join(errs, "; "))
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
 
-	// If headerChan hasn't been closed yet
-	if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
-		s.headerValid = true
-		if !endStream {
-			// HEADERS frame block carries a Response-Headers.
-			isHeader = true
+	if headerError != "" {
+		se := status.New(codes.Internal, headerError)
+		t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+		return
+	}
+
+	// For headers, set them in s.header and close headerChan.  For trailers or
+	// trailers-only, closeStream will set the trailers and close headerChan as
+	// needed.
+	if !endStream {
+		// If headerChan hasn't been closed yet (expected, given we checked it
+		// above, but something else could have potentially closed the whole
+		// stream).
+		if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+			s.headerValid = true
 			// These values can be set without any synchronization because
 			// stream goroutine will read it only after seeing a closed
 			// headerChan which we'll close after setting this.
-			s.recvCompress = state.data.encoding
-			if len(state.data.mdata) > 0 {
-				s.header = state.data.mdata
+			s.recvCompress = recvCompress
+			if len(mdata) > 0 {
+				s.header = mdata
 			}
-		} else {
-			// HEADERS frame block carries a Trailers-Only.
-			s.noHeaders = true
+			close(s.headerChan)
 		}
-		close(s.headerChan)
+	}
+
+	for _, sh := range t.statsHandlers {
+		if !endStream {
+			inHeader := &stats.InHeader{
+				Client:      true,
+				WireLength:  int(frame.Header().Length),
+				Header:      metadata.MD(mdata).Copy(),
+				Compression: s.recvCompress,
+			}
+			sh.HandleRPC(s.ctx, inHeader)
+		} else {
+			inTrailer := &stats.InTrailer{
+				Client:     true,
+				WireLength: int(frame.Header().Length),
+				Trailer:    metadata.MD(mdata).Copy(),
+			}
+			sh.HandleRPC(s.ctx, inTrailer)
+		}
 	}
 
 	if !endStream {
 		return
 	}
 
-	// if client received END_STREAM from server while stream was still active, send RST_STREAM
-	rst := s.getState() == streamActive
-	t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
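+	// Build the RPC status from the parsed grpc-status and grpc-message
+	// values, plus any grpc-status-details-bin trailer.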
+	status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+
+	// If client received END_STREAM from server while stream was still active,
+	// send RST_STREAM.
+	rstStream := s.getState() == streamActive
+	t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
 }
 
-// reader runs as a separate goroutine in charge of reading data from network
-// connection.
-//
-// TODO(zhaoq): currently one reader per transport. Investigate whether this is
-// optimal.
-// TODO(zhaoq): Check the validity of the incoming frame sequence.
-func (t *http2Client) reader() {
-	defer close(t.readerDone)
-	// Check the validity of server preface.
+// readServerPreface reads and handles the initial settings frame from the
+// server.
+func (t *http2Client) readServerPreface() error {
 	frame, err := t.framer.fr.ReadFrame()
 	if err != nil {
-		t.Close() // this kicks off resetTransport, so must be last before return
-		return
-	}
-	t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
-	if t.keepaliveEnabled {
-		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+		return connectionErrorf(true, err, "error reading server preface: %v", err)
 	}
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
-		t.Close() // this kicks off resetTransport, so must be last before return
+		return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
+	}
+	t.handleSettings(sf, true)
+	return nil
+}
+
+// reader verifies the server preface and reads all subsequent data from
+// network connection.  If the server preface is not read successfully, an
+// error is pushed to errCh; otherwise errCh is closed with no error.
+func (t *http2Client) reader(errCh chan<- error) {
+	var errClose error
+	defer func() {
+		close(t.readerDone)
+		if errClose != nil {
+			t.Close(errClose)
+		}
+	}()
+
+	if err := t.readServerPreface(); err != nil {
+		errCh <- err
 		return
 	}
-	t.onPrefaceReceipt()
-	t.handleSettings(sf, true)
+	close(errCh)
+	if t.keepaliveEnabled {
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+	}
 
 	// loop to keep reading incoming messages on this transport.
 	for {
@@ -1262,15 +1685,20 @@
 				if s != nil {
 					// use error detail to provide better err message
 					code := http2ErrConvTab[se.Code]
-					msg := t.framer.fr.ErrorDetail().Error()
+					errorDetail := t.framer.fr.ErrorDetail()
+					var msg string
+					if errorDetail != nil {
+						msg = errorDetail.Error()
+					} else {
+						msg = "received invalid frame"
+					}
 					t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
 				}
 				continue
-			} else {
-				// Transport error.
-				t.Close()
-				return
 			}
+			// Transport error.
+			errClose = connectionErrorf(true, err, "error reading from server: %v", err)
+			return
 		}
 		switch frame := frame.(type) {
 		case *http2.MetaHeadersFrame:
@@ -1284,24 +1712,26 @@
 		case *http2.PingFrame:
 			t.handlePing(frame)
 		case *http2.GoAwayFrame:
-			t.handleGoAway(frame)
+			errClose = t.handleGoAway(frame)
 		case *http2.WindowUpdateFrame:
 			t.handleWindowUpdate(frame)
 		default:
-			errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			if logger.V(logLevel) {
+				logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+			}
 		}
 	}
 }
 
-func minTime(a, b time.Duration) time.Duration {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
 func (t *http2Client) keepalive() {
+	var err error
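+	// keepaliveDone must be closed before calling t.Close, which blocks on it,
+	// so that Close does not deadlock when the keepalive fails.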
+	defer func() {
+		close(t.keepaliveDone)
+		if err != nil {
+			t.Close(err)
+		}
+	}()
 	p := &ping{data: [8]byte{}}
 	// True iff a ping has been sent, and no data has been received since then.
 	outstandingPing := false
@@ -1325,7 +1755,7 @@
 				continue
 			}
 			if outstandingPing && timeoutLeft <= 0 {
-				t.Close()
+				err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
 				return
 			}
 			t.mu.Lock()
@@ -1357,7 +1787,7 @@
 			// keepalive timer expired. In both cases, we need to send a ping.
 			if !outstandingPing {
 				if channelz.IsOn() {
-					atomic.AddInt64(&t.czData.kpCount, 1)
+					t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
 				}
 				t.controlBuf.put(p)
 				timeoutLeft = t.kp.Timeout
@@ -1367,9 +1797,8 @@
 			// timeoutLeft. This will ensure that we wait only for kp.Time
 			// before sending out the next ping (for cases where the ping is
 			// acked).
-			sleepDuration := minTime(t.kp.Time, timeoutLeft)
+			sleepDuration := min(t.kp.Time, timeoutLeft)
 			timeoutLeft -= sleepDuration
-			prevNano = lastRead
 			timer.Reset(sleepDuration)
 		case <-t.ctx.Done():
 			if !timer.Stop() {
@@ -1388,40 +1817,27 @@
 	return t.goAway
 }
 
-func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
-	s := channelz.SocketInternalMetric{
-		StreamsStarted:                  atomic.LoadInt64(&t.czData.streamsStarted),
-		StreamsSucceeded:                atomic.LoadInt64(&t.czData.streamsSucceeded),
-		StreamsFailed:                   atomic.LoadInt64(&t.czData.streamsFailed),
-		MessagesSent:                    atomic.LoadInt64(&t.czData.msgSent),
-		MessagesReceived:                atomic.LoadInt64(&t.czData.msgRecv),
-		KeepAlivesSent:                  atomic.LoadInt64(&t.czData.kpCount),
-		LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
-		LastMessageSentTimestamp:        time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
-		LastMessageReceivedTimestamp:    time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
-		LocalFlowControlWindow:          int64(t.fc.getSize()),
-		SocketOptions:                   channelz.GetSocketOption(t.conn),
-		LocalAddr:                       t.localAddr,
-		RemoteAddr:                      t.remoteAddr,
-		// RemoteName :
+func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
+	return &channelz.EphemeralSocketMetrics{
+		LocalFlowControlWindow:  int64(t.fc.getSize()),
+		RemoteFlowControlWindow: t.getOutFlowWindow(),
 	}
-	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
-		s.Security = au.GetSecurityValue()
-	}
-	s.RemoteFlowControlWindow = t.getOutFlowWindow()
-	return &s
 }
 
 func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
 
-func (t *http2Client) IncrMsgSent() {
-	atomic.AddInt64(&t.czData.msgSent, 1)
-	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+func (t *http2Client) incrMsgSent() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesSent.Add(1)
+		t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+	}
 }
 
-func (t *http2Client) IncrMsgRecv() {
-	atomic.AddInt64(&t.czData.msgRecv, 1)
-	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+func (t *http2Client) incrMsgRecv() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesReceived.Add(1)
+		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+	}
 }
 
 func (t *http2Client) getOutFlowWindow() int64 {
@@ -1438,3 +1854,9 @@
 		return -2
 	}
 }
+
+func (t *http2Client) stateForTesting() transportState {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	return t.state
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 0760383..83cee31 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -25,23 +25,29 @@
 	"fmt"
 	"io"
 	"math"
+	rand "math/rand/v2"
 	"net"
+	"net/http"
 	"strconv"
 	"sync"
 	"sync/atomic"
 	"time"
 
-	"github.com/golang/protobuf/proto"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/grpclog"
+	"google.golang.org/grpc/internal/grpcutil"
+	"google.golang.org/grpc/internal/pretty"
+	istatus "google.golang.org/grpc/internal/status"
+	"google.golang.org/grpc/internal/syscall"
+	"google.golang.org/grpc/mem"
+	"google.golang.org/protobuf/proto"
 
-	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcsync"
 	"google.golang.org/grpc/keepalive"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
@@ -53,42 +59,36 @@
 var (
 	// ErrIllegalHeaderWrite indicates that setting header is illegal because of
 	// the stream's state.
-	ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+	ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
 	// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
 	// than the limit set by peer.
-	ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
-	// statusRawProto is a function to get to the raw status proto wrapped in a
-	// status.Status without a proto.Clone().
-	statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
+	ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
 )
 
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
 // http2Server implements the ServerTransport interface with HTTP2.
 type http2Server struct {
-	ctx         context.Context
-	done        chan struct{}
-	conn        net.Conn
-	loopy       *loopyWriter
-	readerDone  chan struct{} // sync point to enable testing.
-	writerDone  chan struct{} // sync point to enable testing.
-	remoteAddr  net.Addr
-	localAddr   net.Addr
-	maxStreamID uint32               // max stream ID ever seen
-	authInfo    credentials.AuthInfo // auth info about the connection
-	inTapHandle tap.ServerInHandle
-	framer      *framer
+	lastRead        int64 // Keep this field 64-bit aligned. Accessed atomically.
+	done            chan struct{}
+	conn            net.Conn
+	loopy           *loopyWriter
+	readerDone      chan struct{} // sync point to enable testing.
+	loopyWriterDone chan struct{}
+	peer            peer.Peer
+	inTapHandle     tap.ServerInHandle
+	framer          *framer
 	// The max number of concurrent streams.
 	maxStreams uint32
 	// controlBuf delivers all the control related tasks (e.g., window
 	// updates, reset streams, and various settings) to the controller.
 	controlBuf *controlBuffer
 	fc         *trInFlow
-	stats      stats.Handler
-	// Flag to keep track of reading activity on transport.
-	// 1 is true and 0 is false.
-	activity uint32 // Accessed atomically.
+	stats      []stats.Handler
 	// Keepalive and max-age parameters for the server.
 	kp keepalive.ServerParameters
-
 	// Keepalive enforcement policy.
 	kep keepalive.EnforcementPolicy
 	// The time instance last ping was received.
@@ -105,15 +105,15 @@
 
 	mu sync.Mutex // guard the following
 
-	// drainChan is initialized when drain(...) is called the first time.
-	// After which the server writes out the first GoAway(with ID 2^31-1) frame.
-	// Then an independent goroutine will be launched to later send the second GoAway.
-	// During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
-	// Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
-	// already underway.
-	drainChan     chan struct{}
+	// drainEvent is initialized when Drain() is called the first time. After
+	// that, the server writes out the first GoAway frame (with ID 2^31-1). Then
+	// an independent goroutine will be launched to later send the second
+	// GoAway. During this time we don't want to write another first GoAway
+	// frame (with ID 2^31-1), so a call to Drain() is a no-op if drainEvent is
+	// already initialized, since draining is already underway.
+	drainEvent    *grpcsync.Event
 	state         transportState
-	activeStreams map[uint32]*Stream
+	activeStreams map[uint32]*ServerStream
 	// idle is the time instant when the connection went idle.
 	// This is either the beginning of the connection or when the number of
 	// RPCs go down to 0.
@@ -121,47 +121,72 @@
 	idle time.Time
 
 	// Fields below are for channelz metric collection.
-	channelzID int64 // channelz unique identification number
-	czData     *channelzData
-	bufferPool *bufferPool
+	channelz   *channelz.Socket
+	bufferPool mem.BufferPool
+
+	connectionID uint64
+
+	// maxStreamMu guards the maximum stream ID
+	// This lock may not be taken if mu is already held.
+	maxStreamMu sync.Mutex
+	maxStreamID uint32 // max stream ID ever seen
+
+	logger *grpclog.PrefixLogger
+	// setResetPingStrikes is stored as a closure instead of making this a
+	// method on http2Server to avoid a heap allocation when converting a method
+	// to a closure for passing to frames objects.
+	setResetPingStrikes func()
 }
 
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+// NewServerTransport creates a http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+	var authInfo credentials.AuthInfo
+	rawConn := conn
+	if config.Credentials != nil {
+		var err error
+		conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+		if err != nil {
+			// ErrConnDispatched means that the connection was dispatched away
+			// from gRPC; those connections should be left open. io.EOF means
+			// the connection was closed before handshaking completed, which can
+			// happen naturally from probers. Return these errors directly.
+			if err == credentials.ErrConnDispatched || err == io.EOF {
+				return nil, err
+			}
+			return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+		}
+	}
 	writeBufSize := config.WriteBufferSize
 	readBufSize := config.ReadBufferSize
 	maxHeaderListSize := defaultServerMaxHeaderListSize
 	if config.MaxHeaderListSize != nil {
 		maxHeaderListSize = *config.MaxHeaderListSize
 	}
-	framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
+	framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
 	// Send initial settings as connection preface to client.
 	isettings := []http2.Setting{{
 		ID:  http2.SettingMaxFrameSize,
 		Val: http2MaxFrameLen,
 	}}
-	// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
-	// permitted in the HTTP2 spec.
-	maxStreams := config.MaxStreams
-	if maxStreams == 0 {
-		maxStreams = math.MaxUint32
-	} else {
+	if config.MaxStreams != math.MaxUint32 {
 		isettings = append(isettings, http2.Setting{
 			ID:  http2.SettingMaxConcurrentStreams,
-			Val: maxStreams,
+			Val: config.MaxStreams,
 		})
 	}
-	dynamicWindow := true
 	iwz := int32(initialWindowSize)
 	if config.InitialWindowSize >= defaultWindowSize {
 		iwz = config.InitialWindowSize
-		dynamicWindow = false
 	}
 	icwz := int32(initialWindowSize)
 	if config.InitialConnWindowSize >= defaultWindowSize {
 		icwz = config.InitialConnWindowSize
-		dynamicWindow = false
 	}
 	if iwz != defaultWindowSize {
 		isettings = append(isettings, http2.Setting{
@@ -207,63 +232,90 @@
 	if kp.Timeout == 0 {
 		kp.Timeout = defaultServerKeepaliveTimeout
 	}
+	if kp.Time != infinity {
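+		// Mirror the keepalive timeout in the kernel via TCP_USER_TIMEOUT (where
+		// supported) so that data left unacknowledged for kp.Timeout also fails
+		// the connection at the TCP level.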
+		if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
+			return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+		}
+	}
 	kep := config.KeepalivePolicy
 	if kep.MinTime == 0 {
 		kep.MinTime = defaultKeepalivePolicyMinTime
 	}
+
 	done := make(chan struct{})
+	peer := peer.Peer{
+		Addr:      conn.RemoteAddr(),
+		LocalAddr: conn.LocalAddr(),
+		AuthInfo:  authInfo,
+	}
 	t := &http2Server{
-		ctx:               context.Background(),
 		done:              done,
 		conn:              conn,
-		remoteAddr:        conn.RemoteAddr(),
-		localAddr:         conn.LocalAddr(),
-		authInfo:          config.AuthInfo,
+		peer:              peer,
 		framer:            framer,
 		readerDone:        make(chan struct{}),
-		writerDone:        make(chan struct{}),
-		maxStreams:        maxStreams,
+		loopyWriterDone:   make(chan struct{}),
+		maxStreams:        config.MaxStreams,
 		inTapHandle:       config.InTapHandle,
 		fc:                &trInFlow{limit: uint32(icwz)},
 		state:             reachable,
-		activeStreams:     make(map[uint32]*Stream),
-		stats:             config.StatsHandler,
+		activeStreams:     make(map[uint32]*ServerStream),
+		stats:             config.StatsHandlers,
 		kp:                kp,
 		idle:              time.Now(),
 		kep:               kep,
 		initialWindowSize: iwz,
-		czData:            new(channelzData),
-		bufferPool:        newBufferPool(),
+		bufferPool:        config.BufferPool,
 	}
+	t.setResetPingStrikes = func() {
+		atomic.StoreUint32(&t.resetPingStrikes, 1)
+	}
+	var czSecurity credentials.ChannelzSecurityValue
+	if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+		czSecurity = au.GetSecurityValue()
+	}
+	t.channelz = channelz.RegisterSocket(
+		&channelz.Socket{
+			SocketType:       channelz.SocketTypeNormal,
+			Parent:           config.ChannelzParent,
+			SocketMetrics:    channelz.SocketMetrics{},
+			EphemeralMetrics: t.socketMetrics,
+			LocalAddr:        t.peer.LocalAddr,
+			RemoteAddr:       t.peer.Addr,
+			SocketOptions:    channelz.GetSocketOption(t.conn),
+			Security:         czSecurity,
+		},
+	)
+	t.logger = prefixLoggerForServerTransport(t)
+
 	t.controlBuf = newControlBuffer(t.done)
-	if dynamicWindow {
+	if !config.StaticWindowSize {
 		t.bdpEst = &bdpEstimator{
 			bdp:               initialWindowSize,
 			updateFlowControl: t.updateFlowControl,
 		}
 	}
-	if t.stats != nil {
-		t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
-			RemoteAddr: t.remoteAddr,
-			LocalAddr:  t.localAddr,
-		})
-		connBegin := &stats.ConnBegin{}
-		t.stats.HandleConn(t.ctx, connBegin)
-	}
-	if channelz.IsOn() {
-		t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
-	}
+
+	t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
 	t.framer.writer.Flush()
 
 	defer func() {
 		if err != nil {
-			t.Close()
+			t.Close(err)
 		}
 	}()
 
 	// Check the validity of client preface.
 	preface := make([]byte, len(clientPreface))
 	if _, err := io.ReadFull(t.conn, preface); err != nil {
+		// In deployments where a gRPC server runs behind a cloud load balancer
+		// which performs regular TCP level health checks, the connection is
+		// closed immediately by the latter.  Returning io.EOF here allows the
+		// grpc server implementation to recognize this scenario and suppress
+		// logging to reduce spam.
+		if err == io.EOF {
+			return nil, io.EOF
+		}
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
 	}
 	if !bytes.Equal(preface, clientPreface) {
@@ -277,7 +329,7 @@
 	if err != nil {
 		return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
 	}
-	atomic.StoreUint32(&t.activity, 1)
+	atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 	sf, ok := frame.(*http2.SettingsFrame)
 	if !ok {
 		return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@@ -285,96 +337,220 @@
 	t.handleSettings(sf)
 
 	go func() {
-		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
-		t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
-		if err := t.loopy.run(); err != nil {
-			errorf("transport: loopyWriter.run returning. Err: %v", err)
+		t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+		err := t.loopy.run()
+		close(t.loopyWriterDone)
+		if !isIOError(err) {
+			// Close the connection if a non-I/O error occurs (for I/O errors
+			// the reader will also encounter the error and close).  Wait 1
+			// second before closing the connection, or until the reader is done
+			// (i.e. the client already closed the connection or a connection
+			// error occurred).  This avoids the potential problem where there
+			// is unread data on the receive side of the connection, which, if
+			// closed, would lead to a TCP RST instead of FIN, and the client
+			// encountering errors.  For more info:
+			// https://github.com/grpc/grpc-go/issues/5358
+			timer := time.NewTimer(time.Second)
+			defer timer.Stop()
+			select {
+			case <-t.readerDone:
+			case <-timer.C:
+			}
+			t.conn.Close()
 		}
-		t.conn.Close()
-		close(t.writerDone)
 	}()
 	go t.keepalive()
 	return t, nil
 }
 
-// operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+// operateHeaders takes action on the decoded headers. It returns an error if a
+// fatal error is encountered and the transport needs to close; otherwise it
+// returns nil.
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
+	// Acquire the max stream ID lock for the entire duration.
+	t.maxStreamMu.Lock()
+	defer t.maxStreamMu.Unlock()
+
 	streamID := frame.Header().StreamID
-	state := &decodeState{
-		serverSide: true,
-	}
-	if err := state.decodeHeader(frame); err != nil {
-		if se, ok := status.FromError(err); ok {
-			t.controlBuf.put(&cleanupStream{
-				streamID: streamID,
-				rst:      true,
-				rstCode:  statusCodeConvTab[se.Code()],
-				onWrite:  func() {},
-			})
-		}
-		return false
+
+	// frame.Truncated is set to true when the framer detects that the current
+	// header list size hits the MaxHeaderListSize limit.
+	if frame.Truncated {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeFrameSize,
+			onWrite:  func() {},
+		})
+		return nil
 	}
 
-	buf := newRecvBuffer()
-	s := &Stream{
-		id:             streamID,
-		st:             t,
-		buf:            buf,
-		fc:             &inFlow{limit: uint32(t.initialWindowSize)},
-		recvCompress:   state.data.encoding,
-		method:         state.data.method,
-		contentSubtype: state.data.contentSubtype,
+	if streamID%2 != 1 || streamID <= t.maxStreamID {
+		// illegal gRPC stream id.
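+		// Client-initiated HTTP/2 streams must use odd IDs that strictly
+		// increase over the connection's lifetime (RFC 7540 §5.1.1).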
+		return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
 	}
+	t.maxStreamID = streamID
+
+	buf := newRecvBuffer()
+	s := &ServerStream{
+		Stream: &Stream{
+			id:  streamID,
+			buf: buf,
+			fc:  &inFlow{limit: uint32(t.initialWindowSize)},
+		},
+		st:               t,
+		headerWireLength: int(frame.Header().Length),
+	}
+	var (
+		// if false, content-type was missing or invalid
+		isGRPC      = false
+		contentType = ""
+		mdata       = make(metadata.MD, len(frame.Fields))
+		httpMethod  string
+		// these are set if an error is encountered while parsing the headers
+		protocolError bool
+		headerError   *status.Status
+
+		timeoutSet bool
+		timeout    time.Duration
+	)
+
+	for _, hf := range frame.Fields {
+		switch hf.Name {
+		case "content-type":
+			contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+			if !validContentType {
+				contentType = hf.Value
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			s.contentSubtype = contentSubtype
+			isGRPC = true
+
+		case "grpc-accept-encoding":
+			mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+			if hf.Value == "" {
+				continue
+			}
+			compressors := hf.Value
+			if s.clientAdvertisedCompressors != "" {
+				compressors = s.clientAdvertisedCompressors + "," + compressors
+			}
+			s.clientAdvertisedCompressors = compressors
+		case "grpc-encoding":
+			s.recvCompress = hf.Value
+		case ":method":
+			httpMethod = hf.Value
+		case ":path":
+			s.method = hf.Value
+		case "grpc-timeout":
+			timeoutSet = true
+			var err error
+			if timeout, err = decodeTimeout(hf.Value); err != nil {
+				headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
+			}
+		// "Transports must consider requests containing the Connection header
+		// as malformed." - A41
+		case "connection":
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
+			}
+			protocolError = true
+		default:
+			if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+				break
+			}
+			v, err := decodeMetadataHeader(hf.Name, hf.Value)
+			if err != nil {
+				headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
+				t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+				break
+			}
+			mdata[hf.Name] = append(mdata[hf.Name], v)
+		}
+	}
+
+	// "If multiple Host headers or multiple :authority headers are present, the
+	// request must be rejected with an HTTP status code 400 as required by Host
+	// validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+	// with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
+	// error, this takes precedence over a client not speaking gRPC.
+	if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+		errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Aborting the stream early: %v", errMsg)
+		}
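+		// Reject with HTTP 400; reset the stream only if the client has not
+		// already half-closed it.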
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusBadRequest,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
+	if protocolError {
+		t.controlBuf.put(&cleanupStream{
+			streamID: streamID,
+			rst:      true,
+			rstCode:  http2.ErrCodeProtocol,
+			onWrite:  func() {},
+		})
+		return nil
+	}
+	if !isGRPC {
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusUnsupportedMediaType,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+	if headerError != nil {
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusBadRequest,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         headerError,
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
+	// "If :authority is missing, Host must be renamed to :authority." - A41
+	if len(mdata[":authority"]) == 0 {
+		// No-op if host isn't present; an RPC with no eventual :authority
+		// header is still valid.
+		if host, ok := mdata["host"]; ok {
+			mdata[":authority"] = host
+			delete(mdata, "host")
+		}
+	} else {
+		// "If :authority is present, Host must be discarded" - A41
+		delete(mdata, "host")
+	}
+
 	if frame.StreamEnded() {
 		// s is just created by the caller. No lock needed.
 		s.state = streamReadDone
 	}
-	if state.data.timeoutSet {
-		s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
+	if timeoutSet {
+		s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
 	} else {
-		s.ctx, s.cancel = context.WithCancel(t.ctx)
+		s.ctx, s.cancel = context.WithCancel(ctx)
 	}
-	pr := &peer.Peer{
-		Addr: t.remoteAddr,
-	}
-	// Attach Auth info if there is any.
-	if t.authInfo != nil {
-		pr.AuthInfo = t.authInfo
-	}
-	s.ctx = peer.NewContext(s.ctx, pr)
+
 	// Attach the received metadata to the context.
-	if len(state.data.mdata) > 0 {
-		s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
-	}
-	if state.data.statsTags != nil {
-		s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
-	}
-	if state.data.statsTrace != nil {
-		s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
-	}
-	if t.inTapHandle != nil {
-		var err error
-		info := &tap.Info{
-			FullMethodName: state.data.method,
-		}
-		s.ctx, err = t.inTapHandle(s.ctx, info)
-		if err != nil {
-			warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
-			t.controlBuf.put(&cleanupStream{
-				streamID: s.id,
-				rst:      true,
-				rstCode:  http2.ErrCodeRefusedStream,
-				onWrite:  func() {},
-			})
-			s.cancel()
-			return false
-		}
+	if len(mdata) > 0 {
+		s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
 	}
 	t.mu.Lock()
 	if t.state != reachable {
 		t.mu.Unlock()
 		s.cancel()
-		return false
+		return nil
 	}
 	if uint32(len(t.activeStreams)) >= t.maxStreams {
 		t.mu.Unlock()
@@ -385,48 +561,95 @@
 			onWrite:  func() {},
 		})
 		s.cancel()
-		return false
+		return nil
 	}
-	if streamID%2 != 1 || streamID <= t.maxStreamID {
+	if httpMethod != http.MethodPost {
 		t.mu.Unlock()
-		// illegal gRPC stream id.
-		errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+		errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
+		if t.logger.V(logLevel) {
+			t.logger.Infof("Aborting the stream early: %v", errMsg)
+		}
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusMethodNotAllowed,
+			streamID:       streamID,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.Internal, errMsg),
+			rst:            !frame.StreamEnded(),
+		})
 		s.cancel()
-		return true
+		return nil
 	}
-	t.maxStreamID = streamID
+	if t.inTapHandle != nil {
+		var err error
+		if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
+			t.mu.Unlock()
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
+			}
+			stat, ok := status.FromError(err)
+			if !ok {
+				stat = status.New(codes.PermissionDenied, err.Error())
+			}
+			t.controlBuf.put(&earlyAbortStream{
+				httpStatus:     http.StatusOK,
+				streamID:       s.id,
+				contentSubtype: s.contentSubtype,
+				status:         stat,
+				rst:            !frame.StreamEnded(),
+			})
+			return nil
+		}
+	}
+
+	if s.ctx.Err() != nil {
+		t.mu.Unlock()
+		// Early abort in case the timeout was zero or so low it already fired.
+		t.controlBuf.put(&earlyAbortStream{
+			httpStatus:     http.StatusOK,
+			streamID:       s.id,
+			contentSubtype: s.contentSubtype,
+			status:         status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()),
+			rst:            !frame.StreamEnded(),
+		})
+		return nil
+	}
+
 	t.activeStreams[streamID] = s
 	if len(t.activeStreams) == 1 {
 		t.idle = time.Time{}
 	}
+
+	// Start a timer to close the stream on reaching the deadline.
+	if timeoutSet {
+		// We need to wait for s.cancel to be updated before calling
+		// t.closeStream to avoid data races.
+		cancelUpdated := make(chan struct{})
+		timer := internal.TimeAfterFunc(timeout, func() {
+			<-cancelUpdated
+			t.closeStream(s, true, http2.ErrCodeCancel, false)
+		})
+		oldCancel := s.cancel
+		s.cancel = func() {
+			oldCancel()
+			timer.Stop()
+		}
+		close(cancelUpdated)
+	}
 	t.mu.Unlock()
 	if channelz.IsOn() {
-		atomic.AddInt64(&t.czData.streamsStarted, 1)
-		atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+		t.channelz.SocketMetrics.StreamsStarted.Add(1)
+		t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
 	}
 	s.requestRead = func(n int) {
 		t.adjustWindow(s, uint32(n))
 	}
-	s.ctx = traceCtx(s.ctx, s.method)
-	if t.stats != nil {
-		s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
-		inHeader := &stats.InHeader{
-			FullMethod:  s.method,
-			RemoteAddr:  t.remoteAddr,
-			LocalAddr:   t.localAddr,
-			Compression: s.recvCompress,
-			WireLength:  int(frame.Header().Length),
-		}
-		t.stats.HandleRPC(s.ctx, inHeader)
-	}
 	s.ctxDone = s.ctx.Done()
 	s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
 	s.trReader = &transportReader{
 		reader: &recvBufferReader{
-			ctx:        s.ctx,
-			ctxDone:    s.ctxDone,
-			recv:       s.buf,
-			freeBuffer: t.bufferPool.put,
+			ctx:     s.ctx,
+			ctxDone: s.ctxDone,
+			recv:    s.buf,
 		},
 		windowHandler: func(n int) {
 			t.updateWindow(s, uint32(n))
@@ -438,21 +661,26 @@
 		wq:       s.wq,
 	})
 	handle(s)
-	return false
+	return nil
 }
 
 // HandleStreams receives incoming streams using the given handler. This is
 // typically run in a separate goroutine.
-// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
-	defer close(t.readerDone)
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
+	defer func() {
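+		// Signal the loopy writer that the reader has exited, then wait for
+		// loopy to finish before returning.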
+		close(t.readerDone)
+		<-t.loopyWriterDone
+	}()
 	for {
 		t.controlBuf.throttle()
 		frame, err := t.framer.fr.ReadFrame()
-		atomic.StoreUint32(&t.activity, 1)
+		atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
 		if err != nil {
 			if se, ok := err.(http2.StreamError); ok {
-				warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+				if t.logger.V(logLevel) {
+					t.logger.Warningf("Encountered http2.StreamError: %v", se)
+				}
 				t.mu.Lock()
 				s := t.activeStreams[se.StreamID]
 				t.mu.Unlock()
@@ -468,19 +696,20 @@
 				}
 				continue
 			}
-			if err == io.EOF || err == io.ErrUnexpectedEOF {
-				t.Close()
-				return
-			}
-			warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
-			t.Close()
+			t.Close(err)
 			return
 		}
 		switch frame := frame.(type) {
 		case *http2.MetaHeadersFrame:
-			if t.operateHeaders(frame, handle, traceCtx) {
-				t.Close()
-				break
+			if err := t.operateHeaders(ctx, frame, handle); err != nil {
+				// Any error processing client headers, e.g. invalid stream ID,
+				// is considered a protocol violation.
+				t.controlBuf.put(&goAway{
+					code:      http2.ErrCodeProtocol,
+					debugData: []byte(err.Error()),
+					closeConn: err,
+				})
+				continue
 			}
 		case *http2.DataFrame:
 			t.handleData(frame)
@@ -495,12 +724,14 @@
 		case *http2.GoAwayFrame:
 			// TODO: Handle GoAway from the client appropriately.
 		default:
-			errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Received unsupported frame type %T", frame)
+			}
 		}
 	}
 }
 
-func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	if t.activeStreams == nil {
@@ -518,7 +749,7 @@
 // adjustWindow sends out extra window update over the initial window size
 // of stream if the application is requesting data larger in size than
 // the window.
-func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
 	if w := s.fc.maybeAdjust(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
 	}
@@ -528,7 +759,7 @@
 // updateWindow adjusts the inbound quota for the stream and the transport.
 // Window updates will deliver to the controller for sending when
 // the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *Stream, n uint32) {
+func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
 	if w := s.fc.onRead(n); w > 0 {
 		t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
 			increment: w,
@@ -597,6 +828,10 @@
 	if !ok {
 		return
 	}
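+	// A DATA frame on a stream the client has already half-closed (END_STREAM
+	// seen) is a protocol violation; reset the stream.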
+	if s.getState() == streamReadDone {
+		t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+		return
+	}
 	if size > 0 {
 		if err := s.fc.onData(size); err != nil {
 			t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@@ -611,13 +846,16 @@
 		// guarantee f.Data() is consumed before the arrival of next frame.
 		// Can this copy be eliminated?
 		if len(f.Data()) > 0 {
-			buffer := t.bufferPool.get()
-			buffer.Reset()
-			buffer.Write(f.Data())
-			s.write(recvMsg{buffer: buffer})
+			pool := t.bufferPool
+			if pool == nil {
+			// Note that this is only supposed to be nil in tests. Otherwise,
+			// the transport is always initialized with a BufferPool.
+				pool = mem.DefaultBufferPool()
+			}
+			s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
 		}
 	}
-	if f.Header().Flags.Has(http2.FlagDataEndStream) {
+	if f.StreamEnded() {
 		// Received the end of stream from the client.
 		s.compareAndSwapState(streamActive, streamReadDone)
 		s.write(recvMsg{err: io.EOF})
@@ -657,7 +895,7 @@
 		}
 		return nil
 	})
-	t.controlBuf.executeAndPut(func(interface{}) bool {
+	t.controlBuf.executeAndPut(func() bool {
 		for _, f := range updateFuncs {
 			f()
 		}
@@ -674,8 +912,8 @@
 
 func (t *http2Server) handlePing(f *http2.PingFrame) {
 	if f.IsAck() {
-		if f.Data == goAwayPing.data && t.drainChan != nil {
-			close(t.drainChan)
+		if f.Data == goAwayPing.data && t.drainEvent != nil {
+			t.drainEvent.Fire()
 			return
 		}
 		// Maybe it's a BDP ping.
@@ -717,8 +955,7 @@
 
 	if t.pingStrikes > maxPingStrikes {
 		// Send goaway and close the connection.
-		errorf("transport: Got too many pings from the client, closing the connection.")
-		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+		t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
 	}
 }
 
@@ -742,7 +979,7 @@
 	return headerFields
 }
 
-func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+func (t *http2Server) checkForHeaderListSize(it any) bool {
 	if t.maxSendHeaderListSize == nil {
 		return true
 	}
@@ -750,19 +987,36 @@
 	var sz int64
 	for _, f := range hdrFrame.hf {
 		if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
-			errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			if t.logger.V(logLevel) {
+				t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+			}
 			return false
 		}
 	}
 	return true
 }
 
+func (t *http2Server) streamContextErr(s *ServerStream) error {
+	select {
+	case <-t.done:
+		return ErrConnClosing
+	default:
+	}
+	return ContextErr(s.ctx.Err())
+}
+
 // WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
-	if s.updateHeaderSent() || s.getState() == streamDone {
+func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+	if s.getState() == streamDone {
+		return t.streamContextErr(s)
+	}
+
+	if s.updateHeaderSent() {
 		return ErrIllegalHeaderWrite
 	}
-	s.hdrMu.Lock()
+
 	if md.Len() > 0 {
 		if s.header.Len() > 0 {
 			s.header = metadata.Join(s.header, md)
@@ -771,33 +1025,33 @@
 		}
 	}
 	if err := t.writeHeaderLocked(s); err != nil {
-		s.hdrMu.Unlock()
-		return err
+		switch e := err.(type) {
+		case ConnectionError:
+			return status.Error(codes.Unavailable, e.Desc)
+		default:
+			return status.Convert(err).Err()
+		}
 	}
-	s.hdrMu.Unlock()
 	return nil
 }
 
-func (t *http2Server) setResetPingStrikes() {
-	atomic.StoreUint32(&t.resetPingStrikes, 1)
-}
-
-func (t *http2Server) writeHeaderLocked(s *Stream) error {
+func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
-	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
-	// first and create a slice of that exact size.
+	// TODO(mmukhi): Benchmark whether performance improves if we count the
+	// metadata and other header fields first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
 	headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+	headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
 	if s.sendCompress != "" {
 		headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
 	}
 	headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
-	success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+	hf := &headerFrame{
 		streamID:  s.id,
 		hf:        headerFields,
 		endStream: false,
 		onWrite:   t.setResetPingStrikes,
-	})
+	}
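+	// Via executeAndPut, the size check and the enqueue happen atomically under
+	// the control buffer's lock.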
+	success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
 	if !success {
 		if err != nil {
 			return err
@@ -805,48 +1059,56 @@
 		t.closeStream(s, true, http2.ErrCodeInternal, false)
 		return ErrHeaderListSizeLimitViolation
 	}
-	if t.stats != nil {
-		// Note: WireLength is not set in outHeader.
-		// TODO(mmukhi): Revisit this later, if needed.
-		outHeader := &stats.OutHeader{}
-		t.stats.HandleRPC(s.Context(), outHeader)
+	for _, sh := range t.stats {
+		// Note: Headers are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		outHeader := &stats.OutHeader{
+			Header:      s.header.Copy(),
+			Compression: s.sendCompress,
+		}
+		sh.HandleRPC(s.Context(), outHeader)
 	}
 	return nil
 }
 
-// WriteStatus sends stream status to the client and terminates the stream.
+// writeStatus sends stream status to the client and terminates the stream.
-// There is no further I/O operations being able to perform on this stream.
-// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
-// OK is adopted.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
+// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
+	s.hdrMu.Lock()
+	defer s.hdrMu.Unlock()
+
 	if s.getState() == streamDone {
 		return nil
 	}
-	s.hdrMu.Lock()
+
-	// TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields
-	// first and create a slice of that exact size.
+	// TODO(mmukhi): Benchmark whether performance improves if we count the
+	// metadata and other header fields first and create a slice of that exact size.
 	headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
 	if !s.updateHeaderSent() {                      // No headers have been sent.
 		if len(s.header) > 0 { // Send a separate header frame.
 			if err := t.writeHeaderLocked(s); err != nil {
-				s.hdrMu.Unlock()
 				return err
 			}
 		} else { // Send a trailer only response.
 			headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
-			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+			headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
 		}
 	}
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
 	headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
 
-	if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
+	if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 {
+		// Do not use the user's grpc-status-details-bin (if present) if we are
+		// even attempting to set our own.
+		delete(s.trailer, grpcStatusDetailsBinHeader)
 		stBytes, err := proto.Marshal(p)
 		if err != nil {
 			// TODO: return error instead, when callers are able to handle it.
-			grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+			t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
 		} else {
-			headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+			headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
 		}
 	}
 
@@ -858,8 +1120,10 @@
 		endStream: true,
 		onWrite:   t.setResetPingStrikes,
 	}
-	s.hdrMu.Unlock()
-	success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+
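+	// Only the header list size check runs here (the enqueued item is nil); the
+	// trailing headers themselves are put on the control buffer by finishStream
+	// below.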
+	success, err := t.controlBuf.executeAndPut(func() bool {
+		return t.checkForHeaderListSize(trailingHeader)
+	}, nil)
 	if !success {
 		if err != nil {
 			return err
@@ -870,58 +1134,47 @@
 	// Send a RST_STREAM after the trailers if the client has not already half-closed.
 	rst := s.getState() == streamActive
 	t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
-	if t.stats != nil {
-		t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+	for _, sh := range t.stats {
+		// Note: The trailer fields are compressed with hpack after this call returns.
+		// No WireLength field is set here.
+		sh.HandleRPC(s.Context(), &stats.OutTrailer{
+			Trailer: s.trailer.Copy(),
+		})
 	}
 	return nil
 }
 
-// Write converts the data into HTTP2 data frame and sends it out. Non-nil error
-// is returns if it fails (e.g., framing error, transport error).
+// write converts the data into an HTTP2 data frame and sends it out. A non-nil
+// error is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
 	if !s.isHeaderSent() { // Headers haven't been written yet.
-		if err := t.WriteHeader(s, nil); err != nil {
-			if _, ok := err.(ConnectionError); ok {
-				return err
-			}
-			// TODO(mmukhi, dfawley): Make sure this is the right code to return.
-			return status.Errorf(codes.Internal, "transport: %v", err)
+		if err := t.writeHeader(s, nil); err != nil {
+			return err
 		}
 	} else {
 		// Writing headers checks for this condition.
 		if s.getState() == streamDone {
-			// TODO(mmukhi, dfawley): Should the server write also return io.EOF?
-			s.cancel()
-			select {
-			case <-t.done:
-				return ErrConnClosing
-			default:
-			}
-			return ContextErr(s.ctx.Err())
+			return t.streamContextErr(s)
 		}
 	}
-	// Add some data to header frame so that we can equally distribute bytes across frames.
-	emptyLen := http2MaxFrameLen - len(hdr)
-	if emptyLen > len(data) {
-		emptyLen = len(data)
-	}
-	hdr = append(hdr, data[:emptyLen]...)
-	data = data[emptyLen:]
+
 	df := &dataFrame{
 		streamID:    s.id,
 		h:           hdr,
-		d:           data,
+		data:        data,
 		onEachWrite: t.setResetPingStrikes,
 	}
-	if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
-		select {
-		case <-t.done:
-			return ErrConnClosing
-		default:
-		}
-		return ContextErr(s.ctx.Err())
+	dataLen := data.Len()
+	if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
+		return t.streamContextErr(s)
 	}
-	return t.controlBuf.put(df)
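+	// Hold an extra reference on the message buffers while the frame is queued;
+	// drop it if the put fails. On success, the writer side releases the data
+	// once the frame has been sent.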
+	data.Ref()
+	if err := t.controlBuf.put(df); err != nil {
+		data.Free()
+		return err
+	}
+	t.incrMsgSent()
+	return nil
 }
 
 // keepalive running in a separate goroutine does the following:
@@ -932,32 +1185,35 @@
 // after an additional duration of keepalive.Timeout.
 func (t *http2Server) keepalive() {
 	p := &ping{}
-	var pingSent bool
-	maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
-	maxAge := time.NewTimer(t.kp.MaxConnectionAge)
-	keepalive := time.NewTimer(t.kp.Time)
-	// NOTE: All exit paths of this function should reset their
-	// respective timers. A failure to do so will cause the
-	// following clean-up to deadlock and eventually leak.
+	// True iff a ping has been sent, and no data has been received since then.
+	outstandingPing := false
+	// Amount of time remaining before which we should receive an ACK for the
+	// last sent ping.
+	kpTimeoutLeft := time.Duration(0)
+	// Records the last value of t.lastRead before we go block on the timer.
+	// This is required to check for read activity since then.
+	prevNano := time.Now().UnixNano()
+	// Initialize the different timers to their default values.
+	idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+	ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+	kpTimer := time.NewTimer(t.kp.Time)
 	defer func() {
-		if !maxIdle.Stop() {
-			<-maxIdle.C
-		}
-		if !maxAge.Stop() {
-			<-maxAge.C
-		}
-		if !keepalive.Stop() {
-			<-keepalive.C
-		}
+		// The channels underlying these timers would need to be drained after a
+		// call to Stop() only if we intended to reset them; we are not
+		// resetting them here.
+		idleTimer.Stop()
+		ageTimer.Stop()
+		kpTimer.Stop()
 	}()
+
 	for {
 		select {
-		case <-maxIdle.C:
+		case <-idleTimer.C:
 			t.mu.Lock()
 			idle := t.idle
 			if idle.IsZero() { // The connection is non-idle.
 				t.mu.Unlock()
-				maxIdle.Reset(t.kp.MaxConnectionIdle)
+				idleTimer.Reset(t.kp.MaxConnectionIdle)
 				continue
 			}
 			val := t.kp.MaxConnectionIdle - time.Since(idle)
@@ -965,44 +1221,53 @@
 			if val <= 0 {
 				// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
 				// Gracefully close the connection.
-				t.drain(http2.ErrCodeNo, []byte{})
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				maxIdle.Reset(infinity)
+				t.Drain("max_idle")
 				return
 			}
-			maxIdle.Reset(val)
-		case <-maxAge.C:
-			t.drain(http2.ErrCodeNo, []byte{})
-			maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+			idleTimer.Reset(val)
+		case <-ageTimer.C:
+			t.Drain("max_age")
+			ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
 			select {
-			case <-maxAge.C:
+			case <-ageTimer.C:
 				// Close the connection after grace period.
-				infof("transport: closing server transport due to maximum connection age.")
-				t.Close()
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				maxAge.Reset(infinity)
+				if t.logger.V(logLevel) {
+					t.logger.Infof("Closing server transport due to maximum connection age")
+				}
+				t.controlBuf.put(closeConnection{})
 			case <-t.done:
 			}
 			return
-		case <-keepalive.C:
-			if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
-				pingSent = false
-				keepalive.Reset(t.kp.Time)
+		case <-kpTimer.C:
+			lastRead := atomic.LoadInt64(&t.lastRead)
+			if lastRead > prevNano {
+				// There has been read activity since the last time we were
+				// here. Set up the timer to fire kp.Time after the lastRead
+				// time and continue.
+				outstandingPing = false
+				kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+				prevNano = lastRead
 				continue
 			}
-			if pingSent {
-				infof("transport: closing server transport due to idleness.")
-				t.Close()
-				// Resetting the timer so that the clean-up doesn't deadlock.
-				keepalive.Reset(infinity)
+			if outstandingPing && kpTimeoutLeft <= 0 {
+				t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
 				return
 			}
-			pingSent = true
-			if channelz.IsOn() {
-				atomic.AddInt64(&t.czData.kpCount, 1)
+			if !outstandingPing {
+				if channelz.IsOn() {
+					t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
+				}
+				t.controlBuf.put(p)
+				kpTimeoutLeft = t.kp.Timeout
+				outstandingPing = true
 			}
-			t.controlBuf.put(p)
-			keepalive.Reset(t.kp.Timeout)
+			// The amount of time to sleep here is the minimum of kp.Time and
+			// kpTimeoutLeft. This ensures that we wait only for kp.Time
+			// before sending out the next ping (for cases where the ping is
+			// acked).
+			sleepDuration := min(t.kp.Time, kpTimeoutLeft)
+			kpTimeoutLeft -= sleepDuration
+			kpTimer.Reset(sleepDuration)
 		case <-t.done:
 			return
 		}
@@ -1012,11 +1277,14 @@
 // Close starts shutting down the http2Server transport.
 // TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
 // could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() error {
+func (t *http2Server) Close(err error) {
 	t.mu.Lock()
 	if t.state == closing {
 		t.mu.Unlock()
-		return errors.New("transport: Close() was already called")
+		return
+	}
+	if t.logger.V(logLevel) {
+		t.logger.Infof("Closing: %v", err)
 	}
 	t.state = closing
 	streams := t.activeStreams
@@ -1024,28 +1292,18 @@
 	t.mu.Unlock()
 	t.controlBuf.finish()
 	close(t.done)
-	err := t.conn.Close()
-	if channelz.IsOn() {
-		channelz.RemoveEntry(t.channelzID)
+	if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
+		t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
 	}
+	channelz.RemoveEntry(t.channelz.ID)
 	// Cancel all active streams.
 	for _, s := range streams {
 		s.cancel()
 	}
-	if t.stats != nil {
-		connEnd := &stats.ConnEnd{}
-		t.stats.HandleConn(t.ctx, connEnd)
-	}
-	return err
 }
 
 // deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
-	// In case stream sending and receiving are invoked in separate
-	// goroutines (e.g., bi-directional streaming), cancel needs to be
-	// called to interrupt the potential blocking on other goroutines.
-	s.cancel()
-
+func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
 	t.mu.Lock()
 	if _, ok := t.activeStreams[s.id]; ok {
 		delete(t.activeStreams, s.id)
@@ -1057,15 +1315,20 @@
 
 	if channelz.IsOn() {
 		if eosReceived {
-			atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+			t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
 		} else {
-			atomic.AddInt64(&t.czData.streamsFailed, 1)
+			t.channelz.SocketMetrics.StreamsFailed.Add(1)
 		}
 	}
 }
 
 // finishStream closes the stream and puts the trailing headerFrame into controlbuf.
-func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
 	oldState := s.swapState(streamDone)
 	if oldState == streamDone {
 		// If the stream was already done, return.
@@ -1084,7 +1347,15 @@
 }
 
 // closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+	// In case stream sending and receiving are invoked in separate
+	// goroutines (e.g., bi-directional streaming), cancel needs to be
+	// called to interrupt the potential blocking on other goroutines.
+	s.cancel()
+
+	// We can't return early even if the stream's state is "done" as the state
+	// might have been set by the `finishStream` method. Deleting the stream via
+	// `finishStream` can get blocked on flow control.
 	s.swapState(streamDone)
 	t.deleteStream(s, eosReceived)
 
@@ -1096,22 +1367,14 @@
 	})
 }
 
-func (t *http2Server) RemoteAddr() net.Addr {
-	return t.remoteAddr
-}
-
-func (t *http2Server) Drain() {
-	t.drain(http2.ErrCodeNo, []byte{})
-}
-
-func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
+func (t *http2Server) Drain(debugData string) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
-	if t.drainChan != nil {
+	if t.drainEvent != nil {
 		return
 	}
-	t.drainChan = make(chan struct{})
-	t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+	t.drainEvent = grpcsync.NewEvent()
+	t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
 }
 
 var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@@ -1119,49 +1382,52 @@
 // Handles outgoing GoAway and returns true if loopy needs to put itself
 // in draining mode.
 func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
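+	// Lock ordering: maxStreamMu must be acquired before mu (see the field
+	// comment on maxStreamMu).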
+	t.maxStreamMu.Lock()
 	t.mu.Lock()
 	if t.state == closing { // TODO(mmukhi): This seems unnecessary.
 		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
 		// The transport is closing.
 		return false, ErrConnClosing
 	}
-	sid := t.maxStreamID
 	if !g.headsUp {
 		// Stop accepting more streams now.
 		t.state = draining
+		sid := t.maxStreamID
+		retErr := g.closeConn
 		if len(t.activeStreams) == 0 {
-			g.closeConn = true
+			retErr = errors.New("second GOAWAY written and no active streams left to process")
 		}
 		t.mu.Unlock()
+		t.maxStreamMu.Unlock()
 		if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
 			return false, err
 		}
-		if g.closeConn {
-			// Abruptly close the connection following the GoAway (via
-			// loopywriter).  But flush out what's inside the buffer first.
-			t.framer.writer.Flush()
-			return false, fmt.Errorf("transport: Connection closing")
+		t.framer.writer.Flush()
+		if retErr != nil {
+			return false, retErr
 		}
 		return true, nil
 	}
 	t.mu.Unlock()
+	t.maxStreamMu.Unlock()
-	// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
+	// For a graceful close, send out a GoAway with stream ID of MaxUInt32.
 	// Follow that with a ping and wait for the ack to come back or a timer
 	// to expire. During this time accept new streams since they might have
 	// originated before the GoAway reaches the client.
 	// After getting the ack or timer expiration send out another GoAway this
 	// time with an ID of the max stream server intends to process.
-	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+	if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
 		return false, err
 	}
 	if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
 		return false, err
 	}
 	go func() {
-		timer := time.NewTimer(time.Minute)
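+		// Give the client up to 5s to ack the GOAWAY ping before proceeding to
+		// send the second GOAWAY.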
+		timer := time.NewTimer(5 * time.Second)
 		defer timer.Stop()
 		select {
-		case <-t.drainChan:
+		case <-t.drainEvent.Done():
 		case <-timer.C:
 		case <-t.done:
 			return
@@ -1171,38 +1437,25 @@
 	return false, nil
 }
 
-func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
-	s := channelz.SocketInternalMetric{
-		StreamsStarted:                   atomic.LoadInt64(&t.czData.streamsStarted),
-		StreamsSucceeded:                 atomic.LoadInt64(&t.czData.streamsSucceeded),
-		StreamsFailed:                    atomic.LoadInt64(&t.czData.streamsFailed),
-		MessagesSent:                     atomic.LoadInt64(&t.czData.msgSent),
-		MessagesReceived:                 atomic.LoadInt64(&t.czData.msgRecv),
-		KeepAlivesSent:                   atomic.LoadInt64(&t.czData.kpCount),
-		LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
-		LastMessageSentTimestamp:         time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
-		LastMessageReceivedTimestamp:     time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
-		LocalFlowControlWindow:           int64(t.fc.getSize()),
-		SocketOptions:                    channelz.GetSocketOption(t.conn),
-		LocalAddr:                        t.localAddr,
-		RemoteAddr:                       t.remoteAddr,
-		// RemoteName :
+func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
+	return &channelz.EphemeralSocketMetrics{
+		LocalFlowControlWindow:  int64(t.fc.getSize()),
+		RemoteFlowControlWindow: t.getOutFlowWindow(),
 	}
-	if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
-		s.Security = au.GetSecurityValue()
-	}
-	s.RemoteFlowControlWindow = t.getOutFlowWindow()
-	return &s
 }
 
-func (t *http2Server) IncrMsgSent() {
-	atomic.AddInt64(&t.czData.msgSent, 1)
-	atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+func (t *http2Server) incrMsgSent() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesSent.Add(1)
+		t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+	}
 }
 
-func (t *http2Server) IncrMsgRecv() {
-	atomic.AddInt64(&t.czData.msgRecv, 1)
-	atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+func (t *http2Server) incrMsgRecv() {
+	if channelz.IsOn() {
+		t.channelz.SocketMetrics.MessagesReceived.Add(1)
+		t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+	}
 }
 
 func (t *http2Server) getOutFlowWindow() int64 {
@@ -1220,12 +1473,36 @@
 	}
 }
 
+// Peer returns the peer of the transport.
+func (t *http2Server) Peer() *peer.Peer {
+	return &peer.Peer{
+		Addr:      t.peer.Addr,
+		LocalAddr: t.peer.LocalAddr,
+		AuthInfo:  t.peer.AuthInfo, // Can be nil
+	}
+}
+
 func getJitter(v time.Duration) time.Duration {
 	if v == infinity {
 		return 0
 	}
 	// Generate a jitter between +/- 10% of the value.
 	r := int64(v / 10)
-	j := grpcrand.Int63n(2*r) - r
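+	// math/rand/v2 (Go 1.22+) replaces the internal grpcrand package.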
+	j := rand.Int64N(2*r) - r
 	return time.Duration(j)
 }
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+	return conn
+}
+
+// SetConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func SetConnection(ctx context.Context, conn net.Conn) context.Context {
+	return context.WithValue(ctx, connectionKey{}, conn)
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 8f5f334..e3663f8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -20,37 +20,30 @@
 
 import (
 	"bufio"
-	"bytes"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"io"
 	"math"
 	"net"
 	"net/http"
+	"net/url"
 	"strconv"
 	"strings"
+	"sync"
 	"time"
 	"unicode/utf8"
 
-	"github.com/golang/protobuf/proto"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
-	spb "google.golang.org/genproto/googleapis/rpc/status"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
 )
 
 const (
 	// http2MaxFrameLen specifies the max length of a HTTP2 frame.
 	http2MaxFrameLen = 16384 // 16KB frame
-	// http://http2.github.io/http2-spec/#SettingValues
+	// https://httpwg.org/specs/rfc7540.html#SettingValues
 	http2InitHeaderTableSize = 4096
-	// baseContentType is the base content-type for gRPC.  This is a valid
-	// content-type on it's own, but can also include a content-subtype such as
-	// "proto" as a suffix after "+" or ";".  See
-	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
-	// for more details.
-	baseContentType = "application/grpc"
 )
 
 var (
@@ -71,13 +64,6 @@
 		http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
 		http2.ErrCodeHTTP11Required:     codes.Internal,
 	}
-	statusCodeConvTab = map[codes.Code]http2.ErrCode{
-		codes.Internal:          http2.ErrCodeInternal,
-		codes.Canceled:          http2.ErrCodeCancel,
-		codes.Unavailable:       http2.ErrCodeRefusedStream,
-		codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
-		codes.PermissionDenied:  http2.ErrCodeInadequateSecurity,
-	}
 	// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
 	HTTPStatusConvTab = map[int]codes.Code{
 		// 400 Bad Request - INTERNAL.
@@ -99,51 +85,7 @@
 	}
 )
 
-type parsedHeaderData struct {
-	encoding string
-	// statusGen caches the stream status received from the trailer the server
-	// sent.  Client side only.  Do not access directly.  After all trailers are
-	// parsed, use the status method to retrieve the status.
-	statusGen *status.Status
-	// rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
-	// intended for direct access outside of parsing.
-	rawStatusCode *int
-	rawStatusMsg  string
-	httpStatus    *int
-	// Server side only fields.
-	timeoutSet bool
-	timeout    time.Duration
-	method     string
-	// key-value metadata map from the peer.
-	mdata          map[string][]string
-	statsTags      []byte
-	statsTrace     []byte
-	contentSubtype string
-
-	// isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
-	//
-	// We are in gRPC mode (peer speaking gRPC) if:
-	// 	* We are client side and have already received a HEADER frame that indicates gRPC peer.
-	//  * The header contains valid  a content-type, i.e. a string starts with "application/grpc"
-	// And we should handle error specific to gRPC.
-	//
-	// Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
-	// are in HTTP fallback mode, and should handle error specific to HTTP.
-	isGRPC         bool
-	grpcErr        error
-	httpErr        error
-	contentTypeErr string
-}
-
-// decodeState configures decoding criteria and records the decoded data.
-type decodeState struct {
-	// whether decoding on server side or not
-	serverSide bool
-
-	// Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
-	// frame once decodeHeader function has been invoked and returned.
-	data parsedHeaderData
-}
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
 
 // isReservedHeader checks whether hdr belongs to HTTP2 headers
 // reserved by gRPC protocol. Any other headers are classified as the
@@ -160,7 +102,6 @@
 		"grpc-message",
 		"grpc-status",
 		"grpc-timeout",
-		"grpc-status-details-bin",
 		// Intentionally exclude grpc-previous-rpc-attempts and
 		// grpc-retry-pushback-ms, which are "reserved", but their API
 		// intentionally works via metadata.
@@ -182,54 +123,6 @@
 	}
 }
 
-// contentSubtype returns the content-subtype for the given content-type.  The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
-	if contentType == baseContentType {
-		return "", true
-	}
-	if !strings.HasPrefix(contentType, baseContentType) {
-		return "", false
-	}
-	// guaranteed since != baseContentType and has baseContentType prefix
-	switch contentType[len(baseContentType)] {
-	case '+', ';':
-		// this will return true for "application/grpc+" or "application/grpc;"
-		// which the previous validContentType function tested to be valid, so we
-		// just say that no content-subtype is specified in this case
-		return contentType[len(baseContentType)+1:], true
-	default:
-		return "", false
-	}
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
-	if contentSubtype == "" {
-		return baseContentType
-	}
-	return baseContentType + "+" + contentSubtype
-}
-
-func (d *decodeState) status() *status.Status {
-	if d.data.statusGen == nil {
-		// No status-details were provided; generate status using code/msg.
-		d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
-	}
-	return d.data.statusGen
-}
-
 const binHdrSuffix = "-bin"
 
 func encodeBinHeader(v []byte) string {
@@ -259,166 +152,6 @@
 	return v, nil
 }
 
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
-	// frame.Truncated is set to true when framer detects that the current header
-	// list size hits MaxHeaderListSize limit.
-	if frame.Truncated {
-		return status.Error(codes.Internal, "peer header list size exceeded limit")
-	}
-
-	for _, hf := range frame.Fields {
-		d.processHeaderField(hf)
-	}
-
-	if d.data.isGRPC {
-		if d.data.grpcErr != nil {
-			return d.data.grpcErr
-		}
-		if d.serverSide {
-			return nil
-		}
-		if d.data.rawStatusCode == nil && d.data.statusGen == nil {
-			// gRPC status doesn't exist.
-			// Set rawStatusCode to be unknown and return nil error.
-			// So that, if the stream has ended this Unknown status
-			// will be propagated to the user.
-			// Otherwise, it will be ignored. In which case, status from
-			// a later trailer, that has StreamEnded flag set, is propagated.
-			code := int(codes.Unknown)
-			d.data.rawStatusCode = &code
-		}
-		return nil
-	}
-
-	// HTTP fallback mode
-	if d.data.httpErr != nil {
-		return d.data.httpErr
-	}
-
-	var (
-		code = codes.Internal // when header does not include HTTP status, return INTERNAL
-		ok   bool
-	)
-
-	if d.data.httpStatus != nil {
-		code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
-		if !ok {
-			code = codes.Unknown
-		}
-	}
-
-	return status.Error(code, d.constructHTTPErrMsg())
-}
-
-// constructErrMsg constructs error message to be returned in HTTP fallback mode.
-// Format: HTTP status code and its corresponding message + content-type error message.
-func (d *decodeState) constructHTTPErrMsg() string {
-	var errMsgs []string
-
-	if d.data.httpStatus == nil {
-		errMsgs = append(errMsgs, "malformed header: missing HTTP status")
-	} else {
-		errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
-	}
-
-	if d.data.contentTypeErr == "" {
-		errMsgs = append(errMsgs, "transport: missing content-type field")
-	} else {
-		errMsgs = append(errMsgs, d.data.contentTypeErr)
-	}
-
-	return strings.Join(errMsgs, "; ")
-}
-
-func (d *decodeState) addMetadata(k, v string) {
-	if d.data.mdata == nil {
-		d.data.mdata = make(map[string][]string)
-	}
-	d.data.mdata[k] = append(d.data.mdata[k], v)
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
-	switch f.Name {
-	case "content-type":
-		contentSubtype, validContentType := contentSubtype(f.Value)
-		if !validContentType {
-			d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
-			return
-		}
-		d.data.contentSubtype = contentSubtype
-		// TODO: do we want to propagate the whole content-type in the metadata,
-		// or come up with a way to just propagate the content-subtype if it was set?
-		// ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
-		// in the metadata?
-		d.addMetadata(f.Name, f.Value)
-		d.data.isGRPC = true
-	case "grpc-encoding":
-		d.data.encoding = f.Value
-	case "grpc-status":
-		code, err := strconv.Atoi(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
-			return
-		}
-		d.data.rawStatusCode = &code
-	case "grpc-message":
-		d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
-	case "grpc-status-details-bin":
-		v, err := decodeBinHeader(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
-			return
-		}
-		s := &spb.Status{}
-		if err := proto.Unmarshal(v, s); err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
-			return
-		}
-		d.data.statusGen = status.FromProto(s)
-	case "grpc-timeout":
-		d.data.timeoutSet = true
-		var err error
-		if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
-		}
-	case ":path":
-		d.data.method = f.Value
-	case ":status":
-		code, err := strconv.Atoi(f.Value)
-		if err != nil {
-			d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
-			return
-		}
-		d.data.httpStatus = &code
-	case "grpc-tags-bin":
-		v, err := decodeBinHeader(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
-			return
-		}
-		d.data.statsTags = v
-		d.addMetadata(f.Name, string(v))
-	case "grpc-trace-bin":
-		v, err := decodeBinHeader(f.Value)
-		if err != nil {
-			d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
-			return
-		}
-		d.data.statsTrace = v
-		d.addMetadata(f.Name, string(v))
-	default:
-		if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
-			break
-		}
-		v, err := decodeMetadataHeader(f.Name, f.Value)
-		if err != nil {
-			errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
-			return
-		}
-		d.addMetadata(f.Name, v)
-	}
-}
-
 type timeoutUnit uint8
 
 const (
@@ -449,41 +182,6 @@
 	return
 }
 
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and round-up the result. Note that this is
-// equivalent to (d+r-1)/r but has less chance to overflow.
-func div(d, r time.Duration) int64 {
-	if m := d % r; m > 0 {
-		return int64(d/r + 1)
-	}
-	return int64(d / r)
-}
-
-// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
-func encodeTimeout(t time.Duration) string {
-	if t <= 0 {
-		return "0n"
-	}
-	if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "n"
-	}
-	if d := div(t, time.Microsecond); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "u"
-	}
-	if d := div(t, time.Millisecond); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "m"
-	}
-	if d := div(t, time.Second); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "S"
-	}
-	if d := div(t, time.Minute); d <= maxTimeoutValue {
-		return strconv.FormatInt(d, 10) + "M"
-	}
-	// Note that maxTimeoutValue * time.Hour > MaxInt64.
-	return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
 func decodeTimeout(s string) (time.Duration, error) {
 	size := len(s)
 	if size < 2 {
@@ -498,11 +196,11 @@
 	if !ok {
 		return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
 	}
-	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	t, err := strconv.ParseUint(s[:size-1], 10, 64)
 	if err != nil {
 		return 0, err
 	}
-	const maxHours = math.MaxInt64 / int64(time.Hour)
+	const maxHours = math.MaxInt64 / uint64(time.Hour)
 	if d == time.Hour && t > maxHours {
 		// This timeout would overflow math.MaxInt64; clamp it.
 		return time.Duration(math.MaxInt64), nil
@@ -538,13 +236,13 @@
 }
 
 func encodeGrpcMessageUnchecked(msg string) string {
-	var buf bytes.Buffer
+	var sb strings.Builder
 	for len(msg) > 0 {
 		r, size := utf8.DecodeRuneInString(msg)
 		for _, b := range []byte(string(r)) {
 			if size > 1 {
 				// If size > 1, r is not ascii. Always do percent encoding.
-				buf.WriteString(fmt.Sprintf("%%%02X", b))
+				fmt.Fprintf(&sb, "%%%02X", b)
 				continue
 			}
 
@@ -553,14 +251,14 @@
 			//
 			// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
 			if b >= spaceByte && b <= tildeByte && b != percentByte {
-				buf.WriteByte(b)
+				sb.WriteByte(b)
 			} else {
-				buf.WriteString(fmt.Sprintf("%%%02X", b))
+				fmt.Fprintf(&sb, "%%%02X", b)
 			}
 		}
 		msg = msg[size:]
 	}
-	return buf.String()
+	return sb.String()
 }
 
 // decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
@@ -578,83 +276,127 @@
 }
 
 func decodeGrpcMessageUnchecked(msg string) string {
-	var buf bytes.Buffer
+	var sb strings.Builder
 	lenMsg := len(msg)
 	for i := 0; i < lenMsg; i++ {
 		c := msg[i]
 		if c == percentByte && i+2 < lenMsg {
 			parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
 			if err != nil {
-				buf.WriteByte(c)
+				sb.WriteByte(c)
 			} else {
-				buf.WriteByte(byte(parsed))
+				sb.WriteByte(byte(parsed))
 				i += 2
 			}
 		} else {
-			buf.WriteByte(c)
+			sb.WriteByte(c)
 		}
 	}
-	return buf.String()
+	return sb.String()
 }
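
Editor's note: encodeGrpcMessageUnchecked/decodeGrpcMessageUnchecked implement the grpc-message percent-encoding: printable ASCII other than '%' passes through, every other byte becomes %XX. Since the internal functions are not importable, here is a hedged standalone re-implementation for illustration:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// encode percent-encodes bytes outside %x20-%x7E (and '%'), as the
// grpc-message header requires. Multi-byte runes are always escaped
// because each of their bytes is >= 0x80.
func encode(msg string) string {
	var sb strings.Builder
	for _, r := range msg {
		for _, b := range []byte(string(r)) {
			if b >= ' ' && b <= '~' && b != '%' {
				sb.WriteByte(b)
			} else {
				fmt.Fprintf(&sb, "%%%02X", b)
			}
		}
	}
	return sb.String()
}

// decode reverses encode, leaving malformed escapes untouched.
func decode(msg string) string {
	var sb strings.Builder
	for i := 0; i < len(msg); i++ {
		c := msg[i]
		if c == '%' && i+2 < len(msg) {
			if v, err := strconv.ParseUint(msg[i+1:i+3], 16, 8); err == nil {
				sb.WriteByte(byte(v))
				i += 2
				continue
			}
		}
		sb.WriteByte(c)
	}
	return sb.String()
}

func main() {
	msg := "déjà vu\n"
	enc := encode(msg)
	fmt.Printf("%s -> %s\n", strconv.Quote(msg), enc) // d%C3%A9j%C3%A0 vu%0A
	fmt.Println(decode(enc) == msg)                   // true
}
```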
 
 type bufWriter struct {
+	pool      *sync.Pool
 	buf       []byte
 	offset    int
 	batchSize int
 	conn      net.Conn
 	err       error
-
-	onFlush func()
 }
 
-func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
-	return &bufWriter{
-		buf:       make([]byte, batchSize*2),
+func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
+	w := &bufWriter{
 		batchSize: batchSize,
 		conn:      conn,
+		pool:      pool,
 	}
+	// A nil pool indicates that this writer uses its own, non-shared buffer.
+	if pool == nil {
+		w.buf = make([]byte, batchSize)
+	}
+	return w
 }
 
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
 	if w.err != nil {
 		return 0, w.err
 	}
 	if w.batchSize == 0 { // Buffer has been disabled.
-		return w.conn.Write(b)
+		n, err := w.conn.Write(b)
+		return n, toIOError(err)
 	}
+	if w.buf == nil {
+		b := w.pool.Get().(*[]byte)
+		w.buf = *b
+	}
+	written := 0
 	for len(b) > 0 {
-		nn := copy(w.buf[w.offset:], b)
-		b = b[nn:]
-		w.offset += nn
-		n += nn
-		if w.offset >= w.batchSize {
-			err = w.Flush()
+		copied := copy(w.buf[w.offset:], b)
+		b = b[copied:]
+		written += copied
+		w.offset += copied
+		if w.offset < w.batchSize {
+			continue
+		}
+		if err := w.flushKeepBuffer(); err != nil {
+			return written, err
 		}
 	}
-	return n, err
+	return written, nil
 }
 
 func (w *bufWriter) Flush() error {
+	err := w.flushKeepBuffer()
+	// Only release the buffer if we are in a "shared" mode
+	if w.buf != nil && w.pool != nil {
+		b := w.buf
+		w.pool.Put(&b)
+		w.buf = nil
+	}
+	return err
+}
+
+func (w *bufWriter) flushKeepBuffer() error {
 	if w.err != nil {
 		return w.err
 	}
 	if w.offset == 0 {
 		return nil
 	}
-	if w.onFlush != nil {
-		w.onFlush()
-	}
 	_, w.err = w.conn.Write(w.buf[:w.offset])
+	w.err = toIOError(w.err)
 	w.offset = 0
 	return w.err
 }
 
+type ioError struct {
+	error
+}
+
+func (i ioError) Unwrap() error {
+	return i.error
+}
+
+func isIOError(err error) bool {
+	return errors.As(err, &ioError{})
+}
+
+func toIOError(err error) error {
+	if err == nil {
+		return nil
+	}
+	return ioError{error: err}
+}
+
 type framer struct {
 	writer *bufWriter
 	fr     *http2.Framer
 }
 
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
+var writeBufferPoolMap = make(map[int]*sync.Pool)
+var writeBufferMutex sync.Mutex
+
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
 	if writeBufferSize < 0 {
 		writeBufferSize = 0
 	}
@@ -662,7 +404,11 @@
 	if readBufferSize > 0 {
 		r = bufio.NewReaderSize(r, readBufferSize)
 	}
-	w := newBufWriter(conn, writeBufferSize)
+	var pool *sync.Pool
+	if sharedWriteBuffer {
+		pool = getWriteBufferPool(writeBufferSize)
+	}
+	w := newBufWriter(conn, writeBufferSize, pool)
 	f := &framer{
 		writer: w,
 		fr:     http2.NewFramer(w, r),
@@ -675,3 +421,48 @@
 	f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
 	return f
 }
+
+func getWriteBufferPool(size int) *sync.Pool {
+	writeBufferMutex.Lock()
+	defer writeBufferMutex.Unlock()
+	pool, ok := writeBufferPoolMap[size]
+	if ok {
+		return pool
+	}
+	pool = &sync.Pool{
+		New: func() any {
+			b := make([]byte, size)
+			return &b
+		},
+	}
+	writeBufferPoolMap[size] = pool
+	return pool
+}
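
Editor's note: getWriteBufferPool caches one sync.Pool per buffer size, and pools *[]byte rather than []byte so that Put does not allocate when the slice header is boxed into an interface. A minimal sketch of the same pattern with local names:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.Mutex
	pools = make(map[int]*sync.Pool)
)

// poolFor returns a pool of *[]byte of the given size, creating it once.
func poolFor(size int) *sync.Pool {
	mu.Lock()
	defer mu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b
	}}
	pools[size] = p
	return p
}

func main() {
	p := poolFor(32 * 1024)
	buf := p.Get().(*[]byte) // borrow a 32KiB scratch buffer
	fmt.Println(len(*buf))   // 32768
	p.Put(buf)               // return it for reuse by other writers
}
```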
+
+// ParseDialTarget returns the network and address to pass to dialer.
+func ParseDialTarget(target string) (string, string) {
+	net := "tcp"
+	m1 := strings.Index(target, ":")
+	m2 := strings.Index(target, ":/")
+	// handle unix:addr which will fail with url.Parse
+	if m1 >= 0 && m2 < 0 {
+		if n := target[0:m1]; n == "unix" {
+			return n, target[m1+1:]
+		}
+	}
+	if m2 >= 0 {
+		t, err := url.Parse(target)
+		if err != nil {
+			return net, target
+		}
+		scheme := t.Scheme
+		addr := t.Path
+		if scheme == "unix" {
+			if addr == "" {
+				addr = t.Host
+			}
+			return scheme, addr
+		}
+	}
+	return net, target
+}
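
Editor's note: ParseDialTarget special-cases "unix:addr" (which url.Parse would mishandle) before falling back to scheme parsing. An illustrative local re-implementation, since the internal package is not importable; netw replaces the original's shadowing net variable:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func parseDialTarget(target string) (string, string) {
	netw := "tcp"
	m1 := strings.Index(target, ":")
	m2 := strings.Index(target, ":/")
	// "unix:addr" has no "://" and would confuse url.Parse.
	if m1 >= 0 && m2 < 0 {
		if n := target[0:m1]; n == "unix" {
			return n, target[m1+1:]
		}
	}
	if m2 >= 0 {
		t, err := url.Parse(target)
		if err != nil {
			return netw, target
		}
		if t.Scheme == "unix" {
			addr := t.Path
			if addr == "" {
				addr = t.Host
			}
			return t.Scheme, addr
		}
	}
	return netw, target
}

func main() {
	fmt.Println(parseDialTarget("unix:/var/run/app.sock"))   // unix /var/run/app.sock
	fmt.Println(parseDialTarget("unix:///var/run/app.sock")) // unix /var/run/app.sock
	fmt.Println(parseDialTarget("localhost:50051"))          // tcp localhost:50051
}
```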
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
deleted file mode 100644
index 879df80..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Infof(format, args...)
-	}
-}
-
-func warningf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Warningf(format, args...)
-	}
-}
-
-func errorf(format string, args ...interface{}) {
-	if grpclog.V(logLevel) {
-		grpclog.Errorf(format, args...)
-	}
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go
new file mode 100644
index 0000000..42ed2b0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/logging.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc/grpclog"
+	internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+var logger = grpclog.Component("transport")
+
+func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
+}
+
+func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
+}
+
+func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
+	return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
+}
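
Editor's note: the prefix loggers above tag every line from one transport instance with a stable "[component %p]" marker so interleaved logs from many connections can be attributed. A hedged sketch of the same idea using only the standard log package (not gRPC's internal PrefixLogger):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

type serverTransport struct{ id int }

// loggerFor derives a per-instance logger whose prefix embeds the
// transport's pointer, mirroring prefixLoggerForServerTransport.
func loggerFor(t *serverTransport) *log.Logger {
	prefix := fmt.Sprintf("[server-transport %p] ", t)
	return log.New(os.Stdout, prefix, log.LstdFlags)
}

func main() {
	t1, t2 := &serverTransport{1}, &serverTransport{2}
	loggerFor(t1).Println("accepted stream 1")
	loggerFor(t2).Println("accepted stream 3")
}
```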
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 0000000..c11b527
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. Attribute of a resolver.Address.
+package networktype
+
+import (
+	"google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+	address.Attributes = address.Attributes.WithValue(key, networkType)
+	return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+	v := address.Attributes.Value(key)
+	if v == nil {
+		return "", false
+	}
+	return v.(string), true
+}
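
Editor's note: Set/Get above rely on an unexported key type inside an attributes bag, so other packages cannot read or clobber the entry. A self-contained sketch of that pattern, with a tiny map standing in for resolver.Address.Attributes:

```go
package main

import "fmt"

// keyType is unexported, preventing collisions across packages.
type keyType string

const networkTypeKey = keyType("example.networktype")

type address struct {
	addr  string
	attrs map[any]any
}

// setNetworkType returns a modified copy, mirroring how Set returns a
// copy of the resolver.Address rather than mutating it in place.
func setNetworkType(a address, nt string) address {
	m := make(map[any]any, len(a.attrs)+1)
	for k, v := range a.attrs {
		m[k] = v
	}
	m[networkTypeKey] = nt
	a.attrs = m
	return a
}

// networkType mirrors Get: value plus a presence flag.
func networkType(a address) (string, bool) {
	v, ok := a.attrs[networkTypeKey].(string)
	return v, ok
}

func main() {
	a := address{addr: "localhost:50051"}
	a = setNetworkType(a, "tcp4")
	fmt.Println(networkType(a)) // tcp4 true
}
```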
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 0000000..d773845
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,116 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"bufio"
+	"context"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+
+	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/proxyattributes"
+	"google.golang.org/grpc/resolver"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response
+// and stores those bytes in the buffer. bufConn wraps the original net.Conn
+// and the bufio.Reader to make sure we don't lose the bytes in the buffer.
+type bufConn struct {
+	net.Conn
+	r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+	return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+	auth := username + ":" + password
+	return base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) {
+	defer func() {
+		if err != nil {
+			conn.Close()
+		}
+	}()
+
+	req := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Host: opts.ConnectAddr},
+		Header: map[string][]string{"User-Agent": {grpcUA}},
+	}
+	if user := opts.User; user != nil {
+		u := user.Username()
+		p, _ := user.Password()
+		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+	}
+	if err := sendHTTPRequest(ctx, req, conn); err != nil {
+		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+
+	r := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(r, req)
+	if err != nil {
+		return nil, fmt.Errorf("reading server HTTP response: %v", err)
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		dump, err := httputil.DumpResponse(resp, true)
+		if err != nil {
+			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+		}
+		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+	}
+	// The buffer could contain extra bytes from the target server, so we can't
+	// discard it. However, in many cases where the server waits for the client
+	// to send the first message (e.g. when TLS is being used), the buffer will
+	// be empty, so we can avoid the overhead of reading through this buffer.
+	if r.Buffered() != 0 {
+		return &bufConn{Conn: conn, r: r}, nil
+	}
+	return conn, nil
+}
+
+// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake.
+func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) {
+	conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr)
+	if err != nil {
+		return nil, err
+	}
+	return doHTTPConnectHandshake(ctx, conn, grpcUA, opts)
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+	req = req.WithContext(ctx)
+	if err := req.Write(conn); err != nil {
+		return fmt.Errorf("failed to write the HTTP request: %v", err)
+	}
+	return nil
+}
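
Editor's note: the Proxy-Authorization value above is simply base64(user ":" pass) with a "Basic " prefix attached to the CONNECT request. A runnable sketch of building such a request the same way (the host and credentials are hypothetical):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"net/http"
	"net/url"
)

// basicAuth matches the helper above: RFC 7617 basic credentials.
func basicAuth(user, pass string) string {
	return base64.StdEncoding.EncodeToString([]byte(user + ":" + pass))
}

func main() {
	req := &http.Request{
		Method: http.MethodConnect,
		URL:    &url.URL{Host: "backend.example.com:443"}, // hypothetical target
		Header: http.Header{"User-Agent": {"grpc-go-example"}},
	}
	req.Header.Add("Proxy-Authorization", "Basic "+basicAuth("alice", "s3cret"))
	fmt.Println(req.Method, req.URL.Host)
	fmt.Println(req.Header.Get("Proxy-Authorization"))
}
```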
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
new file mode 100644
index 0000000..cf8da0b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+	"context"
+	"errors"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/grpc/mem"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// ServerStream implements streaming functionality for a gRPC server.
+type ServerStream struct {
+	*Stream // Embed for common stream functionality.
+
+	st      internalServerTransport
+	ctxDone <-chan struct{} // closed at the end of stream.  Cache of ctx.Done() (for performance)
+	// cancel is invoked at the end of stream to cancel ctx. It also stops the
+	// timer for monitoring the rpc deadline if configured.
+	cancel func()
+
+	// Holds compressor names passed in grpc-accept-encoding metadata from the
+	// client.
+	clientAdvertisedCompressors string
+	headerWireLength            int
+
+	// hdrMu protects outgoing header and trailer metadata.
+	hdrMu      sync.Mutex
+	header     metadata.MD // the outgoing header metadata.  Updated by WriteHeader.
+	headerSent atomic.Bool // atomically set when the headers are sent out.
+}
+
+// Read reads an n byte message from the input stream.
+func (s *ServerStream) Read(n int) (mem.BufferSlice, error) {
+	b, err := s.Stream.read(n)
+	if err == nil {
+		s.st.incrMsgRecv()
+	}
+	return b, err
+}
+
+// SendHeader sends the header metadata for the given stream.
+func (s *ServerStream) SendHeader(md metadata.MD) error {
+	return s.st.writeHeader(s, md)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+	return s.st.write(s, hdr, data, opts)
+}
+
+// WriteStatus sends the status of a stream to the client.  WriteStatus is
+// the final call made on a stream and always occurs.
+func (s *ServerStream) WriteStatus(st *status.Status) error {
+	return s.st.writeStatus(s, st)
+}
+
+// isHeaderSent indicates whether headers have been sent.
+func (s *ServerStream) isHeaderSent() bool {
+	return s.headerSent.Load()
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set.
+func (s *ServerStream) updateHeaderSent() bool {
+	return s.headerSent.Swap(true)
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *ServerStream) RecvCompress() string {
+	return s.recvCompress
+}
+
+// SendCompress returns the send compressor name.
+func (s *ServerStream) SendCompress() string {
+	return s.sendCompress
+}
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase.  See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *ServerStream) ContentSubtype() string {
+	return s.contentSubtype
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *ServerStream) SetSendCompress(name string) error {
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return errors.New("transport: set send compressor called after headers sent or stream done")
+	}
+
+	s.sendCompress = name
+	return nil
+}
+
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to gRPC layer.
+func (s *ServerStream) SetContext(ctx context.Context) {
+	s.ctx = ctx
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via grpc-accept-encoding header.
+func (s *ServerStream) ClientAdvertisedCompressors() []string {
+	values := strings.Split(s.clientAdvertisedCompressors, ",")
+	for i, v := range values {
+		values[i] = strings.TrimSpace(v)
+	}
+	return values
+}
+
+// Header returns the header metadata of the stream.  It returns the out header
+// after t.WriteHeader is called.  It does not block and must not be called
+// until after WriteHeader.
+func (s *ServerStream) Header() (metadata.MD, error) {
+	// Return the header in stream. It will be the out
+	// header after t.WriteHeader is called.
+	return s.header.Copy(), nil
+}
+
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire.
+func (s *ServerStream) HeaderWireLength() int {
+	return s.headerWireLength
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// This should not be called in parallel to other data writes.
+func (s *ServerStream) SetHeader(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.isHeaderSent() || s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.header = metadata.Join(s.header, md)
+	s.hdrMu.Unlock()
+	return nil
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times.
+// This should not be called parallel to other data writes.
+func (s *ServerStream) SetTrailer(md metadata.MD) error {
+	if md.Len() == 0 {
+		return nil
+	}
+	if s.getState() == streamDone {
+		return ErrIllegalHeaderWrite
+	}
+	s.hdrMu.Lock()
+	s.trailer = metadata.Join(s.trailer, md)
+	s.hdrMu.Unlock()
+	return nil
+}
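
Editor's note: SetHeader and SetTrailer merge with metadata.Join, so repeated calls accumulate values per key rather than overwriting. A small demonstration using the public metadata package (assumes the google.golang.org/grpc module is available):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	h := metadata.Pairs("x-stage", "one")
	// Join appends values key by key, just like a second SetHeader call.
	h = metadata.Join(h, metadata.Pairs("x-stage", "two", "x-extra", "yes"))
	fmt.Println(h.Get("x-stage")) // [one two]
	fmt.Println(h.Get("x-extra")) // [yes]
}
```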
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index bfab940..7dd53e8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,7 +22,6 @@
 package transport
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"fmt"
@@ -30,42 +29,26 @@
 	"net"
 	"sync"
 	"sync/atomic"
+	"time"
 
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 	"google.golang.org/grpc/tap"
 )
 
-type bufferPool struct {
-	pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
-	return &bufferPool{
-		pool: sync.Pool{
-			New: func() interface{} {
-				return new(bytes.Buffer)
-			},
-		},
-	}
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
-	return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
-	p.pool.Put(b)
-}
+const logLevel = 2
 
 // recvMsg represents the received msg from the transport. All transport
 // protocol specific info has been removed.
 type recvMsg struct {
-	buffer *bytes.Buffer
+	buffer mem.Buffer
 	// nil: received some data
 	// io.EOF: stream is completed. data is nil.
 	// other non-nil error: transport failure. data is nil.
@@ -73,10 +56,11 @@
 }
 
 // recvBuffer is an unbounded channel of recvMsg structs.
-// Note recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
-// recvBuffer is written to much more often than
-// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing "item"
+// interface. recvBuffer is written to much more often and using strict recvMsg
+// structs helps avoid allocation in "recvBuffer.put"
 type recvBuffer struct {
 	c       chan recvMsg
 	mu      sync.Mutex
@@ -94,6 +78,9 @@
 func (b *recvBuffer) put(r recvMsg) {
 	b.mu.Lock()
 	if b.err != nil {
+		// drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+		// will always return an error, making this buffer inaccessible.
+		r.buffer.Free()
 		b.mu.Unlock()
 		// An error had occurred earlier, don't accept more
 		// data or errors.
@@ -140,45 +127,70 @@
 	ctx         context.Context
 	ctxDone     <-chan struct{} // cache of ctx.Done() (for performance).
 	recv        *recvBuffer
-	last        *bytes.Buffer // Stores the remaining data in the previous calls.
+	last        mem.Buffer // Stores the remaining data in the previous calls.
 	err         error
-	freeBuffer  func(*bytes.Buffer)
 }
 
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
 	if r.err != nil {
 		return 0, r.err
 	}
 	if r.last != nil {
-		// Read remaining data left in last call.
-		copied, _ := r.last.Read(p)
-		if r.last.Len() == 0 {
-			r.freeBuffer(r.last)
-			r.last = nil
-		}
-		return copied, nil
+		n, r.last = mem.ReadUnsafe(header, r.last)
+		return n, nil
 	}
 	if r.closeStream != nil {
-		n, r.err = r.readClient(p)
+		n, r.err = r.readMessageHeaderClient(header)
 	} else {
-		n, r.err = r.read(p)
+		n, r.err = r.readMessageHeader(header)
 	}
 	return n, r.err
 }
 
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available in
+// recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.last != nil {
+		buf = r.last
+		if r.last.Len() > n {
+			buf, r.last = mem.SplitUnsafe(buf, n)
+		} else {
+			r.last = nil
+		}
+		return buf, nil
+	}
+	if r.closeStream != nil {
+		buf, r.err = r.readClient(n)
+	} else {
+		buf, r.err = r.read(n)
+	}
+	return buf, r.err
+}
+
+func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
 	select {
 	case <-r.ctxDone:
 		return 0, ContextErr(r.ctx.Err())
 	case m := <-r.recv.get():
-		return r.readAdditional(m, p)
+		return r.readMessageHeaderAdditional(m, header)
 	}
 }
 
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+	select {
+	case <-r.ctxDone:
+		return nil, ContextErr(r.ctx.Err())
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
 	// If the context is canceled, then closes the stream with nil metadata.
 	// closeStream writes its error parameter to r.recv as a recvMsg.
 	// r.readAdditional acts on that message and returns the necessary error.
@@ -199,25 +211,67 @@
 		// faster.
 		r.closeStream(ContextErr(r.ctx.Err()))
 		m := <-r.recv.get()
-		return r.readAdditional(m, p)
+		return r.readMessageHeaderAdditional(m, header)
 	case m := <-r.recv.get():
-		return r.readAdditional(m, p)
+		return r.readMessageHeaderAdditional(m, header)
 	}
 }
 
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+	// If the context is canceled, then closes the stream with nil metadata.
+	// closeStream writes its error parameter to r.recv as a recvMsg.
+	// r.readAdditional acts on that message and returns the necessary error.
+	select {
+	case <-r.ctxDone:
+		// Note that this adds the ctx error to the end of recv buffer, and
+		// reads from the head. This will delay the error until recv buffer is
+		// empty, thus will delay ctx cancellation in Recv().
+		//
+		// It's done this way to fix a race between ctx cancel and trailer. The
+		// race was, stream.Recv() may return ctx error if ctxDone wins the
+		// race, but stream.Trailer() may return a non-nil md because the stream
+		// was not marked as done when trailer is received. This closeStream
+		// call will mark stream as done, thus fix the race.
+		//
+		// TODO: delaying ctx error seems like an unnecessary side effect. What
+		// we really want is to mark the stream as done, and return ctx error
+		// faster.
+		r.closeStream(ContextErr(r.ctx.Err()))
+		m := <-r.recv.get()
+		return r.readAdditional(m, n)
+	case m := <-r.recv.get():
+		return r.readAdditional(m, n)
+	}
+}
+
+func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
 	r.recv.load()
 	if m.err != nil {
+		if m.buffer != nil {
+			m.buffer.Free()
+		}
 		return 0, m.err
 	}
-	copied, _ := m.buffer.Read(p)
-	if m.buffer.Len() == 0 {
-		r.freeBuffer(m.buffer)
-		r.last = nil
-	} else {
-		r.last = m.buffer
+
+	n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+	return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+	r.recv.load()
+	if m.err != nil {
+		if m.buffer != nil {
+			m.buffer.Free()
+		}
+		return nil, m.err
 	}
-	return copied, nil
+
+	if m.buffer.Len() > n {
+		m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
+	}
+
+	return m.buffer, nil
 }
 
 type streamState uint32
@@ -232,17 +286,12 @@
 // Stream represents an RPC in the transport layer.
 type Stream struct {
 	id           uint32
-	st           ServerTransport    // nil for client side Stream
-	ct           *http2Client       // nil for server side Stream
-	ctx          context.Context    // the associated context of the stream
-	cancel       context.CancelFunc // always nil for client side Stream
-	done         chan struct{}      // closed at the end of stream to unblock writers. On the client side.
-	ctxDone      <-chan struct{}    // same as done chan but for server side. Cache of ctx.Done() (for performance)
-	method       string             // the associated RPC method of the stream
+	ctx          context.Context // the associated context of the stream
+	method       string          // the associated RPC method of the stream
 	recvCompress string
 	sendCompress string
 	buf          *recvBuffer
-	trReader     io.Reader
+	trReader     *transportReader
 	fc           *inFlow
 	wq           *writeQuota
 
@@ -250,50 +299,13 @@
 	// is used to adjust flow control, if needed.
 	requestRead func(int)
 
-	headerChan       chan struct{} // closed to indicate the end of header metadata.
-	headerChanClosed uint32        // set when headerChan is closed. Used to avoid closing headerChan multiple times.
-	// headerValid indicates whether a valid header was received.  Only
-	// meaningful after headerChan is closed (always call waitOnHeader() before
-	// reading its value).  Not valid on server side.
-	headerValid bool
-
-	// hdrMu protects header and trailer metadata on the server-side.
-	hdrMu sync.Mutex
-	// On client side, header keeps the received header metadata.
-	//
-	// On server side, header keeps the header set by SetHeader(). The complete
-	// header will merged into this after t.WriteHeader() is called.
-	header  metadata.MD
-	trailer metadata.MD // the key-value map of trailer metadata.
-
-	noHeaders bool // set if the client never received headers (set only after the stream is done).
-
-	// On the server-side, headerSent is atomically set to 1 when the headers are sent out.
-	headerSent uint32
-
 	state streamState
 
-	// On client-side it is the status error received from the server.
-	// On server-side it is unused.
-	status *status.Status
-
-	bytesReceived uint32 // indicates whether any bytes have been received on this stream
-	unprocessed   uint32 // set if the server sends a refused stream or GOAWAY including this stream
-
 	// contentSubtype is the content-subtype for requests.
 	// this must be lowercase or the behavior is undefined.
 	contentSubtype string
-}
 
-// isHeaderSent is only valid on the server-side.
-func (s *Stream) isHeaderSent() bool {
-	return atomic.LoadUint32(&s.headerSent) == 1
-}
-
-// updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
-func (s *Stream) updateHeaderSent() bool {
-	return atomic.SwapUint32(&s.headerSent, 1) == 1
+	trailer metadata.MD // the key-value map of trailer metadata.
 }
 
 func (s *Stream) swapState(st streamState) streamState {
@@ -308,88 +320,12 @@
 	return streamState(atomic.LoadUint32((*uint32)(&s.state)))
 }
 
-func (s *Stream) waitOnHeader() {
-	if s.headerChan == nil {
-		// On the server headerChan is always nil since a stream originates
-		// only after having received headers.
-		return
-	}
-	select {
-	case <-s.ctx.Done():
-		// Close the stream to prevent headers/trailers from changing after
-		// this function returns.
-		s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
-		// headerChan could possibly not be closed yet if closeStream raced
-		// with operateHeaders; wait until it is closed explicitly here.
-		<-s.headerChan
-	case <-s.headerChan:
-	}
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is empty string if there is no compression applied.
-func (s *Stream) RecvCompress() string {
-	s.waitOnHeader()
-	return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm to the stream.
-func (s *Stream) SetSendCompress(str string) {
-	s.sendCompress = str
-}
-
-// Done returns a channel which is closed when it receives the final status
-// from the server.
-func (s *Stream) Done() <-chan struct{} {
-	return s.done
-}
-
-// Header returns the header metadata of the stream.
-//
-// On client side, it acquires the key-value pairs of header metadata once it is
-// available. It blocks until i) the metadata is ready or ii) there is no header
-// metadata or iii) the stream is canceled/expired.
-//
-// On server side, it returns the out header after t.WriteHeader is called.  It
-// does not block and must not be called until after WriteHeader.
-func (s *Stream) Header() (metadata.MD, error) {
-	if s.headerChan == nil {
-		// On server side, return the header in stream. It will be the out
-		// header after t.WriteHeader is called.
-		return s.header.Copy(), nil
-	}
-	s.waitOnHeader()
-	if !s.headerValid {
-		return nil, s.status.Err()
-	}
-	return s.header.Copy(), nil
-}
-
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only.  If the stream ends
-// before headers are received, returns true, nil.  Client-side only.
-func (s *Stream) TrailersOnly() bool {
-	s.waitOnHeader()
-	return s.noHeaders
-}
-
-// Trailer returns the cached trailer metedata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD.
 // It can be safely read only after stream has ended that is either read
 // or write have returned io.EOF.
 func (s *Stream) Trailer() metadata.MD {
-	c := s.trailer.Copy()
-	return c
-}
-
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase.  See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *Stream) ContentSubtype() string {
-	return s.contentSubtype
+	return s.trailer.Copy()
 }
 
 // Context returns the context of the stream.
@@ -402,97 +338,99 @@
 	return s.method
 }
 
-// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
-func (s *Stream) Status() *status.Status {
-	return s.status
-}
-
-// SetHeader sets the header metadata. This can be called multiple times.
-// Server side only.
-// This should not be called in parallel to other data writes.
-func (s *Stream) SetHeader(md metadata.MD) error {
-	if md.Len() == 0 {
-		return nil
-	}
-	if s.isHeaderSent() || s.getState() == streamDone {
-		return ErrIllegalHeaderWrite
-	}
-	s.hdrMu.Lock()
-	s.header = metadata.Join(s.header, md)
-	s.hdrMu.Unlock()
-	return nil
-}
-
-// SendHeader sends the given header metadata. The given metadata is
-// combined with any metadata set by previous calls to SetHeader and
-// then written to the transport stream.
-func (s *Stream) SendHeader(md metadata.MD) error {
-	return s.st.WriteHeader(s, md)
-}
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can be called multiple times. Server side only.
-// This should not be called parallel to other data writes.
-func (s *Stream) SetTrailer(md metadata.MD) error {
-	if md.Len() == 0 {
-		return nil
-	}
-	if s.getState() == streamDone {
-		return ErrIllegalHeaderWrite
-	}
-	s.hdrMu.Lock()
-	s.trailer = metadata.Join(s.trailer, md)
-	s.hdrMu.Unlock()
-	return nil
-}
-
 func (s *Stream) write(m recvMsg) {
 	s.buf.put(m)
 }
 
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+// ReadMessageHeader reads data into the provided header slice from the stream.
+// It first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered with
+// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an
+// unexpected end of the stream. The method returns any error encountered during
+// the read process or nil if the header was successfully read.
+func (s *Stream) ReadMessageHeader(header []byte) (err error) {
 	// Don't request a read if there was an error earlier
-	if er := s.trReader.(*transportReader).er; er != nil {
-		return 0, er
+	if er := s.trReader.er; er != nil {
+		return er
 	}
-	s.requestRead(len(p))
-	return io.ReadFull(s.trReader, p)
+	s.requestRead(len(header))
+	for len(header) != 0 {
+		n, err := s.trReader.ReadMessageHeader(header)
+		header = header[n:]
+		if len(header) == 0 {
+			err = nil
+		}
+		if err != nil {
+			if n > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return err
+		}
+	}
+	return nil
 }
 
-// tranportReader reads all the data available for this Stream from the transport and
+// Read reads n bytes from the wire for this stream.
+func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
+	// Don't request a read if there was an error earlier
+	if er := s.trReader.er; er != nil {
+		return nil, er
+	}
+	s.requestRead(n)
+	for n != 0 {
+		buf, err := s.trReader.Read(n)
+		var bufLen int
+		if buf != nil {
+			bufLen = buf.Len()
+		}
+		n -= bufLen
+		if n == 0 {
+			err = nil
+		}
+		if err != nil {
+			if bufLen > 0 && err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			data.Free()
+			return nil, err
+		}
+		data = append(data, buf)
+	}
+	return data, nil
+}
+
+// transportReader reads all the data available for this Stream from the transport and
 // passes them into the decoder, which converts them into a gRPC message stream.
 // The error is io.EOF when the stream is done or another non-nil error if
 // the stream broke.
 type transportReader struct {
-	reader io.Reader
+	reader *recvBufferReader
 	// The handler to control the window update procedure for both this
 	// particular stream and the associated transport.
 	windowHandler func(int)
 	er            error
 }
 
-func (t *transportReader) Read(p []byte) (n int, err error) {
-	n, err = t.reader.Read(p)
+func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
+	n, err := t.reader.ReadMessageHeader(header)
 	if err != nil {
 		t.er = err
-		return
+		return 0, err
 	}
 	t.windowHandler(n)
-	return
+	return n, nil
 }
 
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
-	return atomic.LoadUint32(&s.bytesReceived) == 1
-}
-
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *Stream) Unprocessed() bool {
-	return atomic.LoadUint32(&s.unprocessed) == 1
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+	buf, err := t.reader.Read(n)
+	if err != nil {
+		t.er = err
+		return buf, err
+	}
+	t.windowHandler(buf.Len())
+	return buf, nil
 }
 
 // GoString is implemented by Stream so context.String() won't
@@ -513,24 +451,22 @@
 // ServerConfig consists of all the configurations to establish a server transport.
 type ServerConfig struct {
 	MaxStreams            uint32
-	AuthInfo              credentials.AuthInfo
+	ConnectionTimeout     time.Duration
+	Credentials           credentials.TransportCredentials
 	InTapHandle           tap.ServerInHandle
-	StatsHandler          stats.Handler
+	StatsHandlers         []stats.Handler
 	KeepaliveParams       keepalive.ServerParameters
 	KeepalivePolicy       keepalive.EnforcementPolicy
 	InitialWindowSize     int32
 	InitialConnWindowSize int32
 	WriteBufferSize       int
 	ReadBufferSize        int
-	ChannelzParentID      int64
+	SharedWriteBuffer     bool
+	ChannelzParent        *channelz.Server
 	MaxHeaderListSize     *uint32
 	HeaderTableSize       *uint32
-}
-
-// NewServerTransport creates a ServerTransport with conn or non-nil error
-// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
-	return newHTTP2Server(conn, config)
+	BufferPool            mem.BufferPool
+	StaticWindowSize      bool
 }
 
 // ConnectOptions covers all relevant options for communicating with the server.
@@ -551,8 +487,8 @@
 	CredsBundle credentials.Bundle
 	// KeepaliveParams stores the keepalive parameters.
 	KeepaliveParams keepalive.ClientParameters
-	// StatsHandler stores the handler for stats.
-	StatsHandler stats.Handler
+	// StatsHandlers stores the handlers for stats.
+	StatsHandlers []stats.Handler
 	// InitialWindowSize sets the initial window size for a stream.
 	InitialWindowSize int32
 	// InitialConnWindowSize sets the initial window size for a connection.
@@ -561,28 +497,21 @@
 	WriteBufferSize int
 	// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
 	ReadBufferSize int
-	// ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
-	ChannelzParentID int64
+	// SharedWriteBuffer indicates whether connections should reuse the write buffer
+	SharedWriteBuffer bool
+	// ChannelzParent sets the addrConn id which initiated the creation of this client transport.
+	ChannelzParent *channelz.SubChannel
 	// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
 	MaxHeaderListSize *uint32
+	// The mem.BufferPool to use when reading/writing to the wire.
+	BufferPool mem.BufferPool
+	// StaticWindowSize controls whether dynamic window sizing is enabled.
+	StaticWindowSize bool
 }
 
-// TargetInfo contains the information of the target such as network address and metadata.
-type TargetInfo struct {
-	Addr      string
-	Metadata  interface{}
-	Authority string
-}
-
-// NewClientTransport establishes the transport with the required ConnectOptions
-// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
-	return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
-}
-
-// Options provides additional hints and information for message
+// WriteOptions provides additional hints and information for message
 // transmission.
-type Options struct {
+type WriteOptions struct {
 	// Last indicates whether this write is the last piece for
 	// this stream.
 	Last bool
@@ -612,6 +541,13 @@
 	ContentSubtype string
 
 	PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+	DoneFunc func() // called when the stream is finished
+
+	// Authority is used to explicitly override the `:authority` header. If set,
+	// this value takes precedence over the Host field and will be used as the
+	// value for the `:authority` header.
+	Authority string
 }
 
 // ClientTransport is the common interface for all gRPC client-side transport
@@ -620,7 +556,7 @@
 	// Close tears down this transport. Once it returns, the transport
 	// should not be accessed any more. The caller must make sure this
 	// is called only once.
-	Close() error
+	Close(err error)
 
 	// GracefulClose starts to tear down the transport: the transport will stop
 	// accepting new RPCs and NewStream will return error. Once all streams are
@@ -629,18 +565,8 @@
 	// It does not block.
 	GracefulClose()
 
-	// Write sends the data for the given stream. A nil stream indicates
-	// the write is to be performed on the transport as a whole.
-	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
 	// NewStream creates a Stream for an RPC.
-	NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
-
-	// CloseStream clears the footprint of a stream when the stream is
-	// not needed any more. The err indicates the error incurred when
-	// CloseStream is called. Must be called when a stream is finished
-	// unless the associated transport is closing.
-	CloseStream(stream *Stream, err error)
+	NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
 
 	// Error returns a channel that is closed when some I/O error
 	// happens. Typically the caller should have a goroutine to monitor
@@ -654,17 +580,12 @@
 	// HTTP/2).
 	GoAway() <-chan struct{}
 
-	// GetGoAwayReason returns the reason why GoAway frame was received.
-	GetGoAwayReason() GoAwayReason
+	// GetGoAwayReason returns the reason why GoAway frame was received, along
+	// with a human readable string with debug info.
+	GetGoAwayReason() (GoAwayReason, string)
 
 	// RemoteAddr returns the remote network address.
 	RemoteAddr() net.Addr
-
-	// IncrMsgSent increments the number of message sent through this transport.
-	IncrMsgSent()
-
-	// IncrMsgRecv increments the number of message received through this transport.
-	IncrMsgRecv()
 }
 
 // ServerTransport is the common interface for all gRPC server-side transport
@@ -674,40 +595,30 @@
 // Write methods for a given Stream will be called serially.
 type ServerTransport interface {
 	// HandleStreams receives incoming streams using the given handler.
-	HandleStreams(func(*Stream), func(context.Context, string) context.Context)
-
-	// WriteHeader sends the header metadata for the given stream.
-	// WriteHeader may not be called on all streams.
-	WriteHeader(s *Stream, md metadata.MD) error
-
-	// Write sends the data for the given stream.
-	// Write may not be called on all streams.
-	Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
-	// WriteStatus sends the status of a stream to the client.  WriteStatus is
-	// the final call made on a stream and always occurs.
-	WriteStatus(s *Stream, st *status.Status) error
+	HandleStreams(context.Context, func(*ServerStream))
 
 	// Close tears down the transport. Once it is called, the transport
 	// should not be accessed any more. All the pending streams and their
 	// handlers will be terminated asynchronously.
-	Close() error
+	Close(err error)
 
-	// RemoteAddr returns the remote network address.
-	RemoteAddr() net.Addr
+	// Peer returns the peer of the server transport.
+	Peer() *peer.Peer
 
 	// Drain notifies the client this ServerTransport stops accepting new RPCs.
-	Drain()
+	Drain(debugData string)
+}
 
-	// IncrMsgSent increments the number of message sent through this transport.
-	IncrMsgSent()
-
-	// IncrMsgRecv increments the number of message received through this transport.
-	IncrMsgRecv()
+type internalServerTransport interface {
+	ServerTransport
+	writeHeader(s *ServerStream, md metadata.MD) error
+	write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
+	writeStatus(s *ServerStream, st *status.Status) error
+	incrMsgRecv()
 }
 
 // connectionErrorf creates a ConnectionError with the specified error description.
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
 	return ConnectionError{
 		Desc: fmt.Sprintf(format, a...),
 		temp: temp,
@@ -742,6 +653,12 @@
 	return e.err
 }
 
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+	return e.err
+}
+
 var (
 	// ErrConnClosing indicates that the transport is closing.
 	ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
@@ -749,7 +666,7 @@
 	// connection is draining. This could be caused by goaway or balancer
 	// removing the address.
 	errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
-	// errStreamDone is returned from write at the client side to indiacte application
+	// errStreamDone is returned from write at the client side to indicate application
 	// layer of an error.
 	errStreamDone = errors.New("the stream is done")
 	// StatusGoAway indicates that the server sent a GOAWAY that included this
@@ -771,30 +688,6 @@
 	GoAwayTooManyPings GoAwayReason = 2
 )
 
-// channelzData is used to store channelz related data for http2Client and http2Server.
-// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
-	kpCount int64
-	// The number of streams that have started, including already finished ones.
-	streamsStarted int64
-	// Client side: The number of streams that have ended successfully by receiving
-	// EoS bit set frame from server.
-	// Server side: The number of streams that have ended successfully by sending
-	// frame with EoS bit set.
-	streamsSucceeded int64
-	streamsFailed    int64
-	// lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
-	// instead of time.Time since it's more costly to atomically update time.Time variable than int64
-	// variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
-	lastStreamCreatedTime int64
-	msgSent               int64
-	msgRecv               int64
-	lastMsgSentTime       int64
-	lastMsgRecvTime       int64
-}
-
 // ContextErr converts the error from context package into a status error.
 func ContextErr(err error) error {
 	switch err {
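
The Unwrap method added above lets a ConnectionError participate in the standard errors chain. A minimal in-package sketch of what this enables (the test name and the wrapped net.ErrClosed are illustrative; this package is internal to gRPC, so the snippet only compiles inside it):

package transport

import (
	"errors"
	"net"
	"testing"
)

// TestConnectionErrorUnwrap sketches the effect of the new Unwrap method:
// errors.Is can now see through a ConnectionError to its original cause.
func TestConnectionErrorUnwrap(t *testing.T) {
	cerr := connectionErrorf(true, net.ErrClosed, "write failed: %v", net.ErrClosed)
	if !errors.Is(cerr, net.ErrClosed) {
		t.Fatal("expected ConnectionError to unwrap to net.ErrClosed")
	}
}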
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5..eb42b19 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@
 	// After a duration of this time if the client doesn't see any activity it
 	// pings the server to see if the transport is still alive.
 	// If set below 10s, a minimum value of 10s will be used instead.
-	Time time.Duration // The current default value is infinity.
+	//
+	// Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+	// minutes (which means the client shouldn't ping more frequently than every
+	// 5 minutes).
+	//
+	// Though not ideal, it's not a strong requirement for Time to be less than
+	// EnforcementPolicy.MinTime.  Time will automatically double if the server
+	// disconnects due to its enforcement policy.
+	//
+	// For more details, see
+	// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+	Time time.Duration
 	// After having pinged for keepalive check, the client waits for a duration
 	// of Timeout and if no activity is seen even after that the connection is
 	// closed.
-	Timeout time.Duration // The current default value is 20 seconds.
+	//
+	// If keepalive is enabled, and this value is not explicitly set, the default
+	// is 20 seconds.
+	Timeout time.Duration
 	// If true, client sends keepalive pings even with no active RPCs. If false,
 	// when there are no active RPCs, Time and Timeout will be ignored and no
 	// keepalive pings will be sent.
-	PermitWithoutStream bool // false by default.
+	PermitWithoutStream bool
 }
 
 // ServerParameters is used to set keepalive and max-age parameters on the
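
The rewritten comments above spell out the client-side keepalive contract. A minimal sketch of how these parameters are typically wired into a client, assuming a recent grpc-go API (the target address and durations are illustrative):

package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kacp := keepalive.ClientParameters{
		Time:                30 * time.Second, // ping after 30s of inactivity (values below 10s are raised to 10s)
		Timeout:             20 * time.Second, // wait 20s for a ping ack before closing the connection
		PermitWithoutStream: true,             // keep pinging even with no active RPCs
	}
	// Note: the server's EnforcementPolicy.MinTime (5 minutes by default)
	// still governs how often pings are tolerated.
	conn, err := grpc.NewClient("localhost:50051", // illustrative target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithKeepaliveParams(kacp),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}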
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 0000000..c37c58c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"sort"
+	"sync"
+
+	"google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+	// Get returns a buffer with the specified length from the pool.
+	Get(length int) *[]byte
+
+	// Put returns a buffer to the pool.
+	Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+	256,
+	4 << 10,  // 4KB (go page size)
+	16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+	32 << 10, // 32KB (default buffer size for io.Copy)
+	1 << 20,  // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+	defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+	internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+		defaultBufferPool = pool
+	}
+
+	internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+		bufferPoolingThreshold = threshold
+	}
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewTieredBufferPool that uses a set of default sizes optimized
+// for expected workloads.
+func DefaultBufferPool() BufferPool {
+	return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+	sort.Ints(poolSizes)
+	pools := make([]*sizedBufferPool, len(poolSizes))
+	for i, s := range poolSizes {
+		pools[i] = newSizedBufferPool(s)
+	}
+	return &tieredBufferPool{
+		sizedPools: pools,
+	}
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+	sizedPools   []*sizedBufferPool
+	fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+	return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+	p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+	poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+		return p.sizedPools[i].defaultSize >= size
+	})
+
+	if poolIdx == len(p.sizedPools) {
+		return &p.fallbackPool
+	}
+
+	return p.sizedPools[poolIdx]
+}
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16KB, and a sizedBufferPool can be configured to only return buffers with
+// a capacity of 16KB. Note, however, that it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own; rather,
+// it is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+	pool        sync.Pool
+	defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+	buf := p.pool.Get().(*[]byte)
+	b := *buf
+	clear(b[:cap(b)])
+	*buf = b[:size]
+	return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+	if cap(*buf) < p.defaultSize {
+		// Ignore buffers that are too small to fit in the pool. Otherwise, when
+		// Get is called it will panic as it tries to index outside the bounds
+		// of the buffer.
+		return
+	}
+	p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+	return &sizedBufferPool{
+		pool: sync.Pool{
+			New: func() any {
+				buf := make([]byte, size)
+				return &buf
+			},
+		},
+		defaultSize: size,
+	}
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+	pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+	bs, ok := p.pool.Get().(*[]byte)
+	if ok && cap(*bs) >= size {
+		*bs = (*bs)[:size]
+		return bs
+	}
+
+	// A buffer was pulled from the pool, but it is too small. Put it back in
+	// the pool and create one large enough.
+	if ok {
+		p.pool.Put(bs)
+	}
+
+	b := make([]byte, size)
+	return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+	p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with the specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+	b := make([]byte, length)
+	return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
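
A short sketch of the pool API defined above, under the assumption that the mem package is used directly (it is experimental; the tier sizes below are illustrative):

package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Two illustrative size classes; requests larger than the largest class
	// fall through to the pool's internal simpleBufferPool.
	pool := mem.NewTieredBufferPool(4<<10, 32<<10)

	buf := pool.Get(10 << 10)         // served by the 32KB tier
	fmt.Println(len(*buf), cap(*buf)) // 10240 32768
	pool.Put(buf)                     // return the buffer for reuse

	// The package-level default pool works the same way.
	b := mem.DefaultBufferPool().Get(512)
	mem.DefaultBufferPool().Put(b)
}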
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 0000000..af510d2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,292 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+	"io"
+)
+
+const (
+	// 32 KiB is what io.Copy uses.
+	readAllBufSize = 32 * 1024
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers, such as Ref,
+// Len, or CopyTo, will panic if any underlying buffer has already been freed.
+// It is recommended not to interact with the underlying buffers directly;
+// rather, such interactions should be mediated through the various methods on
+// this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+	var length int
+	for _, b := range s {
+		length += b.Len()
+	}
+	return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+	for _, b := range s {
+		b.Ref()
+	}
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+	for _, b := range s {
+		b.Free()
+	}
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+	off := 0
+	for _, b := range s {
+		off += copy(dst[off:], b.ReadOnlyData())
+	}
+	return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+	l := s.Len()
+	if l == 0 {
+		return nil
+	}
+	out := make([]byte, l)
+	s.CopyTo(out)
+	return out
+}
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice contains only one Buffer, this
+// function simply increases its refcount before returning it. Freeing this
+// buffer won't release the data until the BufferSlice itself is released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+	if len(s) == 1 {
+		s[0].Ref()
+		return s[0]
+	}
+	sLen := s.Len()
+	if sLen == 0 {
+		return emptyBuffer{}
+	}
+	buf := pool.Get(sLen)
+	s.CopyTo(*buf)
+	return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+	s.Ref()
+	return &sliceReader{
+		data: s,
+		len:  s.Len(),
+	}
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to
+// interoperate with other parts of the system. It also provides a convenience
+// method, Remaining(), which returns the number of unread bytes in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+	io.Reader
+	io.ByteReader
+	// Close frees the underlying BufferSlice and never returns an error. Subsequent
+	// calls to Read will return (0, io.EOF).
+	Close() error
+	// Remaining returns the number of unread bytes remaining in the slice.
+	Remaining() int
+	// Reset frees the currently held buffer slice and starts reading from the
+	// provided slice. This allows reusing the reader object.
+	Reset(s BufferSlice)
+}
+
+type sliceReader struct {
+	data BufferSlice
+	len  int
+	// The index into data[0].ReadOnlyData().
+	bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+	return r.len
+}
+
+func (r *sliceReader) Reset(s BufferSlice) {
+	r.data.Free()
+	s.Ref()
+	r.data = s
+	r.len = s.Len()
+	r.bufferIdx = 0
+}
+
+func (r *sliceReader) Close() error {
+	r.data.Free()
+	r.data = nil
+	r.len = 0
+	return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+	if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+		return false
+	}
+
+	r.data[0].Free()
+	r.data = r.data[1:]
+	r.bufferIdx = 0
+	return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	for len(buf) != 0 && r.len != 0 {
+		// Copy as much as possible from the first Buffer in the slice into the
+		// given byte slice.
+		data := r.data[0].ReadOnlyData()
+		copied := copy(buf, data[r.bufferIdx:])
+		r.len -= copied       // Reduce len by the number of bytes copied.
+		r.bufferIdx += copied // Increment the buffer index.
+		n += copied           // Increment the total number of bytes read.
+		buf = buf[copied:]    // Shrink the given byte slice.
+
+		// If we have copied all the data from the first Buffer, free it and advance to
+		// the next in the slice.
+		r.freeFirstBufferIfEmpty()
+	}
+
+	return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+	if r.len == 0 {
+		return 0, io.EOF
+	}
+
+	// There may be any number of empty buffers in the slice, clear them all until a
+	// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+	for r.freeFirstBufferIfEmpty() {
+	}
+
+	b := r.data[0].ReadOnlyData()[r.bufferIdx]
+	r.len--
+	r.bufferIdx++
+	// Free the first buffer in the slice if the last byte was read
+	r.freeFirstBufferIfEmpty()
+	return b, nil
+}
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+	buffers *BufferSlice
+	pool    BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+	b := Copy(p, w.pool)
+	*w.buffers = append(*w.buffers, b)
+	return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is
+// added to the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+	return &writer{buffers: buffers, pool: pool}
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+//
+// Important: A failed call returns a non-nil error and may also return
+// partially read buffers. It is the responsibility of the caller to free the
+// BufferSlice returned, or its memory will not be reused.
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
+	var result BufferSlice
+	if wt, ok := r.(io.WriterTo); ok {
+		// This is more efficient since wt knows the size of the chunks it wants
+		// to write and, hence, we can allocate buffers of an optimal size to fit
+		// them. E.g. it might be a single big chunk that we would otherwise chop
+		// into pieces.
+		w := NewWriter(&result, pool)
+		_, err := wt.WriteTo(w)
+		return result, err
+	}
+nextBuffer:
+	for {
+		buf := pool.Get(readAllBufSize)
+		// We asked for 32KiB but may have been given a bigger buffer.
+		// Use all of it if that's the case.
+		*buf = (*buf)[:cap(*buf)]
+		usedCap := 0
+		for {
+			n, err := r.Read((*buf)[usedCap:])
+			usedCap += n
+			if err != nil {
+				if usedCap == 0 {
+					// Nothing in this buf, put it back
+					pool.Put(buf)
+				} else {
+					*buf = (*buf)[:usedCap]
+					result = append(result, NewBuffer(buf, pool))
+				}
+				if err == io.EOF {
+					err = nil
+				}
+				return result, err
+			}
+			if len(*buf) == usedCap {
+				result = append(result, NewBuffer(buf, pool))
+				continue nextBuffer
+			}
+		}
+	}
+}
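
A minimal sketch of the reader-side API above: ReadAll fills pooled buffers from any io.Reader, and Reader consumes a BufferSlice while freeing buffers as they are exhausted (the input string is illustrative):

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/mem"
)

func main() {
	data, err := mem.ReadAll(strings.NewReader("hello, world"), mem.DefaultBufferPool())
	if err != nil {
		panic(err)
	}
	fmt.Println(data.Len()) // 12: total bytes across all buffers

	r := data.Reader() // takes its own references to each buffer
	b := make([]byte, 5)
	n, _ := r.Read(b)
	fmt.Println(n, string(b[:n]), r.Remaining()) // 5 hello 7
	r.Close() // frees the reader's remaining references

	data.Free() // release the original references
}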
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 0000000..ecbf0b9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,268 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+	// ReadOnlyData returns the underlying byte slice. Note that it is undefined
+	// behavior to modify the contents of this slice in any way.
+	ReadOnlyData() []byte
+	// Ref increases the reference counter for this Buffer.
+	Ref()
+	// Free decrements this Buffer's reference counter and frees the underlying
+	// byte slice if the counter reaches 0 as a result of this call.
+	Free()
+	// Len returns the Buffer's size.
+	Len() int
+
+	split(n int) (left, right Buffer)
+	read(buf []byte) (int, Buffer)
+}
+
+var (
+	bufferPoolingThreshold = 1 << 10
+
+	bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+	refObjectPool    = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
+func IsBelowBufferPoolingThreshold(size int) bool {
+	return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+	origData *[]byte
+	data     []byte
+	refs     *atomic.Int32
+	pool     BufferPool
+}
+
+func newBuffer() *buffer {
+	return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+	// Use the buffer's capacity instead of the length, otherwise buffers may
+	// not be reused under certain conditions. For example, if a large buffer
+	// is acquired from the pool, but fewer bytes than the buffering threshold
+	// are written to it, the buffer will not be returned to the pool.
+	if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
+		return (SliceBuffer)(*data)
+	}
+	b := newBuffer()
+	b.origData = data
+	b.data = *data
+	b.pool = pool
+	b.refs = refObjectPool.Get().(*atomic.Int32)
+	b.refs.Add(1)
+	return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+	if IsBelowBufferPoolingThreshold(len(data)) {
+		buf := make(SliceBuffer, len(data))
+		copy(buf, data)
+		return buf
+	}
+
+	buf := pool.Get(len(data))
+	copy(*buf, data)
+	return NewBuffer(buf, pool)
+}
+
+func (b *buffer) ReadOnlyData() []byte {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+	return b.data
+}
+
+func (b *buffer) Ref() {
+	if b.refs == nil {
+		panic("Cannot ref freed buffer")
+	}
+	b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+	if b.refs == nil {
+		panic("Cannot free freed buffer")
+	}
+
+	refs := b.refs.Add(-1)
+	switch {
+	case refs > 0:
+		return
+	case refs == 0:
+		if b.pool != nil {
+			b.pool.Put(b.origData)
+		}
+
+		refObjectPool.Put(b.refs)
+		b.origData = nil
+		b.data = nil
+		b.refs = nil
+		b.pool = nil
+		bufferObjectPool.Put(b)
+	default:
+		panic("Cannot free freed buffer")
+	}
+}
+
+func (b *buffer) Len() int {
+	return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+	if b.refs == nil {
+		panic("Cannot split freed buffer")
+	}
+
+	b.refs.Add(1)
+	split := newBuffer()
+	split.origData = b.origData
+	split.data = b.data[n:]
+	split.refs = b.refs
+	split.pool = b.pool
+
+	b.data = b.data[:n]
+
+	return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+	if b.refs == nil {
+		panic("Cannot read freed buffer")
+	}
+
+	n := copy(buf, b.data)
+	if n == len(b.data) {
+		b.Free()
+		return n, nil
+	}
+
+	b.data = b.data[n:]
+	return n, b
+}
+
+func (b *buffer) String() string {
+	return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+	return buf.read(dst)
+}
+
+// SplitUnsafe modifies the receiver to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+	return buf.split(n)
+}
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+	return nil
+}
+
+func (e emptyBuffer) Ref()  {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+	return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+	return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+	return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+	return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+	n := copy(buf, s)
+	if n == len(s) {
+		return n, nil
+	}
+	return n, s[n:]
+}
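
The reference-counting contract above can be exercised directly; a sketch assuming the experimental mem package (payload sizes are illustrative):

package main

import (
	"google.golang.org/grpc/mem"
)

func main() {
	pool := mem.DefaultBufferPool()

	// Small payloads (at or below the 1KB pooling threshold) come back as
	// plain SliceBuffers whose Ref/Free are no-ops.
	small := mem.Copy([]byte("abc"), pool)
	small.Free() // safe, does nothing

	// Larger payloads are reference counted; the backing []byte returns to
	// the pool only after every reference has been freed.
	big := mem.Copy(make([]byte, 8<<10), pool)
	big.Ref()  // second logical owner
	big.Free() // first owner done; data still alive
	big.Free() // last reference; buffer goes back to the pool
	// Any further access to big would panic ("Cannot free freed buffer").
}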
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index cf6d1b9..d2e1525 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -25,8 +25,14 @@
 	"context"
 	"fmt"
 	"strings"
+
+	"google.golang.org/grpc/internal"
 )
 
+func init() {
+	internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
 // DecodeKeyValue returns k, v, nil.
 //
 // Deprecated: use k and v directly instead.
@@ -41,16 +47,17 @@
 // New creates an MD from a given key-value map.
 //
 // Only the following ASCII characters are allowed in keys:
-//  - digits: 0-9
-//  - uppercase letters: A-Z (normalized to lower)
-//  - lowercase letters: a-z
-//  - special characters: -_.
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
 // Uppercase letters are automatically converted to lowercase.
 //
 // Keys beginning with "grpc-" are reserved for grpc-internal use only and may
 // result in errors if set in metadata.
 func New(m map[string]string) MD {
-	md := MD{}
+	md := make(MD, len(m))
 	for k, val := range m {
 		key := strings.ToLower(k)
 		md[key] = append(md[key], val)
@@ -62,10 +69,11 @@
 // Pairs panics if len(kv) is odd.
 //
 // Only the following ASCII characters are allowed in keys:
-//  - digits: 0-9
-//  - uppercase letters: A-Z (normalized to lower)
-//  - lowercase letters: a-z
-//  - special characters: -_.
+//   - digits: 0-9
+//   - uppercase letters: A-Z (normalized to lower)
+//   - lowercase letters: a-z
+//   - special characters: -_.
+//
 // Uppercase letters are automatically converted to lowercase.
 //
 // Keys beginning with "grpc-" are reserved for grpc-internal use only and may
@@ -74,14 +82,10 @@
 	if len(kv)%2 == 1 {
 		panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
 	}
-	md := MD{}
-	var key string
-	for i, s := range kv {
-		if i%2 == 0 {
-			key = strings.ToLower(s)
-			continue
-		}
-		md[key] = append(md[key], s)
+	md := make(MD, len(kv)/2)
+	for i := 0; i < len(kv); i += 2 {
+		key := strings.ToLower(kv[i])
+		md[key] = append(md[key], kv[i+1])
 	}
 	return md
 }
@@ -93,16 +97,24 @@
 
 // Copy returns a copy of md.
 func (md MD) Copy() MD {
-	return Join(md)
+	out := make(MD, len(md))
+	for k, v := range md {
+		out[k] = copyOf(v)
+	}
+	return out
 }
 
 // Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
 func (md MD) Get(k string) []string {
 	k = strings.ToLower(k)
 	return md[k]
 }
 
 // Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
 func (md MD) Set(k string, vals ...string) {
 	if len(vals) == 0 {
 		return
@@ -111,7 +123,10 @@
 	md[k] = vals
 }
 
-// Append adds the values to key k, not overwriting what was already stored at that key.
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
 func (md MD) Append(k string, vals ...string) {
 	if len(vals) == 0 {
 		return
@@ -120,9 +135,17 @@
 	md[k] = append(md[k], vals...)
 }
 
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+	k = strings.ToLower(k)
+	delete(md, k)
+}
+
 // Join joins any number of mds into a single MD.
-// The order of values for each key is determined by the order in which
-// the mds containing those values are presented to Join.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
 func Join(mds ...MD) MD {
 	out := MD{}
 	for _, md := range mds {
@@ -136,21 +159,23 @@
 type mdIncomingKey struct{}
 type mdOutgoingKey struct{}
 
-// NewIncomingContext creates a new context with incoming md attached.
+// NewIncomingContext creates a new context with incoming md attached. md must
+// not be modified after calling this function.
 func NewIncomingContext(ctx context.Context, md MD) context.Context {
 	return context.WithValue(ctx, mdIncomingKey{}, md)
 }
 
 // NewOutgoingContext creates a new context with outgoing md attached. If used
 // in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata.
+// overwrite any previously-appended metadata. md must not be modified after
+// calling this function.
 func NewOutgoingContext(ctx context.Context, md MD) context.Context {
 	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
 }
 
 // AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the
-// documentation of Pairs for a description of kv.
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
 func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
 	if len(kv)%2 == 1 {
 		panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
@@ -158,26 +183,69 @@
 	md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
 	added := make([][]string, len(md.added)+1)
 	copy(added, md.added)
-	added[len(added)-1] = make([]string, len(kv))
-	copy(added[len(added)-1], kv)
+	kvCopy := make([]string, 0, len(kv))
+	for i := 0; i < len(kv); i += 2 {
+		kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
+	}
+	added[len(added)-1] = kvCopy
 	return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
 }
 
-// FromIncomingContext returns the incoming metadata in ctx if it exists.  The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
-	md, ok = ctx.Value(mdIncomingKey{}).(MD)
-	return
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil, false
+	}
+	out := make(MD, len(md))
+	for k, v := range md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	return out, true
 }
 
-// FromOutgoingContextRaw returns the un-merged, intermediary contents
-// of rawMD. Remember to perform strings.ToLower on the keys. The returned
-// MD should not be modified. Writing to it may cause races. Modification
-// should be made to copies of the returned MD.
+// ValueFromIncomingContext returns the metadata value corresponding to the
+// metadata key from the incoming metadata, if it exists. Keys are matched in a
+// case-insensitive manner.
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
+	md, ok := ctx.Value(mdIncomingKey{}).(MD)
+	if !ok {
+		return nil
+	}
+
+	if v, ok := md[key]; ok {
+		return copyOf(v)
+	}
+	for k, v := range md {
+		// Case insensitive comparison: MD is a map, and there's no guarantee
+		// that the MD attached to the context is created using our helper
+		// functions.
+		if strings.EqualFold(k, key) {
+			return copyOf(v)
+		}
+	}
+	return nil
+}
+
+func copyOf(v []string) []string {
+	vals := make([]string, len(v))
+	copy(vals, v)
+	return vals
+}
+
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
 //
-// This is intended for gRPC-internal use ONLY.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
 		return nil, nil, false
@@ -186,21 +254,39 @@
 	return raw.md, raw.added, true
 }
 
-// FromOutgoingContext returns the outgoing metadata in ctx if it exists.  The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
 func FromOutgoingContext(ctx context.Context) (MD, bool) {
 	raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
 	if !ok {
 		return nil, false
 	}
 
-	mds := make([]MD, 0, len(raw.added)+1)
-	mds = append(mds, raw.md)
-	for _, vv := range raw.added {
-		mds = append(mds, Pairs(vv...))
+	mdSize := len(raw.md)
+	for i := range raw.added {
+		mdSize += len(raw.added[i]) / 2
 	}
-	return Join(mds...), ok
+
+	out := make(MD, mdSize)
+	for k, v := range raw.md {
+		// We need to manually convert all keys to lower case, because MD is a
+		// map, and there's no guarantee that the MD attached to the context is
+		// created using our helper functions.
+		key := strings.ToLower(k)
+		out[key] = copyOf(v)
+	}
+	for _, added := range raw.added {
+		if len(added)%2 == 1 {
+			panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+		}
+
+		for i := 0; i < len(added); i += 2 {
+			key := strings.ToLower(added[i])
+			out[key] = append(out[key], added[i+1])
+		}
+	}
+	return out, ok
 }
 
 type rawMD struct {
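
The lowercase-normalization guarantees documented above are visible through the public helpers; a small sketch (keys and values are illustrative):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("User-Key", "v1") // key is stored as "user-key"
	md.Append("USER-KEY", "v2")
	fmt.Println(md.Get("user-key")) // [v1 v2]

	ctx := metadata.NewOutgoingContext(context.Background(), md)
	ctx = metadata.AppendToOutgoingContext(ctx, "Trace-Id", "abc123")
	out, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(out["user-key"], out["trace-id"]) // [v1 v2] [abc123]

	// Server side: ValueFromIncomingContext fetches one key without copying
	// the whole MD.
	in := metadata.NewIncomingContext(context.Background(), md)
	fmt.Println(metadata.ValueFromIncomingContext(in, "USER-KEY")) // [v1 v2]
}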
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
deleted file mode 100644
index c9f79dc..0000000
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"net"
-	"strconv"
-	"time"
-
-	"google.golang.org/grpc/grpclog"
-)
-
-const (
-	defaultPort = "443"
-	defaultFreq = time.Minute * 30
-)
-
-var (
-	errMissingAddr  = errors.New("missing address")
-	errWatcherClose = errors.New("watcher has been closed")
-
-	lookupHost = net.DefaultResolver.LookupHost
-	lookupSRV  = net.DefaultResolver.LookupSRV
-)
-
-// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
-// create watchers that poll the DNS server using the frequency set by freq.
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
-	return &dnsResolver{freq: freq}, nil
-}
-
-// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
-// watchers that poll the DNS server using the default frequency defined by defaultFreq.
-func NewDNSResolver() (Resolver, error) {
-	return NewDNSResolverWithFreq(defaultFreq)
-}
-
-// dnsResolver handles name resolution for names following the DNS scheme
-type dnsResolver struct {
-	// frequency of polling the DNS server that the watchers created by this resolver will use.
-	freq time.Duration
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
-	ip := net.ParseIP(addr)
-	if ip == nil {
-		return "", false
-	}
-	if ip.To4() != nil {
-		return addr, true
-	}
-	return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
-	if target == "" {
-		return "", "", errMissingAddr
-	}
-
-	if ip := net.ParseIP(target); ip != nil {
-		// target is an IPv4 or IPv6(without brackets) address
-		return target, defaultPort, nil
-	}
-	if host, port, err := net.SplitHostPort(target); err == nil {
-		// target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
-		if host == "" {
-			// Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
-			host = "localhost"
-		}
-		if port == "" {
-			// If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
-			port = defaultPort
-		}
-		return host, port, nil
-	}
-	if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
-		// target doesn't have port
-		return host, port, nil
-	}
-	return "", "", fmt.Errorf("invalid target address %v", target)
-}
-
-// Resolve creates a watcher that watches the name resolution of the target.
-func (r *dnsResolver) Resolve(target string) (Watcher, error) {
-	host, port, err := parseTarget(target)
-	if err != nil {
-		return nil, err
-	}
-
-	if net.ParseIP(host) != nil {
-		ipWatcher := &ipWatcher{
-			updateChan: make(chan *Update, 1),
-		}
-		host, _ = formatIP(host)
-		ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
-		return ipWatcher, nil
-	}
-
-	ctx, cancel := context.WithCancel(context.Background())
-	return &dnsWatcher{
-		r:      r,
-		host:   host,
-		port:   port,
-		ctx:    ctx,
-		cancel: cancel,
-		t:      time.NewTimer(0),
-	}, nil
-}
-
-// dnsWatcher watches for the name resolution update for a specific target
-type dnsWatcher struct {
-	r    *dnsResolver
-	host string
-	port string
-	// The latest resolved address set
-	curAddrs map[string]*Update
-	ctx      context.Context
-	cancel   context.CancelFunc
-	t        *time.Timer
-}
-
-// ipWatcher watches for the name resolution update for an IP address.
-type ipWatcher struct {
-	updateChan chan *Update
-}
-
-// Next returns the address resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unnecessary. Therefore,
-// Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exists until watcher is closed.
-func (i *ipWatcher) Next() ([]*Update, error) {
-	u, ok := <-i.updateChan
-	if !ok {
-		return nil, errWatcherClose
-	}
-	return []*Update{u}, nil
-}
-
-// Close closes the ipWatcher.
-func (i *ipWatcher) Close() {
-	close(i.updateChan)
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
-	// Backend indicates the server is a backend server.
-	Backend AddressType = iota
-	// GRPCLB indicates the server is a grpclb load balancer.
-	GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
-	// AddrType is the type of server (grpc load balancer or backend).
-	AddrType AddressType
-	// ServerName is the name of the grpc load balancer. Used for authentication.
-	ServerName string
-}
-
-// compileUpdate compares the old resolved addresses and newly resolved addresses,
-// and generates an update list
-func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
-	var res []*Update
-	for a, u := range w.curAddrs {
-		if _, ok := newAddrs[a]; !ok {
-			u.Op = Delete
-			res = append(res, u)
-		}
-	}
-	for a, u := range newAddrs {
-		if _, ok := w.curAddrs[a]; !ok {
-			res = append(res, u)
-		}
-	}
-	return res
-}
-
-func (w *dnsWatcher) lookupSRV() map[string]*Update {
-	newAddrs := make(map[string]*Update)
-	_, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
-	if err != nil {
-		grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, s := range srvs {
-		lbAddrs, err := lookupHost(w.ctx, s.Target)
-		if err != nil {
-			grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
-			continue
-		}
-		for _, a := range lbAddrs {
-			a, ok := formatIP(a)
-			if !ok {
-				grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-				continue
-			}
-			addr := a + ":" + strconv.Itoa(int(s.Port))
-			newAddrs[addr] = &Update{Addr: addr,
-				Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
-		}
-	}
-	return newAddrs
-}
-
-func (w *dnsWatcher) lookupHost() map[string]*Update {
-	newAddrs := make(map[string]*Update)
-	addrs, err := lookupHost(w.ctx, w.host)
-	if err != nil {
-		grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
-		return nil
-	}
-	for _, a := range addrs {
-		a, ok := formatIP(a)
-		if !ok {
-			grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
-			continue
-		}
-		addr := a + ":" + w.port
-		newAddrs[addr] = &Update{Addr: addr}
-	}
-	return newAddrs
-}
-
-func (w *dnsWatcher) lookup() []*Update {
-	newAddrs := w.lookupSRV()
-	if newAddrs == nil {
-		// If failed to get any balancer address (either no corresponding SRV for the
-		// target, or caused by failure during resolution/parsing of the balancer target),
-		// return any A record info available.
-		newAddrs = w.lookupHost()
-	}
-	result := w.compileUpdate(newAddrs)
-	w.curAddrs = newAddrs
-	return result
-}
-
-// Next returns the resolved address update(delta) for the target. If there's no
-// change, it will sleep for 30 mins and try to resolve again after that.
-func (w *dnsWatcher) Next() ([]*Update, error) {
-	for {
-		select {
-		case <-w.ctx.Done():
-			return nil, errWatcherClose
-		case <-w.t.C:
-		}
-		result := w.lookup()
-		// Next lookup should happen after an interval defined by w.r.freq.
-		w.t.Reset(w.r.freq)
-		if len(result) > 0 {
-			return result, nil
-		}
-	}
-}
-
-func (w *dnsWatcher) Close() {
-	w.cancel()
-}
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
deleted file mode 100644
index f4c1c8b..0000000
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-//
-// This package is deprecated: please use package resolver instead.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
-type Operation uint8
-
-const (
-	// Add indicates a new address is added.
-	Add Operation = iota
-	// Delete indicates an existing address is deleted.
-	Delete
-)
-
-// Update defines a name resolution update. Notice that it is not valid having both
-// empty string Addr and nil Metadata in an Update.
-//
-// Deprecated: please use package resolver.
-type Update struct {
-	// Op indicates the operation of the update.
-	Op Operation
-	// Addr is the updated address. It is empty string if there is no address update.
-	Addr string
-	// Metadata is the updated metadata. It is nil if there is no metadata update.
-	// Metadata is not required for a custom naming implementation.
-	Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
-type Resolver interface {
-	// Resolve creates a Watcher for target.
-	Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
-type Watcher interface {
-	// Next blocks until an update or error happens. It may return one or more
-	// updates. The first call should get the full set of the results. It should
-	// return an error if and only if Watcher cannot recover.
-	Next() ([]*Update, error)
-	// Close closes the Watcher.
-	Close()
-}
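
The deleted package pointed at package resolver as its replacement. A minimal sketch of the modern equivalent of a static Resolver/Watcher pair, assuming the current resolver API (the scheme name and address are illustrative):

package main

import (
	"google.golang.org/grpc/resolver"
)

// staticBuilder replaces a naming.Resolver/Watcher pair: it pushes a fixed
// address list once instead of emitting Add/Delete deltas.
type staticBuilder struct{ addrs []string }

func (b *staticBuilder) Scheme() string { return "static" } // illustrative scheme

func (b *staticBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	var state resolver.State
	for _, a := range b.addrs {
		state.Addresses = append(state.Addresses, resolver.Address{Addr: a})
	}
	if err := cc.UpdateState(state); err != nil {
		return nil, err
	}
	return &staticResolver{}, nil
}

type staticResolver struct{}

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func main() {
	// A client dialing "static:///ignored" would now use this builder.
	resolver.Register(&staticBuilder{addrs: []string{"10.0.0.1:50051"}})
}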
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index e01d219..499a49c 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -22,7 +22,9 @@
 
 import (
 	"context"
+	"fmt"
 	"net"
+	"strings"
 
 	"google.golang.org/grpc/credentials"
 )
@@ -32,11 +34,41 @@
 type Peer struct {
 	// Addr is the peer address.
 	Addr net.Addr
+	// LocalAddr is the local address.
+	LocalAddr net.Addr
 	// AuthInfo is the authentication information of the transport.
 	// It is nil if there is no transport security being used.
 	AuthInfo credentials.AuthInfo
 }
 
+// String ensures the Peer type implements the Stringer interface so that a
+// context carrying a peerKey value can be printed effectively.
+func (p *Peer) String() string {
+	if p == nil {
+		return "Peer<nil>"
+	}
+	sb := &strings.Builder{}
+	sb.WriteString("Peer{")
+	if p.Addr != nil {
+		fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+	} else {
+		fmt.Fprintf(sb, "Addr: <nil>, ")
+	}
+	if p.LocalAddr != nil {
+		fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+	} else {
+		fmt.Fprintf(sb, "LocalAddr: <nil>, ")
+	}
+	if p.AuthInfo != nil {
+		fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+	} else {
+		fmt.Fprintf(sb, "AuthInfo: <nil>")
+	}
+	sb.WriteString("}")
+
+	return sb.String()
+}
+
 type peerKey struct{}
 
 // NewContext creates a new context with peer information attached.
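
The new String method makes a Peer printable wherever a context carries one; a short sketch of typical handler-side usage (the empty Peer is illustrative):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/peer"
)

// logPeer mirrors typical server-handler usage: FromContext retrieves the
// Peer attached by the transport, and fmt uses the new String method.
func logPeer(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		fmt.Println(p) // e.g. Peer{Addr: <nil>, LocalAddr: <nil>, AuthInfo: <nil>}
	}
}

func main() {
	logPeer(peer.NewContext(context.Background(), &peer.Peer{}))
}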
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 45baa2a..aa52bfe 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,68 +20,64 @@
 
 import (
 	"context"
+	"fmt"
 	"io"
-	"sync"
+	"sync/atomic"
 
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal/channelz"
+	istatus "google.golang.org/grpc/internal/status"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/status"
 )
 
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+	// picker is the picker produced by the LB policy.  May be nil if a picker
+	// has never been produced.
+	picker balancer.Picker
+	// blockingCh is closed when the picker has been invalidated because there
+	// is a new one available.
+	blockingCh chan struct{}
+}
+
 // pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
 // actions and unblocks when there's a picker update.
 type pickerWrapper struct {
-	mu         sync.Mutex
-	done       bool
-	blockingCh chan struct{}
-	picker     balancer.Picker
-
-	// The latest connection happened.
-	connErrMu sync.Mutex
-	connErr   error
+	// If pickerGen holds a nil pointer, the pickerWrapper is closed.
+	pickerGen atomic.Pointer[pickerGeneration]
 }
 
 func newPickerWrapper() *pickerWrapper {
-	bp := &pickerWrapper{blockingCh: make(chan struct{})}
-	return bp
+	pw := &pickerWrapper{}
+	pw.pickerGen.Store(&pickerGeneration{
+		blockingCh: make(chan struct{}),
+	})
+	return pw
 }
 
-func (bp *pickerWrapper) updateConnectionError(err error) {
-	bp.connErrMu.Lock()
-	bp.connErr = err
-	bp.connErrMu.Unlock()
+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+	old := pw.pickerGen.Swap(&pickerGeneration{
+		picker:     p,
+		blockingCh: make(chan struct{}),
+	})
+	close(old.blockingCh)
 }
 
-func (bp *pickerWrapper) connectionError() error {
-	bp.connErrMu.Lock()
-	err := bp.connErr
-	bp.connErrMu.Unlock()
-	return err
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
-func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
-	bp.mu.Lock()
-	if bp.done {
-		bp.mu.Unlock()
-		return
-	}
-	bp.picker = p
-	// bp.blockingCh should never be nil.
-	close(bp.blockingCh)
-	bp.blockingCh = make(chan struct{})
-	bp.mu.Unlock()
-}
-
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
-	acw.mu.Lock()
-	ac := acw.ac
-	acw.mu.Unlock()
+// doneChannelzWrapper performs the following:
+//   - increments the calls started channelz counter
+//   - wraps the done function in the passed in result to increment the calls
+//     failed or calls succeeded channelz counter before invoking the actual
+//     done function.
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+	ac := acbw.ac
 	ac.incrCallsStarted()
-	return func(b balancer.DoneInfo) {
+	done := result.Done
+	result.Done = func(b balancer.DoneInfo) {
 		if b.Err != nil && b.Err != io.EOF {
 			ac.incrCallsFailed()
 		} else {
@@ -93,6 +89,12 @@
 	}
 }
 
+type pick struct {
+	transport transport.ClientTransport // the selected transport
+	result    balancer.PickResult       // the contents of the pick from the LB policy
+	blocked   bool                      // set if this pick blocked waiting for a new picker
+}
+
 // pick returns the transport that will be used for the RPC.
 // It may block in the following cases:
 // - there's no picker
@@ -100,85 +102,97 @@
 // - the current picker returns other errors and failfast is false.
 // - the subConn returned by the current picker is not READY
 // When one of these situations happens, pick blocks until the picker gets updated.
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) {
 	var ch chan struct{}
 
-	for {
-		bp.mu.Lock()
-		if bp.done {
-			bp.mu.Unlock()
-			return nil, nil, ErrClientConnClosing
-		}
+	var lastPickErr error
+	pickBlocked := false
 
-		if bp.picker == nil {
-			ch = bp.blockingCh
+	for {
+		pg := pw.pickerGen.Load()
+		if pg == nil {
+			return pick{}, ErrClientConnClosing
 		}
-		if ch == bp.blockingCh {
+		if pg.picker == nil {
+			ch = pg.blockingCh
+		}
+		if ch == pg.blockingCh {
 			// This could happen when either:
-			// - bp.picker is nil (the previous if condition), or
-			// - has called pick on the current picker.
-			bp.mu.Unlock()
+			// - pw.picker is nil (the previous if condition), or
+			// - we have already called pick on the current picker.
 			select {
 			case <-ctx.Done():
-				if connectionErr := bp.connectionError(); connectionErr != nil {
-					switch ctx.Err() {
-					case context.DeadlineExceeded:
-						return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
-					case context.Canceled:
-						return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
-					}
+				var errStr string
+				if lastPickErr != nil {
+					errStr = "latest balancer error: " + lastPickErr.Error()
+				} else {
+					errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err())
 				}
-				return nil, nil, ctx.Err()
+				switch ctx.Err() {
+				case context.DeadlineExceeded:
+					return pick{}, status.Error(codes.DeadlineExceeded, errStr)
+				case context.Canceled:
+					return pick{}, status.Error(codes.Canceled, errStr)
+				}
 			case <-ch:
 			}
 			continue
 		}
 
-		ch = bp.blockingCh
-		p := bp.picker
-		bp.mu.Unlock()
-
-		subConn, done, err := p.Pick(ctx, opts)
-
-		if err != nil {
-			switch err {
-			case balancer.ErrNoSubConnAvailable:
-				continue
-			case balancer.ErrTransientFailure:
-				if !failfast {
-					continue
-				}
-				return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
-			case context.DeadlineExceeded:
-				return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
-			case context.Canceled:
-				return nil, nil, status.Error(codes.Canceled, err.Error())
-			default:
-				if _, ok := status.FromError(err); ok {
-					return nil, nil, err
-				}
-				// err is some other error.
-				return nil, nil, status.Error(codes.Unknown, err.Error())
-			}
+		// If the channel is set, this pick call had to wait for a new picker
+		// at some point: either this is the first iteration and the call
+		// blocked until the first picker arrived, or an earlier iteration got
+		// ErrNoSubConnAvailable (or another non-status error with failfast
+		// disabled) and continued the loop. In the first case the channel was
+		// set while blocking; in the second, reaching this point at all means
+		// a new picker was installed.
+		if ch != nil {
+			pickBlocked = true
 		}
 
-		acw, ok := subConn.(*acBalancerWrapper)
+		ch = pg.blockingCh
+		p := pg.picker
+
+		pickResult, err := p.Pick(info)
+		if err != nil {
+			if err == balancer.ErrNoSubConnAvailable {
+				continue
+			}
+			if st, ok := status.FromError(err); ok {
+				// Status error: end the RPC unconditionally with this status.
+				// First restrict the code to the list allowed by gRFC A54.
+				if istatus.IsRestrictedControlPlaneCode(st) {
+					err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
+				}
+				return pick{}, dropError{error: err}
+			}
+			// For all other errors, wait-for-ready RPCs should block, and
+			// other RPCs should fail with Unavailable.
+			if !failfast {
+				lastPickErr = err
+				continue
+			}
+			return pick{}, status.Error(codes.Unavailable, err.Error())
+		}
+
+		acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
 		if !ok {
-			grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
+			logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
 			continue
 		}
-		if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+		if t := acbw.ac.getReadyTransport(); t != nil {
 			if channelz.IsOn() {
-				return t, doneChannelzWrapper(acw, done), nil
+				doneChannelzWrapper(acbw, &pickResult)
 			}
-			return t, done, nil
+			return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil
 		}
-		if done != nil {
+		if pickResult.Done != nil {
 			// Calling done with nil error, no bytes sent and no bytes received.
 			// DoneInfo with default value works.
-			done(balancer.DoneInfo{})
+			pickResult.Done(balancer.DoneInfo{})
 		}
-		grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+		logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
 		// If t == nil, ac.state is not READY.
 		// A valid picker always returns a READY subConn. This means the state
 		// of ac just changed, and the picker will be updated shortly.
@@ -186,12 +200,20 @@
 	}
 }
 
-func (bp *pickerWrapper) close() {
-	bp.mu.Lock()
-	defer bp.mu.Unlock()
-	if bp.done {
-		return
-	}
-	bp.done = true
-	close(bp.blockingCh)
+func (pw *pickerWrapper) close() {
+	old := pw.pickerGen.Swap(nil)
+	close(old.blockingCh)
+}
+
+// reset clears the pickerWrapper and prepares it for being used again when idle
+// mode is exited.
+func (pw *pickerWrapper) reset() {
+	old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+	close(old.blockingCh)
+}
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+	error
 }
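
The rewritten pick loop above replaces the old mutex-and-flag bookkeeping with a single atomically swapped generation: `close()` and `reset()` both Swap in a new `pickerGeneration` and close the old generation's `blockingCh`, which wakes every goroutine parked in `pick`. A minimal standalone sketch of that Swap-then-close broadcast idiom (not part of the diff; `generation` and `wrapper` are illustrative stand-ins) could look like:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// generation pairs a value with a channel that is closed when the value is
// replaced, letting any number of waiters unblock without holding a mutex.
type generation struct {
	picker     string        // stand-in for balancer.Picker
	blockingCh chan struct{} // closed when a newer generation is swapped in
}

type wrapper struct {
	gen atomic.Pointer[generation]
}

// update installs a new picker and wakes every goroutine parked on the old
// generation's channel, mirroring the Swap-then-close pattern in the diff.
func (w *wrapper) update(p string) {
	old := w.gen.Swap(&generation{picker: p, blockingCh: make(chan struct{})})
	if old != nil {
		close(old.blockingCh)
	}
}

func main() {
	w := &wrapper{}
	w.update("picker-1")

	g := w.gen.Load()
	done := make(chan struct{})
	go func() {
		<-g.blockingCh // parked until update() closes the channel
		fmt.Println("woken by:", w.gen.Load().picker)
		close(done)
	}()

	w.update("picker-2")
	<-done
}
```
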
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
deleted file mode 100644
index ed05b02..0000000
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"context"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/connectivity"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/resolver"
-)
-
-// PickFirstBalancerName is the name of the pick_first balancer.
-const PickFirstBalancerName = "pick_first"
-
-func newPickfirstBuilder() balancer.Builder {
-	return &pickfirstBuilder{}
-}
-
-type pickfirstBuilder struct{}
-
-func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
-	return &pickfirstBalancer{cc: cc}
-}
-
-func (*pickfirstBuilder) Name() string {
-	return PickFirstBalancerName
-}
-
-type pickfirstBalancer struct {
-	cc balancer.ClientConn
-	sc balancer.SubConn
-}
-
-func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
-	if err != nil {
-		if grpclog.V(2) {
-			grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
-		}
-		return
-	}
-	if b.sc == nil {
-		b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
-		if err != nil {
-			//TODO(yuxuanli): why not change the cc state to Idle?
-			if grpclog.V(2) {
-				grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
-			}
-			return
-		}
-		b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
-		b.sc.Connect()
-	} else {
-		b.sc.UpdateAddresses(addrs)
-		b.sc.Connect()
-	}
-}
-
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
-	if grpclog.V(2) {
-		grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
-	}
-	if b.sc != sc {
-		if grpclog.V(2) {
-			grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
-		}
-		return
-	}
-	if s == connectivity.Shutdown {
-		b.sc = nil
-		return
-	}
-
-	switch s {
-	case connectivity.Ready, connectivity.Idle:
-		b.cc.UpdateBalancerState(s, &picker{sc: sc})
-	case connectivity.Connecting:
-		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
-	case connectivity.TransientFailure:
-		b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
-	}
-}
-
-func (b *pickfirstBalancer) Close() {
-}
-
-type picker struct {
-	err error
-	sc  balancer.SubConn
-}
-
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
-	if p.err != nil {
-		return nil, nil, p.err
-	}
-	return p.sc, nil, nil
-}
-
-func init() {
-	balancer.Register(newPickfirstBuilder())
-}
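
The deleted pickfirst.go above implemented the long-removed `HandleResolvedAddrs`/`HandleSubConnStateChange` balancer callbacks; in the gRPC-Go version this change vendors, pick_first lives elsewhere in the module but stays registered under the same `pick_first` name. A hedged sketch of selecting it explicitly through the service config, assuming a gRPC-Go release that provides `grpc.NewClient` (the target address is illustrative):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// pick_first is still the default policy; naming it in the default
	// service config just makes the choice explicit.
	conn, err := grpc.NewClient("dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"pick_first":{}}]}`),
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}
```
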
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 76acbbc..ee0ff96 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,21 +20,26 @@
 
 import (
 	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/status"
 )
 
 // PreparedMsg is responsible for creating a marshalled and compressed object.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type PreparedMsg struct {
 	// Struct for preparing the msg before sending it
-	encodedData []byte
+	encodedData mem.BufferSlice
 	hdr         []byte
-	payload     []byte
+	payload     mem.BufferSlice
+	pf          payloadFormat
 }
 
 // Encode marshals and compresses the message using the codec and compressor for the stream.
-func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+func (p *PreparedMsg) Encode(s Stream, msg any) error {
 	ctx := s.Context()
 	rpcInfo, ok := rpcInfoFromContext(ctx)
 	if !ok {
@@ -54,11 +59,27 @@
 	if err != nil {
 		return err
 	}
-	p.encodedData = data
-	compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+	materializedData := data.Materialize()
+	data.Free()
+	p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)}
+
+	// TODO: it should be possible to grab the bufferPool from the underlying
+	//  stream implementation with a type cast to its actual type (such as
+	//  addrConnStream) and accessing the buffer pool directly.
+	var compData mem.BufferSlice
+	compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
 	if err != nil {
 		return err
 	}
-	p.hdr, p.payload = msgHeader(data, compData)
+
+	if p.pf.isCompressed() {
+		materializedCompData := compData.Materialize()
+		compData.Free()
+		compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)}
+	}
+
+	p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
 	return nil
 }
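
After this change `PreparedMsg` carries `mem.BufferSlice`s and the payload format instead of raw byte slices, but the usage pattern is unchanged: encode once, then pass the prepared message to `SendMsg`, which detects `*PreparedMsg` and skips re-marshalling. A small sketch (`sendPrepared` is an illustrative helper, and it must run inside a live handler so the stream's context carries the RPC's codec and compressor info):

```go
package preload

import "google.golang.org/grpc"

// sendPrepared encodes msg once against the stream's codec and compressor,
// then hands the pre-encoded bytes to SendMsg, which recognises *PreparedMsg
// and does not marshal or compress the message a second time.
func sendPrepared(stream grpc.ServerStream, msg any) error {
	var pm grpc.PreparedMsg
	if err := pm.Encode(stream, msg); err != nil {
		return err
	}
	return stream.SendMsg(&pm)
}
```
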
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
deleted file mode 100644
index f8f69bf..0000000
--- a/vendor/google.golang.org/grpc/proxy.go
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"bufio"
-	"context"
-	"encoding/base64"
-	"errors"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"net/http/httputil"
-	"net/url"
-)
-
-const proxyAuthHeaderKey = "Proxy-Authorization"
-
-var (
-	// errDisabled indicates that proxy is disabled for the address.
-	errDisabled = errors.New("proxy is disabled for the address")
-	// The following variable will be overwritten in the tests.
-	httpProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-func mapAddress(ctx context.Context, address string) (*url.URL, error) {
-	req := &http.Request{
-		URL: &url.URL{
-			Scheme: "https",
-			Host:   address,
-		},
-	}
-	url, err := httpProxyFromEnvironment(req)
-	if err != nil {
-		return nil, err
-	}
-	if url == nil {
-		return nil, errDisabled
-	}
-	return url, nil
-}
-
-// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's need for the response and stores
-// those bytes in the buffer.
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
-// bytes in the buffer.
-type bufConn struct {
-	net.Conn
-	r io.Reader
-}
-
-func (c *bufConn) Read(b []byte) (int, error) {
-	return c.r.Read(b)
-}
-
-func basicAuth(username, password string) string {
-	auth := username + ":" + password
-	return base64.StdEncoding.EncodeToString([]byte(auth))
-}
-
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
-	defer func() {
-		if err != nil {
-			conn.Close()
-		}
-	}()
-
-	req := &http.Request{
-		Method: http.MethodConnect,
-		URL:    &url.URL{Host: backendAddr},
-		Header: map[string][]string{"User-Agent": {grpcUA}},
-	}
-	if t := proxyURL.User; t != nil {
-		u := t.Username()
-		p, _ := t.Password()
-		req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
-	}
-
-	if err := sendHTTPRequest(ctx, req, conn); err != nil {
-		return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-
-	r := bufio.NewReader(conn)
-	resp, err := http.ReadResponse(r, req)
-	if err != nil {
-		return nil, fmt.Errorf("reading server HTTP response: %v", err)
-	}
-	defer resp.Body.Close()
-	if resp.StatusCode != http.StatusOK {
-		dump, err := httputil.DumpResponse(resp, true)
-		if err != nil {
-			return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
-		}
-		return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
-	}
-
-	return &bufConn{Conn: conn, r: r}, nil
-}
-
-// newProxyDialer returns a dialer that connects to proxy first if necessary.
-// The returned dialer checks if a proxy is necessary, dial to the proxy with the
-// provided dialer, does HTTP CONNECT handshake and returns the connection.
-func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
-	return func(ctx context.Context, addr string) (conn net.Conn, err error) {
-		var newAddr string
-		proxyURL, err := mapAddress(ctx, addr)
-		if err != nil {
-			if err != errDisabled {
-				return nil, err
-			}
-			newAddr = addr
-		} else {
-			newAddr = proxyURL.Host
-		}
-
-		conn, err = dialer(ctx, newAddr)
-		if err != nil {
-			return
-		}
-		if proxyURL != nil {
-			// proxy is disabled if proxyURL is nil.
-			conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
-		}
-		return
-	}
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
-	req = req.WithContext(ctx)
-	if err := req.Write(conn); err != nil {
-		return fmt.Errorf("failed to write the HTTP request: %v", err)
-	}
-	return nil
-}
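
proxy.go is deleted because HTTP CONNECT proxy handling moved out of the top-level package in the vendored gRPC-Go version. Callers who need to interpose on the raw connection, as the removed `newProxyDialer` did, can still do so with `grpc.WithContextDialer`; a sketch under the same `grpc.NewClient` assumption as above, with an illustrative address:

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// A custom dialer gets the final say over the raw connection; any
	// CONNECT-style tunnelling like the deleted code performed could be
	// done inside this function before returning the net.Conn.
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	}
	conn, err := grpc.NewClient("passthrough:///127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(dialer),
	)
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()
}
```
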
diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md
index 04b6371..9ace83c 100644
--- a/vendor/google.golang.org/grpc/reflection/README.md
+++ b/vendor/google.golang.org/grpc/reflection/README.md
@@ -2,7 +2,7 @@
 
 Package reflection implements server reflection service.
 
-The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
+The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto.
 
 To register server reflection on a gRPC server:
 ```go
diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go
new file mode 100644
index 0000000..6997e47
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/adapt.go
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reflection
+
+import (
+	"google.golang.org/grpc/reflection/internal"
+
+	v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+// asV1Alpha returns an implementation of the v1alpha version of the reflection
+// interface that delegates all calls to the given v1 version.
+func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer {
+	return v1AlphaServerImpl{svr: svr}
+}
+
+type v1AlphaServerImpl struct {
+	svr v1reflectiongrpc.ServerReflectionServer
+}
+
+func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error {
+	return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream})
+}
+
+type v1AlphaServerStreamAdapter struct {
+	v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer
+}
+
+func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error {
+	return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response))
+}
+
+func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) {
+	resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv()
+	if err != nil {
+		return nil, err
+	}
+	return internal.V1AlphaToV1Request(resp), nil
+}
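
The adapter above lets a single v1 implementation also serve v1alpha clients by converting each request and response between the two protos. Nothing extra is needed at registration time; `reflection.Register` wires both versions up (the port below is illustrative):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	// Register exposes the service under grpc.reflection.v1 and, via the
	// asV1Alpha adapter, under grpc.reflection.v1alpha as well.
	reflection.Register(s)
	log.Fatal(s.Serve(lis))
}
```
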
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
new file mode 100644
index 0000000..92f5292
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
@@ -0,0 +1,777 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection.  A more complete description of how
+// server reflection works can be found at
+// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// 	protoc-gen-go v1.36.6
+// 	protoc        v5.27.1
+// source: grpc/reflection/v1/reflection.proto
+
+package grpc_reflection_v1
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The message sent by the client when calling ServerReflectionInfo method.
+type ServerReflectionRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	Host  string                 `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+	// To use reflection service, the client should set one of the following
+	// fields in message_request. The server distinguishes requests by their
+	// defined field and then handles them using corresponding methods.
+	//
+	// Types that are valid to be assigned to MessageRequest:
+	//
+	//	*ServerReflectionRequest_FileByFilename
+	//	*ServerReflectionRequest_FileContainingSymbol
+	//	*ServerReflectionRequest_FileContainingExtension
+	//	*ServerReflectionRequest_AllExtensionNumbersOfType
+	//	*ServerReflectionRequest_ListServices
+	MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
+	unknownFields  protoimpl.UnknownFields
+	sizeCache      protoimpl.SizeCache
+}
+
+func (x *ServerReflectionRequest) Reset() {
+	*x = ServerReflectionRequest{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionRequest) ProtoMessage() {}
+
+func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead.
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerReflectionRequest) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
+	if x != nil {
+		return x.MessageRequest
+	}
+	return nil
+}
+
+func (x *ServerReflectionRequest) GetFileByFilename() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok {
+			return x.FileByFilename
+		}
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingSymbol() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok {
+			return x.FileContainingSymbol
+		}
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok {
+			return x.FileContainingExtension
+		}
+	}
+	return nil
+}
+
+func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
+			return x.AllExtensionNumbersOfType
+		}
+	}
+	return ""
+}
+
+func (x *ServerReflectionRequest) GetListServices() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok {
+			return x.ListServices
+		}
+	}
+	return ""
+}
+
+type isServerReflectionRequest_MessageRequest interface {
+	isServerReflectionRequest_MessageRequest()
+}
+
+type ServerReflectionRequest_FileByFilename struct {
+	// Find a proto file by the file name.
+	FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingSymbol struct {
+	// Find the proto file that declares the given fully-qualified symbol name.
+	// This field should be a fully-qualified symbol name
+	// (e.g. <package>.<service>[.<method>] or <package>.<type>).
+	FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingExtension struct {
+	// Find the proto file which defines an extension extending the given
+	// message type with the given field number.
+	FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
+}
+
+type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+	// Finds the tag numbers used by all known extensions of the given message
+	// type, and appends them to ExtensionNumberResponse in an undefined order.
+	// Its corresponding method is best-effort: it's not guaranteed that the
+	// reflection service will implement this method, and it's not guaranteed
+	// that this method will provide all extensions. Returns
+	// StatusCode::UNIMPLEMENTED if it's not implemented.
+	// This field should be a fully-qualified type name. The format is
+	// <package>.<type>
+	AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
+}
+
+type ServerReflectionRequest_ListServices struct {
+	// List the full names of registered services. The content will not be
+	// checked.
+	ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
+}
+
+func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
+
+func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+type ExtensionRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Fully-qualified type name. The format should be <package>.<type>
+	ContainingType  string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+	ExtensionNumber int32  `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
+}
+
+func (x *ExtensionRequest) Reset() {
+	*x = ExtensionRequest{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionRequest) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExtensionRequest) GetContainingType() string {
+	if x != nil {
+		return x.ContainingType
+	}
+	return ""
+}
+
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+	if x != nil {
+		return x.ExtensionNumber
+	}
+	return 0
+}
+
+// The message sent by the server to answer ServerReflectionInfo method.
+type ServerReflectionResponse struct {
+	state           protoimpl.MessageState   `protogen:"open.v1"`
+	ValidHost       string                   `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+	OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
+	// The server sets one of the following fields according to the message_request
+	// in the request.
+	//
+	// Types that are valid to be assigned to MessageResponse:
+	//
+	//	*ServerReflectionResponse_FileDescriptorResponse
+	//	*ServerReflectionResponse_AllExtensionNumbersResponse
+	//	*ServerReflectionResponse_ListServicesResponse
+	//	*ServerReflectionResponse_ErrorResponse
+	MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
+}
+
+func (x *ServerReflectionResponse) Reset() {
+	*x = ServerReflectionResponse{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionResponse) ProtoMessage() {}
+
+func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead.
+func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ServerReflectionResponse) GetValidHost() string {
+	if x != nil {
+		return x.ValidHost
+	}
+	return ""
+}
+
+func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
+	if x != nil {
+		return x.OriginalRequest
+	}
+	return nil
+}
+
+func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
+	if x != nil {
+		return x.MessageResponse
+	}
+	return nil
+}
+
+func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok {
+			return x.FileDescriptorResponse
+		}
+	}
+	return nil
+}
+
+func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
+			return x.AllExtensionNumbersResponse
+		}
+	}
+	return nil
+}
+
+func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok {
+			return x.ListServicesResponse
+		}
+	}
+	return nil
+}
+
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok {
+			return x.ErrorResponse
+		}
+	}
+	return nil
+}
+
+type isServerReflectionResponse_MessageResponse interface {
+	isServerReflectionResponse_MessageResponse()
+}
+
+type ServerReflectionResponse_FileDescriptorResponse struct {
+	// This message is used to answer file_by_filename, file_containing_symbol,
+	// file_containing_extension requests with transitive dependencies.
+	// As the repeated label is not allowed in oneof fields, we use a
+	// FileDescriptorResponse message to encapsulate the repeated fields.
+	// The reflection service is allowed to avoid sending FileDescriptorProtos
+	// that were previously sent in response to earlier requests in the stream.
+	FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_AllExtensionNumbersResponse struct {
+	// This message is used to answer all_extension_numbers_of_type requests.
+	AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_ListServicesResponse struct {
+	// This message is used to answer list_services requests.
+	ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_ErrorResponse struct {
+	// This message is used when an error occurs.
+	ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
+}
+
+func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {
+}
+
+func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
+}
+
+func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {}
+
+func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
+
+// Serialized FileDescriptorProto messages sent by the server answering
+// a file_by_filename, file_containing_symbol, or file_containing_extension
+// request.
+type FileDescriptorResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Serialized FileDescriptorProto messages. We avoid taking a dependency on
+	// descriptor.proto, which uses proto2 only features, by making them opaque
+	// bytes instead.
+	FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
+}
+
+func (x *FileDescriptorResponse) Reset() {
+	*x = FileDescriptorResponse{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FileDescriptorResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorResponse) ProtoMessage() {}
+
+func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead.
+func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
+	if x != nil {
+		return x.FileDescriptorProto
+	}
+	return nil
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+type ExtensionNumberResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Full name of the base type, including the package name. The format
+	// is <package>.<type>
+	BaseTypeName    string  `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+	ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
+}
+
+func (x *ExtensionNumberResponse) Reset() {
+	*x = ExtensionNumberResponse{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionNumberResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+	if x != nil {
+		return x.BaseTypeName
+	}
+	return ""
+}
+
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+	if x != nil {
+		return x.ExtensionNumber
+	}
+	return nil
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+type ListServiceResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// The information of each service may be expanded in the future, so we use
+	// ServiceResponse message to encapsulate it.
+	Service       []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *ListServiceResponse) Reset() {
+	*x = ListServiceResponse{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+	if x != nil {
+		return x.Service
+	}
+	return nil
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+type ServiceResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Full name of a registered service, including its package name. The format
+	// is <package>.<service>
+	Name          string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *ServiceResponse) Reset() {
+	*x = ServiceResponse{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceResponse) ProtoMessage() {}
+
+func (x *ServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead.
+func (*ServiceResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ServiceResponse) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+// The error code and error message sent by the server when an error occurs.
+type ErrorResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// This field uses the error codes defined in grpc::StatusCode.
+	ErrorCode     int32  `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
+	ErrorMessage  string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *ErrorResponse) Reset() {
+	*x = ErrorResponse{}
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorResponse) ProtoMessage() {}
+
+func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
+func (*ErrorResponse) Descriptor() ([]byte, []int) {
+	return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ErrorResponse) GetErrorCode() int32 {
+	if x != nil {
+		return x.ErrorCode
+	}
+	return 0
+}
+
+func (x *ErrorResponse) GetErrorMessage() string {
+	if x != nil {
+		return x.ErrorMessage
+	}
+	return ""
+}
+
+var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor
+
+const file_grpc_reflection_v1_reflection_proto_rawDesc = "" +
+	"\n" +
+	"#grpc/reflection/v1/reflection.proto\x12\x12grpc.reflection.v1\"\xf3\x02\n" +
+	"\x17ServerReflectionRequest\x12\x12\n" +
+	"\x04host\x18\x01 \x01(\tR\x04host\x12*\n" +
+	"\x10file_by_filename\x18\x03 \x01(\tH\x00R\x0efileByFilename\x126\n" +
+	"\x16file_containing_symbol\x18\x04 \x01(\tH\x00R\x14fileContainingSymbol\x12b\n" +
+	"\x19file_containing_extension\x18\x05 \x01(\v2$.grpc.reflection.v1.ExtensionRequestH\x00R\x17fileContainingExtension\x12B\n" +
+	"\x1dall_extension_numbers_of_type\x18\x06 \x01(\tH\x00R\x19allExtensionNumbersOfType\x12%\n" +
+	"\rlist_services\x18\a \x01(\tH\x00R\flistServicesB\x11\n" +
+	"\x0fmessage_request\"f\n" +
+	"\x10ExtensionRequest\x12'\n" +
+	"\x0fcontaining_type\x18\x01 \x01(\tR\x0econtainingType\x12)\n" +
+	"\x10extension_number\x18\x02 \x01(\x05R\x0fextensionNumber\"\xae\x04\n" +
+	"\x18ServerReflectionResponse\x12\x1d\n" +
+	"\n" +
+	"valid_host\x18\x01 \x01(\tR\tvalidHost\x12V\n" +
+	"\x10original_request\x18\x02 \x01(\v2+.grpc.reflection.v1.ServerReflectionRequestR\x0foriginalRequest\x12f\n" +
+	"\x18file_descriptor_response\x18\x04 \x01(\v2*.grpc.reflection.v1.FileDescriptorResponseH\x00R\x16fileDescriptorResponse\x12r\n" +
+	"\x1eall_extension_numbers_response\x18\x05 \x01(\v2+.grpc.reflection.v1.ExtensionNumberResponseH\x00R\x1ballExtensionNumbersResponse\x12_\n" +
+	"\x16list_services_response\x18\x06 \x01(\v2'.grpc.reflection.v1.ListServiceResponseH\x00R\x14listServicesResponse\x12J\n" +
+	"\x0eerror_response\x18\a \x01(\v2!.grpc.reflection.v1.ErrorResponseH\x00R\rerrorResponseB\x12\n" +
+	"\x10message_response\"L\n" +
+	"\x16FileDescriptorResponse\x122\n" +
+	"\x15file_descriptor_proto\x18\x01 \x03(\fR\x13fileDescriptorProto\"j\n" +
+	"\x17ExtensionNumberResponse\x12$\n" +
+	"\x0ebase_type_name\x18\x01 \x01(\tR\fbaseTypeName\x12)\n" +
+	"\x10extension_number\x18\x02 \x03(\x05R\x0fextensionNumber\"T\n" +
+	"\x13ListServiceResponse\x12=\n" +
+	"\aservice\x18\x01 \x03(\v2#.grpc.reflection.v1.ServiceResponseR\aservice\"%\n" +
+	"\x0fServiceResponse\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\"S\n" +
+	"\rErrorResponse\x12\x1d\n" +
+	"\n" +
+	"error_code\x18\x01 \x01(\x05R\terrorCode\x12#\n" +
+	"\rerror_message\x18\x02 \x01(\tR\ferrorMessage2\x89\x01\n" +
+	"\x10ServerReflection\x12u\n" +
+	"\x14ServerReflectionInfo\x12+.grpc.reflection.v1.ServerReflectionRequest\x1a,.grpc.reflection.v1.ServerReflectionResponse(\x010\x01Bf\n" +
+	"\x15io.grpc.reflection.v1B\x15ServerReflectionProtoP\x01Z4google.golang.org/grpc/reflection/grpc_reflection_v1b\x06proto3"
+
+var (
+	file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once
+	file_grpc_reflection_v1_reflection_proto_rawDescData []byte
+)
+
+func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte {
+	file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() {
+		file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc)))
+	})
+	return file_grpc_reflection_v1_reflection_proto_rawDescData
+}
+
+var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_reflection_v1_reflection_proto_goTypes = []any{
+	(*ServerReflectionRequest)(nil),  // 0: grpc.reflection.v1.ServerReflectionRequest
+	(*ExtensionRequest)(nil),         // 1: grpc.reflection.v1.ExtensionRequest
+	(*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse
+	(*FileDescriptorResponse)(nil),   // 3: grpc.reflection.v1.FileDescriptorResponse
+	(*ExtensionNumberResponse)(nil),  // 4: grpc.reflection.v1.ExtensionNumberResponse
+	(*ListServiceResponse)(nil),      // 5: grpc.reflection.v1.ListServiceResponse
+	(*ServiceResponse)(nil),          // 6: grpc.reflection.v1.ServiceResponse
+	(*ErrorResponse)(nil),            // 7: grpc.reflection.v1.ErrorResponse
+}
+var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{
+	1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest
+	0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest
+	3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse
+	4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse
+	5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse
+	7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse
+	6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse
+	0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest
+	2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse
+	8, // [8:9] is the sub-list for method output_type
+	7, // [7:8] is the sub-list for method input_type
+	7, // [7:7] is the sub-list for extension type_name
+	7, // [7:7] is the sub-list for extension extendee
+	0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_grpc_reflection_v1_reflection_proto_init() }
+func file_grpc_reflection_v1_reflection_proto_init() {
+	if File_grpc_reflection_v1_reflection_proto != nil {
+		return
+	}
+	file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{
+		(*ServerReflectionRequest_FileByFilename)(nil),
+		(*ServerReflectionRequest_FileContainingSymbol)(nil),
+		(*ServerReflectionRequest_FileContainingExtension)(nil),
+		(*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
+		(*ServerReflectionRequest_ListServices)(nil),
+	}
+	file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []any{
+		(*ServerReflectionResponse_FileDescriptorResponse)(nil),
+		(*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
+		(*ServerReflectionResponse_ListServicesResponse)(nil),
+		(*ServerReflectionResponse_ErrorResponse)(nil),
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc)),
+			NumEnums:      0,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   1,
+		},
+		GoTypes:           file_grpc_reflection_v1_reflection_proto_goTypes,
+		DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs,
+		MessageInfos:      file_grpc_reflection_v1_reflection_proto_msgTypes,
+	}.Build()
+	File_grpc_reflection_v1_reflection_proto = out.File
+	file_grpc_reflection_v1_reflection_proto_goTypes = nil
+	file_grpc_reflection_v1_reflection_proto_depIdxs = nil
+}
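
The generated oneof above is modelled as an interface plus one wrapper struct per case, so building a request means assigning exactly one wrapper. A short sketch using the generated types:

```go
package main

import (
	"fmt"

	v1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

func main() {
	// The oneof is populated by choosing one of the generated wrapper
	// structs; this request asks a server to list its registered services
	// (the list_services payload is not checked by servers).
	req := &v1.ServerReflectionRequest{
		MessageRequest: &v1.ServerReflectionRequest_ListServices{ListServices: "*"},
	}
	switch req.MessageRequest.(type) {
	case *v1.ServerReflectionRequest_ListServices:
		fmt.Println("list_services set:", req.GetListServices())
	}
}
```
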
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go
new file mode 100644
index 0000000..f4a361c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go
@@ -0,0 +1,138 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection.  A more complete description of how
+// server reflection works can be found at
+// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc             v5.27.1
+// source: grpc/reflection/v1/reflection.proto
+
+package grpc_reflection_v1
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+	ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo"
+)
+
+// ServerReflectionClient is the client API for ServerReflection service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ServerReflectionClient interface {
+	// The reflection service is structured as a bidirectional stream, ensuring
+	// all related requests go to a single server.
+	ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error)
+}
+
+type serverReflectionClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient {
+	return &serverReflectionClient{cc}
+}
+
+func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream}
+	return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflectionServer is the server API for ServerReflection service.
+// All implementations should embed UnimplementedServerReflectionServer
+// for forward compatibility.
+type ServerReflectionServer interface {
+	// The reflection service is structured as a bidirectional stream, ensuring
+	// all related requests go to a single server.
+	ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error
+}
+
+// UnimplementedServerReflectionServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedServerReflectionServer struct{}
+
+func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error {
+	return status.Error(codes.Unimplemented, "method ServerReflectionInfo not implemented")
+}
+func (UnimplementedServerReflectionServer) testEmbeddedByValue() {}
+
+// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ServerReflectionServer will
+// result in compilation errors.
+type UnsafeServerReflectionServer interface {
+	mustEmbedUnimplementedServerReflectionServer()
+}
+
+func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) {
+	// If the following call panics, it indicates UnimplementedServerReflectionServer was
+	// embedded by pointer and is nil.  This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&ServerReflection_ServiceDesc, srv)
+}
+
+func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ServerReflection_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.reflection.v1.ServerReflection",
+	HandlerType: (*ServerReflectionServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "ServerReflectionInfo",
+			Handler:       _ServerReflection_ServerReflectionInfo_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "grpc/reflection/v1/reflection.proto",
+}
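
The generated client exposes the reflection service as a bidi stream: send one request, read one response, in lockstep. A sketch that lists a server's registered services, assuming `grpc.NewClient` is available and a reflection-enabled server at an illustrative address:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	v1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

func main() {
	conn, err := grpc.NewClient("127.0.0.1:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	defer conn.Close()

	stream, err := v1.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatalf("ServerReflectionInfo: %v", err)
	}
	// One request on the stream yields one matching response.
	if err := stream.Send(&v1.ServerReflectionRequest{
		MessageRequest: &v1.ServerReflectionRequest_ListServices{},
	}); err != nil {
		log.Fatalf("Send: %v", err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatalf("Recv: %v", err)
	}
	for _, svc := range resp.GetListServicesResponse().GetService() {
		fmt.Println(svc.GetName())
	}
}
```
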
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
index 0a12ad2..5253e86 100644
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
@@ -1,76 +1,158 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Service exported by server reflection
+
+// Warning: this entire file is deprecated. Use this instead:
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
 // Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_reflection_v1alpha/reflection.proto
+// versions:
+// 	protoc-gen-go v1.36.6
+// 	protoc        v5.27.1
+// grpc/reflection/v1alpha/reflection.proto is a deprecated file.
 
 package grpc_reflection_v1alpha
 
 import (
-	context "context"
-	fmt "fmt"
-	proto "github.com/golang/protobuf/proto"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-	math "math"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	reflect "reflect"
+	sync "sync"
+	unsafe "unsafe"
 )
 
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
 
 // The message sent by the client when calling ServerReflectionInfo method.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ServerReflectionRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
 	// To use reflection service, the client should set one of the following
 	// fields in message_request. The server distinguishes requests by their
 	// defined field and then handles them using corresponding methods.
 	//
 	// Types that are valid to be assigned to MessageRequest:
+	//
 	//	*ServerReflectionRequest_FileByFilename
 	//	*ServerReflectionRequest_FileContainingSymbol
 	//	*ServerReflectionRequest_FileContainingExtension
 	//	*ServerReflectionRequest_AllExtensionNumbersOfType
 	//	*ServerReflectionRequest_ListServices
-	MessageRequest       isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
-	XXX_NoUnkeyedLiteral struct{}                                 `json:"-"`
-	XXX_unrecognized     []byte                                   `json:"-"`
-	XXX_sizecache        int32                                    `json:"-"`
+	MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
+	unknownFields  protoimpl.UnknownFields
+	sizeCache      protoimpl.SizeCache
 }
 
-func (m *ServerReflectionRequest) Reset()         { *m = ServerReflectionRequest{} }
-func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) }
-func (*ServerReflectionRequest) ProtoMessage()    {}
+func (x *ServerReflectionRequest) Reset() {
+	*x = ServerReflectionRequest{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionRequest) ProtoMessage() {}
+
+func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead.
 func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{0}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0}
 }
 
-func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b)
-}
-func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic)
-}
-func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerReflectionRequest.Merge(m, src)
-}
-func (m *ServerReflectionRequest) XXX_Size() int {
-	return xxx_messageInfo_ServerReflectionRequest.Size(m)
-}
-func (m *ServerReflectionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m)
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetHost() string {
+	if x != nil {
+		return x.Host
+	}
+	return ""
 }
 
-var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo
+func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
+	if x != nil {
+		return x.MessageRequest
+	}
+	return nil
+}
 
-func (m *ServerReflectionRequest) GetHost() string {
-	if m != nil {
-		return m.Host
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetFileByFilename() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok {
+			return x.FileByFilename
+		}
+	}
+	return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetFileContainingSymbol() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok {
+			return x.FileContainingSymbol
+		}
+	}
+	return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok {
+			return x.FileContainingExtension
+		}
+	}
+	return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
+			return x.AllExtensionNumbersOfType
+		}
+	}
+	return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetListServices() string {
+	if x != nil {
+		if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok {
+			return x.ListServices
+		}
 	}
 	return ""
 }
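
A minimal usage sketch of the oneof plumbing regenerated above (the function, host, and symbol below are illustrative, not part of the vendored file): assigning one wrapper type to MessageRequest makes its typed getter return the value, while the getters for the other cases fall through to their zero values.

func exampleOneofAccessors() {
	req := &ServerReflectionRequest{
		Host: "localhost:50051",
		MessageRequest: &ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: "grpc.health.v1.Health",
		},
	}
	_ = req.GetFileContainingSymbol() // "grpc.health.v1.Health"
	_ = req.GetFileByFilename()       // "" because a different oneof case is set
}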
@@ -80,22 +162,48 @@
 }
 
 type ServerReflectionRequest_FileByFilename struct {
+	// Find a proto file by the file name.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
 }
 
 type ServerReflectionRequest_FileContainingSymbol struct {
+	// Find the proto file that declares the given fully-qualified symbol name.
+	// This field should be a fully-qualified symbol name
+	// (e.g. <package>.<service>[.<method>] or <package>.<type>).
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
 }
 
 type ServerReflectionRequest_FileContainingExtension struct {
+	// Find the proto file which defines an extension extending the given
+	// message type with the given field number.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
 }
 
 type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+	// Finds the tag numbers used by all known extensions of extendee_type, and
+	// appends them to ExtensionNumberResponse in an undefined order.
+	// Its corresponding method is best-effort: it's not guaranteed that the
+	// reflection service will implement this method, and it's not guaranteed
+	// that this method will provide all extensions. Returns
+	// StatusCode::UNIMPLEMENTED if it's not implemented.
+	// This field should be a fully-qualified type name. The format is
+	// <package>.<type>
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
 }
 
 type ServerReflectionRequest_ListServices struct {
+	// List the full names of registered services. The content will not be
+	// checked.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
 }
 
@@ -105,166 +213,185 @@
 
 func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
 
-func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {}
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
 
 func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
 
-func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
-	if m != nil {
-		return m.MessageRequest
-	}
-	return nil
-}
-
-func (m *ServerReflectionRequest) GetFileByFilename() string {
-	if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok {
-		return x.FileByFilename
-	}
-	return ""
-}
-
-func (m *ServerReflectionRequest) GetFileContainingSymbol() string {
-	if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok {
-		return x.FileContainingSymbol
-	}
-	return ""
-}
-
-func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
-	if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok {
-		return x.FileContainingExtension
-	}
-	return nil
-}
-
-func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
-	if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
-		return x.AllExtensionNumbersOfType
-	}
-	return ""
-}
-
-func (m *ServerReflectionRequest) GetListServices() string {
-	if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok {
-		return x.ListServices
-	}
-	return ""
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*ServerReflectionRequest_FileByFilename)(nil),
-		(*ServerReflectionRequest_FileContainingSymbol)(nil),
-		(*ServerReflectionRequest_FileContainingExtension)(nil),
-		(*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
-		(*ServerReflectionRequest_ListServices)(nil),
-	}
-}
-
 // The type name and extension number sent by the client when requesting
 // file_containing_extension.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ExtensionRequest struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Fully-qualified type name. The format should be <package>.<type>
-	ContainingType       string   `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
-	ExtensionNumber      int32    `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
-func (m *ExtensionRequest) Reset()         { *m = ExtensionRequest{} }
-func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRequest) ProtoMessage()    {}
+func (x *ExtensionRequest) Reset() {
+	*x = ExtensionRequest{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionRequest) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
 func (*ExtensionRequest) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{1}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1}
 }
 
-func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b)
-}
-func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic)
-}
-func (m *ExtensionRequest) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionRequest.Merge(m, src)
-}
-func (m *ExtensionRequest) XXX_Size() int {
-	return xxx_messageInfo_ExtensionRequest.Size(m)
-}
-func (m *ExtensionRequest) XXX_DiscardUnknown() {
-	xxx_messageInfo_ExtensionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo
-
-func (m *ExtensionRequest) GetContainingType() string {
-	if m != nil {
-		return m.ContainingType
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionRequest) GetContainingType() string {
+	if x != nil {
+		return x.ContainingType
 	}
 	return ""
 }
 
-func (m *ExtensionRequest) GetExtensionNumber() int32 {
-	if m != nil {
-		return m.ExtensionNumber
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+	if x != nil {
+		return x.ExtensionNumber
 	}
 	return 0
 }
 
 // The message sent by the server to answer ServerReflectionInfo method.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ServerReflectionResponse struct {
-	ValidHost       string                   `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+	state protoimpl.MessageState `protogen:"open.v1"`
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
-	// The server sets one of the following fields according to the
-	// message_request in the request.
+	// The server sets one of the following fields according to the message_request
+	// in the request.
 	//
 	// Types that are valid to be assigned to MessageResponse:
+	//
 	//	*ServerReflectionResponse_FileDescriptorResponse
 	//	*ServerReflectionResponse_AllExtensionNumbersResponse
 	//	*ServerReflectionResponse_ListServicesResponse
 	//	*ServerReflectionResponse_ErrorResponse
-	MessageResponse      isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
-	XXX_NoUnkeyedLiteral struct{}                                   `json:"-"`
-	XXX_unrecognized     []byte                                     `json:"-"`
-	XXX_sizecache        int32                                      `json:"-"`
+	MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
-func (m *ServerReflectionResponse) Reset()         { *m = ServerReflectionResponse{} }
-func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) }
-func (*ServerReflectionResponse) ProtoMessage()    {}
+func (x *ServerReflectionResponse) Reset() {
+	*x = ServerReflectionResponse{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionResponse) ProtoMessage() {}
+
+func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead.
 func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{2}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2}
 }
 
-func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b)
-}
-func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic)
-}
-func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServerReflectionResponse.Merge(m, src)
-}
-func (m *ServerReflectionResponse) XXX_Size() int {
-	return xxx_messageInfo_ServerReflectionResponse.Size(m)
-}
-func (m *ServerReflectionResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo
-
-func (m *ServerReflectionResponse) GetValidHost() string {
-	if m != nil {
-		return m.ValidHost
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetValidHost() string {
+	if x != nil {
+		return x.ValidHost
 	}
 	return ""
 }
 
-func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
-	if m != nil {
-		return m.OriginalRequest
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
+	if x != nil {
+		return x.OriginalRequest
+	}
+	return nil
+}
+
+func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
+	if x != nil {
+		return x.MessageResponse
+	}
+	return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok {
+			return x.FileDescriptorResponse
+		}
+	}
+	return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
+			return x.AllExtensionNumbersResponse
+		}
+	}
+	return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok {
+			return x.ListServicesResponse
+		}
+	}
+	return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
+	if x != nil {
+		if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok {
+			return x.ErrorResponse
+		}
 	}
 	return nil
 }
@@ -274,22 +401,40 @@
 }
 
 type ServerReflectionResponse_FileDescriptorResponse struct {
+	// This message is used to answer file_by_filename, file_containing_symbol,
+	// file_containing_extension requests with transitive dependencies. As
+	// the repeated label is not allowed in oneof fields, we use a
+	// FileDescriptorResponse message to encapsulate the repeated fields.
+	// The reflection service is allowed to avoid sending FileDescriptorProtos
+	// that were previously sent in response to earlier requests in the stream.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
 }
 
 type ServerReflectionResponse_AllExtensionNumbersResponse struct {
+	// This message is used to answer all_extension_numbers_of_type request.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
 }
 
 type ServerReflectionResponse_ListServicesResponse struct {
+	// This message is used to answer list_services request.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
 }
 
 type ServerReflectionResponse_ErrorResponse struct {
+	// This message is used when an error occurs.
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 	ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
 }
 
-func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {}
+func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {
+}
 
 func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
 }
@@ -298,453 +443,405 @@
 
 func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
 
-func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
-	if m != nil {
-		return m.MessageResponse
-	}
-	return nil
-}
-
-func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
-	if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok {
-		return x.FileDescriptorResponse
-	}
-	return nil
-}
-
-func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
-	if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
-		return x.AllExtensionNumbersResponse
-	}
-	return nil
-}
-
-func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
-	if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok {
-		return x.ListServicesResponse
-	}
-	return nil
-}
-
-func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
-	if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok {
-		return x.ErrorResponse
-	}
-	return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} {
-	return []interface{}{
-		(*ServerReflectionResponse_FileDescriptorResponse)(nil),
-		(*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
-		(*ServerReflectionResponse_ListServicesResponse)(nil),
-		(*ServerReflectionResponse_ErrorResponse)(nil),
-	}
-}
-
 // Serialized FileDescriptorProto messages sent by the server answering
 // a file_by_filename, file_containing_symbol, or file_containing_extension
 // request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type FileDescriptorResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Serialized FileDescriptorProto messages. We avoid taking a dependency on
 	// descriptor.proto, which uses proto2 only features, by making them opaque
 	// bytes instead.
-	FileDescriptorProto  [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
+	unknownFields       protoimpl.UnknownFields
+	sizeCache           protoimpl.SizeCache
 }
 
-func (m *FileDescriptorResponse) Reset()         { *m = FileDescriptorResponse{} }
-func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorResponse) ProtoMessage()    {}
+func (x *FileDescriptorResponse) Reset() {
+	*x = FileDescriptorResponse{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FileDescriptorResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorResponse) ProtoMessage() {}
+
+func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead.
 func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{3}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3}
 }
 
-func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b)
-}
-func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_FileDescriptorResponse.Merge(m, src)
-}
-func (m *FileDescriptorResponse) XXX_Size() int {
-	return xxx_messageInfo_FileDescriptorResponse.Size(m)
-}
-func (m *FileDescriptorResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo
-
-func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
-	if m != nil {
-		return m.FileDescriptorProto
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
+	if x != nil {
+		return x.FileDescriptorProto
 	}
 	return nil
 }
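
Because the field above is deliberately opaque bytes, consumers decode each element themselves. A sketch, assuming the google.golang.org/protobuf/proto and google.golang.org/protobuf/types/descriptorpb packages are imported (the helper name is hypothetical):

func decodeFileDescriptors(resp *FileDescriptorResponse) ([]*descriptorpb.FileDescriptorProto, error) {
	out := make([]*descriptorpb.FileDescriptorProto, 0, len(resp.GetFileDescriptorProto()))
	for _, raw := range resp.GetFileDescriptorProto() {
		fd := &descriptorpb.FileDescriptorProto{}
		if err := proto.Unmarshal(raw, fd); err != nil {
			return nil, err
		}
		out = append(out, fd)
	}
	return out, nil
}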
 
 // A list of extension numbers sent by the server answering
 // all_extension_numbers_of_type request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ExtensionNumberResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Full name of the base type, including the package name. The format
 	// is <package>.<type>
-	BaseTypeName         string   `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
-	ExtensionNumber      []int32  `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+	unknownFields   protoimpl.UnknownFields
+	sizeCache       protoimpl.SizeCache
 }
 
-func (m *ExtensionNumberResponse) Reset()         { *m = ExtensionNumberResponse{} }
-func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) }
-func (*ExtensionNumberResponse) ProtoMessage()    {}
+func (x *ExtensionNumberResponse) Reset() {
+	*x = ExtensionNumberResponse{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionNumberResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
 func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{4}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4}
 }
 
-func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b)
-}
-func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic)
-}
-func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ExtensionNumberResponse.Merge(m, src)
-}
-func (m *ExtensionNumberResponse) XXX_Size() int {
-	return xxx_messageInfo_ExtensionNumberResponse.Size(m)
-}
-func (m *ExtensionNumberResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo
-
-func (m *ExtensionNumberResponse) GetBaseTypeName() string {
-	if m != nil {
-		return m.BaseTypeName
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+	if x != nil {
+		return x.BaseTypeName
 	}
 	return ""
 }
 
-func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 {
-	if m != nil {
-		return m.ExtensionNumber
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+	if x != nil {
+		return x.ExtensionNumber
 	}
 	return nil
 }
 
 // A list of ServiceResponse sent by the server answering list_services request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ListServiceResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// The information of each service may be expanded in the future, so we use
 	// ServiceResponse message to encapsulate it.
-	Service              []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
-	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
-	XXX_unrecognized     []byte             `json:"-"`
-	XXX_sizecache        int32              `json:"-"`
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	Service       []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *ListServiceResponse) Reset()         { *m = ListServiceResponse{} }
-func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ListServiceResponse) ProtoMessage()    {}
+func (x *ListServiceResponse) Reset() {
+	*x = ListServiceResponse{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
 func (*ListServiceResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{5}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5}
 }
 
-func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b)
-}
-func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ListServiceResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ListServiceResponse.Merge(m, src)
-}
-func (m *ListServiceResponse) XXX_Size() int {
-	return xxx_messageInfo_ListServiceResponse.Size(m)
-}
-func (m *ListServiceResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_ListServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo
-
-func (m *ListServiceResponse) GetService() []*ServiceResponse {
-	if m != nil {
-		return m.Service
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+	if x != nil {
+		return x.Service
 	}
 	return nil
 }
 
 // The information of a single service used by ListServiceResponse to answer
 // list_services request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ServiceResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// Full name of a registered service, including its package name. The format
 	// is <package>.<service>
-	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	Name          string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *ServiceResponse) Reset()         { *m = ServiceResponse{} }
-func (m *ServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ServiceResponse) ProtoMessage()    {}
+func (x *ServiceResponse) Reset() {
+	*x = ServiceResponse{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceResponse) ProtoMessage() {}
+
+func (x *ServiceResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead.
 func (*ServiceResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{6}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6}
 }
 
-func (m *ServiceResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ServiceResponse.Unmarshal(m, b)
-}
-func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ServiceResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ServiceResponse.Merge(m, src)
-}
-func (m *ServiceResponse) XXX_Size() int {
-	return xxx_messageInfo_ServiceResponse.Size(m)
-}
-func (m *ServiceResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_ServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo
-
-func (m *ServiceResponse) GetName() string {
-	if m != nil {
-		return m.Name
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServiceResponse) GetName() string {
+	if x != nil {
+		return x.Name
 	}
 	return ""
 }
 
 // The error code and error message sent by the server when an error occurs.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
 type ErrorResponse struct {
+	state protoimpl.MessageState `protogen:"open.v1"`
 	// This field uses the error codes defined in grpc::StatusCode.
-	ErrorCode            int32    `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
-	ErrorMessage         string   `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
-	XXX_NoUnkeyedLiteral struct{} `json:"-"`
-	XXX_unrecognized     []byte   `json:"-"`
-	XXX_sizecache        int32    `json:"-"`
+	//
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
+	// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+	ErrorMessage  string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
 }
 
-func (m *ErrorResponse) Reset()         { *m = ErrorResponse{} }
-func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
-func (*ErrorResponse) ProtoMessage()    {}
+func (x *ErrorResponse) Reset() {
+	*x = ErrorResponse{}
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorResponse) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorResponse) ProtoMessage() {}
+
+func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
 func (*ErrorResponse) Descriptor() ([]byte, []int) {
-	return fileDescriptor_42a8ac412db3cb03, []int{7}
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7}
 }
 
-func (m *ErrorResponse) XXX_Unmarshal(b []byte) error {
-	return xxx_messageInfo_ErrorResponse.Unmarshal(m, b)
-}
-func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
-	return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic)
-}
-func (m *ErrorResponse) XXX_Merge(src proto.Message) {
-	xxx_messageInfo_ErrorResponse.Merge(m, src)
-}
-func (m *ErrorResponse) XXX_Size() int {
-	return xxx_messageInfo_ErrorResponse.Size(m)
-}
-func (m *ErrorResponse) XXX_DiscardUnknown() {
-	xxx_messageInfo_ErrorResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo
-
-func (m *ErrorResponse) GetErrorCode() int32 {
-	if m != nil {
-		return m.ErrorCode
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ErrorResponse) GetErrorCode() int32 {
+	if x != nil {
+		return x.ErrorCode
 	}
 	return 0
 }
 
-func (m *ErrorResponse) GetErrorMessage() string {
-	if m != nil {
-		return m.ErrorMessage
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ErrorResponse) GetErrorMessage() string {
+	if x != nil {
+		return x.ErrorMessage
 	}
 	return ""
 }
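
Since error_code carries grpc::StatusCode values, a caller can lift an ErrorResponse into an ordinary Go error. A hypothetical helper, assuming the google.golang.org/grpc/codes and google.golang.org/grpc/status packages:

func asStatusError(e *ErrorResponse) error {
	if e == nil {
		return nil
	}
	return status.Error(codes.Code(e.GetErrorCode()), e.GetErrorMessage())
}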
 
-func init() {
-	proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest")
-	proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest")
-	proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse")
-	proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse")
-	proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse")
-	proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse")
-	proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse")
-	proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse")
+var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor
+
+const file_grpc_reflection_v1alpha_reflection_proto_rawDesc = "" +
+	"\n" +
+	"(grpc/reflection/v1alpha/reflection.proto\x12\x17grpc.reflection.v1alpha\"\xf8\x02\n" +
+	"\x17ServerReflectionRequest\x12\x12\n" +
+	"\x04host\x18\x01 \x01(\tR\x04host\x12*\n" +
+	"\x10file_by_filename\x18\x03 \x01(\tH\x00R\x0efileByFilename\x126\n" +
+	"\x16file_containing_symbol\x18\x04 \x01(\tH\x00R\x14fileContainingSymbol\x12g\n" +
+	"\x19file_containing_extension\x18\x05 \x01(\v2).grpc.reflection.v1alpha.ExtensionRequestH\x00R\x17fileContainingExtension\x12B\n" +
+	"\x1dall_extension_numbers_of_type\x18\x06 \x01(\tH\x00R\x19allExtensionNumbersOfType\x12%\n" +
+	"\rlist_services\x18\a \x01(\tH\x00R\flistServicesB\x11\n" +
+	"\x0fmessage_request\"f\n" +
+	"\x10ExtensionRequest\x12'\n" +
+	"\x0fcontaining_type\x18\x01 \x01(\tR\x0econtainingType\x12)\n" +
+	"\x10extension_number\x18\x02 \x01(\x05R\x0fextensionNumber\"\xc7\x04\n" +
+	"\x18ServerReflectionResponse\x12\x1d\n" +
+	"\n" +
+	"valid_host\x18\x01 \x01(\tR\tvalidHost\x12[\n" +
+	"\x10original_request\x18\x02 \x01(\v20.grpc.reflection.v1alpha.ServerReflectionRequestR\x0foriginalRequest\x12k\n" +
+	"\x18file_descriptor_response\x18\x04 \x01(\v2/.grpc.reflection.v1alpha.FileDescriptorResponseH\x00R\x16fileDescriptorResponse\x12w\n" +
+	"\x1eall_extension_numbers_response\x18\x05 \x01(\v20.grpc.reflection.v1alpha.ExtensionNumberResponseH\x00R\x1ballExtensionNumbersResponse\x12d\n" +
+	"\x16list_services_response\x18\x06 \x01(\v2,.grpc.reflection.v1alpha.ListServiceResponseH\x00R\x14listServicesResponse\x12O\n" +
+	"\x0eerror_response\x18\a \x01(\v2&.grpc.reflection.v1alpha.ErrorResponseH\x00R\rerrorResponseB\x12\n" +
+	"\x10message_response\"L\n" +
+	"\x16FileDescriptorResponse\x122\n" +
+	"\x15file_descriptor_proto\x18\x01 \x03(\fR\x13fileDescriptorProto\"j\n" +
+	"\x17ExtensionNumberResponse\x12$\n" +
+	"\x0ebase_type_name\x18\x01 \x01(\tR\fbaseTypeName\x12)\n" +
+	"\x10extension_number\x18\x02 \x03(\x05R\x0fextensionNumber\"Y\n" +
+	"\x13ListServiceResponse\x12B\n" +
+	"\aservice\x18\x01 \x03(\v2(.grpc.reflection.v1alpha.ServiceResponseR\aservice\"%\n" +
+	"\x0fServiceResponse\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\"S\n" +
+	"\rErrorResponse\x12\x1d\n" +
+	"\n" +
+	"error_code\x18\x01 \x01(\x05R\terrorCode\x12#\n" +
+	"\rerror_message\x18\x02 \x01(\tR\ferrorMessage2\x93\x01\n" +
+	"\x10ServerReflection\x12\x7f\n" +
+	"\x14ServerReflectionInfo\x120.grpc.reflection.v1alpha.ServerReflectionRequest\x1a1.grpc.reflection.v1alpha.ServerReflectionResponse(\x010\x01Bs\n" +
+	"\x1aio.grpc.reflection.v1alphaB\x15ServerReflectionProtoP\x01Z9google.golang.org/grpc/reflection/grpc_reflection_v1alpha\xb8\x01\x01b\x06proto3"
+
+var (
+	file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once
+	file_grpc_reflection_v1alpha_reflection_proto_rawDescData []byte
+)
+
+func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte {
+	file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() {
+		file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc)))
+	})
+	return file_grpc_reflection_v1alpha_reflection_proto_rawDescData
 }
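
rawDescGZIP above is the standard once-guarded lazy-initialization idiom: the compression runs at most once no matter how many goroutines hit Descriptor() concurrently. The same shape with illustrative names, using the standard library's compress/gzip where the generated code calls protoimpl.X.CompressGZIP (imports assumed: bytes, compress/gzip, sync):

var (
	gzipOnce sync.Once
	gzipped  []byte
)

func lazyGZIP(raw []byte) []byte {
	gzipOnce.Do(func() { // runs exactly once; racing callers block until it returns
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		zw.Write(raw) // error handling elided in this sketch
		zw.Close()
		gzipped = buf.Bytes()
	})
	return gzipped
}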
 
-func init() {
-	proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03)
+var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []any{
+	(*ServerReflectionRequest)(nil),  // 0: grpc.reflection.v1alpha.ServerReflectionRequest
+	(*ExtensionRequest)(nil),         // 1: grpc.reflection.v1alpha.ExtensionRequest
+	(*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse
+	(*FileDescriptorResponse)(nil),   // 3: grpc.reflection.v1alpha.FileDescriptorResponse
+	(*ExtensionNumberResponse)(nil),  // 4: grpc.reflection.v1alpha.ExtensionNumberResponse
+	(*ListServiceResponse)(nil),      // 5: grpc.reflection.v1alpha.ListServiceResponse
+	(*ServiceResponse)(nil),          // 6: grpc.reflection.v1alpha.ServiceResponse
+	(*ErrorResponse)(nil),            // 7: grpc.reflection.v1alpha.ErrorResponse
+}
+var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{
+	1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest
+	0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest
+	3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse
+	4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse
+	5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse
+	7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse
+	6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse
+	0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest
+	2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse
+	8, // [8:9] is the sub-list for method output_type
+	7, // [7:8] is the sub-list for method input_type
+	7, // [7:7] is the sub-list for extension type_name
+	7, // [7:7] is the sub-list for extension extendee
+	0, // [0:7] is the sub-list for field type_name
 }
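
The five trailing entries of depIdxs are offsets that partition the head of the slice, as the inline comments say. Read back with a local copy (illustrative only; the package-level slice is released at the end of init):

func exampleDepIdxSubLists() {
	idxs := []int32{
		1, 0, 3, 4, 5, 7, 6, // entries 0..6: field type_name edges
		0,                   // entry 7: method input_type
		2,                   // entry 8: method output_type
		8, 7, 7, 7, 0,       // tail: start offsets of each sub-list
	}
	_ = idxs[0:7] // field type_name sub-list
	_ = idxs[7:8] // method input_type sub-list
	_ = idxs[8:9] // method output_type sub-list
}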
 
-var fileDescriptor_42a8ac412db3cb03 = []byte{
-	// 656 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40,
-	0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75,
-	0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e,
-	0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e,
-	0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7,
-	0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7,
-	0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e,
-	0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94,
-	0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43,
-	0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36,
-	0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d,
-	0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3,
-	0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd,
-	0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde,
-	0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd,
-	0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc,
-	0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8,
-	0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f,
-	0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33,
-	0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb,
-	0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58,
-	0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a,
-	0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce,
-	0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e,
-	0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25,
-	0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d,
-	0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8,
-	0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62,
-	0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77,
-	0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97,
-	0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0,
-	0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39,
-	0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2,
-	0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad,
-	0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29,
-	0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0,
-	0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73,
-	0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d,
-	0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6,
-	0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5,
-	0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// ServerReflectionClient is the client API for ServerReflection service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ServerReflectionClient interface {
-	// The reflection service is structured as a bidirectional stream, ensuring
-	// all related requests go to a single server.
-	ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error)
-}
-
-type serverReflectionClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient {
-	return &serverReflectionClient{cc}
-}
-
-func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) {
-	stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...)
-	if err != nil {
-		return nil, err
+func init() { file_grpc_reflection_v1alpha_reflection_proto_init() }
+func file_grpc_reflection_v1alpha_reflection_proto_init() {
+	if File_grpc_reflection_v1alpha_reflection_proto != nil {
+		return
 	}
-	x := &serverReflectionServerReflectionInfoClient{stream}
-	return x, nil
-}
-
-type ServerReflection_ServerReflectionInfoClient interface {
-	Send(*ServerReflectionRequest) error
-	Recv() (*ServerReflectionResponse, error)
-	grpc.ClientStream
-}
-
-type serverReflectionServerReflectionInfoClient struct {
-	grpc.ClientStream
-}
-
-func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error {
-	return x.ClientStream.SendMsg(m)
-}
-
-func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) {
-	m := new(ServerReflectionResponse)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
+	file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{
+		(*ServerReflectionRequest_FileByFilename)(nil),
+		(*ServerReflectionRequest_FileContainingSymbol)(nil),
+		(*ServerReflectionRequest_FileContainingExtension)(nil),
+		(*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
+		(*ServerReflectionRequest_ListServices)(nil),
 	}
-	return m, nil
-}
-
-// ServerReflectionServer is the server API for ServerReflection service.
-type ServerReflectionServer interface {
-	// The reflection service is structured as a bidirectional stream, ensuring
-	// all related requests go to a single server.
-	ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error
-}
-
-// UnimplementedServerReflectionServer can be embedded to have forward compatible implementations.
-type UnimplementedServerReflectionServer struct {
-}
-
-func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error {
-	return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented")
-}
-
-func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) {
-	s.RegisterService(&_ServerReflection_serviceDesc, srv)
-}
-
-func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
-	return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream})
-}
-
-type ServerReflection_ServerReflectionInfoServer interface {
-	Send(*ServerReflectionResponse) error
-	Recv() (*ServerReflectionRequest, error)
-	grpc.ServerStream
-}
-
-type serverReflectionServerReflectionInfoServer struct {
-	grpc.ServerStream
-}
-
-func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) {
-	m := new(ServerReflectionRequest)
-	if err := x.ServerStream.RecvMsg(m); err != nil {
-		return nil, err
+	file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []any{
+		(*ServerReflectionResponse_FileDescriptorResponse)(nil),
+		(*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
+		(*ServerReflectionResponse_ListServicesResponse)(nil),
+		(*ServerReflectionResponse_ErrorResponse)(nil),
 	}
-	return m, nil
-}
-
-var _ServerReflection_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "grpc.reflection.v1alpha.ServerReflection",
-	HandlerType: (*ServerReflectionServer)(nil),
-	Methods:     []grpc.MethodDesc{},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "ServerReflectionInfo",
-			Handler:       _ServerReflection_ServerReflectionInfo_Handler,
-			ServerStreams: true,
-			ClientStreams: true,
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc)),
+			NumEnums:      0,
+			NumMessages:   8,
+			NumExtensions: 0,
+			NumServices:   1,
 		},
-	},
-	Metadata: "grpc_reflection_v1alpha/reflection.proto",
+		GoTypes:           file_grpc_reflection_v1alpha_reflection_proto_goTypes,
+		DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs,
+		MessageInfos:      file_grpc_reflection_v1alpha_reflection_proto_msgTypes,
+	}.Build()
+	File_grpc_reflection_v1alpha_reflection_proto = out.File
+	file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil
+	file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil
 }
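The OneofWrappers registrations above are what allow the message_request and message_response oneofs to be populated from Go code through per-case wrapper structs. A minimal sketch of that usage, assuming only the vendored grpc_reflection_v1alpha package:

```go
package main

import (
	"fmt"

	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	// Each case of the message_request oneof has its own single-field
	// wrapper struct, registered via OneofWrappers in the init above.
	req := &rpb.ServerReflectionRequest{
		Host: "localhost",
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{
			ListServices: "*",
		},
	}
	fmt.Println(req.GetListServices()) // prints "*"
}
```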
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
deleted file mode 100644
index 99b00df..0000000
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2016 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Service exported by server reflection
-
-syntax = "proto3";
-
-package grpc.reflection.v1alpha;
-
-service ServerReflection {
-  // The reflection service is structured as a bidirectional stream, ensuring
-  // all related requests go to a single server.
-  rpc ServerReflectionInfo(stream ServerReflectionRequest)
-      returns (stream ServerReflectionResponse);
-}
-
-// The message sent by the client when calling ServerReflectionInfo method.
-message ServerReflectionRequest {
-  string host = 1;
-  // To use reflection service, the client should set one of the following
-  // fields in message_request. The server distinguishes requests by their
-  // defined field and then handles them using corresponding methods.
-  oneof message_request {
-    // Find a proto file by the file name.
-    string file_by_filename = 3;
-
-    // Find the proto file that declares the given fully-qualified symbol name.
-    // This field should be a fully-qualified symbol name
-    // (e.g. <package>.<service>[.<method>] or <package>.<type>).
-    string file_containing_symbol = 4;
-
-    // Find the proto file which defines an extension extending the given
-    // message type with the given field number.
-    ExtensionRequest file_containing_extension = 5;
-
-    // Finds the tag numbers used by all known extensions of extendee_type, and
-    // appends them to ExtensionNumberResponse in an undefined order.
-    // Its corresponding method is best-effort: it's not guaranteed that the
-    // reflection service will implement this method, and it's not guaranteed
-    // that this method will provide all extensions. Returns
-    // StatusCode::UNIMPLEMENTED if it's not implemented.
-    // This field should be a fully-qualified type name. The format is
-    // <package>.<type>
-    string all_extension_numbers_of_type = 6;
-
-    // List the full names of registered services. The content will not be
-    // checked.
-    string list_services = 7;
-  }
-}
-
-// The type name and extension number sent by the client when requesting
-// file_containing_extension.
-message ExtensionRequest {
-  // Fully-qualified type name. The format should be <package>.<type>
-  string containing_type = 1;
-  int32 extension_number = 2;
-}
-
-// The message sent by the server to answer ServerReflectionInfo method.
-message ServerReflectionResponse {
-  string valid_host = 1;
-  ServerReflectionRequest original_request = 2;
-  // The server sets one of the following fields according to the
-  // message_request in the request.
-  oneof message_response {
-    // This message is used to answer file_by_filename, file_containing_symbol,
-    // file_containing_extension requests with transitive dependencies.
-    // As the repeated label is not allowed in oneof fields, we use a
-    // FileDescriptorResponse message to encapsulate the repeated fields.
-    // The reflection service is allowed to avoid sending FileDescriptorProtos
-    // that were previously sent in response to earlier requests in the stream.
-    FileDescriptorResponse file_descriptor_response = 4;
-
-    // This message is used to answer all_extension_numbers_of_type requests.
-    ExtensionNumberResponse all_extension_numbers_response = 5;
-
-    // This message is used to answer list_services requests.
-    ListServiceResponse list_services_response = 6;
-
-    // This message is used when an error occurs.
-    ErrorResponse error_response = 7;
-  }
-}
-
-// Serialized FileDescriptorProto messages sent by the server answering
-// a file_by_filename, file_containing_symbol, or file_containing_extension
-// request.
-message FileDescriptorResponse {
-  // Serialized FileDescriptorProto messages. We avoid taking a dependency on
-  // descriptor.proto, which uses proto2 only features, by making them opaque
-  // bytes instead.
-  repeated bytes file_descriptor_proto = 1;
-}
-
-// A list of extension numbers sent by the server answering
-// all_extension_numbers_of_type request.
-message ExtensionNumberResponse {
-  // Full name of the base type, including the package name. The format
-  // is <package>.<type>
-  string base_type_name = 1;
-  repeated int32 extension_number = 2;
-}
-
-// A list of ServiceResponse sent by the server answering list_services request.
-message ListServiceResponse {
-  // The information of each service may be expanded in the future, so we use
-  // ServiceResponse message to encapsulate it.
-  repeated ServiceResponse service = 1;
-}
-
-// The information of a single service used by ListServiceResponse to answer
-// list_services request.
-message ServiceResponse {
-  // Full name of a registered service, including its package name. The format
-  // is <package>.<service>
-  string name = 1;
-}
-
-// The error code and error message sent by the server when an error occurs.
-message ErrorResponse {
-  // This field uses the error codes defined in grpc::StatusCode.
-  int32 error_code = 1;
-  string error_message = 2;
-}
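The deleted file above is the v1alpha protocol contract; the regenerated bindings in the next file still expose it. A hedged client-side sketch that lists services over the bidirectional stream (the localhost:50051 target and plaintext credentials are illustrative assumptions):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	stream, err := rpb.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// One request per message; the server answers each on the same stream.
	if err := stream.Send(&rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{ListServices: "*"},
	}); err != nil {
		log.Fatal(err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatal(err)
	}
	for _, svc := range resp.GetListServicesResponse().GetService() {
		log.Println(svc.GetName())
	}
}
```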
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
new file mode 100644
index 0000000..0a43b52
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Service exported by server reflection
+
+// Warning: this entire file is deprecated. Use this instead:
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc             v5.27.1
+// grpc/reflection/v1alpha/reflection.proto is a deprecated file.
+
+package grpc_reflection_v1alpha
+
+import (
+	context "context"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+	ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo"
+)
+
+// ServerReflectionClient is the client API for ServerReflection service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ServerReflectionClient interface {
+	// The reflection service is structured as a bidirectional stream, ensuring
+	// all related requests go to a single server.
+	ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error)
+}
+
+type serverReflectionClient struct {
+	cc grpc.ClientConnInterface
+}
+
+func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient {
+	return &serverReflectionClient{cc}
+}
+
+func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream}
+	return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflectionServer is the server API for ServerReflection service.
+// All implementations should embed UnimplementedServerReflectionServer
+// for forward compatibility.
+type ServerReflectionServer interface {
+	// The reflection service is structured as a bidirectional stream, ensuring
+	// all related requests go to a single server.
+	ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error
+}
+
+// UnimplementedServerReflectionServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedServerReflectionServer struct{}
+
+func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error {
+	return status.Error(codes.Unimplemented, "method ServerReflectionInfo not implemented")
+}
+func (UnimplementedServerReflectionServer) testEmbeddedByValue() {}
+
+// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ServerReflectionServer will
+// result in compilation errors.
+type UnsafeServerReflectionServer interface {
+	mustEmbedUnimplementedServerReflectionServer()
+}
+
+func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) {
+	// If the following call panics, it indicates UnimplementedServerReflectionServer was
+	// embedded by pointer and is nil.  This will cause panics if an
+	// unimplemented method is ever invoked, so we test this at initialization
+	// time to prevent it from happening at runtime later due to I/O.
+	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+		t.testEmbeddedByValue()
+	}
+	s.RegisterService(&ServerReflection_ServiceDesc, srv)
+}
+
+func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ServerReflection_ServiceDesc = grpc.ServiceDesc{
+	ServiceName: "grpc.reflection.v1alpha.ServerReflection",
+	HandlerType: (*ServerReflectionServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "ServerReflectionInfo",
+			Handler:       _ServerReflection_ServerReflectionInfo_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "grpc/reflection/v1alpha/reflection.proto",
+}
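On the server side, the NOTE above about embedding UnimplementedServerReflectionServer by value is load-bearing: RegisterServerReflectionServer probes testEmbeddedByValue at registration time. A toy sketch under that constraint (echoReflection is a hypothetical type, not part of the patch):

```go
package main

import (
	"io"

	"google.golang.org/grpc"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// echoReflection embeds the Unimplemented struct by value, which is what the
// testEmbeddedByValue probe in RegisterServerReflectionServer checks for.
type echoReflection struct {
	rpb.UnimplementedServerReflectionServer
}

func (echoReflection) ServerReflectionInfo(stream grpc.BidiStreamingServer[rpb.ServerReflectionRequest, rpb.ServerReflectionResponse]) error {
	for {
		in, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		// Echo the request back; a real handler would answer each oneof case.
		if err := stream.Send(&rpb.ServerReflectionResponse{
			ValidHost:       in.GetHost(),
			OriginalRequest: in,
		}); err != nil {
			return err
		}
	}
}

func main() {
	s := grpc.NewServer()
	rpb.RegisterServerReflectionServer(s, echoReflection{})
	// s.Serve(lis) omitted in this sketch.
}
```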
diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go
new file mode 100644
index 0000000..902fc6d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/internal/internal.go
@@ -0,0 +1,436 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code that is shared by both the reflection
+// package and the test package. The packages are split this way in order to
+// avoid a dependency on the deprecated github.com/golang/protobuf package.
+package internal
+
+import (
+	"io"
+	"sort"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+// ServiceInfoProvider is an interface used to retrieve metadata about the
+// services to expose.
+type ServiceInfoProvider interface {
+	GetServiceInfo() map[string]grpc.ServiceInfo
+}
+
+// ExtensionResolver is the interface used to query details about extensions.
+// This interface is satisfied by protoregistry.GlobalTypes.
+type ExtensionResolver interface {
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// ServerReflectionServer is the server API for ServerReflection service.
+type ServerReflectionServer struct {
+	v1alphareflectiongrpc.UnimplementedServerReflectionServer
+	S            ServiceInfoProvider
+	DescResolver protodesc.Resolver
+	ExtResolver  ExtensionResolver
+}
+
+// FileDescWithDependencies returns a slice of serialized fileDescriptors in
+// wire format ([]byte). The fileDescriptors will include fd and all the
+// transitive dependencies of fd with names not in sentFileDescriptors.
+func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) {
+	if fd.IsPlaceholder() {
+		// If the given root file is a placeholder, treat it
+		// as missing instead of serializing it.
+		return nil, protoregistry.NotFound
+	}
+	var r [][]byte
+	queue := []protoreflect.FileDescriptor{fd}
+	for len(queue) > 0 {
+		currentfd := queue[0]
+		queue = queue[1:]
+		if currentfd.IsPlaceholder() {
+			// Skip any missing files in the dependency graph.
+			continue
+		}
+		if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent {
+			sentFileDescriptors[currentfd.Path()] = true
+			fdProto := protodesc.ToFileDescriptorProto(currentfd)
+			currentfdEncoded, err := proto.Marshal(fdProto)
+			if err != nil {
+				return nil, err
+			}
+			r = append(r, currentfdEncoded)
+		}
+		for i := 0; i < currentfd.Imports().Len(); i++ {
+			queue = append(queue, currentfd.Imports().Get(i))
+		}
+	}
+	return r, nil
+}
+
+// FileDescEncodingContainingSymbol finds the file descriptor containing the
+// given symbol, finds all of its previously unsent transitive dependencies,
+// marshals them, and returns the marshalled result. The given symbol can be a
+// type, a service, or a method.
+func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) {
+	d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name))
+	if err != nil {
+		return nil, err
+	}
+	return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors)
+}
+
+// FileDescEncodingContainingExtension finds the file descriptor containing the
+// given extension, finds all of its previously unsent transitive dependencies,
+// marshals them, and returns the marshalled result.
+func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) {
+	xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum))
+	if err != nil {
+		return nil, err
+	}
+	return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors)
+}
+
+// AllExtensionNumbersForTypeName returns all extension numbers for the given type.
+func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) {
+	var numbers []int32
+	s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool {
+		numbers = append(numbers, int32(xt.TypeDescriptor().Number()))
+		return true
+	})
+	sort.Slice(numbers, func(i, j int) bool {
+		return numbers[i] < numbers[j]
+	})
+	if len(numbers) == 0 {
+		// Maybe return an error if the given type name is not known.
+		if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil {
+			return nil, err
+		}
+	}
+	return numbers, nil
+}
+
+// ListServices returns the names of services this server exposes.
+func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse {
+	serviceInfo := s.S.GetServiceInfo()
+	resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo))
+	for svc := range serviceInfo {
+		resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc})
+	}
+	sort.Slice(resp, func(i, j int) bool {
+		return resp[i].Name < resp[j].Name
+	})
+	return resp
+}
+
+// ServerReflectionInfo is the reflection service handler.
+func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error {
+	sentFileDescriptors := make(map[string]bool)
+	for {
+		in, err := stream.Recv()
+		if err == io.EOF {
+			return nil
+		}
+		if err != nil {
+			return err
+		}
+
+		out := &v1reflectionpb.ServerReflectionResponse{
+			ValidHost:       in.Host,
+			OriginalRequest: in,
+		}
+		switch req := in.MessageRequest.(type) {
+		case *v1reflectionpb.ServerReflectionRequest_FileByFilename:
+			var b [][]byte
+			fd, err := s.DescResolver.FindFileByPath(req.FileByFilename)
+			if err == nil {
+				b, err = s.FileDescWithDependencies(fd, sentFileDescriptors)
+			}
+			if err != nil {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+					ErrorResponse: &v1reflectionpb.ErrorResponse{
+						ErrorCode:    int32(codes.NotFound),
+						ErrorMessage: err.Error(),
+					},
+				}
+			} else {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+					FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b},
+				}
+			}
+		case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol:
+			b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors)
+			if err != nil {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+					ErrorResponse: &v1reflectionpb.ErrorResponse{
+						ErrorCode:    int32(codes.NotFound),
+						ErrorMessage: err.Error(),
+					},
+				}
+			} else {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+					FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b},
+				}
+			}
+		case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension:
+			typeName := req.FileContainingExtension.ContainingType
+			extNum := req.FileContainingExtension.ExtensionNumber
+			b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors)
+			if err != nil {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+					ErrorResponse: &v1reflectionpb.ErrorResponse{
+						ErrorCode:    int32(codes.NotFound),
+						ErrorMessage: err.Error(),
+					},
+				}
+			} else {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+					FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b},
+				}
+			}
+		case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType:
+			extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType)
+			if err != nil {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+					ErrorResponse: &v1reflectionpb.ErrorResponse{
+						ErrorCode:    int32(codes.NotFound),
+						ErrorMessage: err.Error(),
+					},
+				}
+			} else {
+				out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{
+					AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{
+						BaseTypeName:    req.AllExtensionNumbersOfType,
+						ExtensionNumber: extNums,
+					},
+				}
+			}
+		case *v1reflectionpb.ServerReflectionRequest_ListServices:
+			out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{
+				ListServicesResponse: &v1reflectionpb.ListServiceResponse{
+					Service: s.ListServices(),
+				},
+			}
+		default:
+			return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest)
+		}
+
+		if err := stream.Send(out); err != nil {
+			return err
+		}
+	}
+}
+
+// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha.
+func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse {
+	var v1alpha v1alphareflectionpb.ServerReflectionResponse
+	v1alpha.ValidHost = v1.ValidHost
+	if v1.OriginalRequest != nil {
+		v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest)
+	}
+	switch mr := v1.MessageResponse.(type) {
+	case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse:
+		if mr != nil {
+			v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+				FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{
+					FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+				},
+			}
+		}
+	case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse:
+		if mr != nil {
+			v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{
+				AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{
+					BaseTypeName:    mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+					ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+				},
+			}
+		}
+	case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse:
+		if mr != nil {
+			svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+			for i, svc := range mr.ListServicesResponse.GetService() {
+				svcs[i] = &v1alphareflectionpb.ServiceResponse{
+					Name: svc.GetName(),
+				}
+			}
+			v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{
+				ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{
+					Service: svcs,
+				},
+			}
+		}
+	case *v1reflectionpb.ServerReflectionResponse_ErrorResponse:
+		if mr != nil {
+			v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{
+				ErrorResponse: &v1alphareflectionpb.ErrorResponse{
+					ErrorCode:    mr.ErrorResponse.GetErrorCode(),
+					ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+				},
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1alpha
+}
+
+// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1.
+func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest {
+	var v1 v1reflectionpb.ServerReflectionRequest
+	v1.Host = v1alpha.Host
+	switch mr := v1alpha.MessageRequest.(type) {
+	case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename:
+		v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{
+			FileByFilename: mr.FileByFilename,
+		}
+	case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol:
+		v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{
+			FileContainingSymbol: mr.FileContainingSymbol,
+		}
+	case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension:
+		if mr.FileContainingExtension != nil {
+			v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{
+				FileContainingExtension: &v1reflectionpb.ExtensionRequest{
+					ContainingType:  mr.FileContainingExtension.GetContainingType(),
+					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+				},
+			}
+		}
+	case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType:
+		v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+			AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+		}
+	case *v1alphareflectionpb.ServerReflectionRequest_ListServices:
+		v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{
+			ListServices: mr.ListServices,
+		}
+	default:
+		// no value set
+	}
+	return &v1
+}
+
+// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha.
+func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest {
+	var v1alpha v1alphareflectionpb.ServerReflectionRequest
+	v1alpha.Host = v1.Host
+	switch mr := v1.MessageRequest.(type) {
+	case *v1reflectionpb.ServerReflectionRequest_FileByFilename:
+		if mr != nil {
+			v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{
+				FileByFilename: mr.FileByFilename,
+			}
+		}
+	case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol:
+		if mr != nil {
+			v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{
+				FileContainingSymbol: mr.FileContainingSymbol,
+			}
+		}
+	case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension:
+		if mr != nil {
+			v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{
+				FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{
+					ContainingType:  mr.FileContainingExtension.GetContainingType(),
+					ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+				},
+			}
+		}
+	case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType:
+		if mr != nil {
+			v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+				AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+			}
+		}
+	case *v1reflectionpb.ServerReflectionRequest_ListServices:
+		if mr != nil {
+			v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{
+				ListServices: mr.ListServices,
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1alpha
+}
+
+// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1.
+func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse {
+	var v1 v1reflectionpb.ServerReflectionResponse
+	v1.ValidHost = v1alpha.ValidHost
+	if v1alpha.OriginalRequest != nil {
+		v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest)
+	}
+	switch mr := v1alpha.MessageResponse.(type) {
+	case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse:
+		if mr != nil {
+			v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+				FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{
+					FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+				},
+			}
+		}
+	case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse:
+		if mr != nil {
+			v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{
+				AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{
+					BaseTypeName:    mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+					ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+				},
+			}
+		}
+	case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse:
+		if mr != nil {
+			svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+			for i, svc := range mr.ListServicesResponse.GetService() {
+				svcs[i] = &v1reflectionpb.ServiceResponse{
+					Name: svc.GetName(),
+				}
+			}
+			v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{
+				ListServicesResponse: &v1reflectionpb.ListServiceResponse{
+					Service: svcs,
+				},
+			}
+		}
+	case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse:
+		if mr != nil {
+			v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+				ErrorResponse: &v1reflectionpb.ErrorResponse{
+					ErrorCode:    mr.ErrorResponse.GetErrorCode(),
+					ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+				},
+			}
+		}
+	default:
+		// no value set
+	}
+	return &v1
+}
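The rewritten serverreflection.go below calls an asV1Alpha adapter that is not part of this hunk. A plausible sketch of such an adapter built from the conversion helpers in this file — the adapter type names are assumptions; only the internal.* helpers are taken from the patch:

```go
package reflection

import (
	"google.golang.org/grpc/reflection/internal"

	v1grpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
	v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1"
	v1alphagrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// v1AlphaStream presents a v1alpha stream to a v1 handler by converting each
// message with the helpers above.
type v1AlphaStream struct {
	v1alphagrpc.ServerReflection_ServerReflectionInfoServer
}

func (s v1AlphaStream) Send(res *v1pb.ServerReflectionResponse) error {
	return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(res))
}

func (s v1AlphaStream) Recv() (*v1pb.ServerReflectionRequest, error) {
	req, err := s.ServerReflection_ServerReflectionInfoServer.Recv()
	if err != nil {
		return nil, err
	}
	return internal.V1AlphaToV1Request(req), nil
}

type v1AlphaServer struct {
	v1alphagrpc.UnimplementedServerReflectionServer
	svr v1grpc.ServerReflectionServer
}

func (s v1AlphaServer) ServerReflectionInfo(stream v1alphagrpc.ServerReflection_ServerReflectionInfoServer) error {
	return s.svr.ServerReflectionInfo(v1AlphaStream{stream})
}

func asV1Alpha(svr v1grpc.ServerReflectionServer) v1alphagrpc.ServerReflectionServer {
	return v1AlphaServer{svr: svr}
}
```

This keeps a single v1 implementation as the source of truth, with v1alpha served as a thin translation layer.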
diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go
index dd22a2d..13a94e2 100644
--- a/vendor/google.golang.org/grpc/reflection/serverreflection.go
+++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go
@@ -16,8 +16,6 @@
  *
  */
 
-//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto
-
 /*
 Package reflection implements server reflection service.
 
@@ -25,6 +23,7 @@
 https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
 
 To register server reflection on a gRPC server:
+
 	import "google.golang.org/grpc/reflection"
 
 	s := grpc.NewServer()
@@ -34,421 +33,128 @@
 	reflection.Register(s)
 
 	s.Serve(lis)
-
 */
 package reflection // import "google.golang.org/grpc/reflection"
 
 import (
-	"bytes"
-	"compress/gzip"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"reflect"
-	"sort"
-	"sync"
-
-	"github.com/golang/protobuf/proto"
-	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/codes"
-	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
-	"google.golang.org/grpc/status"
+	"google.golang.org/grpc/reflection/internal"
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+	v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
 )
 
-type serverReflectionServer struct {
-	s *grpc.Server
-
-	initSymbols  sync.Once
-	serviceNames []string
-	symbols      map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files
+// GRPCServer is the interface provided by a gRPC server. It is implemented by
+// *grpc.Server, but could also be implemented by other concrete types. It acts
+// as a registry for accumulating the services exposed by the server.
+type GRPCServer interface {
+	grpc.ServiceRegistrar
+	ServiceInfoProvider
 }
 
+var _ GRPCServer = (*grpc.Server)(nil)
+
 // Register registers the server reflection service on the given gRPC server.
-func Register(s *grpc.Server) {
-	rpb.RegisterServerReflectionServer(s, &serverReflectionServer{
-		s: s,
-	})
+// Both the v1 and v1alpha versions are registered.
+func Register(s GRPCServer) {
+	svr := NewServerV1(ServerOptions{Services: s})
+	v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr))
+	v1reflectiongrpc.RegisterServerReflectionServer(s, svr)
 }
 
-// protoMessage is used for type assertion on proto messages.
-// Generated proto message implements function Descriptor(), but Descriptor()
-// is not part of interface proto.Message. This interface is needed to
-// call Descriptor().
-type protoMessage interface {
-	Descriptor() ([]byte, []int)
+// RegisterV1 registers only the v1 version of the server reflection service
+// on the given gRPC server. Many clients may only support v1alpha, so most
+// users should use Register instead, at least until clients have upgraded.
+func RegisterV1(s GRPCServer) {
+	svr := NewServerV1(ServerOptions{Services: s})
+	v1reflectiongrpc.RegisterServerReflectionServer(s, svr)
 }
 
-func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) {
-	s.initSymbols.Do(func() {
-		serviceInfo := s.s.GetServiceInfo()
-
-		s.symbols = map[string]*dpb.FileDescriptorProto{}
-		s.serviceNames = make([]string, 0, len(serviceInfo))
-		processed := map[string]struct{}{}
-		for svc, info := range serviceInfo {
-			s.serviceNames = append(s.serviceNames, svc)
-			fdenc, ok := parseMetadata(info.Metadata)
-			if !ok {
-				continue
-			}
-			fd, err := decodeFileDesc(fdenc)
-			if err != nil {
-				continue
-			}
-			s.processFile(fd, processed)
-		}
-		sort.Strings(s.serviceNames)
-	})
-
-	return s.serviceNames, s.symbols
+// ServiceInfoProvider is an interface used to retrieve metadata about the
+// services to expose.
+//
+// The reflection service is only interested in the service names, but the
+// signature is this way so that *grpc.Server implements it. So it is okay
+// for a custom implementation to return zero values for the
+// grpc.ServiceInfo values in the map.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServiceInfoProvider interface {
+	GetServiceInfo() map[string]grpc.ServiceInfo
 }
 
-func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) {
-	filename := fd.GetName()
-	if _, ok := processed[filename]; ok {
-		return
-	}
-	processed[filename] = struct{}{}
-
-	prefix := fd.GetPackage()
-
-	for _, msg := range fd.MessageType {
-		s.processMessage(fd, prefix, msg)
-	}
-	for _, en := range fd.EnumType {
-		s.processEnum(fd, prefix, en)
-	}
-	for _, ext := range fd.Extension {
-		s.processField(fd, prefix, ext)
-	}
-	for _, svc := range fd.Service {
-		svcName := fqn(prefix, svc.GetName())
-		s.symbols[svcName] = fd
-		for _, meth := range svc.Method {
-			name := fqn(svcName, meth.GetName())
-			s.symbols[name] = fd
-		}
-	}
-
-	for _, dep := range fd.Dependency {
-		fdenc := proto.FileDescriptor(dep)
-		fdDep, err := decodeFileDesc(fdenc)
-		if err != nil {
-			continue
-		}
-		s.processFile(fdDep, processed)
-	}
+// ExtensionResolver is the interface used to query details about extensions.
+// This interface is satisfied by protoregistry.GlobalTypes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ExtensionResolver interface {
+	protoregistry.ExtensionTypeResolver
+	RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
 }
 
-func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) {
-	msgName := fqn(prefix, msg.GetName())
-	s.symbols[msgName] = fd
-
-	for _, nested := range msg.NestedType {
-		s.processMessage(fd, msgName, nested)
-	}
-	for _, en := range msg.EnumType {
-		s.processEnum(fd, msgName, en)
-	}
-	for _, ext := range msg.Extension {
-		s.processField(fd, msgName, ext)
-	}
-	for _, fld := range msg.Field {
-		s.processField(fd, msgName, fld)
-	}
-	for _, oneof := range msg.OneofDecl {
-		oneofName := fqn(msgName, oneof.GetName())
-		s.symbols[oneofName] = fd
-	}
+// ServerOptions represents the options used to construct a reflection server.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServerOptions struct {
+	// The source of advertised RPC services. If not specified, the reflection
+	// server will report an empty list when asked to list services.
+	//
+	// This value will typically be a *grpc.Server. But the set of advertised
+	// services can be customized by wrapping a *grpc.Server or using an
+	// alternate implementation that returns a custom set of service names.
+	Services ServiceInfoProvider
+	// Optional resolver used to load descriptors. If not specified,
+	// protoregistry.GlobalFiles will be used.
+	DescriptorResolver protodesc.Resolver
+	// Optional resolver used to query for known extensions. If not specified,
+	// protoregistry.GlobalTypes will be used.
+	ExtensionResolver ExtensionResolver
 }
 
-func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) {
-	enName := fqn(prefix, en.GetName())
-	s.symbols[enName] = fd
-
-	for _, val := range en.Value {
-		valName := fqn(enName, val.GetName())
-		s.symbols[valName] = fd
-	}
+// NewServer returns a reflection server implementation using the given options.
+// This can be used to customize behavior of the reflection service. Most usages
+// should prefer to use Register instead. For backwards compatibility reasons,
+// this returns the v1alpha version of the reflection server. For a v1 version
+// of the reflection server, see NewServerV1.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer {
+	return asV1Alpha(NewServerV1(opts))
 }
 
-func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) {
-	fldName := fqn(prefix, fld.GetName())
-	s.symbols[fldName] = fd
-}
-
-func fqn(prefix, name string) string {
-	if prefix == "" {
-		return name
+// NewServerV1 returns a reflection server implementation using the given options.
+// This can be used to customize behavior of the reflection service. Most usages
+// should prefer to use Register instead.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer {
+	if opts.DescriptorResolver == nil {
+		opts.DescriptorResolver = protoregistry.GlobalFiles
 	}
-	return prefix + "." + name
-}
-
-// fileDescForType gets the file descriptor for the given type.
-// The given type should be a proto message.
-func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) {
-	m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage)
-	if !ok {
-		return nil, fmt.Errorf("failed to create message from type: %v", st)
+	if opts.ExtensionResolver == nil {
+		opts.ExtensionResolver = protoregistry.GlobalTypes
 	}
-	enc, _ := m.Descriptor()
-
-	return decodeFileDesc(enc)
-}
-
-// decodeFileDesc does decompression and unmarshalling on the given
-// file descriptor byte slice.
-func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) {
-	raw, err := decompress(enc)
-	if err != nil {
-		return nil, fmt.Errorf("failed to decompress enc: %v", err)
-	}
-
-	fd := new(dpb.FileDescriptorProto)
-	if err := proto.Unmarshal(raw, fd); err != nil {
-		return nil, fmt.Errorf("bad descriptor: %v", err)
-	}
-	return fd, nil
-}
-
-// decompress does gzip decompression.
-func decompress(b []byte) ([]byte, error) {
-	r, err := gzip.NewReader(bytes.NewReader(b))
-	if err != nil {
-		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
-	}
-	out, err := ioutil.ReadAll(r)
-	if err != nil {
-		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
-	}
-	return out, nil
-}
-
-func typeForName(name string) (reflect.Type, error) {
-	pt := proto.MessageType(name)
-	if pt == nil {
-		return nil, fmt.Errorf("unknown type: %q", name)
-	}
-	st := pt.Elem()
-
-	return st, nil
-}
-
-func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) {
-	m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
-	if !ok {
-		return nil, fmt.Errorf("failed to create message from type: %v", st)
-	}
-
-	var extDesc *proto.ExtensionDesc
-	for id, desc := range proto.RegisteredExtensions(m) {
-		if id == ext {
-			extDesc = desc
-			break
-		}
-	}
-
-	if extDesc == nil {
-		return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext)
-	}
-
-	return decodeFileDesc(proto.FileDescriptor(extDesc.Filename))
-}
-
-func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) {
-	m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
-	if !ok {
-		return nil, fmt.Errorf("failed to create message from type: %v", st)
-	}
-
-	exts := proto.RegisteredExtensions(m)
-	out := make([]int32, 0, len(exts))
-	for id := range exts {
-		out = append(out, id)
-	}
-	return out, nil
-}
-
-// fileDescEncodingByFilename finds the file descriptor for given filename,
-// does marshalling on it and returns the marshalled result.
-func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) {
-	enc := proto.FileDescriptor(name)
-	if enc == nil {
-		return nil, fmt.Errorf("unknown file: %v", name)
-	}
-	fd, err := decodeFileDesc(enc)
-	if err != nil {
-		return nil, err
-	}
-	return proto.Marshal(fd)
-}
-
-// parseMetadata finds the file descriptor bytes specified meta.
-// For SupportPackageIsVersion4, m is the name of the proto file, we
-// call proto.FileDescriptor to get the byte slice.
-// For SupportPackageIsVersion3, m is a byte slice itself.
-func parseMetadata(meta interface{}) ([]byte, bool) {
-	// Check if meta is the file name.
-	if fileNameForMeta, ok := meta.(string); ok {
-		return proto.FileDescriptor(fileNameForMeta), true
-	}
-
-	// Check if meta is the byte slice.
-	if enc, ok := meta.([]byte); ok {
-		return enc, true
-	}
-
-	return nil, false
-}
-
-// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol,
-// does marshalling on it and returns the marshalled result.
-// The given symbol can be a type, a service or a method.
-func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) {
-	_, symbols := s.getSymbols()
-	fd := symbols[name]
-	if fd == nil {
-		// Check if it's a type name that was not present in the
-		// transitive dependencies of the registered services.
-		if st, err := typeForName(name); err == nil {
-			fd, err = s.fileDescForType(st)
-			if err != nil {
-				return nil, err
-			}
-		}
-	}
-
-	if fd == nil {
-		return nil, fmt.Errorf("unknown symbol: %v", name)
-	}
-
-	return proto.Marshal(fd)
-}
-
-// fileDescEncodingContainingExtension finds the file descriptor containing given extension,
-// does marshalling on it and returns the marshalled result.
-func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) {
-	st, err := typeForName(typeName)
-	if err != nil {
-		return nil, err
-	}
-	fd, err := fileDescContainingExtension(st, extNum)
-	if err != nil {
-		return nil, err
-	}
-	return proto.Marshal(fd)
-}
-
-// allExtensionNumbersForTypeName returns all extension numbers for the given type.
-func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) {
-	st, err := typeForName(name)
-	if err != nil {
-		return nil, err
-	}
-	extNums, err := s.allExtensionNumbersForType(st)
-	if err != nil {
-		return nil, err
-	}
-	return extNums, nil
-}
-
-// ServerReflectionInfo is the reflection service handler.
-func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error {
-	for {
-		in, err := stream.Recv()
-		if err == io.EOF {
-			return nil
-		}
-		if err != nil {
-			return err
-		}
-
-		out := &rpb.ServerReflectionResponse{
-			ValidHost:       in.Host,
-			OriginalRequest: in,
-		}
-		switch req := in.MessageRequest.(type) {
-		case *rpb.ServerReflectionRequest_FileByFilename:
-			b, err := s.fileDescEncodingByFilename(req.FileByFilename)
-			if err != nil {
-				out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
-					ErrorResponse: &rpb.ErrorResponse{
-						ErrorCode:    int32(codes.NotFound),
-						ErrorMessage: err.Error(),
-					},
-				}
-			} else {
-				out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
-					FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
-				}
-			}
-		case *rpb.ServerReflectionRequest_FileContainingSymbol:
-			b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol)
-			if err != nil {
-				out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
-					ErrorResponse: &rpb.ErrorResponse{
-						ErrorCode:    int32(codes.NotFound),
-						ErrorMessage: err.Error(),
-					},
-				}
-			} else {
-				out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
-					FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
-				}
-			}
-		case *rpb.ServerReflectionRequest_FileContainingExtension:
-			typeName := req.FileContainingExtension.ContainingType
-			extNum := req.FileContainingExtension.ExtensionNumber
-			b, err := s.fileDescEncodingContainingExtension(typeName, extNum)
-			if err != nil {
-				out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
-					ErrorResponse: &rpb.ErrorResponse{
-						ErrorCode:    int32(codes.NotFound),
-						ErrorMessage: err.Error(),
-					},
-				}
-			} else {
-				out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
-					FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
-				}
-			}
-		case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType:
-			extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType)
-			if err != nil {
-				out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
-					ErrorResponse: &rpb.ErrorResponse{
-						ErrorCode:    int32(codes.NotFound),
-						ErrorMessage: err.Error(),
-					},
-				}
-			} else {
-				out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{
-					AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{
-						BaseTypeName:    req.AllExtensionNumbersOfType,
-						ExtensionNumber: extNums,
-					},
-				}
-			}
-		case *rpb.ServerReflectionRequest_ListServices:
-			svcNames, _ := s.getSymbols()
-			serviceResponses := make([]*rpb.ServiceResponse, len(svcNames))
-			for i, n := range svcNames {
-				serviceResponses[i] = &rpb.ServiceResponse{
-					Name: n,
-				}
-			}
-			out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{
-				ListServicesResponse: &rpb.ListServiceResponse{
-					Service: serviceResponses,
-				},
-			}
-		default:
-			return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest)
-		}
-
-		if err := stream.Send(out); err != nil {
-			return err
-		}
+	return &internal.ServerReflectionServer{
+		S:            opts.Services,
+		DescResolver: opts.DescriptorResolver,
+		ExtResolver:  opts.ExtensionResolver,
 	}
 }
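Registering reflection is unchanged for typical callers; a minimal sketch of the rewritten API in use (the port is illustrative):

```go
package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	// Register exposes both the v1 and v1alpha services; RegisterV1 would
	// expose only v1.
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		log.Fatal(err)
	}
}
```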
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
index 14aa6f2..ef3d6ed 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -18,19 +18,43 @@
 
 // Package dns implements a dns resolver to be installed as the default resolver
 // in grpc.
-//
-// Deprecated: this package is imported by grpc and should not need to be
-// imported directly by users.
 package dns
 
 import (
+	"time"
+
 	"google.golang.org/grpc/internal/resolver/dns"
 	"google.golang.org/grpc/resolver"
 )
 
+// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
+//
+// This function affects the global timeout used by all channels using the DNS
+// name resolver scheme.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+//
+// The default value is 30 seconds. Setting the timeout too low may result in
+// premature timeouts during resolution, while setting it too high may lead to
+// unnecessary delays in service discovery. Choose a value appropriate for your
+// specific needs and network environment.
+func SetResolvingTimeout(timeout time.Duration) {
+	dns.ResolvingTimeout = timeout
+}
+
 // NewBuilder creates a dnsBuilder which is used to build DNS resolvers.
 //
 // Deprecated: import grpc and use resolver.Get("dns") instead.
 func NewBuilder() resolver.Builder {
 	return dns.NewBuilder()
 }
+
+// SetMinResolutionInterval sets the default minimum interval at which DNS
+// re-resolutions are allowed. This helps to prevent excessive re-resolution.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+func SetMinResolutionInterval(d time.Duration) {
+	dns.MinResolutionInterval = d
+}
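A minimal sketch of the two new global knobs; the durations are illustrative, and per the doc comments both calls must happen at startup, before any gRPC calls are made:

```go
package main

import (
	"time"

	"google.golang.org/grpc/resolver/dns"
)

func main() {
	// Both setters mutate package-level state and are not thread-safe, so
	// they must run before any channels are created.
	dns.SetResolvingTimeout(10 * time.Second)
	dns.SetMinResolutionInterval(30 * time.Second)

	// ... create gRPC channels only after the knobs are set.
}
```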
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
new file mode 100644
index 0000000..82fcc2e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package manual defines a resolver that can be used to manually send resolved
+// addresses to ClientConn.
+package manual
+
+import (
+	"sync"
+
+	"google.golang.org/grpc/resolver"
+)
+
+// NewBuilderWithScheme creates a new manual resolver builder with the given
+// scheme. Every instance of the manual resolver may only ever be used with a
+// single grpc.ClientConn. Otherwise, bad things will happen.
+func NewBuilderWithScheme(scheme string) *Resolver {
+	return &Resolver{
+		BuildCallback:       func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {},
+		UpdateStateCallback: func(error) {},
+		ResolveNowCallback:  func(resolver.ResolveNowOptions) {},
+		CloseCallback:       func() {},
+		scheme:              scheme,
+	}
+}
+
+// Resolver is also a resolver builder.
+// Its Build() function always returns itself.
+type Resolver struct {
+	// BuildCallback is called when the Build method is called.  Must not be
+	// nil.  Must not be changed after the resolver may be built.
+	BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions)
+	// UpdateStateCallback is called when the UpdateState method is called on
+	// the resolver.  The value passed as argument to this callback is the value
+	// returned by the resolver.ClientConn.  Must not be nil.  Must not be
+	// changed after the resolver may be built.
+	UpdateStateCallback func(err error)
+	// ResolveNowCallback is called when the ResolveNow method is called on the
+	// resolver.  Must not be nil.  Must not be changed after the resolver may
+	// be built.
+	ResolveNowCallback func(resolver.ResolveNowOptions)
+	// CloseCallback is called when the Close method is called.  Must not be
+	// nil.  Must not be changed after the resolver may be built.
+	CloseCallback func()
+	scheme        string
+
+	// Fields actually belong to the resolver.
+	// Guards access to the fields below.
+	mu sync.Mutex
+	cc resolver.ClientConn
+	// Storing the most recent state update makes this resolver resilient to
+	// restarts, which is possible with channel idleness.
+	lastSeenState *resolver.State
+}
+
+// InitialState adds initial state to the resolver so that UpdateState doesn't
+// need to be explicitly called after Dial.
+func (r *Resolver) InitialState(s resolver.State) {
+	r.lastSeenState = &s
+}
+
+// Build returns itself for Resolver, because it's both a builder and a resolver.
+func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	// Call BuildCallback after locking to avoid a race when UpdateState or CC
+	// is called before Build returns.
+	r.BuildCallback(target, cc, opts)
+	r.cc = cc
+	if r.lastSeenState != nil {
+		err := r.cc.UpdateState(*r.lastSeenState)
+		go r.UpdateStateCallback(err)
+	}
+	return r, nil
+}
+
+// Scheme returns the manual resolver's scheme.
+func (r *Resolver) Scheme() string {
+	return r.scheme
+}
+
+// ResolveNow is a noop for Resolver.
+func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) {
+	r.ResolveNowCallback(o)
+}
+
+// Close is a noop for Resolver.
+func (r *Resolver) Close() {
+	r.CloseCallback()
+}
+
+// UpdateState calls UpdateState(s) on the channel.  If the resolver has not
+// been Built before, this instead sets the initial state of the resolver, like
+// InitialState.
+func (r *Resolver) UpdateState(s resolver.State) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.lastSeenState = &s
+	if r.cc == nil {
+		return
+	}
+	err := r.cc.UpdateState(s)
+	r.UpdateStateCallback(err)
+}
+
+// CC returns r's ClientConn when r was last Built.  Panics if the resolver has
+// not been Built before.
+func (r *Resolver) CC() resolver.ClientConn {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.cc == nil {
+		panic("Manual resolver instance has not yet been built.")
+	}
+	return r.cc
+}
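A hedged sketch of driving a channel with the manual resolver; the scheme and addresses are illustrative, and grpc.NewClient plus grpc.WithResolvers are assumed available in the vendored gRPC version:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
	"google.golang.org/grpc/resolver/manual"
)

func main() {
	r := manual.NewBuilderWithScheme("example")
	// Seed addresses before the channel is built; UpdateState after Build
	// pushes new addresses to the channel the same way.
	r.InitialState(resolver.State{Addresses: []resolver.Address{
		{Addr: "10.0.0.1:50051"},
		{Addr: "10.0.0.2:50051"},
	}})

	conn, err := grpc.NewClient(r.Scheme()+":///ignored",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(r))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```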
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 0000000..c3c15ac
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,247 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+import (
+	"encoding/base64"
+	"sort"
+	"strings"
+)
+
+type addressMapEntry[T any] struct {
+	addr  Address
+	value T
+}
+
+// AddressMap is an AddressMapV2[any].  It will be deleted in an upcoming
+// release of grpc-go.
+//
+// Deprecated: use the generic AddressMapV2 type instead.
+type AddressMap = AddressMapV2[any]
+
+// AddressMapV2 is a map of addresses to arbitrary values taking into account
+// Attributes.  BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently.  Must be created via
+// NewAddressMapV2; do not construct directly.
+type AddressMapV2[T any] struct {
+	// The underlying map is keyed by an Address with fields that we don't care
+	// about being set to their zero values. The only fields that we care about
+	// are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+	// distinguish between addresses with same `Addr` and `ServerName`, but
+	// different `Attributes`, we cannot store the `Attributes` in the map key.
+	//
+	// The comparison operation for structs works as follows:
+	//  Struct values are comparable if all their fields are comparable. Two
+	//  struct values are equal if their corresponding non-blank fields are equal.
+	//
+	// The value type of the map is a slice of entries whose addresses match the
+	// key in their `Addr` and `ServerName` fields, each paired with the value
+	// associated with that address.
+	m map[Address]addressMapEntryList[T]
+}
+
+func toMapKey(addr *Address) Address {
+	return Address{Addr: addr.Addr, ServerName: addr.ServerName}
+}
+
+type addressMapEntryList[T any] []*addressMapEntry[T]
+
+// NewAddressMap creates a new AddressMapV2[any].
+//
+// Deprecated: use the generic NewAddressMapV2 constructor instead.
+func NewAddressMap() *AddressMap {
+	return NewAddressMapV2[any]()
+}
+
+// NewAddressMapV2 creates a new AddressMapV2.
+func NewAddressMapV2[T any]() *AddressMapV2[T] {
+	return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList[T]) find(addr Address) int {
+	for i, entry := range l {
+		// Attributes are the only thing to match on here, since `Addr` and
+		// `ServerName` are already equal.
+		if entry.addr.Attributes.Equal(addr.Attributes) {
+			return i
+		}
+	}
+	return -1
+}
+
+// Get returns the value for the address in the map, if present.
+func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		return entryList[entry].value, true
+	}
+	return value, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMapV2[T]) Set(addr Address, value T) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	if entry := entryList.find(addr); entry != -1 {
+		entryList[entry].value = value
+		return
+	}
+	a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMapV2[T]) Delete(addr Address) {
+	addrKey := toMapKey(&addr)
+	entryList := a.m[addrKey]
+	entry := entryList.find(addr)
+	if entry == -1 {
+		return
+	}
+	if len(entryList) == 1 {
+		entryList = nil
+	} else {
+		copy(entryList[entry:], entryList[entry+1:])
+		entryList = entryList[:len(entryList)-1]
+	}
+	a.m[addrKey] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMapV2[T]) Len() int {
+	ret := 0
+	for _, entryList := range a.m {
+		ret += len(entryList)
+	}
+	return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMapV2[T]) Keys() []Address {
+	ret := make([]Address, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.addr)
+		}
+	}
+	return ret
+}
+
+// Values returns a slice of all current map values.
+func (a *AddressMapV2[T]) Values() []T {
+	ret := make([]T, 0, a.Len())
+	for _, entryList := range a.m {
+		for _, entry := range entryList {
+			ret = append(ret, entry.value)
+		}
+	}
+	return ret
+}
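
A sketch of how AddressMapV2 distinguishes otherwise-identical addresses by their Attributes; the attribute keys and values here are hypothetical:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	m := resolver.NewAddressMapV2[int]()

	// Same Addr and ServerName, different Attributes: stored as distinct keys.
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("weight", "1")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("weight", "2")}

	m.Set(a1, 1)
	m.Set(a2, 2)
	fmt.Println(m.Len()) // 2

	if v, ok := m.Get(a1); ok {
		fmt.Println(v) // 1
	}
	m.Delete(a2)
	fmt.Println(m.Len()) // 1
}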
+
+type endpointMapKey string
+
+// EndpointMap is a map of endpoints to arbitrary values keyed on only the
+// unordered set of address strings within an endpoint. This map is not
+// thread-safe and must not be accessed concurrently. Must be created via
+// NewEndpointMap; do not construct directly.
+type EndpointMap[T any] struct {
+	endpoints map[endpointMapKey]endpointData[T]
+}
+
+type endpointData[T any] struct {
+	// decodedKey stores the original key to avoid decoding when iterating on
+	// EndpointMap keys.
+	decodedKey Endpoint
+	value      T
+}
+
+// NewEndpointMap creates a new EndpointMap.
+func NewEndpointMap[T any]() *EndpointMap[T] {
+	return &EndpointMap[T]{
+		endpoints: make(map[endpointMapKey]endpointData[T]),
+	}
+}
+
+// encodeEndpoint returns a string that uniquely identifies the unordered set of
+// addresses within an endpoint.
+func encodeEndpoint(e Endpoint) endpointMapKey {
+	addrs := make([]string, 0, len(e.Addresses))
+	// base64 encoding the address strings restricts the characters that can
+	// appear in them, which lets us use a delimiter without needing escape
+	// characters.
+	for _, addr := range e.Addresses {
+		addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
+	}
+	sort.Strings(addrs)
+	// " " should not appear in base64 encoded strings.
+	return endpointMapKey(strings.Join(addrs, " "))
+}
+
+// Get returns the value for the endpoint in the map, if present.
+func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) {
+	val, found := em.endpoints[encodeEndpoint(e)]
+	if found {
+		return val.value, true
+	}
+	return value, false
+}
+
+// Set updates or adds the value for the endpoint in the map.
+func (em *EndpointMap[T]) Set(e Endpoint, value T) {
+	en := encodeEndpoint(e)
+	em.endpoints[en] = endpointData[T]{
+		decodedKey: Endpoint{Addresses: e.Addresses},
+		value:      value,
+	}
+}
+
+// Len returns the number of entries in the map.
+func (em *EndpointMap[T]) Len() int {
+	return len(em.endpoints)
+}
+
+// Keys returns a slice of all current map keys, as endpoints containing the
+// addresses from the endpoint keys; uniqueness is determined by the unordered
+// set of addresses. The returned endpoints therefore carry less than the full
+// endpoint data (duplicated addresses and attributes are dropped) but remain
+// valid keys for EndpointMap accesses.
+func (em *EndpointMap[T]) Keys() []Endpoint {
+	ret := make([]Endpoint, 0, len(em.endpoints))
+	for _, en := range em.endpoints {
+		ret = append(ret, en.decodedKey)
+	}
+	return ret
+}
+
+// Values returns a slice of all current map values.
+func (em *EndpointMap[T]) Values() []T {
+	ret := make([]T, 0, len(em.endpoints))
+	for _, val := range em.endpoints {
+		ret = append(ret, val.value)
+	}
+	return ret
+}
+
+// Delete removes the specified endpoint from the map.
+func (em *EndpointMap[T]) Delete(e Endpoint) {
+	en := encodeEndpoint(e)
+	delete(em.endpoints, en)
+}
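
A sketch of the order-insensitive keying described above, with hypothetical addresses: permuting an endpoint's addresses resolves to the same map entry.

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	em := resolver.NewEndpointMap[string]()

	e := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "a:1"}, {Addr: "b:2"}}}
	em.Set(e, "primary")

	// The key is the unordered set of address strings, so a permuted
	// endpoint maps to the same entry.
	permuted := resolver.Endpoint{Addresses: []resolver.Address{{Addr: "b:2"}, {Addr: "a:1"}}}
	if v, ok := em.Get(permuted); ok {
		fmt.Println(v) // primary
	}
	fmt.Println(em.Len()) // 1
}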
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
deleted file mode 100644
index c8a0c3d..0000000
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package passthrough implements a pass-through resolver. It sends the target
-// name without scheme back to gRPC as resolved address.
-//
-// Deprecated: this package is imported by grpc and should not need to be
-// imported directly by users.
-package passthrough
-
-import _ "google.golang.org/grpc/internal/resolver/passthrough" // import for side effects after package was moved
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 4c5423b..8e6af95 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -22,9 +22,16 @@
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"net"
+	"net/url"
+	"strings"
 
+	"google.golang.org/grpc/attributes"
 	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/experimental/stats"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/serviceconfig"
 )
 
@@ -37,8 +44,9 @@
 
 // TODO(bar) install dns resolver in init(){}.
 
-// Register registers the resolver builder to the resolver map. b.Scheme will be
-// used as the scheme registered with this builder.
+// Register registers the resolver builder to the resolver map. b.Scheme will
+// be used as the scheme registered with this builder. The registry is case
+// sensitive, and schemes should not contain any uppercase characters.
 //
 // NOTE: this function must only be called during initialization time (i.e. in
 // an init() function), and is not thread-safe. If multiple Resolvers are
@@ -58,57 +66,92 @@
 }
 
 // SetDefaultScheme sets the default scheme that will be used. The default
-// default scheme is "passthrough".
+// scheme is initially set to "passthrough".
 //
 // NOTE: this function must only be called during initialization time (i.e. in
 // an init() function), and is not thread-safe. The scheme set last overrides
 // previously set values.
 func SetDefaultScheme(scheme string) {
 	defaultScheme = scheme
+	internal.UserSetDefaultScheme = true
 }
 
-// GetDefaultScheme gets the default scheme that will be used.
+// GetDefaultScheme gets the default scheme that will be used by grpc.Dial.  If
+// SetDefaultScheme is never called, the default scheme used by grpc.NewClient
+// is "dns" instead.
 func GetDefaultScheme() string {
 	return defaultScheme
 }
 
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
-	// Backend indicates the address is for a backend server.
-	Backend AddressType = iota
-	// GRPCLB indicates the address is for a grpclb load balancer.
-	GRPCLB
-)
-
 // Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type Address struct {
 	// Addr is the server address on which a connection will be established.
 	Addr string
-	// Type is the type of this address.
-	Type AddressType
+
 	// ServerName is the name of this address.
 	// If non-empty, the ServerName is used as the transport certification authority for
 	// the address, instead of the hostname from the Dial target string. In most cases,
 	// this should not be set.
 	//
-	// If Type is GRPCLB, ServerName should be the name of the remote load
-	// balancer, not the name of the backend.
-	//
 	// WARNING: ServerName must only be populated with trusted values. It
 	// is insecure to populate it with data from untrusted inputs since untrusted
 	// values could be used to bypass the authority checks performed by TLS.
 	ServerName string
+
+	// Attributes contains arbitrary data about this address intended for
+	// consumption by the SubConn.
+	Attributes *attributes.Attributes
+
+	// BalancerAttributes contains arbitrary data about this address intended
+	// for consumption by the LB policy.  These attributes do not affect SubConn
+	// creation, connection establishment, handshaking, etc.
+	//
+	// Deprecated: when an Address is inside an Endpoint, this field should not
+	// be used, and it will eventually be removed entirely.
+	BalancerAttributes *attributes.Attributes
+
 	// Metadata is the information associated with Addr, which may be used
 	// to make load balancing decision.
-	Metadata interface{}
+	//
+	// Deprecated: use Attributes instead.
+	Metadata any
 }
 
-// BuildOption includes additional information for the builder to create
+// Equal returns whether a and o are identical.  Metadata is compared directly,
+// not with any recursive introspection.
+//
+// This method compares all fields of the address. When used to tell apart
+// addresses during subchannel creation or connection establishment, it might be
+// more appropriate for the caller to implement custom equality logic.
+func (a Address) Equal(o Address) bool {
+	return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+		a.Attributes.Equal(o.Attributes) &&
+		a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+		a.Metadata == o.Metadata
+}
+
+// String returns a JSON-formatted string representation of the address.
+func (a Address) String() string {
+	var sb strings.Builder
+	sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
+	sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
+	if a.Attributes != nil {
+		sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
+	}
+	if a.BalancerAttributes != nil {
+		sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
+	}
+	sb.WriteString("}")
+	return sb.String()
+}
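
A short sketch of the Equal semantics above: two addresses with the same Addr but different Attributes compare unequal. The attribute key and values are hypothetical:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	a1 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "us-east")}
	a2 := resolver.Address{Addr: "10.0.0.1:443", Attributes: attributes.New("zone", "us-west")}
	fmt.Println(a1.Equal(a2)) // false: same Addr, different Attributes
	fmt.Println(a1.String())  // e.g. {Addr: "10.0.0.1:443", ServerName: "", ...}
}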
+
+// BuildOptions includes additional information for the builder to create
 // the resolver.
-type BuildOption struct {
+type BuildOptions struct {
 	// DisableServiceConfig indicates whether a resolver implementation should
 	// fetch service config data.
 	DisableServiceConfig bool
@@ -130,17 +173,52 @@
 	// field. In most cases though, it is not appropriate, and this field may
 	// be ignored.
 	Dialer func(context.Context, string) (net.Conn, error)
+	// Authority is the effective authority of the clientconn for which the
+	// resolver is built.
+	Authority string
+	// MetricsRecorder is the metrics recorder to do recording.
+	MetricsRecorder stats.MetricsRecorder
+}
+
+// An Endpoint is one network endpoint, or server, which may have multiple
+// addresses with which it can be accessed.
+type Endpoint struct {
+	// Addresses contains a list of addresses used to access this endpoint.
+	Addresses []Address
+
+	// Attributes contains arbitrary data about this endpoint intended for
+	// consumption by the LB policy.
+	Attributes *attributes.Attributes
 }
 
 // State contains the current Resolver state relevant to the ClientConn.
 type State struct {
 	// Addresses is the latest set of resolved addresses for the target.
+	//
+	// If a resolver sets Addresses but does not set Endpoints, one Endpoint
+	// will be created for each Address before the State is passed to the LB
+	// policy.  The BalancerAttributes of each entry in Addresses will be moved
+	// into the corresponding Endpoint's Attributes, and cleared from the
+	// Endpoint's Address's BalancerAttributes.
+	//
+	// Soon, Addresses will be deprecated and replaced fully by Endpoints.
 	Addresses []Address
 
+	// Endpoints is the latest set of resolved endpoints for the target.
+	//
+	// If a resolver produces a State containing Endpoints but not Addresses,
+	// it must take care to ensure the LB policies it selects will support
+	// Endpoints.
+	Endpoints []Endpoint
+
 	// ServiceConfig contains the result from parsing the latest service
 	// config.  If it is nil, it indicates no service config is present or the
 	// resolver does not provide service configs.
 	ServiceConfig *serviceconfig.ParseResult
+
+	// Attributes contains arbitrary data about the resolver intended for
+	// consumption by the load balancing policy.
+	Attributes *attributes.Attributes
 }
 
 // ClientConn contains the callbacks for resolver to notify any updates
@@ -152,10 +230,19 @@
 // gRPC to add new methods to this interface.
 type ClientConn interface {
 	// UpdateState updates the state of the ClientConn appropriately.
-	UpdateState(State)
+	//
+	// If an error is returned, the resolver should try to resolve the
+	// target again. The resolver should use a backoff timer to prevent
+	// overloading the server with requests. If a resolver is certain that
+	// reresolving will not change the result, e.g. because it is
+	// a watch-based resolver, returned errors can be ignored.
+	//
+	// If the resolved State is the same as the last reported one, calling
+	// UpdateState can be omitted.
+	UpdateState(State) error
 	// ReportError notifies the ClientConn that the Resolver encountered an
-	// error.  The ClientConn will notify the load balancer and begin calling
-	// ResolveNow on the Resolver with exponential backoff.
+	// error. The ClientConn then forwards this error to the load balancing
+	// policy.
 	ReportError(error)
 	// NewAddress is called by resolver to notify ClientConn a new list
 	// of resolved addresses.
@@ -163,11 +250,6 @@
 	//
 	// Deprecated: Use UpdateState instead.
 	NewAddress(addresses []Address)
-	// NewServiceConfig is called by resolver to notify ClientConn a new
-	// service config. The service config should be provided as a json string.
-	//
-	// Deprecated: Use UpdateState instead.
-	NewServiceConfig(serviceConfig string)
 	// ParseServiceConfig parses the provided service config and returns an
 	// object that provides the parsed config.
 	ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
@@ -175,25 +257,43 @@
 
 // Target represents a target for gRPC, as specified in:
 // https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// It is parsed from the target string that gets passed into Dial or DialContext by the user. And
-// grpc passes it to the resolver and the balancer.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user. And gRPC passes it to the resolver and the balancer.
 //
-// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
-// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
-// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
-//
-// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
-// be the full target string. e.g. "foo.bar" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
-//
-// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
-// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
-// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
 type Target struct {
-	Scheme    string
-	Authority string
-	Endpoint  string
+	// URL contains the parsed dial target with an optional default scheme added
+	// to it if the original dial target contained no scheme or contained an
+	// unregistered scheme. Any query params specified in the original dial
+	// target can be accessed from here.
+	URL url.URL
+}
+
+// Endpoint retrieves the endpoint, without the leading "/", from either
+// `URL.Path` or `URL.Opaque`. The latter is used when the former is empty.
+func (t Target) Endpoint() string {
+	endpoint := t.URL.Path
+	if endpoint == "" {
+		endpoint = t.URL.Opaque
+	}
+	// For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+	// value returned from url.Parse() contains a leading "/". Although this is
+	// in accordance with RFC 3986, we do not want to break existing resolver
+	// implementations which expect the endpoint without the leading "/". So, we
+	// end up stripping the leading "/" here. But this will result in an
+	// incorrect parsing for something like "unix:///path/to/socket". Since we
+	// own the "unix" resolver, we can workaround in the unix resolver by using
+	// the `URL` field.
+	return strings.TrimPrefix(endpoint, "/")
+}
+
+// String returns the canonical string representation of Target.
+func (t Target) String() string {
+	return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint()
 }
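
gRPC builds Target from the dial string internally; as a standalone sketch of Endpoint() and String(), a Target can also be constructed from a parsed URL (the target string here is hypothetical):

package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/resolver"
)

func main() {
	u, err := url.Parse("dns://8.8.8.8/example.com:443")
	if err != nil {
		panic(err)
	}
	t := resolver.Target{URL: *u}
	fmt.Println(t.Endpoint()) // example.com:443 (leading "/" stripped)
	fmt.Println(t.String())   // dns://8.8.8.8/example.com:443
}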
 
 // Builder creates a resolver that will be used to watch name resolution updates.
@@ -202,14 +302,16 @@
 	//
 	// gRPC dial calls Build synchronously, and fails if the returned error is
 	// not nil.
-	Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
-	// Scheme returns the scheme supported by this resolver.
-	// Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+	Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
+	// Scheme returns the scheme supported by this resolver.  Scheme is defined
+	// at https://github.com/grpc/grpc/blob/master/doc/naming.md.  The returned
+	// string should not contain uppercase characters, as they will not match
+	// the parsed target's scheme as defined in RFC 3986.
 	Scheme() string
 }
 
-// ResolveNowOption includes additional information for ResolveNow.
-type ResolveNowOption struct{}
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
 
 // Resolver watches for the updates on the specified target.
 // Updates include address updates and service config updates.
@@ -218,14 +320,39 @@
 	// again. It's just a hint, resolver can ignore this if it's not necessary.
 	//
 	// It could be called multiple times concurrently.
-	ResolveNow(ResolveNowOption)
+	ResolveNow(ResolveNowOptions)
 	// Close closes the resolver.
 	Close()
 }
 
-// UnregisterForTesting removes the resolver builder with the given scheme from the
-// resolver map.
-// This function is for testing only.
-func UnregisterForTesting(scheme string) {
-	delete(m, scheme)
+// AuthorityOverrider is implemented by Builders that wish to override the
+// default authority for the ClientConn.
+// By default, the authority used is target.Endpoint().
+type AuthorityOverrider interface {
+	// OverrideAuthority returns the authority to use for a ClientConn with the
+	// given target. The implementation must generate it without blocking,
+	// typically inline, and must keep it unchanged.
+	//
+	// The returned string must be a valid ":authority" header value, i.e. be
+	// encoded according to
+	// [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as
+	// necessary.
+	OverrideAuthority(Target) string
+}
+
+// ValidateEndpoints validates endpoints from a petiole policy's perspective.
+// Petiole policies should call this before calling into their children. See
+// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md)
+// for details.
+func ValidateEndpoints(endpoints []Endpoint) error {
+	if len(endpoints) == 0 {
+		return errors.New("endpoints list is empty")
+	}
+
+	for _, endpoint := range endpoints {
+		for range endpoint.Addresses {
+			return nil
+		}
+	}
+	return errors.New("endpoints list contains no addresses")
 }
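
A sketch of ValidateEndpoints' two failure modes, with hypothetical addresses:

package main

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func main() {
	// Fails: the list is non-empty but no endpoint carries an address.
	err := resolver.ValidateEndpoints([]resolver.Endpoint{{}, {}})
	fmt.Println(err) // endpoints list contains no addresses

	// Passes: at least one endpoint has at least one address.
	err = resolver.ValidateEndpoints([]resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}},
	})
	fmt.Println(err) // <nil>
}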
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
deleted file mode 100644
index 7dcefcf..0000000
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
-	"fmt"
-	"strings"
-	"sync"
-	"time"
-
-	"google.golang.org/grpc/balancer"
-	"google.golang.org/grpc/credentials"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/grpcsync"
-	"google.golang.org/grpc/resolver"
-	"google.golang.org/grpc/serviceconfig"
-)
-
-// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConnection interface.
-type ccResolverWrapper struct {
-	cc         *ClientConn
-	resolverMu sync.Mutex
-	resolver   resolver.Resolver
-	done       *grpcsync.Event
-	curState   resolver.State
-
-	pollingMu sync.Mutex
-	polling   chan struct{}
-}
-
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", "", false) instead.
-func split2(s, sep string) (string, string, bool) {
-	spl := strings.SplitN(s, sep, 2)
-	if len(spl) < 2 {
-		return "", "", false
-	}
-	return spl[0], spl[1], true
-}
-
-// parseTarget splits target into a struct containing scheme, authority and
-// endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func parseTarget(target string) (ret resolver.Target) {
-	var ok bool
-	ret.Scheme, ret.Endpoint, ok = split2(target, "://")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
-	if !ok {
-		return resolver.Target{Endpoint: target}
-	}
-	return ret
-}
-
-// newCCResolverWrapper uses the resolver.Builder stored in the ClientConn to
-// build a Resolver and returns a ccResolverWrapper object which wraps the
-// newly built resolver.
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
-	rb := cc.dopts.resolverBuilder
-	if rb == nil {
-		return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
-	}
-
-	ccr := &ccResolverWrapper{
-		cc:   cc,
-		done: grpcsync.NewEvent(),
-	}
-
-	var credsClone credentials.TransportCredentials
-	if creds := cc.dopts.copts.TransportCredentials; creds != nil {
-		credsClone = creds.Clone()
-	}
-	rbo := resolver.BuildOption{
-		DisableServiceConfig: cc.dopts.disableServiceConfig,
-		DialCreds:            credsClone,
-		CredsBundle:          cc.dopts.copts.CredsBundle,
-		Dialer:               cc.dopts.copts.Dialer,
-	}
-
-	var err error
-	// We need to hold the lock here while we assign to the ccr.resolver field
-	// to guard against a data race caused by the following code path,
-	// rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
-	// accessing ccr.resolver which is being assigned here.
-	ccr.resolverMu.Lock()
-	ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
-	if err != nil {
-		return nil, err
-	}
-	ccr.resolverMu.Unlock()
-	return ccr, nil
-}
-
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
-	ccr.resolverMu.Lock()
-	if !ccr.done.HasFired() {
-		ccr.resolver.ResolveNow(o)
-	}
-	ccr.resolverMu.Unlock()
-}
-
-func (ccr *ccResolverWrapper) close() {
-	ccr.resolverMu.Lock()
-	ccr.resolver.Close()
-	ccr.done.Fire()
-	ccr.resolverMu.Unlock()
-}
-
-// poll begins or ends asynchronous polling of the resolver based on whether
-// err is ErrBadResolverState.
-func (ccr *ccResolverWrapper) poll(err error) {
-	ccr.pollingMu.Lock()
-	defer ccr.pollingMu.Unlock()
-	if err != balancer.ErrBadResolverState {
-		// stop polling
-		if ccr.polling != nil {
-			close(ccr.polling)
-			ccr.polling = nil
-		}
-		return
-	}
-	if ccr.polling != nil {
-		// already polling
-		return
-	}
-	p := make(chan struct{})
-	ccr.polling = p
-	go func() {
-		for i := 0; ; i++ {
-			ccr.resolveNow(resolver.ResolveNowOption{})
-			t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
-			select {
-			case <-p:
-				t.Stop()
-				return
-			case <-ccr.done.Done():
-				// Resolver has been closed.
-				t.Stop()
-				return
-			case <-t.C:
-				select {
-				case <-p:
-					return
-				default:
-				}
-				// Timer expired; re-resolve.
-			}
-		}
-	}()
-}
-
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
-	if ccr.done.HasFired() {
-		return
-	}
-	grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(s)
-	}
-	ccr.curState = s
-	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
-}
-
-func (ccr *ccResolverWrapper) ReportError(err error) {
-	if ccr.done.HasFired() {
-		return
-	}
-	grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err)
-	if channelz.IsOn() {
-		channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
-			Desc:     fmt.Sprintf("Resolver reported error: %v", err),
-			Severity: channelz.CtWarning,
-		})
-	}
-	ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
-}
-
-// NewAddress is called by the resolver implementation to send addresses to gRPC.
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
-	if ccr.done.HasFired() {
-		return
-	}
-	grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
-	}
-	ccr.curState.Addresses = addrs
-	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
-}
-
-// NewServiceConfig is called by the resolver implementation to send service
-// configs to gRPC.
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
-	if ccr.done.HasFired() {
-		return
-	}
-	grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
-	scpr := parseServiceConfig(sc)
-	if scpr.Err != nil {
-		grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err)
-		if channelz.IsOn() {
-			channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
-				Desc:     fmt.Sprintf("Error parsing service config: %v", scpr.Err),
-				Severity: channelz.CtWarning,
-			})
-		}
-		ccr.poll(balancer.ErrBadResolverState)
-		return
-	}
-	if channelz.IsOn() {
-		ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
-	}
-	ccr.curState.ServiceConfig = scpr
-	ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
-}
-
-func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
-	return parseServiceConfig(scJSON)
-}
-
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
-	var updates []string
-	var oldSC, newSC *ServiceConfig
-	var oldOK, newOK bool
-	if ccr.curState.ServiceConfig != nil {
-		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
-	}
-	if s.ServiceConfig != nil {
-		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
-	}
-	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
-		updates = append(updates, "service config updated")
-	}
-	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
-		updates = append(updates, "resolver returned an empty address list")
-	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
-		updates = append(updates, "resolver returned new addresses")
-	}
-	channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
-		Desc:     fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
-		Severity: channelz.CtINFO,
-	})
-}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
new file mode 100644
index 0000000..80e16a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -0,0 +1,221 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/internal/channelz"
+	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/pretty"
+	"google.golang.org/grpc/internal/resolver/delegatingresolver"
+	"google.golang.org/grpc/resolver"
+	"google.golang.org/grpc/serviceconfig"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements resolver.ClientConn interface.
+type ccResolverWrapper struct {
+	// The following fields are initialized when the wrapper is created and are
+	// read-only afterwards, and therefore can be accessed without a mutex.
+	cc                  *ClientConn
+	ignoreServiceConfig bool
+	serializer          *grpcsync.CallbackSerializer
+	serializerCancel    context.CancelFunc
+
+	resolver resolver.Resolver // only accessed within the serializer
+
+	// The following fields are protected by mu.  Caller must take cc.mu before
+	// taking mu.
+	mu       sync.Mutex
+	curState resolver.State
+	closed   bool
+}
+
+// newCCResolverWrapper initializes the ccResolverWrapper.  It can only be used
+// after calling start, which builds the resolver.
+func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
+	ctx, cancel := context.WithCancel(cc.ctx)
+	return &ccResolverWrapper{
+		cc:                  cc,
+		ignoreServiceConfig: cc.dopts.disableServiceConfig,
+		serializer:          grpcsync.NewCallbackSerializer(ctx),
+		serializerCancel:    cancel,
+	}
+}
+
+// start builds the name resolver using the resolver.Builder in cc and returns
+// any error encountered.  It must always be the first operation performed on
+// any newly created ccResolverWrapper, except that close may be called instead.
+func (ccr *ccResolverWrapper) start() error {
+	errCh := make(chan error)
+	ccr.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil {
+			return
+		}
+		opts := resolver.BuildOptions{
+			DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
+			DialCreds:            ccr.cc.dopts.copts.TransportCredentials,
+			CredsBundle:          ccr.cc.dopts.copts.CredsBundle,
+			Dialer:               ccr.cc.dopts.copts.Dialer,
+			Authority:            ccr.cc.authority,
+			MetricsRecorder:      ccr.cc.metricsRecorderList,
+		}
+		var err error
+		// The delegating resolver is used unless:
+		//   - A custom dialer is provided via the WithContextDialer DialOption, or
+		//   - Proxy usage is disabled through the WithNoProxy DialOption.
+		// In these cases, the resolver is built based on the scheme of the target,
+		// using the appropriate resolver builder.
+		if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy {
+			ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+		} else {
+			ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution)
+		}
+		errCh <- err
+	})
+	return <-errCh
+}
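
The wrapper leans on grpcsync.CallbackSerializer to run resolver callbacks one at a time, which is why the resolver field needs no lock of its own. A simplified, hypothetical sketch of that pattern follows; it is not the internal implementation (in particular, the real serializer does not drop callbacks when its queue is full):

package main

import (
	"context"
	"fmt"
)

// serializer runs queued callbacks one at a time on a single goroutine.
type serializer struct {
	callbacks chan func(context.Context)
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{callbacks: make(chan func(context.Context), 64)}
	go func() {
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx)
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// TrySchedule enqueues cb without blocking; in this sketch it is silently
// dropped if the queue is full.
func (s *serializer) TrySchedule(cb func(context.Context)) {
	select {
	case s.callbacks <- cb:
	default:
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	s := newSerializer(ctx)
	done := make(chan struct{})
	s.TrySchedule(func(context.Context) { fmt.Println("ran first") })
	s.TrySchedule(func(context.Context) { close(done) }) // runs strictly after the first
	<-done
}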
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+	ccr.serializer.TrySchedule(func(ctx context.Context) {
+		if ctx.Err() != nil || ccr.resolver == nil {
+			return
+		}
+		ccr.resolver.ResolveNow(o)
+	})
+}
+
+// close initiates async shutdown of the wrapper.  To determine that the
+// wrapper has finished shutting down, the caller should block on
+// ccr.serializer.Done() without cc.mu held.
+func (ccr *ccResolverWrapper) close() {
+	channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver")
+	ccr.mu.Lock()
+	ccr.closed = true
+	ccr.mu.Unlock()
+
+	ccr.serializer.TrySchedule(func(context.Context) {
+		if ccr.resolver == nil {
+			return
+		}
+		ccr.resolver.Close()
+		ccr.resolver = nil
+	})
+	ccr.serializerCancel()
+}
+
+// UpdateState is called by resolver implementations to report new state to
+// gRPC, which includes addresses and the service config.
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+	ccr.cc.mu.Lock()
+	ccr.mu.Lock()
+	if ccr.closed {
+		ccr.mu.Unlock()
+		ccr.cc.mu.Unlock()
+		return nil
+	}
+	if s.Endpoints == nil {
+		s.Endpoints = addressesToEndpoints(s.Addresses)
+	}
+	ccr.addChannelzTraceEvent(s)
+	ccr.curState = s
+	ccr.mu.Unlock()
+	return ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
+func (ccr *ccResolverWrapper) ReportError(err error) {
+	ccr.cc.mu.Lock()
+	ccr.mu.Lock()
+	if ccr.closed {
+		ccr.mu.Unlock()
+		ccr.cc.mu.Unlock()
+		return
+	}
+	ccr.mu.Unlock()
+	channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err)
+	ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+	ccr.cc.mu.Lock()
+	ccr.mu.Lock()
+	if ccr.closed {
+		ccr.mu.Unlock()
+		ccr.cc.mu.Unlock()
+		return
+	}
+	s := resolver.State{
+		Addresses:     addrs,
+		ServiceConfig: ccr.curState.ServiceConfig,
+		Endpoints:     addressesToEndpoints(addrs),
+	}
+	ccr.addChannelzTraceEvent(s)
+	ccr.curState = s
+	ccr.mu.Unlock()
+	ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+	return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
+}
+
+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+	if !logger.V(0) && !channelz.IsOn() {
+		return
+	}
+	var updates []string
+	var oldSC, newSC *ServiceConfig
+	var oldOK, newOK bool
+	if ccr.curState.ServiceConfig != nil {
+		oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if s.ServiceConfig != nil {
+		newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+	}
+	if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
+		updates = append(updates, "service config updated")
+	}
+	if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+		updates = append(updates, "resolver returned an empty address list")
+	} else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+		updates = append(updates, "resolver returned new addresses")
+	}
+	channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+}
+
+func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
+	endpoints := make([]resolver.Endpoint, 0, len(addrs))
+	for _, a := range addrs {
+		ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+		ep.Addresses[0].BalancerAttributes = nil
+		endpoints = append(endpoints, ep)
+	}
+	return endpoints
+}
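
A standalone sketch of the same transformation using only exported types; the helper name is hypothetical and simply mirrors the unexported addressesToEndpoints above:

package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
	"google.golang.org/grpc/resolver"
)

// toEndpoints builds one Endpoint per Address, promoting each address's
// BalancerAttributes to the Endpoint's Attributes and clearing them on the
// copied Address.
func toEndpoints(addrs []resolver.Address) []resolver.Endpoint {
	endpoints := make([]resolver.Endpoint, 0, len(addrs))
	for _, a := range addrs {
		ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
		ep.Addresses[0].BalancerAttributes = nil
		endpoints = append(endpoints, ep)
	}
	return endpoints
}

func main() {
	addrs := []resolver.Address{{Addr: "10.0.0.1:443", BalancerAttributes: attributes.New("weight", "3")}}
	eps := toEndpoints(addrs)
	fmt.Println(eps[0].Attributes)                      // weight moved to the endpoint
	fmt.Println(eps[0].Addresses[0].BalancerAttributes) // <nil>
}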
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index edaba79..47ea09f 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -19,15 +19,12 @@
 package grpc
 
 import (
-	"bytes"
 	"compress/gzip"
 	"context"
 	"encoding/binary"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"math"
-	"net/url"
 	"strings"
 	"sync"
 	"time"
@@ -37,6 +34,7 @@
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
 	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -77,8 +75,8 @@
 	}
 	return &gzipCompressor{
 		pool: sync.Pool{
-			New: func() interface{} {
-				w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+			New: func() any {
+				w, err := gzip.NewWriterLevel(io.Discard, level)
 				if err != nil {
 					panic(err)
 				}
@@ -144,7 +142,7 @@
 		z.Close()
 		d.pool.Put(z)
 	}()
-	return ioutil.ReadAll(z)
+	return io.ReadAll(z)
 }
 
 func (d *gzipDecompressor) Type() string {
@@ -153,15 +151,16 @@
 
 // callInfo contains all related configuration and information about an RPC.
 type callInfo struct {
-	compressorType        string
+	compressorName        string
 	failFast              bool
-	stream                ClientStream
 	maxReceiveMessageSize *int
 	maxSendMessageSize    *int
 	creds                 credentials.PerRPCCredentials
 	contentSubtype        string
 	codec                 baseCodec
 	maxRetryRPCBufferSize int
+	onFinish              []func(err error)
+	authority             string
 }
 
 func defaultCallInfo() *callInfo {
@@ -180,7 +179,7 @@
 
 	// after is called after the call has completed.  after cannot return an
 	// error, so any failures should be reported via output parameters.
-	after(*callInfo)
+	after(*callInfo, *csAttempt)
 }
 
 // EmptyCallOption does not alter the Call configuration.
@@ -188,8 +187,22 @@
 // by interceptors.
 type EmptyCallOption struct{}
 
-func (EmptyCallOption) before(*callInfo) error { return nil }
-func (EmptyCallOption) after(*callInfo)        {}
+func (EmptyCallOption) before(*callInfo) error      { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// StaticMethod returns a CallOption which specifies that a call is being made
+// to a method that is static, which means the method is known at compile time
+// and doesn't change at runtime. This can be used as a signal to stats plugins
+// that this method is safe to include as a key to a measurement.
+func StaticMethod() CallOption {
+	return StaticMethodCallOption{}
+}
+
+// StaticMethodCallOption is a CallOption that specifies that a call comes
+// from a static method.
+type StaticMethodCallOption struct {
+	EmptyCallOption
+}
 
 // Header returns a CallOptions that retrieves the header metadata
 // for a unary RPC.
@@ -199,16 +212,18 @@
 
 // HeaderCallOption is a CallOption for collecting response header metadata.
 // The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type HeaderCallOption struct {
 	HeaderAddr *metadata.MD
 }
 
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo) {
-	if c.stream != nil {
-		*o.HeaderAddr, _ = c.stream.Header()
-	}
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
+	*o.HeaderAddr, _ = attempt.transportStream.Header()
 }
 
 // Trailer returns a CallOptions that retrieves the trailer metadata
@@ -219,16 +234,18 @@
 
 // TrailerCallOption is a CallOption for collecting response trailer metadata.
 // The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type TrailerCallOption struct {
 	TrailerAddr *metadata.MD
 }
 
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo) {
-	if c.stream != nil {
-		*o.TrailerAddr = c.stream.Trailer()
-	}
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
+	*o.TrailerAddr = attempt.transportStream.Trailer()
 }
 
 // Peer returns a CallOption that retrieves peer information for a unary RPC.
@@ -239,30 +256,29 @@
 
 // PeerCallOption is a CallOption for collecting the identity of the remote
 // peer. The peer field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type PeerCallOption struct {
 	PeerAddr *peer.Peer
 }
 
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo) {
-	if c.stream != nil {
-		if x, ok := peer.FromContext(c.stream.Context()); ok {
-			*o.PeerAddr = *x
-		}
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
+	if x, ok := peer.FromContext(attempt.transportStream.Context()); ok {
+		*o.PeerAddr = *x
 	}
 }
 
-// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error.  gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data.  Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect.  If
+// waitForReady is false, the RPC will fail immediately.  Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
 //
-// By default, RPCs don't "wait for ready".
+// By default, RPCs do not "wait for ready".
 func WaitForReady(waitForReady bool) CallOption {
 	return FailFastCallOption{FailFast: !waitForReady}
 }
@@ -276,7 +292,11 @@
 
 // FailFastCallOption is a CallOption for indicating whether an RPC should fail
 // fast or not.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type FailFastCallOption struct {
 	FailFast bool
 }
@@ -285,16 +305,57 @@
 	c.failFast = o.FailFast
 	return nil
 }
-func (o FailFastCallOption) after(c *callInfo) {}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
 
-// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
-func MaxCallRecvMsgSize(s int) CallOption {
-	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
+// OnFinish returns a CallOption that configures a callback to be called when
+// the call completes. The error passed to the callback is the status of the
+// RPC, and may be nil. The onFinish callback provided will only be called once
+// by gRPC. This is mainly intended for use by streaming interceptors, to be
+// notified when the RPC completes along with information about its status.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func OnFinish(onFinish func(err error)) CallOption {
+	return OnFinishCallOption{
+		OnFinish: onFinish,
+	}
+}
+
+// OnFinishCallOption is a CallOption that indicates a callback to be called when
+// the call completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type OnFinishCallOption struct {
+	OnFinish func(error)
+}
+
+func (o OnFinishCallOption) before(c *callInfo) error {
+	c.onFinish = append(c.onFinish, o.OnFinish)
+	return nil
+}
+
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
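
A sketch combining several of the CallOptions defined in this file on a raw Invoke call. The target, method path, and message types are hypothetical placeholders; WaitForReady, MaxCallRecvMsgSize, UseCompressor, and OnFinish are the options documented above and below:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	conn, err := grpc.NewClient("dns:///greeter.example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var in, out emptypb.Empty // placeholder request/response messages
	err = conn.Invoke(ctx, "/example.Greeter/SayHello", &in, &out,
		grpc.WaitForReady(true),               // block in TRANSIENT_FAILURE instead of failing fast
		grpc.MaxCallRecvMsgSize(16*1024*1024), // raise the 4MB receive default
		grpc.UseCompressor(gzip.Name),         // compress the request body
		grpc.OnFinish(func(err error) { log.Printf("RPC finished: %v", err) }),
	)
	log.Println("Invoke returned:", err)
}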
+
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can receive. If this is not set, gRPC uses the default
+// 4MB.
+func MaxCallRecvMsgSize(bytes int) CallOption {
+	return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
 }
 
 // MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can receive.
-// This is an EXPERIMENTAL API.
+// size in bytes the client can receive.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type MaxRecvMsgSizeCallOption struct {
 	MaxRecvMsgSize int
 }
@@ -303,16 +364,52 @@
 	c.maxReceiveMessageSize = &o.MaxRecvMsgSize
 	return nil
 }
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
 
-// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
-func MaxCallSendMsgSize(s int) CallOption {
-	return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
+// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of
+// an RPC to the specified value. When using CallAuthority, the credentials in
+// use must implement the AuthorityValidator interface.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func CallAuthority(authority string) CallOption {
+	return AuthorityOverrideCallOption{Authority: authority}
+}
+
+// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2
+// :authority header value to use for the call.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a later
+// release.
+type AuthorityOverrideCallOption struct {
+	Authority string
+}
+
+func (o AuthorityOverrideCallOption) before(c *callInfo) error {
+	c.authority = o.Authority
+	return nil
+}
+
+func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {}
+
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.
+func MaxCallSendMsgSize(bytes int) CallOption {
+	return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
 }
 
 // MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can send.
-// This is an EXPERIMENTAL API.
+// size in bytes the client can send.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type MaxSendMsgSizeCallOption struct {
 	MaxSendMsgSize int
 }
@@ -321,7 +418,7 @@
 	c.maxSendMessageSize = &o.MaxSendMsgSize
 	return nil
 }
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
 
 // PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
 // for a call.
@@ -331,7 +428,11 @@
 
 // PerRPCCredsCallOption is a CallOption that indicates the per-RPC
 // credentials to use for the call.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type PerRPCCredsCallOption struct {
 	Creds credentials.PerRPCCredentials
 }
@@ -340,28 +441,35 @@
 	c.creds = o.Creds
 	return nil
 }
-func (o PerRPCCredsCallOption) after(c *callInfo) {}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
 
 // UseCompressor returns a CallOption which sets the compressor used when
 // sending the request.  If WithCompressor is also set, UseCompressor has
 // higher priority.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func UseCompressor(name string) CallOption {
 	return CompressorCallOption{CompressorType: name}
 }
 
 // CompressorCallOption is a CallOption that indicates the compressor to use.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type CompressorCallOption struct {
 	CompressorType string
 }
 
 func (o CompressorCallOption) before(c *callInfo) error {
-	c.compressorType = o.CompressorType
+	c.compressorName = o.CompressorType
 	return nil
 }
-func (o CompressorCallOption) after(c *callInfo) {}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
 
 // CallContentSubtype returns a CallOption that will set the content-subtype
 // for a call. For example, if content-subtype is "json", the Content-Type over
@@ -385,7 +493,11 @@
 
 // ContentSubtypeCallOption is a CallOption that indicates the content-subtype
 // used for marshaling messages.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ContentSubtypeCallOption struct {
 	ContentSubtype string
 }
@@ -394,11 +506,12 @@
 	c.contentSubtype = o.ContentSubtype
 	return nil
 }
-func (o ContentSubtypeCallOption) after(c *callInfo) {}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
 
-// ForceCodec returns a CallOption that will set the given Codec to be
-// used for all request and response messages for a call. The result of calling
-// String() will be used as the content-subtype in a case-insensitive manner.
+// ForceCodec returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
 //
 // See Content-Type on
 // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
@@ -409,7 +522,10 @@
 // This function is provided for advanced users; prefer to use only
 // CallContentSubtype to select a registered codec instead.
 //
-// This is an EXPERIMENTAL API.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func ForceCodec(codec encoding.Codec) CallOption {
 	return ForceCodecCallOption{Codec: codec}
 }
@@ -417,16 +533,59 @@
 // ForceCodecCallOption is a CallOption that indicates the codec used for
 // marshaling messages.
 //
-// This is an EXPERIMENTAL API.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ForceCodecCallOption struct {
 	Codec encoding.Codec
 }
 
 func (o ForceCodecCallOption) before(c *callInfo) error {
-	c.codec = o.Codec
+	c.codec = newCodecV1Bridge(o.Codec)
 	return nil
 }
-func (o ForceCodecCallOption) after(c *callInfo) {}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+	return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+	CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+	c.codec = o.CodecV2
+	return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
 
 // CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
 // an encoding.Codec.
@@ -439,28 +598,38 @@
 // CustomCodecCallOption is a CallOption that indicates the codec used for
 // marshaling messages.
 //
-// This is an EXPERIMENTAL API.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type CustomCodecCallOption struct {
 	Codec Codec
 }
 
 func (o CustomCodecCallOption) before(c *callInfo) error {
-	c.codec = o.Codec
+	c.codec = newCodecV0Bridge(o.Codec)
 	return nil
 }
-func (o CustomCodecCallOption) after(c *callInfo) {}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
 
 // MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
 // used for buffering this RPC's requests for retry purposes.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func MaxRetryRPCBufferSize(bytes int) CallOption {
 	return MaxRetryRPCBufferSizeCallOption{bytes}
 }
 
 // MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
 // memory to be used for caching this RPC for retry purposes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type MaxRetryRPCBufferSizeCallOption struct {
 	MaxRetryRPCBufferSize int
 }
@@ -469,7 +638,7 @@
 	c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
 	return nil
 }
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
 
 // The format of the payload: compressed or not?
 type payloadFormat uint8
@@ -479,16 +648,28 @@
 	compressionMade payloadFormat = 1 // compressed
 )
 
+func (pf payloadFormat) isCompressed() bool {
+	return pf == compressionMade
+}
+
+type streamReader interface {
+	ReadMessageHeader(header []byte) error
+	Read(n int) (mem.BufferSlice, error)
+}
+
 // parser reads complete gRPC messages from the underlying reader.
 type parser struct {
 	// r is the underlying reader.
 	// See the comment on recvMsg for the permissible
 	// error types.
-	r io.Reader
+	r streamReader
 
 	// The header of a gRPC message. Find more detail at
 	// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
 	header [5]byte
+
+	// bufferPool is the pool of shared receive buffers.
+	bufferPool mem.BufferPool
 }
 
 // recvMsg reads a complete gRPC message from the stream.
@@ -497,46 +678,44 @@
 // format. The caller owns the returned msg memory.
 //
 // If there is an error, possible values are:
-//   * io.EOF, when no messages remain
-//   * io.ErrUnexpectedEOF
-//   * of type transport.ConnectionError
-//   * an error from the status package
+//   - io.EOF, when no messages remain
+//   - io.ErrUnexpectedEOF
+//   - of type transport.ConnectionError
+//   - an error from the status package
+//
 // No other error values or types may be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
 // error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
-	if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+	err := p.r.ReadMessageHeader(p.header[:])
+	if err != nil {
 		return 0, nil, err
 	}
 
-	pf = payloadFormat(p.header[0])
+	pf := payloadFormat(p.header[0])
 	length := binary.BigEndian.Uint32(p.header[1:])
 
-	if length == 0 {
-		return pf, nil, nil
-	}
 	if int64(length) > int64(maxInt) {
 		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
 	}
 	if int(length) > maxReceiveMessageSize {
 		return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
 	}
-	// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
-	// of making it for each message:
-	msg = make([]byte, int(length))
-	if _, err := p.r.Read(msg); err != nil {
+
+	data, err := p.r.Read(int(length))
+	if err != nil {
 		if err == io.EOF {
 			err = io.ErrUnexpectedEOF
 		}
 		return 0, nil, err
 	}
-	return pf, msg, nil
+	return pf, data, nil
 }
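
As a worked example of the framing recvMsg parses (the length-prefixed message format of PROTOCOL-HTTP2.md), a standalone sketch that produces the same 5-byte header msgHeader builds below:

package main // sketch only

import "encoding/binary"

// frame prepends the gRPC message header: one compressed-flag byte
// followed by a 4-byte big-endian payload length.
func frame(payload []byte, compressed bool) []byte {
	hdr := make([]byte, 5)
	if compressed {
		hdr[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
	return append(hdr, payload...)
}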
 
 // encode serializes msg and returns a buffer containing the message, or an
 // error if it is too large to be transmitted by grpc.  If msg is nil, it
 // generates an empty message.
-func encode(c baseCodec, msg interface{}) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
 	if msg == nil { // NOTE: typed nils will not be caught by this check
 		return nil, nil
 	}
@@ -544,41 +723,53 @@
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
 	}
-	if uint(len(b)) > math.MaxUint32 {
-		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+	if bufSize := uint(b.Len()); bufSize > math.MaxUint32 {
+		b.Free()
+		return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize)
 	}
 	return b, nil
 }
 
-// compress returns the input bytes compressed by compressor or cp.  If both
-// compressors are nil, returns nil.
+// compress returns the input bytes compressed by compressor or cp.
+// If both compressors are nil, or if the message has zero length, returns nil,
+// indicating no compression was done.
 //
 // TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
-	if compressor == nil && cp == nil {
-		return nil, nil
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+	if (compressor == nil && cp == nil) || in.Len() == 0 {
+		return nil, compressionNone, nil
 	}
+	var out mem.BufferSlice
+	w := mem.NewWriter(&out, pool)
 	wrapErr := func(err error) error {
+		out.Free()
 		return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
 	}
-	cbuf := &bytes.Buffer{}
 	if compressor != nil {
-		z, err := compressor.Compress(cbuf)
+		z, err := compressor.Compress(w)
 		if err != nil {
-			return nil, wrapErr(err)
+			return nil, 0, wrapErr(err)
 		}
-		if _, err := z.Write(in); err != nil {
-			return nil, wrapErr(err)
+		for _, b := range in {
+			if _, err := z.Write(b.ReadOnlyData()); err != nil {
+				return nil, 0, wrapErr(err)
+			}
 		}
 		if err := z.Close(); err != nil {
-			return nil, wrapErr(err)
+			return nil, 0, wrapErr(err)
 		}
 	} else {
-		if err := cp.Do(cbuf, in); err != nil {
-			return nil, wrapErr(err)
+		// This is obviously really inefficient since it fully materializes the data, but
+		// there is no way around this with the old Compressor API. At least it attempts
+		// to return the buffer to the provider, in the hope that it can be reused
+		// (maybe even by a subsequent call to this very function).
+		buf := in.MaterializeToBuffer(pool)
+		defer buf.Free()
+		if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+			return nil, 0, wrapErr(err)
 		}
 	}
-	return cbuf.Bytes(), nil
+	return out, compressionMade, nil
 }
 
 const (
@@ -589,32 +780,36 @@
 
 // msgHeader returns a 5-byte header for the message being transmitted and the
 // payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
 	hdr = make([]byte, headerLen)
-	if compData != nil {
-		hdr[0] = byte(compressionMade)
-		data = compData
+	hdr[0] = byte(pf)
+
+	var length uint32
+	if pf.isCompressed() {
+		length = uint32(compData.Len())
+		payload = compData
 	} else {
-		hdr[0] = byte(compressionNone)
+		length = uint32(data.Len())
+		payload = data
 	}
 
 	// Write length of payload into buf
-	binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
-	return hdr, data
+	binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+	return hdr, payload
 }
 
-func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
 	return &stats.OutPayload{
-		Client:     client,
-		Payload:    msg,
-		Data:       data,
-		Length:     len(data),
-		WireLength: len(payload) + headerLen,
-		SentTime:   t,
+		Client:           client,
+		Payload:          msg,
+		Length:           dataLength,
+		WireLength:       payloadLength + headerLen,
+		CompressedLength: payloadLength,
+		SentTime:         t,
 	}
 }
 
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
 	switch pf {
 	case compressionNone:
 	case compressionMade:
@@ -622,7 +817,10 @@
 			return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
 		}
 		if !haveCompressor {
-			return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+			if isServer {
+				return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+			}
+			return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
 		}
 	default:
 		return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -631,89 +829,120 @@
 }
 
 type payloadInfo struct {
-	wireLength        int // The compressed length got from wire.
-	uncompressedBytes []byte
+	compressedLength  int // The compressed length received from the wire.
+	uncompressedBytes mem.BufferSlice
 }
 
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
-	pf, d, err := p.recvMsg(maxReceiveMessageSize)
+func (p *payloadInfo) free() {
+	if p != nil && p.uncompressedBytes != nil {
+		p.uncompressedBytes.Free()
+	}
+}
+
+// recvAndDecompress reads a message from the stream, decompressing it if necessary.
+//
+// The caller must call Free on the returned buffer slice as soon as it is no
+// longer needed, so the memory can be returned to the pool.
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+	pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
 	if err != nil {
 		return nil, err
 	}
-	if payInfo != nil {
-		payInfo.wireLength = len(d)
-	}
 
-	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+	compressedLength := compressed.Len()
+
+	if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+		compressed.Free()
 		return nil, st.Err()
 	}
 
-	var size int
-	if pf == compressionMade {
+	if pf.isCompressed() {
+		defer compressed.Free()
 		// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
 		// use this decompressor as the default.
-		if dc != nil {
-			d, err = dc.Do(bytes.NewReader(d))
-			size = len(d)
-		} else {
-			d, size, err = decompress(compressor, d, maxReceiveMessageSize)
-		}
+		out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool)
 		if err != nil {
-			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+			return nil, err
 		}
 	} else {
-		size = len(d)
+		out = compressed
 	}
-	if size > maxReceiveMessageSize {
-		// TODO: Revisit the error code. Currently keep it consistent with java
-		// implementation.
-		return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+
+	if payInfo != nil {
+		payInfo.compressedLength = compressedLength
+		out.Ref()
+		payInfo.uncompressedBytes = out
 	}
-	return d, nil
+
+	return out, nil
 }
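
The Ref/Free pairing above follows the mem.BufferSlice ownership rule: every reference taken must be released before the buffers can return to the pool. A minimal sketch of that contract (handOff and inspect are hypothetical):

package main // sketch only

import "google.golang.org/grpc/mem"

// handOff shares buf with an asynchronous consumer. The buffers are
// recycled only after the last reference is released.
func handOff(buf mem.BufferSlice, inspect func(mem.BufferSlice)) {
	buf.Ref() // extra reference owned by the goroutine below
	go func() {
		defer buf.Free() // consumer's reference
		inspect(buf)
	}()
	buf.Free() // caller's reference
}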
 
-// Using compressor, decompress d, returning data and size.
-// Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
-	dcReader, err := compressor.Decompress(bytes.NewReader(d))
-	if err != nil {
-		return nil, 0, err
-	}
-	if sizer, ok := compressor.(interface {
-		DecompressedSize(compressedBytes []byte) int
-	}); ok {
-		if size := sizer.DecompressedSize(d); size >= 0 {
-			if size > maxReceiveMessageSize {
-				return nil, size, nil
-			}
-			// size is used as an estimate to size the buffer, but we
-			// will read more data if available.
-			// +MinRead so ReadFrom will not reallocate if size is correct.
-			buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
-			bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-			return buf.Bytes(), int(bytesRead), err
+// decompress decompresses the given data using either a custom decompressor
+// (dc) or a registered compressor; if both are provided, the custom
+// decompressor takes precedence. On success, it returns the decompressed data;
+// it returns an error if decompression fails or the decompressed data exceeds
+// maxReceiveMessageSize.
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) {
+	if dc != nil {
+		uncompressed, err := dc.Do(d.Reader())
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
 		}
+		if len(uncompressed) > maxReceiveMessageSize {
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize)
+		}
+		return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil
 	}
-	// Read from LimitReader with limit max+1. So if the underlying
-	// reader is over limit, the result will be bigger than max.
-	d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
-	return d, len(d), err
+	if compressor != nil {
+		dcReader, err := compressor.Decompress(d.Reader())
+		if err != nil {
+			return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
+		}
+
+		// Read at most one byte more than the limit from the decompressor.
+		// Unless the limit is MaxInt64, in which case, that's impossible, so
+		// apply no limit.
+		if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
+			dcReader = io.LimitReader(dcReader, limit+1)
+		}
+		out, err := mem.ReadAll(dcReader, pool)
+		if err != nil {
+			out.Free()
+			return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
+		}
+
+		if out.Len() > maxReceiveMessageSize {
+			out.Free()
+			return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
+		}
+		return out, nil
+	}
+	return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
+}
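
The limit+1 technique above generalizes: reading at most max+1 bytes is enough to prove that a stream exceeds max without buffering unbounded data. A standalone sketch (readCapped is hypothetical):

package main // sketch only

import (
	"fmt"
	"io"
)

// readCapped reads from r, rejecting inputs larger than maxSize.
func readCapped(r io.Reader, maxSize int) ([]byte, error) {
	data, err := io.ReadAll(io.LimitReader(r, int64(maxSize)+1))
	if err != nil {
		return nil, err
	}
	if len(data) > maxSize {
		return nil, fmt.Errorf("input exceeds %d bytes", maxSize)
	}
	return data, nil
}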
+
+type recvCompressor interface {
+	RecvCompress() string
 }
 
 // For the two compressor parameters, both should not be set, but if they are,
 // dc takes precedence over compressor.
 // TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
-	d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+	data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
 	if err != nil {
 		return err
 	}
-	if err := c.Unmarshal(d, m); err != nil {
-		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+
+	// If the codec wants its own reference to the data, it can get it. Otherwise, always
+	// free the buffers.
+	defer data.Free()
+
+	if err := c.Unmarshal(data, m); err != nil {
+		return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
 	}
-	if payInfo != nil {
-		payInfo.uncompressedBytes = d
-	}
+
 	return nil
 }
 
@@ -772,115 +1001,86 @@
 // Errorf returns nil if c is OK.
 //
 // Deprecated: use status.Errorf instead.
-func Errorf(c codes.Code, format string, a ...interface{}) error {
+func Errorf(c codes.Code, format string, a ...any) error {
 	return status.Errorf(c, format, a...)
 }
 
+var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error())
+var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
+
 // toRPCErr converts an error into an error from the status package.
 func toRPCErr(err error) error {
-	if err == nil || err == io.EOF {
+	switch err {
+	case nil, io.EOF:
 		return err
-	}
-	if err == io.ErrUnexpectedEOF {
+	case context.DeadlineExceeded:
+		return errContextDeadline
+	case context.Canceled:
+		return errContextCanceled
+	case io.ErrUnexpectedEOF:
 		return status.Error(codes.Internal, err.Error())
 	}
-	if _, ok := status.FromError(err); ok {
-		return err
-	}
+
 	switch e := err.(type) {
 	case transport.ConnectionError:
 		return status.Error(codes.Unavailable, e.Desc)
-	default:
-		switch err {
-		case context.DeadlineExceeded:
-			return status.Error(codes.DeadlineExceeded, err.Error())
-		case context.Canceled:
-			return status.Error(codes.Canceled, err.Error())
-		}
+	case *transport.NewStreamError:
+		return toRPCErr(e.Err)
 	}
+
+	if _, ok := status.FromError(err); ok {
+		return err
+	}
+
 	return status.Error(codes.Unknown, err.Error())
 }
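
On the caller side, errors normalized by toRPCErr can be inspected with the status package. A short sketch; treating Unavailable as retryable is illustrative policy, not a grpc-go rule:

package main // sketch only

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isRetryable reports whether err carries codes.Unavailable, the code
// toRPCErr assigns to transport connection errors.
func isRetryable(err error) bool {
	st, ok := status.FromError(err)
	if !ok {
		return false
	}
	return st.Code() == codes.Unavailable
}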
 
 // setCallInfoCodec should only be called after CallOptions have been applied.
 func setCallInfoCodec(c *callInfo) error {
 	if c.codec != nil {
-		// codec was already set by a CallOption; use it.
+		// codec was already set by a CallOption; use it, but set the content
+		// subtype if it is not set.
+		if c.contentSubtype == "" {
+			// c.codec is a baseCodec to hide the difference between grpc.Codec and
+			// encoding.Codec (Name vs. String method name).  We only support
+			// setting content subtype from encoding.Codec to avoid a behavior
+			// change with the deprecated version.
+			if ec, ok := c.codec.(encoding.CodecV2); ok {
+				c.contentSubtype = strings.ToLower(ec.Name())
+			}
+		}
 		return nil
 	}
 
 	if c.contentSubtype == "" {
 		// No codec specified in CallOptions; use proto by default.
-		c.codec = encoding.GetCodec(proto.Name)
+		c.codec = getCodec(proto.Name)
 		return nil
 	}
 
 	// c.contentSubtype is already lowercased in CallContentSubtype
-	c.codec = encoding.GetCodec(c.contentSubtype)
+	c.codec = getCodec(c.contentSubtype)
 	if c.codec == nil {
 		return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
 	}
 	return nil
 }
 
-// parseDialTarget returns the network and address to pass to dialer
-func parseDialTarget(target string) (net string, addr string) {
-	net = "tcp"
-
-	m1 := strings.Index(target, ":")
-	m2 := strings.Index(target, ":/")
-
-	// handle unix:addr which will fail with url.Parse
-	if m1 >= 0 && m2 < 0 {
-		if n := target[0:m1]; n == "unix" {
-			net = n
-			addr = target[m1+1:]
-			return net, addr
-		}
-	}
-	if m2 >= 0 {
-		t, err := url.Parse(target)
-		if err != nil {
-			return net, target
-		}
-		scheme := t.Scheme
-		addr = t.Path
-		if scheme == "unix" {
-			net = scheme
-			if addr == "" {
-				addr = t.Host
-			}
-			return net, addr
-		}
-	}
-
-	return net, target
-}
-
-// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
-// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
-	callsStarted   int64
-	callsFailed    int64
-	callsSucceeded int64
-	// lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
-	// time.Time since it's more costly to atomically update time.Time variable than int64 variable.
-	lastCallStartedTime int64
-}
-
 // The SupportPackageIsVersion variables are referenced from generated protocol
 // buffer files to ensure compatibility with the gRPC version used.  The latest
-// support package version is 5.
+// support package version is 9.
 //
-// Older versions are kept for compatibility. They may be removed if
-// compatibility cannot be maintained.
+// Older versions are kept for compatibility.
 //
 // These constants should not be referenced from any other code.
 const (
 	SupportPackageIsVersion3 = true
 	SupportPackageIsVersion4 = true
 	SupportPackageIsVersion5 = true
+	SupportPackageIsVersion6 = true
+	SupportPackageIsVersion7 = true
+	SupportPackageIsVersion8 = true
+	SupportPackageIsVersion9 = true
 )
 
 const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index e54083d..1da2a54 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -33,18 +33,21 @@
 	"sync/atomic"
 	"time"
 
-	"golang.org/x/net/trace"
-
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/credentials"
 	"google.golang.org/grpc/encoding"
 	"google.golang.org/grpc/encoding/proto"
+	estats "google.golang.org/grpc/experimental/stats"
 	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
 	"google.golang.org/grpc/internal/grpcsync"
+	"google.golang.org/grpc/internal/grpcutil"
+	istats "google.golang.org/grpc/internal/stats"
 	"google.golang.org/grpc/internal/transport"
 	"google.golang.org/grpc/keepalive"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
@@ -55,16 +58,47 @@
 const (
 	defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
 	defaultServerMaxSendMessageSize    = math.MaxInt32
+
+	// Server transports are tracked in a map which is keyed on listener
+	// address. For regular gRPC traffic, connections are accepted in Serve()
+	// through a call to Accept(), and we use the actual listener address as key
+	// when we add it to the map. But for connections received through
+	// ServeHTTP(), we do not have a listener and hence use this dummy value.
+	listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
 )
 
-var statusOK = status.New(codes.OK, "")
+func init() {
+	internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+		return srv.opts.creds
+	}
+	internal.IsRegisteredMethod = func(srv *Server, method string) bool {
+		return srv.isRegisteredMethod(method)
+	}
+	internal.ServerFromContext = serverFromContext
+	internal.AddGlobalServerOptions = func(opt ...ServerOption) {
+		globalServerOptions = append(globalServerOptions, opt...)
+	}
+	internal.ClearGlobalServerOptions = func() {
+		globalServerOptions = nil
+	}
+	internal.BinaryLogger = binaryLogger
+	internal.JoinServerOptions = newJoinServerOption
+	internal.BufferPool = bufferPool
+	internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
+		return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
+	}
+}
 
-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
+
+// MethodHandler is a function type that processes a unary RPC method call.
+type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
 
 // MethodDesc represents an RPC service's method specification.
 type MethodDesc struct {
 	MethodName string
-	Handler    methodHandler
+	Handler    MethodHandler
 }
 
 // ServiceDesc represents an RPC service's specification.
@@ -72,41 +106,48 @@
 	ServiceName string
 	// The pointer to the service interface. Used to check whether the user
 	// provided implementation satisfies the interface requirements.
-	HandlerType interface{}
+	HandlerType any
 	Methods     []MethodDesc
 	Streams     []StreamDesc
-	Metadata    interface{}
+	Metadata    any
 }
 
-// service consists of the information of the server serving this service and
-// the methods in this service.
-type service struct {
-	server interface{} // the server for service methods
-	md     map[string]*MethodDesc
-	sd     map[string]*StreamDesc
-	mdata  interface{}
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+	// Contains the implementation for the methods in this service.
+	serviceImpl any
+	methods     map[string]*MethodDesc
+	streams     map[string]*StreamDesc
+	mdata       any
 }
 
 // Server is a gRPC server to serve RPC requests.
 type Server struct {
 	opts serverOptions
 
-	mu     sync.Mutex // guards following
-	lis    map[net.Listener]bool
-	conns  map[transport.ServerTransport]bool
-	serve  bool
-	drain  bool
-	cv     *sync.Cond          // signaled when connections close for GracefulStop
-	m      map[string]*service // service name -> service info
-	events trace.EventLog
+	mu  sync.Mutex // guards following
+	lis map[net.Listener]bool
+	// conns contains all active server transports. It is a map keyed on a
+	// listener address with the value being the set of active transports
+	// belonging to that listener.
+	conns    map[string]map[transport.ServerTransport]bool
+	serve    bool
+	drain    bool
+	cv       *sync.Cond              // signaled when connections close for GracefulStop
+	services map[string]*serviceInfo // service name -> service info
+	events   traceEventLog
 
 	quit               *grpcsync.Event
 	done               *grpcsync.Event
 	channelzRemoveOnce sync.Once
-	serveWG            sync.WaitGroup // counts active Serve goroutines for GracefulStop
+	serveWG            sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+	handlersWG         sync.WaitGroup // counts active method handler goroutines
 
-	channelzID int64 // channelz unique identification number
-	czData     *channelzData
+	channelz *channelz.Server
+
+	serverWorkerChannel      chan func()
+	serverWorkerChannelClose func()
 }
 
 type serverOptions struct {
@@ -116,8 +157,11 @@
 	dc                    Decompressor
 	unaryInt              UnaryServerInterceptor
 	streamInt             StreamServerInterceptor
+	chainUnaryInts        []UnaryServerInterceptor
+	chainStreamInts       []StreamServerInterceptor
+	binaryLogger          binarylog.Logger
 	inTapHandle           tap.ServerInHandle
-	statsHandler          stats.Handler
+	statsHandlers         []stats.Handler
 	maxConcurrentStreams  uint32
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
@@ -128,18 +172,26 @@
 	initialConnWindowSize int32
 	writeBufferSize       int
 	readBufferSize        int
+	sharedWriteBuffer     bool
 	connectionTimeout     time.Duration
 	maxHeaderListSize     *uint32
 	headerTableSize       *uint32
+	numServerWorkers      uint32
+	bufferPool            mem.BufferPool
+	waitForHandlers       bool
+	staticWindowSize      bool
 }
 
 var defaultServerOptions = serverOptions{
+	maxConcurrentStreams:  math.MaxUint32,
 	maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
 	maxSendMessageSize:    defaultServerMaxSendMessageSize,
 	connectionTimeout:     120 * time.Second,
 	writeBufferSize:       defaultWriteBufSize,
 	readBufferSize:        defaultReadBufSize,
+	bufferPool:            mem.DefaultBufferPool(),
 }
+var globalServerOptions []ServerOption
 
 // A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
 type ServerOption interface {
@@ -149,7 +201,10 @@
 // EmptyServerOption does not alter the server configuration. It can be embedded
 // in another structure to build custom server options.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type EmptyServerOption struct{}
 
 func (EmptyServerOption) apply(*serverOptions) {}
@@ -170,22 +225,50 @@
 	}
 }
 
-// WriteBufferSize determines how much data can be batched before doing a write on the wire.
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
-// The default value for this buffer is 32KB.
-// Zero will disable the write buffer such that each write will be on underlying connection.
-// Note: A Send call may not directly translate to a write.
+// joinServerOption provides a way to combine arbitrary number of server
+// options into one.
+type joinServerOption struct {
+	opts []ServerOption
+}
+
+func (mdo *joinServerOption) apply(do *serverOptions) {
+	for _, opt := range mdo.opts {
+		opt.apply(do)
+	}
+}
+
+func newJoinServerOption(opts ...ServerOption) ServerOption {
+	return &joinServerOption{opts: opts}
+}
+
+// SharedWriteBuffer allows reusing the per-connection transport write buffer.
+// If this option is set to true, every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SharedWriteBuffer(val bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.sharedWriteBuffer = val
+	})
+}
+
+// WriteBufferSize determines how much data can be batched before doing a write
+// on the wire. The default value for this buffer is 32KB. Zero or negative
+// values will disable the write buffer such that each write goes directly to
+// the underlying connection. Note: A Send call may not directly translate to a write.
 func WriteBufferSize(s int) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.writeBufferSize = s
 	})
 }
 
-// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
-// for one read syscall.
-// The default value for this buffer is 32KB.
-// Zero will disable read buffer for a connection so data framer can access the underlying
-// conn directly.
+// ReadBufferSize lets you set the size of the read buffer; this determines how
+// much data can be read at most in one read syscall. The default value for this
+// buffer is 32KB. Zero or negative values will disable the read buffer for a
+// connection so the data framer can access the underlying conn directly.
 func ReadBufferSize(s int) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.readBufferSize = s
@@ -197,6 +280,7 @@
 func InitialWindowSize(s int32) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.initialWindowSize = s
+		o.staticWindowSize = true
 	})
 }
 
@@ -205,14 +289,37 @@
 func InitialConnWindowSize(s int32) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.initialConnWindowSize = s
+		o.staticWindowSize = true
+	})
+}
+
+// StaticStreamWindowSize returns a ServerOption to set the initial stream
+// window size to the value provided and disables dynamic flow control.
+// The lower bound for window size is 64K and any value smaller than that
+// will be ignored.
+func StaticStreamWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialWindowSize = s
+		o.staticWindowSize = true
+	})
+}
+
+// StaticConnWindowSize returns a ServerOption to set the initial connection
+// window size to the value provided and disables dynamic flow control.
+// The lower bound for window size is 64K and any value smaller than that
+// will be ignored.
+func StaticConnWindowSize(s int32) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.initialConnWindowSize = s
+		o.staticWindowSize = true
 	})
 }
 
 // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
 func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
-	if kp.Time > 0 && kp.Time < time.Second {
-		grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
-		kp.Time = time.Second
+	if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
+		logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+		kp.Time = internal.KeepaliveMinServerPingTime
 	}
 
 	return newFuncServerOption(func(o *serverOptions) {
@@ -230,9 +337,59 @@
 // CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
 //
 // This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
 func CustomCodec(codec Codec) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.codec = codec
+		o.codec = newCodecV0Bridge(codec)
+	})
+}
+
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = newCodecV1Bridge(codec)
+	})
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.codec = codecV2
 	})
 }
 
@@ -242,7 +399,8 @@
 // default, server messages will be sent using the same compressor with which
 // request messages were sent.
 //
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
 func RPCCompressor(cp Compressor) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.cp = cp
@@ -253,7 +411,8 @@
 // messages.  It has higher priority than decompressors registered via
 // encoding.RegisterCompressor.
 //
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
 func RPCDecompressor(dc Decompressor) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.dc = dc
@@ -263,7 +422,7 @@
 // MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
 // If this is not set, gRPC uses the default limit.
 //
-// Deprecated: use MaxRecvMsgSize instead.
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
 func MaxMsgSize(m int) ServerOption {
 	return MaxRecvMsgSize(m)
 }
@@ -287,6 +446,9 @@
 // MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
 // of concurrent streams to each ServerTransport.
 func MaxConcurrentStreams(n uint32) ServerOption {
+	if n == 0 {
+		n = math.MaxUint32
+	}
 	return newFuncServerOption(func(o *serverOptions) {
 		o.maxConcurrentStreams = n
 	})
@@ -311,6 +473,16 @@
 	})
 }
 
+// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
+// for unary RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All unary interceptors added by this method will be chained.
+func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+	})
+}
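
A usage sketch of the chaining semantics: with two interceptors, the first wraps the second, which wraps the real handler. The logging helper is hypothetical:

package main // sketch only

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logging returns an interceptor that logs around the next handler.
func logging(tag string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		log.Printf("%s: enter %s", tag, info.FullMethod)
		resp, err := handler(ctx, req)
		log.Printf("%s: exit %s", tag, info.FullMethod)
		return resp, err
	}
}

func newServer() *grpc.Server {
	// "outer" runs first and returns last; "inner" wraps the handler.
	return grpc.NewServer(grpc.ChainUnaryInterceptor(logging("outer"), logging("inner")))
}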
+
 // StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
 // server. Only one stream interceptor can be installed.
 func StreamInterceptor(i StreamServerInterceptor) ServerOption {
@@ -322,8 +494,23 @@
 	})
 }
 
+// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
+// for streaming RPCs. The first interceptor will be the outer most,
+// while the last interceptor will be the inner most wrapper around the real call.
+// All stream interceptors added by this method will be chained.
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+	})
+}
+
 // InTapHandle returns a ServerOption that sets the tap handle for all the server
 // transport to be created. Only one can be installed.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func InTapHandle(h tap.ServerInHandle) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		if o.inTapHandle != nil {
@@ -336,7 +523,21 @@
 // StatsHandler returns a ServerOption that sets the stats handler for the server.
 func StatsHandler(h stats.Handler) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
-		o.statsHandler = h
+		if h == nil {
+			logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
+			// Do not allow a nil stats handler, which would otherwise cause
+			// panics.
+			return
+		}
+		o.statsHandlers = append(o.statsHandlers, h)
+	})
+}
+
+// binaryLogger returns a ServerOption that sets the binary logger for the
+// server.
+func binaryLogger(bl binarylog.Logger) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.binaryLogger = bl
 	})
 }
 
@@ -344,8 +545,8 @@
 // unknown service handler. The provided method is a bidi-streaming RPC service
 // handler that will be invoked instead of returning the "unimplemented" gRPC
 // error whenever a request is received for an unregistered service or method.
-// The handling function has full access to the Context of the request and the
-// stream, and the invocation bypasses interceptors.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
 func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.unknownStreamDesc = &StreamDesc{
@@ -363,62 +564,161 @@
 // new connections.  If this is not set, the default is 120 seconds.  A zero or
 // negative value will result in an immediate timeout.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func ConnectionTimeout(d time.Duration) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.connectionTimeout = d
 	})
 }
 
+// MaxHeaderListSizeServerOption is a ServerOption that sets the max
+// (uncompressed) size of header list that the server is prepared to accept.
+type MaxHeaderListSizeServerOption struct {
+	MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
+	so.maxHeaderListSize = &o.MaxHeaderListSize
+}
+
 // MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
 // of header list that the server is prepared to accept.
 func MaxHeaderListSize(s uint32) ServerOption {
-	return newFuncServerOption(func(o *serverOptions) {
-		o.maxHeaderListSize = &s
-	})
+	return MaxHeaderListSizeServerOption{
+		MaxHeaderListSize: s,
+	}
 }
 
 // HeaderTableSize returns a ServerOption that sets the size of dynamic
 // header table for stream.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func HeaderTableSize(s uint32) ServerOption {
 	return newFuncServerOption(func(o *serverOptions) {
 		o.headerTableSize = &s
 	})
 }
 
+// NumStreamWorkers returns a ServerOption that sets the number of worker
+// goroutines that should be used to process incoming streams. Setting this to
+// zero (default) will disable workers and spawn a new goroutine for each
+// stream.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NumStreamWorkers(numServerWorkers uint32) ServerOption {
+	// TODO: If/when this API gets stabilized (i.e. stream workers become the
+	// only way streams are processed), change the behavior of the zero value to
+	// a sane default. Preliminary experiments suggest that a value equal to the
+	// number of CPUs available is most performant; requires thorough testing.
+	return newFuncServerOption(func(o *serverOptions) {
+		o.numServerWorkers = numServerWorkers
+	})
+}
+
+// WaitForHandlers causes Stop to wait until all outstanding method handlers have
+// exited before returning.  If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.waitForHandlers = w
+	})
+}
+
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
+	return newFuncServerOption(func(o *serverOptions) {
+		o.bufferPool = bufferPool
+	})
+}
+
+// serverWorkerResetThreshold defines how often the stack must be reset. Every
+// N requests, by spawning a new goroutine in its place, a worker can reset its
+// stack so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+// serverWorker blocks on a *transport.ServerStream channel forever and waits
+// for data to be fed by serveStreams. This allows multiple requests to be
+// processed by the same goroutine, removing the need for expensive stack
+// re-allocations (see the runtime.morestack problem [1]).
+//
+// [1] https://github.com/golang/go/issues/18138
+func (s *Server) serverWorker() {
+	for completed := 0; completed < serverWorkerResetThreshold; completed++ {
+		f, ok := <-s.serverWorkerChannel
+		if !ok {
+			return
+		}
+		f()
+	}
+	go s.serverWorker()
+}
+
+// initServerWorkers creates worker goroutines and a channel to process incoming
+// connections to reduce the time spent overall on runtime.morestack.
+func (s *Server) initServerWorkers() {
+	s.serverWorkerChannel = make(chan func())
+	s.serverWorkerChannelClose = sync.OnceFunc(func() {
+		close(s.serverWorkerChannel)
+	})
+	for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+		go s.serverWorker()
+	}
+}
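
The worker above is an instance of a general stack-reset pattern: a goroutine with a bounded job count that replaces itself, so a stack grown by one large request is eventually reclaimed. A standalone sketch:

package main // sketch only

const resetThreshold = 1 << 16 // mirrors serverWorkerResetThreshold

func worker(jobs <-chan func()) {
	for n := 0; n < resetThreshold; n++ {
		job, ok := <-jobs
		if !ok {
			return
		}
		job()
	}
	go worker(jobs) // fresh goroutine, fresh (small) stack
}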
+
 // NewServer creates a gRPC server which has no service registered and has not
 // started to accept requests yet.
 func NewServer(opt ...ServerOption) *Server {
 	opts := defaultServerOptions
+	for _, o := range globalServerOptions {
+		o.apply(&opts)
+	}
 	for _, o := range opt {
 		o.apply(&opts)
 	}
 	s := &Server{
-		lis:    make(map[net.Listener]bool),
-		opts:   opts,
-		conns:  make(map[transport.ServerTransport]bool),
-		m:      make(map[string]*service),
-		quit:   grpcsync.NewEvent(),
-		done:   grpcsync.NewEvent(),
-		czData: new(channelzData),
+		lis:      make(map[net.Listener]bool),
+		opts:     opts,
+		conns:    make(map[string]map[transport.ServerTransport]bool),
+		services: make(map[string]*serviceInfo),
+		quit:     grpcsync.NewEvent(),
+		done:     grpcsync.NewEvent(),
+		channelz: channelz.RegisterServer(""),
 	}
+	chainUnaryServerInterceptors(s)
+	chainStreamServerInterceptors(s)
 	s.cv = sync.NewCond(&s.mu)
 	if EnableTracing {
 		_, file, line, _ := runtime.Caller(1)
-		s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+		s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
 	}
 
-	if channelz.IsOn() {
-		s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
+	if s.opts.numServerWorkers > 0 {
+		s.initServerWorkers()
 	}
+
+	channelz.Info(logger, s.channelz, "Server created")
 	return s
 }
 
 // printf records an event in s's event log, unless s has been stopped.
 // REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...interface{}) {
+func (s *Server) printf(format string, a ...any) {
 	if s.events != nil {
 		s.events.Printf(format, a...)
 	}
@@ -426,49 +726,64 @@
 
 // errorf records an error in s's event log, unless s has been stopped.
 // REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...interface{}) {
+func (s *Server) errorf(format string, a ...any) {
 	if s.events != nil {
 		s.events.Errorf(format, a...)
 	}
 }
 
+// ServiceRegistrar wraps a single method that supports service registration. It
+// enables users to pass concrete types other than grpc.Server to the service
+// registration methods exported by the IDL generated code.
+type ServiceRegistrar interface {
+	// RegisterService registers a service and its implementation to the
+	// concrete type implementing this interface.  It may not be called
+	// once the server has started serving.
+	// desc describes the service and its methods and handlers. impl is the
+	// service implementation which is passed to the method handlers.
+	RegisterService(desc *ServiceDesc, impl any)
+}
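
Generated code targets this interface rather than *grpc.Server directly. A hedged sketch of the registration helper an IDL plugin roughly emits (FooServer and fooServiceDesc are hypothetical):

// RegisterFooServer accepts any ServiceRegistrar, so tests can pass a
// recording stub instead of a real *grpc.Server.
func RegisterFooServer(r grpc.ServiceRegistrar, impl FooServer) {
	r.RegisterService(&fooServiceDesc, impl)
}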
+
 // RegisterService registers a service and its implementation to the gRPC
 // server. It is called from the IDL generated code. This must be called before
-// invoking Serve.
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
-	ht := reflect.TypeOf(sd.HandlerType).Elem()
-	st := reflect.TypeOf(ss)
-	if !st.Implements(ht) {
-		grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
+// ensure it implements sd.HandlerType.
+func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
+	if ss != nil {
+		ht := reflect.TypeOf(sd.HandlerType).Elem()
+		st := reflect.TypeOf(ss)
+		if !st.Implements(ht) {
+			logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+		}
 	}
 	s.register(sd, ss)
 }
 
-func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+func (s *Server) register(sd *ServiceDesc, ss any) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	s.printf("RegisterService(%q)", sd.ServiceName)
 	if s.serve {
-		grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+		logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
 	}
-	if _, ok := s.m[sd.ServiceName]; ok {
-		grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+	if _, ok := s.services[sd.ServiceName]; ok {
+		logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
 	}
-	srv := &service{
-		server: ss,
-		md:     make(map[string]*MethodDesc),
-		sd:     make(map[string]*StreamDesc),
-		mdata:  sd.Metadata,
+	info := &serviceInfo{
+		serviceImpl: ss,
+		methods:     make(map[string]*MethodDesc),
+		streams:     make(map[string]*StreamDesc),
+		mdata:       sd.Metadata,
 	}
 	for i := range sd.Methods {
 		d := &sd.Methods[i]
-		srv.md[d.MethodName] = d
+		info.methods[d.MethodName] = d
 	}
 	for i := range sd.Streams {
 		d := &sd.Streams[i]
-		srv.sd[d.StreamName] = d
+		info.streams[d.StreamName] = d
 	}
-	s.m[sd.ServiceName] = srv
+	s.services[sd.ServiceName] = info
 }
 
 // MethodInfo contains the information of an RPC including its method name and type.
@@ -485,23 +800,23 @@
 type ServiceInfo struct {
 	Methods []MethodInfo
 	// Metadata is the metadata specified in ServiceDesc when registering service.
-	Metadata interface{}
+	Metadata any
 }
 
 // GetServiceInfo returns a map from service names to ServiceInfo.
 // Service names include the package names, in the form of <package>.<service>.
 func (s *Server) GetServiceInfo() map[string]ServiceInfo {
 	ret := make(map[string]ServiceInfo)
-	for n, srv := range s.m {
-		methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
-		for m := range srv.md {
+	for n, srv := range s.services {
+		methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+		for m := range srv.methods {
 			methods = append(methods, MethodInfo{
 				Name:           m,
 				IsClientStream: false,
 				IsServerStream: false,
 			})
 		}
-		for m, d := range srv.sd {
+		for m, d := range srv.streams {
 			methods = append(methods, MethodInfo{
 				Name:           m,
 				IsClientStream: d.ClientStreams,
@@ -521,30 +836,15 @@
 // the server being stopped.
 var ErrServerStopped = errors.New("grpc: the server has been stopped")
 
-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
-	if s.opts.creds == nil {
-		return rawConn, nil, nil
-	}
-	return s.opts.creds.ServerHandshake(rawConn)
-}
-
 type listenSocket struct {
 	net.Listener
-	channelzID int64
-}
-
-func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
-	return &channelz.SocketInternalMetric{
-		SocketOptions: channelz.GetSocketOption(l.Listener),
-		LocalAddr:     l.Listener.Addr(),
-	}
+	channelz *channelz.Socket
 }
 
 func (l *listenSocket) Close() error {
 	err := l.Listener.Close()
-	if channelz.IsOn() {
-		channelz.RemoveEntry(l.channelzID)
-	}
+	channelz.RemoveEntry(l.channelz.ID)
+	channelz.Info(logger, l.channelz, "ListenSocket deleted")
 	return err
 }
 
@@ -554,6 +854,18 @@
 // Serve returns when lis.Accept fails with fatal errors.  lis will be closed when
 // this method returns.
 // Serve will return a non-nil error unless Stop or GracefulStop is called.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, callers need to do the
+// following two things:
+//   - pass a net.Listener created by calling the Listen method on a
+//     net.ListenConfig with the `KeepAlive` field set to a negative value. This
+//     will result in the Go standard library not overriding OS defaults for TCP
+//     keepalive interval and time. But this will also result in the Go standard
+//     library not enabling TCP keepalives by default.
+//   - override the Accept method on the passed in net.Listener and set the
+//     SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
 func (s *Server) Serve(lis net.Listener) error {
 	s.mu.Lock()
 	s.printf("serving")
@@ -574,13 +886,17 @@
 		}
 	}()
 
-	ls := &listenSocket{Listener: lis}
-	s.lis[ls] = true
-
-	if channelz.IsOn() {
-		ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
+	ls := &listenSocket{
+		Listener: lis,
+		channelz: channelz.RegisterSocket(&channelz.Socket{
+			SocketType:    channelz.SocketTypeListen,
+			Parent:        s.channelz,
+			RefName:       lis.Addr().String(),
+			LocalAddr:     lis.Addr(),
+			SocketOptions: channelz.GetSocketOption(lis)},
+		),
 	}
-	s.mu.Unlock()
+	s.lis[ls] = true
 
 	defer func() {
 		s.mu.Lock()
@@ -591,8 +907,10 @@
 		s.mu.Unlock()
 	}()
 
-	var tempDelay time.Duration // how long to sleep on accept failure
+	s.mu.Unlock()
+	channelz.Info(logger, ls.channelz, "ListenSocket created")
 
+	var tempDelay time.Duration // how long to sleep on accept failure
 	for {
 		rawConn, err := lis.Accept()
 		if err != nil {
@@ -636,7 +954,7 @@
 		// s.conns before this conn can be added.
 		s.serveWG.Add(1)
 		go func() {
-			s.handleRawConn(rawConn)
+			s.handleRawConn(lis.Addr().String(), rawConn)
 			s.serveWG.Done()
 		}()
 	}
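
A sketch of the two-step listener setup described in the Serve comment above: opt out of Go's keepalive override via net.ListenConfig, then re-enable keepalives with OS defaults in a wrapping Accept (keepaliveListener and listenForGRPC are hypothetical):

package main // sketch only

import (
	"context"
	"net"
)

type keepaliveListener struct{ net.Listener }

func (l keepaliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		tc.SetKeepAlive(true) // SO_KEEPALIVE; time/interval stay at OS defaults
	}
	return c, nil
}

func listenForGRPC(addr string) (net.Listener, error) {
	// KeepAlive < 0 stops the standard library from overriding OS defaults.
	lc := net.ListenConfig{KeepAlive: -1}
	inner, err := lc.Listen(context.Background(), "tcp", addr)
	if err != nil {
		return nil, err
	}
	return keepaliveListener{inner}, nil
}
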
@@ -644,91 +962,115 @@
 
 // handleRawConn forks a goroutine to handle a just-accepted connection that
 // has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(rawConn net.Conn) {
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
 	if s.quit.HasFired() {
 		rawConn.Close()
 		return
 	}
 	rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
-	conn, authInfo, err := s.useTransportAuthenticator(rawConn)
-	if err != nil {
-		// ErrConnDispatched means that the connection was dispatched away from
-		// gRPC; those connections should be left open.
-		if err != credentials.ErrConnDispatched {
-			s.mu.Lock()
-			s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
-			s.mu.Unlock()
-			grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
-			rawConn.Close()
-		}
-		rawConn.SetDeadline(time.Time{})
-		return
-	}
 
 	// Finish handshaking (HTTP2)
-	st := s.newHTTP2Transport(conn, authInfo)
+	st := s.newHTTP2Transport(rawConn)
+	rawConn.SetDeadline(time.Time{})
 	if st == nil {
 		return
 	}
 
-	rawConn.SetDeadline(time.Time{})
-	if !s.addConn(st) {
+	if cc, ok := rawConn.(interface {
+		PassServerTransport(transport.ServerTransport)
+	}); ok {
+		cc.PassServerTransport(st)
+	}
+
+	if !s.addConn(lisAddr, st) {
 		return
 	}
 	go func() {
-		s.serveStreams(st)
-		s.removeConn(st)
+		s.serveStreams(context.Background(), st, rawConn)
+		s.removeConn(lisAddr, st)
 	}()
 }
 
 // newHTTP2Transport sets up a http/2 transport (using the
 // gRPC http2 server transport in transport/http2_server.go).
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
 	config := &transport.ServerConfig{
 		MaxStreams:            s.opts.maxConcurrentStreams,
-		AuthInfo:              authInfo,
+		ConnectionTimeout:     s.opts.connectionTimeout,
+		Credentials:           s.opts.creds,
 		InTapHandle:           s.opts.inTapHandle,
-		StatsHandler:          s.opts.statsHandler,
+		StatsHandlers:         s.opts.statsHandlers,
 		KeepaliveParams:       s.opts.keepaliveParams,
 		KeepalivePolicy:       s.opts.keepalivePolicy,
 		InitialWindowSize:     s.opts.initialWindowSize,
 		InitialConnWindowSize: s.opts.initialConnWindowSize,
 		WriteBufferSize:       s.opts.writeBufferSize,
 		ReadBufferSize:        s.opts.readBufferSize,
-		ChannelzParentID:      s.channelzID,
+		SharedWriteBuffer:     s.opts.sharedWriteBuffer,
+		ChannelzParent:        s.channelz,
 		MaxHeaderListSize:     s.opts.maxHeaderListSize,
 		HeaderTableSize:       s.opts.headerTableSize,
+		BufferPool:            s.opts.bufferPool,
+		StaticWindowSize:      s.opts.staticWindowSize,
 	}
-	st, err := transport.NewServerTransport("http2", c, config)
+	st, err := transport.NewServerTransport(c, config)
 	if err != nil {
 		s.mu.Lock()
 		s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
 		s.mu.Unlock()
-		c.Close()
-		grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
+		// ErrConnDispatched means that the connection was dispatched away from
+		// gRPC; those connections should be left open.
+		if err != credentials.ErrConnDispatched {
+			// Don't log on ErrConnDispatched and io.EOF to prevent log spam.
+			if err != io.EOF {
+				channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
+			}
+			c.Close()
+		}
 		return nil
 	}
 
 	return st
 }
 
-func (s *Server) serveStreams(st transport.ServerTransport) {
-	defer st.Close()
-	var wg sync.WaitGroup
-	st.HandleStreams(func(stream *transport.Stream) {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			s.handleStream(st, stream, s.traceInfo(st, stream))
-		}()
-	}, func(ctx context.Context, method string) context.Context {
-		if !EnableTracing {
-			return ctx
+func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
+	ctx = transport.SetConnection(ctx, rawConn)
+	ctx = peer.NewContext(ctx, st.Peer())
+	for _, sh := range s.opts.statsHandlers {
+		ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+			RemoteAddr: st.Peer().Addr,
+			LocalAddr:  st.Peer().LocalAddr,
+		})
+		sh.HandleConn(ctx, &stats.ConnBegin{})
+	}
+
+	defer func() {
+		st.Close(errors.New("finished serving streams for the server transport"))
+		for _, sh := range s.opts.statsHandlers {
+			sh.HandleConn(ctx, &stats.ConnEnd{})
 		}
-		tr := trace.New("grpc.Recv."+methodFamily(method), method)
-		return trace.NewContext(ctx, tr)
+	}()
+
+	streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
+	st.HandleStreams(ctx, func(stream *transport.ServerStream) {
+		s.handlersWG.Add(1)
+		streamQuota.acquire()
+		f := func() {
+			defer streamQuota.release()
+			defer s.handlersWG.Done()
+			s.handleStream(st, stream)
+		}
+
+		if s.opts.numServerWorkers > 0 {
+			select {
+			case s.serverWorkerChannel <- f:
+				return
+			default:
+				// If all stream workers are busy, fall back to the default code path.
+			}
+		}
+		go f()
 	})
-	wg.Wait()
 }
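
The non-blocking send on s.serverWorkerChannel above implements a "reuse an idle worker, otherwise spawn" policy. A standalone sketch of the same pattern follows; the names are ours, not grpc-go's.

    // workerPool mirrors the dispatch policy used in serveStreams: an
    // unbuffered task channel serviced by n long-lived goroutines.
    type workerPool struct {
        tasks chan func()
    }

    func newWorkerPool(n int) *workerPool {
        p := &workerPool{tasks: make(chan func())}
        for i := 0; i < n; i++ {
            go func() {
                for f := range p.tasks {
                    f()
                }
            }()
        }
        return p
    }

    // dispatch hands f to an idle worker if one is receiving; otherwise it
    // falls back to a fresh goroutine, exactly like the select above.
    func (p *workerPool) dispatch(f func()) {
        select {
        case p.tasks <- f:
        default:
            go f()
        }
    }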
 
 var _ http.Handler = (*Server)(nil)
@@ -745,168 +1087,231 @@
 // To share one port (such as 443 for https) between gRPC and an
 // existing http.Handler, use a root http.Handler such as:
 //
-//   if r.ProtoMajor == 2 && strings.HasPrefix(
-//   	r.Header.Get("Content-Type"), "application/grpc") {
-//   	grpcServer.ServeHTTP(w, r)
-//   } else {
-//   	yourMux.ServeHTTP(w, r)
-//   }
+//	if r.ProtoMajor == 2 && strings.HasPrefix(
+//		r.Header.Get("Content-Type"), "application/grpc") {
+//		grpcServer.ServeHTTP(w, r)
+//	} else {
+//		yourMux.ServeHTTP(w, r)
+//	}
 //
 // Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
 // separate from grpc-go's HTTP/2 server. Performance and features may vary
 // between the two paths. ServeHTTP does not support some gRPC features
-// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
-// and subject to change.
+// available through grpc-go's HTTP/2 server.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
+	st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
 	if err != nil {
-		http.Error(w, err.Error(), http.StatusInternalServerError)
+		// Errors returned from transport.NewServerHandlerTransport have
+		// already been written to w.
 		return
 	}
-	if !s.addConn(st) {
+	if !s.addConn(listenerAddressForServeHTTP, st) {
 		return
 	}
-	defer s.removeConn(st)
-	s.serveStreams(st)
+	defer s.removeConn(listenerAddressForServeHTTP, st)
+	s.serveStreams(r.Context(), st, nil)
 }
 
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
-	if !EnableTracing {
-		return nil
-	}
-	tr, ok := trace.FromContext(stream.Context())
-	if !ok {
-		return nil
-	}
-
-	trInfo = &traceInfo{
-		tr: tr,
-		firstLine: firstLine{
-			client:     false,
-			remoteAddr: st.RemoteAddr(),
-		},
-	}
-	if dl, ok := stream.Context().Deadline(); ok {
-		trInfo.firstLine.deadline = time.Until(dl)
-	}
-	return trInfo
-}
-
-func (s *Server) addConn(st transport.ServerTransport) bool {
+func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
 	s.mu.Lock()
 	defer s.mu.Unlock()
 	if s.conns == nil {
-		st.Close()
+		st.Close(errors.New("Server.addConn called when server has already been stopped"))
 		return false
 	}
 	if s.drain {
 		// Transport added after we drained our existing conns: drain it
 		// immediately.
-		st.Drain()
+		st.Drain("")
 	}
-	s.conns[st] = true
+
+	if s.conns[addr] == nil {
+		// Create a map entry if this is the first connection on this listener.
+		s.conns[addr] = make(map[transport.ServerTransport]bool)
+	}
+	s.conns[addr][st] = true
 	return true
 }
 
-func (s *Server) removeConn(st transport.ServerTransport) {
+func (s *Server) removeConn(addr string, st transport.ServerTransport) {
 	s.mu.Lock()
 	defer s.mu.Unlock()
-	if s.conns != nil {
-		delete(s.conns, st)
+
+	conns := s.conns[addr]
+	if conns != nil {
+		delete(conns, st)
+		if len(conns) == 0 {
+			// If the last connection for this address is being removed, also
+			// remove the map entry corresponding to the address. This is used
+			// in GracefulStop() when waiting for all connections to be closed.
+			delete(s.conns, addr)
+		}
 		s.cv.Broadcast()
 	}
 }
 
-func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
-	return &channelz.ServerInternalMetric{
-		CallsStarted:             atomic.LoadInt64(&s.czData.callsStarted),
-		CallsSucceeded:           atomic.LoadInt64(&s.czData.callsSucceeded),
-		CallsFailed:              atomic.LoadInt64(&s.czData.callsFailed),
-		LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
-	}
-}
-
 func (s *Server) incrCallsStarted() {
-	atomic.AddInt64(&s.czData.callsStarted, 1)
-	atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
+	s.channelz.ServerMetrics.CallsStarted.Add(1)
+	s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
 }
 
 func (s *Server) incrCallsSucceeded() {
-	atomic.AddInt64(&s.czData.callsSucceeded, 1)
+	s.channelz.ServerMetrics.CallsSucceeded.Add(1)
 }
 
 func (s *Server) incrCallsFailed() {
-	atomic.AddInt64(&s.czData.callsFailed, 1)
+	s.channelz.ServerMetrics.CallsFailed.Add(1)
 }
 
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
 	data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
 	if err != nil {
-		grpclog.Errorln("grpc: server failed to encode response: ", err)
+		channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
 		return err
 	}
-	compData, err := compress(data, cp, comp)
+
+	compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
 	if err != nil {
-		grpclog.Errorln("grpc: server failed to compress response: ", err)
+		data.Free()
+		channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
 		return err
 	}
-	hdr, payload := msgHeader(data, compData)
+
+	hdr, payload := msgHeader(data, compData, pf)
+
+	defer func() {
+		compData.Free()
+		data.Free()
+		// payload does not need to be freed here, it is either data or compData, both of
+		// which are already freed.
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > s.opts.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+	if payloadLen > s.opts.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
 	}
-	err = t.Write(stream, hdr, payload, opts)
-	if err == nil && s.opts.statsHandler != nil {
-		s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+	err = stream.Write(hdr, payload, opts)
+	if err == nil {
+		if len(s.opts.statsHandlers) != 0 {
+			for _, sh := range s.opts.statsHandlers {
+				sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+			}
+		}
 	}
 	return err
 }
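
The payloadLen limit enforced in sendResponse is configured through a public server option. A minimal sketch of raising the default 4 MiB send limit; grpc.MaxSendMsgSize is an existing public ServerOption.

    package main

    import "google.golang.org/grpc"

    func main() {
        // Responses whose framed payload exceeds the limit are rejected
        // with codes.ResourceExhausted by sendResponse above.
        s := grpc.NewServer(grpc.MaxSendMsgSize(16 * 1024 * 1024))
        defer s.Stop()
    }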
 
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
-	if channelz.IsOn() {
-		s.incrCallsStarted()
-		defer func() {
-			if err != nil && err != io.EOF {
-				s.incrCallsFailed()
-			} else {
-				s.incrCallsSucceeded()
-			}
-		}()
-	}
-	sh := s.opts.statsHandler
-	if sh != nil {
-		beginTime := time.Now()
-		begin := &stats.Begin{
-			BeginTime: beginTime,
-		}
-		sh.HandleRPC(stream.Context(), begin)
-		defer func() {
-			end := &stats.End{
-				BeginTime: beginTime,
-				EndTime:   time.Now(),
-			}
-			if err != nil && err != io.EOF {
-				end.Error = toRPCErr(err)
-			}
-			sh.HandleRPC(stream.Context(), end)
-		}()
-	}
-	if trInfo != nil {
-		defer trInfo.tr.Finish()
-		trInfo.tr.LazyLog(&trInfo.firstLine, false)
-		defer func() {
-			if err != nil && err != io.EOF {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				trInfo.tr.SetError()
-			}
-		}()
+// chainUnaryServerInterceptors chains all unary server interceptors into one.
+func chainUnaryServerInterceptors(s *Server) {
+	// Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainUnaryInts
+	if s.opts.unaryInt != nil {
+		interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
 	}
 
-	binlog := binarylog.GetMethodLogger(stream.Method())
-	if binlog != nil {
-		ctx := stream.Context()
+	var chainedInt UnaryServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = chainUnaryInterceptors(interceptors)
+	}
+
+	s.opts.unaryInt = chainedInt
+}
+
+func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
+	return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
+		return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
+	}
+}
+
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+	return func(ctx context.Context, req any) (any, error) {
+		return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
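
chainUnaryInterceptors composes interceptors so that the first registered one runs outermost. A small sketch using the public grpc.ChainUnaryInterceptor option with a toy logging interceptor:

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
    )

    // logging wraps any later interceptors and the handler itself,
    // matching the recursion in getChainUnaryHandler.
    func logging(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
        log.Printf("unary call: %s", info.FullMethod)
        return handler(ctx, req)
    }

    func main() {
        s := grpc.NewServer(grpc.ChainUnaryInterceptor(logging))
        defer s.Stop()
    }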
+
+func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+	shs := s.opts.statsHandlers
+	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+		if channelz.IsOn() {
+			s.incrCallsStarted()
+		}
+		var statsBegin *stats.Begin
+		for _, sh := range shs {
+			beginTime := time.Now()
+			statsBegin = &stats.Begin{
+				BeginTime:      beginTime,
+				IsClientStream: false,
+				IsServerStream: false,
+			}
+			sh.HandleRPC(ctx, statsBegin)
+		}
+		if trInfo != nil {
+			trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		}
+		// The deferred error handling for tracing, stats handler and channelz are
+		// combined into one function to reduce stack usage -- a defer takes ~56-64
+		// bytes on the stack, so overflowing the stack will require a stack
+		// re-allocation, which is expensive.
+		//
+		// To maintain behavior similar to separate deferred statements, statements
+		// should be executed in the reverse order. That is, tracing first, stats
+		// handler second, and channelz last. Note that panics *within* defers will
+		// lead to different behavior, but that's an acceptable compromise; that
+		// would be undefined behavior territory anyway.
+		defer func() {
+			if trInfo != nil {
+				if err != nil && err != io.EOF {
+					trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+					trInfo.tr.SetError()
+				}
+				trInfo.tr.Finish()
+			}
+
+			for _, sh := range shs {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				sh.HandleRPC(ctx, end)
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+	var binlogs []binarylog.MethodLogger
+	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+		binlogs = append(binlogs, ml)
+	}
+	if s.opts.binaryLogger != nil {
+		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+			binlogs = append(binlogs, ml)
+		}
+	}
+	if len(binlogs) != 0 {
 		md, _ := metadata.FromIncomingContext(ctx)
 		logEntry := &binarylog.ClientHeader{
 			Header:     md,
@@ -925,7 +1330,9 @@
 		if peer, ok := peer.FromContext(ctx); ok {
 			logEntry.PeerAddr = peer.Addr
 		}
-		binlog.Log(logEntry)
+		for _, binlog := range binlogs {
+			binlog.Log(ctx, logEntry)
+		}
 	}
 
 	// comp and cp are used for compression.  decomp and dc are used for
@@ -935,6 +1342,7 @@
 	var comp, decomp encoding.Compressor
 	var cp Compressor
 	var dc Decompressor
+	var sendCompressorName string
 
 	// If dc is set and matches the stream's compression, use it.  Otherwise, try
 	// to find a matching registered compressor for decomp.
@@ -944,7 +1352,7 @@
 		decomp = encoding.GetCompressor(rc)
 		if decomp == nil {
 			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-			t.WriteStatus(stream, st)
+			stream.WriteStatus(st)
 			return st.Err()
 		}
 	}
@@ -955,98 +1363,126 @@
 	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
 	if s.opts.cp != nil {
 		cp = s.opts.cp
-		stream.SetSendCompress(cp.Type())
+		sendCompressorName = cp.Type()
 	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
 		// Legacy compressor not specified; attempt to respond with same encoding.
 		comp = encoding.GetCompressor(rc)
 		if comp != nil {
-			stream.SetSendCompress(rc)
+			sendCompressorName = comp.Name()
+		}
+	}
+
+	if sendCompressorName != "" {
+		if err := stream.SetSendCompress(sendCompressorName); err != nil {
+			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
 		}
 	}
 
 	var payInfo *payloadInfo
-	if sh != nil || binlog != nil {
+	if len(shs) != 0 || len(binlogs) != 0 {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
-	d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+
+	d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
 	if err != nil {
-		if st, ok := status.FromError(err); ok {
-			if e := t.WriteStatus(stream, st); e != nil {
-				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
-			}
+		if e := stream.WriteStatus(status.Convert(err)); e != nil {
+			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
 		return err
 	}
-	if channelz.IsOn() {
-		t.IncrMsgRecv()
+	freed := false
+	dataFree := func() {
+		if !freed {
+			d.Free()
+			freed = true
+		}
 	}
-	df := func(v interface{}) error {
+	defer dataFree()
+	df := func(v any) error {
+		defer dataFree()
 		if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
 			return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
 		}
-		if sh != nil {
-			sh.HandleRPC(stream.Context(), &stats.InPayload{
-				RecvTime:   time.Now(),
-				Payload:    v,
-				WireLength: payInfo.wireLength,
-				Data:       d,
-				Length:     len(d),
+
+		for _, sh := range shs {
+			sh.HandleRPC(ctx, &stats.InPayload{
+				RecvTime:         time.Now(),
+				Payload:          v,
+				Length:           d.Len(),
+				WireLength:       payInfo.compressedLength + headerLen,
+				CompressedLength: payInfo.compressedLength,
 			})
 		}
-		if binlog != nil {
-			binlog.Log(&binarylog.ClientMessage{
-				Message: d,
-			})
+		if len(binlogs) != 0 {
+			cm := &binarylog.ClientMessage{
+				Message: d.Materialize(),
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(ctx, cm)
+			}
 		}
 		if trInfo != nil {
 			trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
 		}
 		return nil
 	}
-	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
-	reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
+	ctx = NewContextWithServerTransportStream(ctx, stream)
+	reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
 		if !ok {
-			// Convert appErr if it is not a grpc status error.
-			appErr = status.Error(codes.Unknown, appErr.Error())
-			appStatus, _ = status.FromError(appErr)
+			// Convert non-status application error to a status error with code
+			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr)
+			appErr = appStatus.Err()
 		}
 		if trInfo != nil {
 			trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
 			trInfo.tr.SetError()
 		}
-		if e := t.WriteStatus(stream, appStatus); e != nil {
-			grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+		if e := stream.WriteStatus(appStatus); e != nil {
+			channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 		}
-		if binlog != nil {
+		if len(binlogs) != 0 {
 			if h, _ := stream.Header(); h.Len() > 0 {
 				// Only log serverHeader if there was header. Otherwise it can
 				// be trailer only.
-				binlog.Log(&binarylog.ServerHeader{
+				sh := &binarylog.ServerHeader{
 					Header: h,
-				})
+				}
+				for _, binlog := range binlogs {
+					binlog.Log(ctx, sh)
+				}
 			}
-			binlog.Log(&binarylog.ServerTrailer{
+			st := &binarylog.ServerTrailer{
 				Trailer: stream.Trailer(),
 				Err:     appErr,
-			})
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(ctx, st)
+			}
 		}
 		return appErr
 	}
 	if trInfo != nil {
 		trInfo.tr.LazyLog(stringer("OK"), false)
 	}
-	opts := &transport.Options{Last: true}
+	opts := &transport.WriteOptions{Last: true}
 
-	if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+	// Server handler could have set new compressor by calling SetSendCompressor.
+	// In case it is set, we need to use it for compressing outbound message.
+	if stream.SendCompress() != sendCompressorName {
+		comp = encoding.GetCompressor(stream.SendCompress())
+	}
+	if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
 		if err == io.EOF {
 			// The entire stream is done (for unary RPC only).
 			return err
 		}
-		if s, ok := status.FromError(err); ok {
-			if e := t.WriteStatus(stream, s); e != nil {
-				grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+		if sts, ok := status.FromError(err); ok {
+			if e := stream.WriteStatus(sts); e != nil {
+				channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
 			}
 		} else {
 			switch st := err.(type) {
@@ -1056,29 +1492,34 @@
 				panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
 			}
 		}
-		if binlog != nil {
+		if len(binlogs) != 0 {
 			h, _ := stream.Header()
-			binlog.Log(&binarylog.ServerHeader{
+			sh := &binarylog.ServerHeader{
 				Header: h,
-			})
-			binlog.Log(&binarylog.ServerTrailer{
+			}
+			st := &binarylog.ServerTrailer{
 				Trailer: stream.Trailer(),
 				Err:     appErr,
-			})
+			}
+			for _, binlog := range binlogs {
+				binlog.Log(ctx, sh)
+				binlog.Log(ctx, st)
+			}
 		}
 		return err
 	}
-	if binlog != nil {
+	if len(binlogs) != 0 {
 		h, _ := stream.Header()
-		binlog.Log(&binarylog.ServerHeader{
+		sh := &binarylog.ServerHeader{
 			Header: h,
-		})
-		binlog.Log(&binarylog.ServerMessage{
+		}
+		sm := &binarylog.ServerMessage{
 			Message: reply,
-		})
-	}
-	if channelz.IsOn() {
-		t.IncrMsgSent()
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(ctx, sh)
+			binlog.Log(ctx, sm)
+		}
 	}
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
@@ -1086,60 +1527,130 @@
 	// TODO: Should we be logging if writing status failed here, like above?
 	// Should the logging be in WriteStatus?  Should we ignore the WriteStatus
 	// error or allow the stats handler to see it?
-	err = t.WriteStatus(stream, statusOK)
-	if binlog != nil {
-		binlog.Log(&binarylog.ServerTrailer{
+	if len(binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
 			Trailer: stream.Trailer(),
 			Err:     appErr,
-		})
+		}
+		for _, binlog := range binlogs {
+			binlog.Log(ctx, st)
+		}
 	}
-	return err
+	return stream.WriteStatus(statusOK)
 }
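
One behavioral change above is worth calling out: a non-status error returned by a handler now goes through status.FromContextError, so context errors are no longer collapsed to codes.Unknown. A quick illustration:

    package main

    import (
        "context"
        "errors"
        "fmt"

        "google.golang.org/grpc/status"
    )

    func main() {
        // Context errors map to their dedicated codes...
        fmt.Println(status.FromContextError(context.DeadlineExceeded).Code()) // DeadlineExceeded
        fmt.Println(status.FromContextError(context.Canceled).Code())         // Canceled
        // ...while everything else still becomes Unknown.
        fmt.Println(status.FromContextError(errors.New("boom")).Code()) // Unknown
    }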
 
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+// chainStreamServerInterceptors chains all stream server interceptors into one.
+func chainStreamServerInterceptors(s *Server) {
+	// Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
+	// be executed before any other chained interceptors.
+	interceptors := s.opts.chainStreamInts
+	if s.opts.streamInt != nil {
+		interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
+	}
+
+	var chainedInt StreamServerInterceptor
+	if len(interceptors) == 0 {
+		chainedInt = nil
+	} else if len(interceptors) == 1 {
+		chainedInt = interceptors[0]
+	} else {
+		chainedInt = chainStreamInterceptors(interceptors)
+	}
+
+	s.opts.streamInt = chainedInt
+}
+
+func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
+	return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+		return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
+	}
+}
+
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
+	if curr == len(interceptors)-1 {
+		return finalHandler
+	}
+	return func(srv any, stream ServerStream) error {
+		return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+	}
+}
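
The stream-side chaining mirrors the unary case. A sketch using the public grpc.ChainStreamInterceptor option:

    package main

    import (
        "log"

        "google.golang.org/grpc"
    )

    // logStream becomes the outermost element of the chain built by
    // getChainStreamHandler when it is registered first.
    func logStream(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
        log.Printf("stream: %s (client-streaming: %v)", info.FullMethod, info.IsClientStream)
        return handler(srv, ss)
    }

    func main() {
        s := grpc.NewServer(grpc.ChainStreamInterceptor(logStream))
        defer s.Stop()
    }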
+
+func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
 	if channelz.IsOn() {
 		s.incrCallsStarted()
-		defer func() {
-			if err != nil && err != io.EOF {
-				s.incrCallsFailed()
-			} else {
-				s.incrCallsSucceeded()
-			}
-		}()
 	}
-	sh := s.opts.statsHandler
-	if sh != nil {
+	shs := s.opts.statsHandlers
+	var statsBegin *stats.Begin
+	if len(shs) != 0 {
 		beginTime := time.Now()
-		begin := &stats.Begin{
-			BeginTime: beginTime,
+		statsBegin = &stats.Begin{
+			BeginTime:      beginTime,
+			IsClientStream: sd.ClientStreams,
+			IsServerStream: sd.ServerStreams,
 		}
-		sh.HandleRPC(stream.Context(), begin)
-		defer func() {
-			end := &stats.End{
-				BeginTime: beginTime,
-				EndTime:   time.Now(),
-			}
-			if err != nil && err != io.EOF {
-				end.Error = toRPCErr(err)
-			}
-			sh.HandleRPC(stream.Context(), end)
-		}()
+		for _, sh := range shs {
+			sh.HandleRPC(ctx, statsBegin)
+		}
 	}
-	ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+	ctx = NewContextWithServerTransportStream(ctx, stream)
 	ss := &serverStream{
 		ctx:                   ctx,
-		t:                     t,
 		s:                     stream,
-		p:                     &parser{r: stream},
+		p:                     &parser{r: stream, bufferPool: s.opts.bufferPool},
 		codec:                 s.getCodec(stream.ContentSubtype()),
+		desc:                  sd,
 		maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
 		maxSendMessageSize:    s.opts.maxSendMessageSize,
 		trInfo:                trInfo,
-		statsHandler:          sh,
+		statsHandler:          shs,
 	}
 
-	ss.binlog = binarylog.GetMethodLogger(stream.Method())
-	if ss.binlog != nil {
+	if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+		// See comment in processUnaryRPC on defers.
+		defer func() {
+			if trInfo != nil {
+				ss.mu.Lock()
+				if err != nil && err != io.EOF {
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+					ss.trInfo.tr.SetError()
+				}
+				ss.trInfo.tr.Finish()
+				ss.trInfo.tr = nil
+				ss.mu.Unlock()
+			}
+
+			if len(shs) != 0 {
+				end := &stats.End{
+					BeginTime: statsBegin.BeginTime,
+					EndTime:   time.Now(),
+				}
+				if err != nil && err != io.EOF {
+					end.Error = toRPCErr(err)
+				}
+				for _, sh := range shs {
+					sh.HandleRPC(ctx, end)
+				}
+			}
+
+			if channelz.IsOn() {
+				if err != nil && err != io.EOF {
+					s.incrCallsFailed()
+				} else {
+					s.incrCallsSucceeded()
+				}
+			}
+		}()
+	}
+
+	if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+		ss.binlogs = append(ss.binlogs, ml)
+	}
+	if s.opts.binaryLogger != nil {
+		if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+			ss.binlogs = append(ss.binlogs, ml)
+		}
+	}
+	if len(ss.binlogs) != 0 {
 		md, _ := metadata.FromIncomingContext(ctx)
 		logEntry := &binarylog.ClientHeader{
 			Header:     md,
@@ -1158,18 +1669,20 @@
 		if peer, ok := peer.FromContext(ss.Context()); ok {
 			logEntry.PeerAddr = peer.Addr
 		}
-		ss.binlog.Log(logEntry)
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ctx, logEntry)
+		}
 	}
 
 	// If dc is set and matches the stream's compression, use it.  Otherwise, try
 	// to find a matching registered compressor for decomp.
 	if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
-		ss.dc = s.opts.dc
+		ss.decompressorV0 = s.opts.dc
 	} else if rc != "" && rc != encoding.Identity {
-		ss.decomp = encoding.GetCompressor(rc)
-		if ss.decomp == nil {
+		ss.decompressorV1 = encoding.GetCompressor(rc)
+		if ss.decompressorV1 == nil {
 			st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
-			t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			return st.Err()
 		}
 	}
@@ -1179,33 +1692,31 @@
 	//
 	// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
 	if s.opts.cp != nil {
-		ss.cp = s.opts.cp
-		stream.SetSendCompress(s.opts.cp.Type())
+		ss.compressorV0 = s.opts.cp
+		ss.sendCompressorName = s.opts.cp.Type()
 	} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
 		// Legacy compressor not specified; attempt to respond with same encoding.
-		ss.comp = encoding.GetCompressor(rc)
-		if ss.comp != nil {
-			stream.SetSendCompress(rc)
+		ss.compressorV1 = encoding.GetCompressor(rc)
+		if ss.compressorV1 != nil {
+			ss.sendCompressorName = rc
 		}
 	}
 
+	if ss.sendCompressorName != "" {
+		if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
+			return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
+		}
+	}
+
+	ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)
+
 	if trInfo != nil {
 		trInfo.tr.LazyLog(&trInfo.firstLine, false)
-		defer func() {
-			ss.mu.Lock()
-			if err != nil && err != io.EOF {
-				ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				ss.trInfo.tr.SetError()
-			}
-			ss.trInfo.tr.Finish()
-			ss.trInfo.tr = nil
-			ss.mu.Unlock()
-		}()
 	}
 	var appErr error
-	var server interface{}
-	if srv != nil {
-		server = srv.server
+	var server any
+	if info != nil {
+		server = info.serviceImpl
 	}
 	if s.opts.streamInt == nil {
 		appErr = sd.Handler(server, ss)
@@ -1220,7 +1731,9 @@
 	if appErr != nil {
 		appStatus, ok := status.FromError(appErr)
 		if !ok {
-			appStatus = status.New(codes.Unknown, appErr.Error())
+			// Convert non-status application error to a status error with code
+			// Unknown, but handle context errors specifically.
+			appStatus = status.FromContextError(appErr)
 			appErr = appStatus.Err()
 		}
 		if trInfo != nil {
@@ -1229,13 +1742,16 @@
 			ss.trInfo.tr.SetError()
 			ss.mu.Unlock()
 		}
-		t.WriteStatus(ss.s, appStatus)
-		if ss.binlog != nil {
-			ss.binlog.Log(&binarylog.ServerTrailer{
+		if len(ss.binlogs) != 0 {
+			st := &binarylog.ServerTrailer{
 				Trailer: ss.s.Trailer(),
 				Err:     appErr,
-			})
+			}
+			for _, binlog := range ss.binlogs {
+				binlog.Log(ctx, st)
+			}
 		}
+		ss.s.WriteStatus(appStatus)
 		// TODO: Should we log an error from WriteStatus here and below?
 		return appErr
 	}
@@ -1244,57 +1760,96 @@
 		ss.trInfo.tr.LazyLog(stringer("OK"), false)
 		ss.mu.Unlock()
 	}
-	err = t.WriteStatus(ss.s, statusOK)
-	if ss.binlog != nil {
-		ss.binlog.Log(&binarylog.ServerTrailer{
+	if len(ss.binlogs) != 0 {
+		st := &binarylog.ServerTrailer{
 			Trailer: ss.s.Trailer(),
 			Err:     appErr,
-		})
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ctx, st)
+		}
 	}
-	return err
+	return ss.s.WriteStatus(statusOK)
 }
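
The compressorV1/decompressorV1 lookups in processStreamingRPC only succeed for compressors registered with the encoding package. The stock gzip implementation registers itself from an init function, so a blank import suffices for the server to echo a client's encoding:

    package main

    import (
        "google.golang.org/grpc"

        // Registers the "gzip" compressor; the RecvCompress/SetSendCompress
        // logic above can then find it by name.
        _ "google.golang.org/grpc/encoding/gzip"
    )

    func main() {
        s := grpc.NewServer()
        defer s.Stop()
    }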
 
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
+	ctx := stream.Context()
+	ctx = contextWithServer(ctx, s)
+	var ti *traceInfo
+	if EnableTracing {
+		tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+		ctx = newTraceContext(ctx, tr)
+		ti = &traceInfo{
+			tr: tr,
+			firstLine: firstLine{
+				client:     false,
+				remoteAddr: t.Peer().Addr,
+			},
+		}
+		if dl, ok := ctx.Deadline(); ok {
+			ti.firstLine.deadline = time.Until(dl)
+		}
+	}
+
 	sm := stream.Method()
 	if sm != "" && sm[0] == '/' {
 		sm = sm[1:]
 	}
 	pos := strings.LastIndex(sm, "/")
 	if pos == -1 {
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
-			trInfo.tr.SetError()
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+			ti.tr.SetError()
 		}
 		errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
-		if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
-			if trInfo != nil {
-				trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-				trInfo.tr.SetError()
+		if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
+			if ti != nil {
+				ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+				ti.tr.SetError()
 			}
-			grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+			channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
 		}
-		if trInfo != nil {
-			trInfo.tr.Finish()
+		if ti != nil {
+			ti.tr.Finish()
 		}
 		return
 	}
 	service := sm[:pos]
 	method := sm[pos+1:]
 
-	srv, knownService := s.m[service]
+	// FromIncomingContext is expensive; skip it if there are no stats handlers.
+	if len(s.opts.statsHandlers) > 0 {
+		md, _ := metadata.FromIncomingContext(ctx)
+		for _, sh := range s.opts.statsHandlers {
+			ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+			sh.HandleRPC(ctx, &stats.InHeader{
+				FullMethod:  stream.Method(),
+				RemoteAddr:  t.Peer().Addr,
+				LocalAddr:   t.Peer().LocalAddr,
+				Compression: stream.RecvCompress(),
+				WireLength:  stream.HeaderWireLength(),
+				Header:      md,
+			})
+		}
+	}
+	// Ensure the updated context is visible in stream callouts. This will be
+	// deleted once all stats handler calls come from the gRPC layer.
+	stream.SetContext(ctx)
+
+	srv, knownService := s.services[service]
 	if knownService {
-		if md, ok := srv.md[method]; ok {
-			s.processUnaryRPC(t, stream, srv, md, trInfo)
+		if md, ok := srv.methods[method]; ok {
+			s.processUnaryRPC(ctx, stream, srv, md, ti)
 			return
 		}
-		if sd, ok := srv.sd[method]; ok {
-			s.processStreamingRPC(t, stream, srv, sd, trInfo)
+		if sd, ok := srv.streams[method]; ok {
+			s.processStreamingRPC(ctx, stream, srv, sd, ti)
 			return
 		}
 	}
 	// Unknown service, or known service with an unknown method.
 	if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
-		s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+		s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
 		return
 	}
 	var errDesc string
@@ -1303,19 +1858,19 @@
 	} else {
 		errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
 	}
-	if trInfo != nil {
-		trInfo.tr.LazyPrintf("%s", errDesc)
-		trInfo.tr.SetError()
+	if ti != nil {
+		ti.tr.LazyPrintf("%s", errDesc)
+		ti.tr.SetError()
 	}
-	if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
-		if trInfo != nil {
-			trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
-			trInfo.tr.SetError()
+	if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
+		if ti != nil {
+			ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+			ti.tr.SetError()
 		}
-		grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+		channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
 	}
-	if trInfo != nil {
-		trInfo.tr.Finish()
+	if ti != nil {
+		ti.tr.Finish()
 	}
 }
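
The unknownStreamDesc fallback consulted above is installed with the public grpc.UnknownServiceHandler option; without it, handleStream writes codes.Unimplemented as shown. A sketch:

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    func main() {
        s := grpc.NewServer(grpc.UnknownServiceHandler(
            func(srv any, stream grpc.ServerStream) error {
                m, _ := grpc.MethodFromServerStream(stream)
                // A catch-all could proxy the raw stream; here it only
                // reports the unmatched method name.
                return status.Errorf(codes.Unimplemented, "no handler for %q", m)
            }))
        defer s.Stop()
    }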
 
@@ -1325,7 +1880,10 @@
 // NewContextWithServerTransportStream creates a new context from ctx and
 // attaches stream to it.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
 	return context.WithValue(ctx, streamKey{}, stream)
 }
@@ -1337,7 +1895,10 @@
 //
 // See also NewContextWithServerTransportStream.
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
 type ServerTransportStream interface {
 	Method() string
 	SetHeader(md metadata.MD) error
@@ -1349,7 +1910,10 @@
 // ctx. Returns nil if the given context has no stream associated with it
 // (which implies it is not an RPC invocation context).
 //
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
 	s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
 	return s
@@ -1361,87 +1925,87 @@
 // pending RPCs on the client side will get notified by connection
 // errors.
 func (s *Server) Stop() {
-	s.quit.Fire()
-
-	defer func() {
-		s.serveWG.Wait()
-		s.done.Fire()
-	}()
-
-	s.channelzRemoveOnce.Do(func() {
-		if channelz.IsOn() {
-			channelz.RemoveEntry(s.channelzID)
-		}
-	})
-
-	s.mu.Lock()
-	listeners := s.lis
-	s.lis = nil
-	st := s.conns
-	s.conns = nil
-	// interrupt GracefulStop if Stop and GracefulStop are called concurrently.
-	s.cv.Broadcast()
-	s.mu.Unlock()
-
-	for lis := range listeners {
-		lis.Close()
-	}
-	for c := range st {
-		c.Close()
-	}
-
-	s.mu.Lock()
-	if s.events != nil {
-		s.events.Finish()
-		s.events = nil
-	}
-	s.mu.Unlock()
+	s.stop(false)
 }
 
 // GracefulStop stops the gRPC server gracefully. It stops the server from
 // accepting new connections and RPCs and blocks until all the pending RPCs are
 // finished.
 func (s *Server) GracefulStop() {
+	s.stop(true)
+}
+
+func (s *Server) stop(graceful bool) {
 	s.quit.Fire()
 	defer s.done.Fire()
 
-	s.channelzRemoveOnce.Do(func() {
-		if channelz.IsOn() {
-			channelz.RemoveEntry(s.channelzID)
-		}
-	})
+	s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
 	s.mu.Lock()
-	if s.conns == nil {
-		s.mu.Unlock()
-		return
-	}
-
-	for lis := range s.lis {
-		lis.Close()
-	}
-	s.lis = nil
-	if !s.drain {
-		for st := range s.conns {
-			st.Drain()
-		}
-		s.drain = true
-	}
-
+	s.closeListenersLocked()
 	// Wait for serving threads to be ready to exit.  Only then can we be sure no
 	// new conns will be created.
 	s.mu.Unlock()
 	s.serveWG.Wait()
+
 	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if graceful {
+		s.drainAllServerTransportsLocked()
+	} else {
+		s.closeServerTransportsLocked()
+	}
 
 	for len(s.conns) != 0 {
 		s.cv.Wait()
 	}
 	s.conns = nil
+
+	if s.opts.numServerWorkers > 0 {
+		// Closing the channel (only once, via sync.OnceFunc) after all the
+		// connections have been closed above ensures that there are no
+		// goroutines executing the callback passed to st.HandleStreams (where
+		// the channel is written to).
+		s.serverWorkerChannelClose()
+	}
+
+	if graceful || s.opts.waitForHandlers {
+		s.handlersWG.Wait()
+	}
+
 	if s.events != nil {
 		s.events.Finish()
 		s.events = nil
 	}
-	s.mu.Unlock()
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeServerTransportsLocked() {
+	for _, conns := range s.conns {
+		for st := range conns {
+			st.Close(errors.New("Server.Stop called"))
+		}
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) drainAllServerTransportsLocked() {
+	if !s.drain {
+		for _, conns := range s.conns {
+			for st := range conns {
+				st.Drain("graceful_stop")
+			}
+		}
+		s.drain = true
+	}
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeListenersLocked() {
+	for lis := range s.lis {
+		lis.Close()
+	}
+	s.lis = nil
 }
 
 // contentSubtype must be lowercase
@@ -1451,21 +2015,74 @@
 		return s.opts.codec
 	}
 	if contentSubtype == "" {
-		return encoding.GetCodec(proto.Name)
+		return getCodec(proto.Name)
 	}
-	codec := encoding.GetCodec(contentSubtype)
+	codec := getCodec(contentSubtype)
 	if codec == nil {
-		return encoding.GetCodec(proto.Name)
+		logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
+		return getCodec(proto.Name)
 	}
 	return codec
 }
 
-// SetHeader sets the header metadata.
-// When called multiple times, all the provided metadata will be merged.
-// All the metadata will be sent out when one of the following happens:
-//  - grpc.SendHeader() is called;
-//  - The first response is sent out;
-//  - An RPC status is sent out (error or success).
+type serverKey struct{}
+
+// serverFromContext gets the Server from the context.
+func serverFromContext(ctx context.Context) *Server {
+	s, _ := ctx.Value(serverKey{}).(*Server)
+	return s
+}
+
+// contextWithServer sets the Server in the context.
+func contextWithServer(ctx context.Context, server *Server) context.Context {
+	return context.WithValue(ctx, serverKey{}, server)
+}
+
+// isRegisteredMethod returns whether the passed in method is registered as a
+// method on the server. /service/method and service/method will match if the
+// service and method are registered on the server.
+func (s *Server) isRegisteredMethod(serviceMethod string) bool {
+	if serviceMethod != "" && serviceMethod[0] == '/' {
+		serviceMethod = serviceMethod[1:]
+	}
+	pos := strings.LastIndex(serviceMethod, "/")
+	if pos == -1 { // Invalid method name syntax.
+		return false
+	}
+	service := serviceMethod[:pos]
+	method := serviceMethod[pos+1:]
+	srv, knownService := s.services[service]
+	if knownService {
+		if _, ok := srv.methods[method]; ok {
+			return true
+		}
+		if _, ok := srv.streams[method]; ok {
+			return true
+		}
+	}
+	return false
+}
+
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged.  All
+// the metadata will be sent out when one of the following happens:
+//
+//   - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+//   - The first response message is sent.  For unary handlers, this occurs when
+//     the handler returns; for streaming handlers, this can happen when stream's
+//     SendMsg method is called.
+//   - An RPC status is sent out (error or success).  This occurs when the handler
+//     returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
 func SetHeader(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
@@ -1477,8 +2094,14 @@
 	return stream.SetHeader(md)
 }
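
A brief sketch of SetHeader in a handler body, assuming the usual context, grpc, and metadata imports; the header key is invented for illustration.

    // echo is a generic unary handler body; a concrete generated handler
    // would use its own request/response types.
    func echo(ctx context.Context, req any) (any, error) {
        // Merged into the headers that flush with the first response or the
        // RPC status, per the rules documented above.
        if err := grpc.SetHeader(ctx, metadata.Pairs("x-served-by", "pod-1")); err != nil {
            return nil, err
        }
        return req, nil
    }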
 
-// SendHeader sends header metadata. It may be called at most once.
-// The provided md and headers set by SetHeader() will be sent.
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list).  The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
 func SendHeader(ctx context.Context, md metadata.MD) error {
 	stream := ServerTransportStreamFromContext(ctx)
 	if stream == nil {
@@ -1490,8 +2113,66 @@
 	return nil
 }
 
+// SetSendCompressor sets a compressor for outbound messages from the server.
+// It must not be called after any event that causes headers to be sent
+// (see ServerStream.SetHeader for the complete list). The provided compressor
+// is used when the following conditions are met:
+//
+//   - compressor is registered via encoding.RegisterCompressor
+//   - compressor name must exist in the client advertised compressor names
+//     sent in grpc-accept-encoding header. Use ClientSupportedCompressors to
+//     get client supported compressor names.
+//
+// The context provided must be the context passed to the server's handler.
+// Note that the compressor name encoding.Identity disables outbound
+// compression.
+// By default, server messages are sent using the same compressor with which
+// request messages were sent.
+//
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
+// SendMsg.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SetSendCompressor(ctx context.Context, name string) error {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
+	if !ok || stream == nil {
+		return fmt.Errorf("failed to fetch the stream from the given context")
+	}
+
+	if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
+		return fmt.Errorf("unable to set send compressor: %w", err)
+	}
+
+	return stream.SetSendCompress(name)
+}
+
+// ClientSupportedCompressors returns compressor names advertised by the client
+// via grpc-accept-encoding header.
+//
+// The context provided must be the context passed to the server's handler.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
+	stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
+	if !ok || stream == nil {
+		return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
+	}
+
+	return stream.ClientAdvertisedCompressors(), nil
+}
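
A hedged sketch combining the two experimental helpers above: enable gzip responses only when the client advertised support for it. The helper name is ours.

    // maybeGzip must run inside the handler before headers are sent.
    func maybeGzip(ctx context.Context) error {
        names, err := grpc.ClientSupportedCompressors(ctx)
        if err != nil {
            return err
        }
        for _, n := range names {
            if n == "gzip" { // assumes the gzip compressor is registered
                return grpc.SetSendCompressor(ctx, "gzip")
            }
        }
        return nil // fall back to mirroring the request encoding
    }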
+
 // SetTrailer sets the trailer metadata that will be sent when an RPC returns.
 // When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package.  However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
 func SetTrailer(ctx context.Context, md metadata.MD) error {
 	if md.Len() == 0 {
 		return nil
@@ -1513,10 +2194,52 @@
 	return s.Method(), true
 }
 
-type channelzServer struct {
-	s *Server
+// validateSendCompressor returns an error when the given compressor name
+// cannot be handled by the server or is not among the compressors advertised
+// by the client.
+func validateSendCompressor(name string, clientCompressors []string) error {
+	if name == encoding.Identity {
+		return nil
+	}
+
+	if !grpcutil.IsCompressorNameRegistered(name) {
+		return fmt.Errorf("compressor not registered %q", name)
+	}
+
+	for _, c := range clientCompressors {
+		if c == name {
+			return nil // found match
+		}
+	}
+	return fmt.Errorf("client does not support compressor %q", name)
 }
 
-func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
-	return c.s.channelzMetric()
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+	n    atomic.Int64
+	wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+	if q.n.Add(-1) < 0 {
+		// We ran out of quota.  Block until a release happens.
+		<-q.wait
+	}
+}
+
+func (q *atomicSemaphore) release() {
+	// N.B. the "<= 0" check below allows this to work with multiple concurrent
+	// calls to acquire, but note that with synchronous calls to acquire, as our
+	// system does, n will never be less than -1.  There are fairness issues
+	// (queuing) to consider if this were to be generalized.
+	if q.n.Add(1) <= 0 {
+		// An acquire was waiting on us.  Unblock it.
+		q.wait <- struct{}{}
+	}
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+	a.n.Store(int64(n))
+	return a
 }
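
A usage sketch of the handler quota above, assuming atomicSemaphore and newHandlerQuota are in scope along with the sync and time imports. acquire is called from a single goroutine, as the type requires, while release may come from any.

    func runWithQuota() {
        q := newHandlerQuota(2) // at most two handlers in flight
        var wg sync.WaitGroup
        for i := 0; i < 5; i++ {
            q.acquire() // blocks once two handlers are already running
            wg.Add(1)
            go func() {
                defer wg.Done()
                defer q.release() // may unblock a pending acquire
                time.Sleep(10 * time.Millisecond) // stand-in for handler work
            }()
        }
        wg.Wait()
    }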
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 4f8836d..8d451e0 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -20,15 +20,17 @@
 
 import (
 	"encoding/json"
+	"errors"
 	"fmt"
-	"strconv"
-	"strings"
+	"reflect"
 	"time"
 
 	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/balancer/pickfirst"
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/grpclog"
 	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/balancer/gracefulswitch"
+	internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
 	"google.golang.org/grpc/serviceconfig"
 )
 
@@ -40,34 +42,7 @@
 // Deprecated: Users should not use this struct. Service config should be received
 // through name resolver, as specified here
 // https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig struct {
-	// WaitForReady indicates whether RPCs sent to this method should wait until
-	// the connection is ready by default (!failfast). The value specified via the
-	// gRPC client API will override the value set here.
-	WaitForReady *bool
-	// Timeout is the default timeout for RPCs sent to this method. The actual
-	// deadline used will be the minimum of the value specified here and the value
-	// set by the application via the gRPC client API.  If either one is not set,
-	// then the other will be used.  If neither is set, then the RPC has no deadline.
-	Timeout *time.Duration
-	// MaxReqSize is the maximum allowed payload size for an individual request in a
-	// stream (client->server) in bytes. The size which is measured is the serialized
-	// payload after per-message compression (but before stream compression) in bytes.
-	// The actual value used is the minimum of the value specified here and the value set
-	// by the application via the gRPC client API. If either one is not set, then the other
-	// will be used.  If neither is set, then the built-in default is used.
-	MaxReqSize *int
-	// MaxRespSize is the maximum allowed payload size for an individual response in a
-	// stream (server->client) in bytes.
-	MaxRespSize *int
-	// RetryPolicy configures retry options for the method.
-	retryPolicy *retryPolicy
-}
-
-type lbConfig struct {
-	name string
-	cfg  serviceconfig.LoadBalancingConfig
-}
+type MethodConfig = internalserviceconfig.MethodConfig
 
 // ServiceConfig is provided by the service provider and contains parameters for how
 // clients that connect to the service should behave.
@@ -78,15 +53,9 @@
 type ServiceConfig struct {
 	serviceconfig.Config
 
-	// LB is the load balancer the service providers recommends. The balancer
-	// specified via grpc.WithBalancer will override this.  This is deprecated;
-	// lbConfigs is preferred.  If lbConfig and LB are both present, lbConfig
-	// will be used.
-	LB *string
-
 	// lbConfig is the service config's load balancing configuration.  If
 	// lbConfig and LB are both present, lbConfig will be used.
-	lbConfig *lbConfig
+	lbConfig serviceconfig.LoadBalancingConfig
 
 	// Methods contains a map for the methods in this service.  If there is an
 	// exact match for a method (i.e. /service/method) in the map, use the
@@ -126,38 +95,10 @@
 	ServiceName string
 }
 
-// retryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryPolicy struct {
-	// MaxAttempts is the maximum number of attempts, including the original RPC.
-	//
-	// This field is required and must be two or greater.
-	maxAttempts int
-
-	// Exponential backoff parameters. The initial retry attempt will occur at
-	// random(0, initialBackoffMS). In general, the nth attempt will occur at
-	// random(0,
-	//   min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
-	//
-	// These fields are required and must be greater than zero.
-	initialBackoff    time.Duration
-	maxBackoff        time.Duration
-	backoffMultiplier float64
-
-	// The set of status codes which may be retried.
-	//
-	// Status codes are specified as strings, e.g., "UNAVAILABLE".
-	//
-	// This field is required and must be non-empty.
-	// Note: a set is used to store this for easy lookup.
-	retryableStatusCodes map[codes.Code]bool
-}
-
 type jsonRetryPolicy struct {
 	MaxAttempts          int
-	InitialBackoff       string
-	MaxBackoff           string
+	InitialBackoff       internalserviceconfig.Duration
+	MaxBackoff           internalserviceconfig.Duration
 	BackoffMultiplier    float64
 	RetryableStatusCodes []codes.Code
 }
@@ -179,163 +120,110 @@
 	TokenRatio float64
 }
 
-func parseDuration(s *string) (*time.Duration, error) {
-	if s == nil {
-		return nil, nil
-	}
-	if !strings.HasSuffix(*s, "s") {
-		return nil, fmt.Errorf("malformed duration %q", *s)
-	}
-	ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
-	if len(ss) > 2 {
-		return nil, fmt.Errorf("malformed duration %q", *s)
-	}
-	// hasDigits is set if either the whole or fractional part of the number is
-	// present, since both are optional but one is required.
-	hasDigits := false
-	var d time.Duration
-	if len(ss[0]) > 0 {
-		i, err := strconv.ParseInt(ss[0], 10, 32)
-		if err != nil {
-			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
-		}
-		d = time.Duration(i) * time.Second
-		hasDigits = true
-	}
-	if len(ss) == 2 && len(ss[1]) > 0 {
-		if len(ss[1]) > 9 {
-			return nil, fmt.Errorf("malformed duration %q", *s)
-		}
-		f, err := strconv.ParseInt(ss[1], 10, 64)
-		if err != nil {
-			return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
-		}
-		for i := 9; i > len(ss[1]); i-- {
-			f *= 10
-		}
-		d += time.Duration(f)
-		hasDigits = true
-	}
-	if !hasDigits {
-		return nil, fmt.Errorf("malformed duration %q", *s)
-	}
-
-	return &d, nil
-}
-
 type jsonName struct {
-	Service *string
-	Method  *string
+	Service string
+	Method  string
 }
 
-func (j jsonName) generatePath() (string, bool) {
-	if j.Service == nil {
-		return "", false
+var (
+	errDuplicatedName             = errors.New("duplicated name")
+	errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+func (j jsonName) generatePath() (string, error) {
+	if j.Service == "" {
+		if j.Method != "" {
+			return "", errEmptyServiceNonEmptyMethod
+		}
+		return "", nil
 	}
-	res := "/" + *j.Service + "/"
-	if j.Method != nil {
-		res += *j.Method
+	res := "/" + j.Service + "/"
+	if j.Method != "" {
+		res += j.Method
 	}
-	return res, true
+	return res, nil
 }
 
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonMC struct {
 	Name                    *[]jsonName
 	WaitForReady            *bool
-	Timeout                 *string
+	Timeout                 *internalserviceconfig.Duration
 	MaxRequestMessageBytes  *int64
 	MaxResponseMessageBytes *int64
 	RetryPolicy             *jsonRetryPolicy
 }
 
-type loadBalancingConfig map[string]json.RawMessage
-
 // TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
 type jsonSC struct {
 	LoadBalancingPolicy *string
-	LoadBalancingConfig *[]loadBalancingConfig
+	LoadBalancingConfig *json.RawMessage
 	MethodConfig        *[]jsonMC
 	RetryThrottling     *retryThrottlingPolicy
 	HealthCheckConfig   *healthCheckConfig
 }
 
 func init() {
-	internal.ParseServiceConfigForTesting = parseServiceConfig
+	internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+		return parseServiceConfig(js, defaultMaxCallAttempts)
+	}
 }
-func parseServiceConfig(js string) *serviceconfig.ParseResult {
+
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
 	if len(js) == 0 {
 		return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
 	}
 	var rsc jsonSC
 	err := json.Unmarshal([]byte(js), &rsc)
 	if err != nil {
-		grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+		logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 		return &serviceconfig.ParseResult{Err: err}
 	}
 	sc := ServiceConfig{
-		LB:                rsc.LoadBalancingPolicy,
 		Methods:           make(map[string]MethodConfig),
 		retryThrottling:   rsc.RetryThrottling,
 		healthCheckConfig: rsc.HealthCheckConfig,
 		rawJSONString:     js,
 	}
-	if rsc.LoadBalancingConfig != nil {
-		for i, lbcfg := range *rsc.LoadBalancingConfig {
-			if len(lbcfg) != 1 {
-				err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
-				grpclog.Warningf(err.Error())
-				return &serviceconfig.ParseResult{Err: err}
-			}
-			var name string
-			var jsonCfg json.RawMessage
-			for name, jsonCfg = range lbcfg {
-			}
-			builder := balancer.Get(name)
-			if builder == nil {
-				continue
-			}
-			sc.lbConfig = &lbConfig{name: name}
-			if parser, ok := builder.(balancer.ConfigParser); ok {
-				var err error
-				sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
-				if err != nil {
-					return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
-				}
-			} else if string(jsonCfg) != "{}" {
-				grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
-			}
-			break
+	c := rsc.LoadBalancingConfig
+	if c == nil {
+		name := pickfirst.Name
+		if rsc.LoadBalancingPolicy != nil {
+			name = *rsc.LoadBalancingPolicy
 		}
-		if sc.lbConfig == nil {
-			// We had a loadBalancingConfig field but did not encounter a
-			// supported policy.  The config is considered invalid in this
-			// case.
-			err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
-			grpclog.Warningf(err.Error())
-			return &serviceconfig.ParseResult{Err: err}
+		if balancer.Get(name) == nil {
+			name = pickfirst.Name
 		}
+		cfg := []map[string]any{{name: struct{}{}}}
+		strCfg, err := json.Marshal(cfg)
+		if err != nil {
+			return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
+		}
+		r := json.RawMessage(strCfg)
+		c = &r
 	}
+	cfg, err := gracefulswitch.ParseConfig(*c)
+	if err != nil {
+		return &serviceconfig.ParseResult{Err: err}
+	}
+	sc.lbConfig = cfg
 
 	if rsc.MethodConfig == nil {
 		return &serviceconfig.ParseResult{Config: &sc}
 	}
+
+	paths := map[string]struct{}{}
 	for _, m := range *rsc.MethodConfig {
 		if m.Name == nil {
 			continue
 		}
-		d, err := parseDuration(m.Timeout)
-		if err != nil {
-			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
-			return &serviceconfig.ParseResult{Err: err}
-		}
 
 		mc := MethodConfig{
 			WaitForReady: m.WaitForReady,
-			Timeout:      d,
+			Timeout:      (*time.Duration)(m.Timeout),
 		}
-		if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
-			grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+		if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
+			logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
 			return &serviceconfig.ParseResult{Err: err}
 		}
 		if m.MaxRequestMessageBytes != nil {
@@ -352,10 +240,20 @@
 				mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
 			}
 		}
-		for _, n := range *m.Name {
-			if path, valid := n.generatePath(); valid {
-				sc.Methods[path] = mc
+		for i, n := range *m.Name {
+			path, err := n.generatePath()
+			if err != nil {
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
 			}
+
+			if _, ok := paths[path]; ok {
+				err = errDuplicatedName
+				logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+				return &serviceconfig.ParseResult{Err: err}
+			}
+			paths[path] = struct{}{}
+			sc.Methods[path] = mc
 		}
 	}
 
@@ -370,46 +268,40 @@
 	return &serviceconfig.ParseResult{Config: &sc}
 }
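As a usage sketch (service name, target, and values here are hypothetical, not taken from the patch): a service config that exercises the fields parsed above, installed as a channel default. Note that each fully-qualified method may appear at most once across methodConfig entries, or parsing fails with errDuplicatedName.

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// serviceConfigJSON exercises the timeout and retryPolicy fields parsed
// above; "echo.Echo" is a made-up service name.
const serviceConfigJSON = `{
  "methodConfig": [{
    "name": [{"service": "echo.Echo"}],
    "timeout": "1s",
    "retryPolicy": {
      "maxAttempts": 4,
      "initialBackoff": "0.1s",
      "maxBackoff": "1s",
      "backoffMultiplier": 2.0,
      "retryableStatusCodes": ["UNAVAILABLE"]
    }
  }]
}`

func dial() (*grpc.ClientConn, error) {
	return grpc.NewClient("dns:///echo.example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(serviceConfigJSON))
}
```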
 
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
+	return jrp.MaxAttempts > 1 &&
+		jrp.InitialBackoff > 0 &&
+		jrp.MaxBackoff > 0 &&
+		jrp.BackoffMultiplier > 0 &&
+		len(jrp.RetryableStatusCodes) > 0
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
 	if jrp == nil {
 		return nil, nil
 	}
-	ib, err := parseDuration(&jrp.InitialBackoff)
-	if err != nil {
-		return nil, err
-	}
-	mb, err := parseDuration(&jrp.MaxBackoff)
-	if err != nil {
-		return nil, err
+
+	if !isValidRetryPolicy(jrp) {
+		return nil, fmt.Errorf("invalid retry policy (%+v)", jrp)
 	}
 
-	if jrp.MaxAttempts <= 1 ||
-		*ib <= 0 ||
-		*mb <= 0 ||
-		jrp.BackoffMultiplier <= 0 ||
-		len(jrp.RetryableStatusCodes) == 0 {
-		grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
-		return nil, nil
+	if jrp.MaxAttempts < maxAttempts {
+		maxAttempts = jrp.MaxAttempts
 	}
-
-	rp := &retryPolicy{
-		maxAttempts:          jrp.MaxAttempts,
-		initialBackoff:       *ib,
-		maxBackoff:           *mb,
-		backoffMultiplier:    jrp.BackoffMultiplier,
-		retryableStatusCodes: make(map[codes.Code]bool),
-	}
-	if rp.maxAttempts > 5 {
-		// TODO(retry): Make the max maxAttempts configurable.
-		rp.maxAttempts = 5
+	rp := &internalserviceconfig.RetryPolicy{
+		MaxAttempts:          maxAttempts,
+		InitialBackoff:       time.Duration(jrp.InitialBackoff),
+		MaxBackoff:           time.Duration(jrp.MaxBackoff),
+		BackoffMultiplier:    jrp.BackoffMultiplier,
+		RetryableStatusCodes: make(map[codes.Code]bool),
 	}
 	for _, code := range jrp.RetryableStatusCodes {
-		rp.retryableStatusCodes[code] = true
+		rp.RetryableStatusCodes[code] = true
 	}
 	return rp, nil
 }
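A worked illustration of the capping above, with assumed values: a JSON maxAttempts of 10 against a channel limit of 5 yields an effective RetryPolicy.MaxAttempts of 5.

```go
// effectiveMaxAttempts mirrors the capping logic in convertRetryPolicy:
// the JSON value wins only when it is below the channel-wide limit.
func effectiveMaxAttempts(jsonMaxAttempts, channelLimit int) int {
	if jsonMaxAttempts < channelLimit {
		return jsonMaxAttempts
	}
	return channelLimit
}

// effectiveMaxAttempts(10, 5) == 5; effectiveMaxAttempts(3, 5) == 3.
```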
 
-func min(a, b *int) *int {
+func minPointers(a, b *int) *int {
 	if *a < *b {
 		return a
 	}
@@ -421,7 +313,7 @@
 		return &defaultVal
 	}
 	if mcMax != nil && doptMax != nil {
-		return min(mcMax, doptMax)
+		return minPointers(mcMax, doptMax)
 	}
 	if mcMax != nil {
 		return mcMax
@@ -432,3 +324,37 @@
 func newInt(b int) *int {
 	return &b
 }
+
+func init() {
+	internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two service configs, ignoring the rawJSONString
+// field, because two equivalent configs may differ only in whitespace.
+//
+// If either argument is not a *ServiceConfig, it returns false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+	if a == nil && b == nil {
+		return true
+	}
+	aa, ok := a.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	bb, ok := b.(*ServiceConfig)
+	if !ok {
+		return false
+	}
+	aaRaw := aa.rawJSONString
+	aa.rawJSONString = ""
+	bbRaw := bb.rawJSONString
+	bb.rawJSONString = ""
+	defer func() {
+		aa.rawJSONString = aaRaw
+		bb.rawJSONString = bbRaw
+	}()
+	// Using reflect.DeepEqual instead of cmp.Equal because many balancer
+	// configs are unexported, and cmp.Equal cannot compare unexported fields
+	// from unexported structs.
+	return reflect.DeepEqual(aa, bb)
+}
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
index 187c304..35e7a20 100644
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -19,7 +19,10 @@
 // Package serviceconfig defines types and methods for operating on gRPC
 // service configs.
 //
-// This package is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package serviceconfig
 
 // Config represents an opaque data structure holding a service config.
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
index dc03731..67194a5 100644
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -38,6 +38,15 @@
 	// FailFast indicates if this RPC is failfast.
 	// This field is only valid on client side, it's always false on server side.
 	FailFast bool
+	// NameResolutionDelay indicates whether the RPC had to wait for the
+	// initial name resolver update before it could begin. This should only
+	// happen if the channel is IDLE when the RPC is started. Note that all
+	// retry or hedging attempts for an RPC that experienced a delay will
+	// have this field set.
+	//
+	// This field is only valid on the client side; it is always false on
+	// the server side.
+	NameResolutionDelay bool
 }
 
 // Handler defines the interface for the related stats handling (e.g., RPCs, connections).
diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go
new file mode 100644
index 0000000..641c8e9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/metrics.go
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import "maps"
+
+// MetricSet is a set of metrics to record. Once created, a MetricSet is
+// immutable; however, Add and Remove can make copies with specific metrics
+// added or removed, respectively.
+//
+// Do not construct directly; use NewMetricSet instead.
+type MetricSet struct {
+	// metrics are the set of metrics to initialize.
+	metrics map[string]bool
+}
+
+// NewMetricSet returns a MetricSet containing metricNames.
+func NewMetricSet(metricNames ...string) *MetricSet {
+	newMetrics := make(map[string]bool)
+	for _, metric := range metricNames {
+		newMetrics[metric] = true
+	}
+	return &MetricSet{metrics: newMetrics}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *MetricSet) Metrics() map[string]bool {
+	return m.metrics
+}
+
+// Add adds the metricNames to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *MetricSet) Add(metricNames ...string) *MetricSet {
+	newMetrics := make(map[string]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metricNames {
+		newMetrics[metric] = true
+	}
+	return &MetricSet{metrics: newMetrics}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *MetricSet) Join(metrics *MetricSet) *MetricSet {
+	newMetrics := make(map[string]bool)
+	maps.Copy(newMetrics, m.metrics)
+	maps.Copy(newMetrics, metrics.metrics)
+	return &MetricSet{metrics: newMetrics}
+}
+
+// Remove removes the metricNames from the metrics set and returns a new copy
+// with the metrics removed.
+func (m *MetricSet) Remove(metricNames ...string) *MetricSet {
+	newMetrics := make(map[string]bool)
+	for metric := range m.metrics {
+		newMetrics[metric] = true
+	}
+
+	for _, metric := range metricNames {
+		delete(newMetrics, metric)
+	}
+	return &MetricSet{metrics: newMetrics}
+}
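A brief usage sketch for the copy-on-write API above; the metric names are made up for illustration.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	// Each mutation returns a fresh MetricSet; the receiver is unchanged.
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	extended := base.Add("grpc.client.call.duration")
	trimmed := extended.Remove("grpc.client.attempt.duration")

	fmt.Println(len(base.Metrics()))     // 1
	fmt.Println(len(extended.Metrics())) // 2
	fmt.Println(len(trimmed.Metrics()))  // 1
}
```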
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index f3f593c..10bf998 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -16,8 +16,6 @@
  *
  */
 
-//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
-
 // Package stats is for collecting and reporting various network and RPC stats.
 // This package is for monitoring purpose only. All fields are read-only.
 // All APIs are experimental.
@@ -38,15 +36,27 @@
 	IsClient() bool
 }
 
-// Begin contains stats when an RPC begins.
+// Begin contains stats for the start of an RPC attempt.
+//
+//   - Server-side: Triggered after `InHeader`, as headers are processed
+//     before the RPC lifecycle begins.
+//   - Client-side: The first stats event recorded.
+//
 // FailFast is only valid if this Begin is from client side.
 type Begin struct {
 	// Client is true if this Begin is from client side.
 	Client bool
-	// BeginTime is the time when the RPC begins.
+	// BeginTime is the time when the RPC attempt begins.
 	BeginTime time.Time
 	// FailFast indicates if this RPC is failfast.
 	FailFast bool
+	// IsClientStream indicates whether the RPC is a client streaming RPC.
+	IsClientStream bool
+	// IsServerStream indicates whether the RPC is a server streaming RPC.
+	IsServerStream bool
+	// IsTransparentRetryAttempt indicates whether this attempt was initiated
+	// due to transparently retrying a previous attempt.
+	IsTransparentRetryAttempt bool
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -54,18 +64,42 @@
 
 func (s *Begin) isRPCStats() {}
 
-// InPayload contains the information for an incoming payload.
+// DelayedPickComplete indicates that the RPC is unblocked following a delay in
+// selecting a connection for the call.
+type DelayedPickComplete struct{}
+
+// IsClient indicates DelayedPickComplete is available on the client.
+func (*DelayedPickComplete) IsClient() bool { return true }
+
+func (*DelayedPickComplete) isRPCStats() {}
+
+// PickerUpdated indicates that the RPC is unblocked following a delay in
+// selecting a connection for the call.
+//
+// Deprecated: will be removed in a future release; use DelayedPickComplete
+// instead.
+type PickerUpdated = DelayedPickComplete
+
+// InPayload contains stats about an incoming payload.
 type InPayload struct {
 	// Client is true if this InPayload is from client side.
 	Client bool
-	// Payload is the payload with original type.
-	Payload interface{}
-	// Data is the serialized message payload.
-	Data []byte
-	// Length is the length of uncompressed data.
+	// Payload is the payload with original type.  It may be modified after
+	// the call to HandleRPC that receives this InPayload returns, so it must
+	// be copied if it is needed later.
+	Payload any
+
+	// Length is the size of the uncompressed payload data. Does not include any
+	// framing (gRPC or HTTP/2).
 	Length int
-	// WireLength is the length of data on wire (compressed, signed, encrypted).
+	// CompressedLength is the size of the compressed payload data. Does not
+	// include any framing (gRPC or HTTP/2). Same as Length if compression is
+	// not enabled.
+	CompressedLength int
+	// WireLength is the size of the compressed payload data plus gRPC framing.
+	// Does not include HTTP/2 framing.
 	WireLength int
+
 	// RecvTime is the time when the payload is received.
 	RecvTime time.Time
 }
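For orientation, with illustrative numbers: if a 100-byte message compresses to 40 bytes, a stats handler would see Length = 100, CompressedLength = 40, and WireLength = 45, since the 5-byte gRPC message framing header (a 1-byte compressed flag plus a 4-byte length) is included in WireLength while HTTP/2 framing never is.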
@@ -75,12 +109,18 @@
 
 func (s *InPayload) isRPCStats() {}
 
-// InHeader contains stats when a header is received.
+// InHeader contains stats about header reception.
+//
+// - Server-side: The first stats event after the RPC request is received.
 type InHeader struct {
 	// Client is true if this InHeader is from client side.
 	Client bool
 	// WireLength is the wire length of header.
 	WireLength int
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata received.
+	Header metadata.MD
 
 	// The following fields are valid only if Client is false.
 	// FullMethod is the full RPC method string, i.e., /package.service/method.
@@ -89,8 +129,6 @@
 	RemoteAddr net.Addr
 	// LocalAddr is the local address of the corresponding connection.
 	LocalAddr net.Addr
-	// Compression is the compression algorithm used for the RPC.
-	Compression string
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -98,12 +136,15 @@
 
 func (s *InHeader) isRPCStats() {}
 
-// InTrailer contains stats when a trailer is received.
+// InTrailer contains stats about trailer reception.
 type InTrailer struct {
 	// Client is true if this InTrailer is from client side.
 	Client bool
 	// WireLength is the wire length of trailer.
 	WireLength int
+	// Trailer contains the trailer metadata received from the server. This
+	// field is only valid if this InTrailer is from the client side.
+	Trailer metadata.MD
 }
 
 // IsClient indicates if the stats information is from client side.
@@ -111,17 +152,23 @@
 
 func (s *InTrailer) isRPCStats() {}
 
-// OutPayload contains the information for an outgoing payload.
+// OutPayload contains stats about an outgoing payload.
 type OutPayload struct {
 	// Client is true if this OutPayload is from client side.
 	Client bool
-	// Payload is the payload with original type.
-	Payload interface{}
-	// Data is the serialized message payload.
-	Data []byte
-	// Length is the length of uncompressed data.
+	// Payload is the payload with original type.  It may be modified after
+	// the call to HandleRPC that receives this OutPayload returns, so it must
+	// be copied if it is needed later.
+	Payload any
+	// Length is the size of the uncompressed payload data. Does not include any
+	// framing (gRPC or HTTP/2).
 	Length int
-	// WireLength is the length of data on wire (compressed, signed, encrypted).
+	// CompressedLength is the size of the compressed payload data. Does not
+	// include any framing (gRPC or HTTP/2). Same as Length if compression is
+	// not enabled.
+	CompressedLength int
+	// WireLength is the size of the compressed payload data plus gRPC framing.
+	// Does not include HTTP/2 framing.
 	WireLength int
 	// SentTime is the time when the payload is sent.
 	SentTime time.Time
@@ -132,10 +179,17 @@
 
 func (s *OutPayload) isRPCStats() {}
 
-// OutHeader contains stats when a header is sent.
+// OutHeader contains stats about header transmission.
+//
+//   - Client-side: Only occurs after 'Begin', as headers are always the first
+//     thing sent on a stream.
 type OutHeader struct {
 	// Client is true if this OutHeader is from client side.
 	Client bool
+	// Compression is the compression algorithm used for the RPC.
+	Compression string
+	// Header contains the header metadata sent.
+	Header metadata.MD
 
 	// The following fields are valid only if Client is true.
 	// FullMethod is the full RPC method string, i.e., /package.service/method.
@@ -144,8 +198,6 @@
 	RemoteAddr net.Addr
 	// LocalAddr is the local address of the corresponding connection.
 	LocalAddr net.Addr
-	// Compression is the compression algorithm used for the RPC.
-	Compression string
 }
 
 // IsClient indicates if this stats information is from client side.
@@ -153,12 +205,19 @@
 
 func (s *OutHeader) isRPCStats() {}
 
-// OutTrailer contains stats when a trailer is sent.
+// OutTrailer contains stats about trailer transmission.
 type OutTrailer struct {
 	// Client is true if this OutTrailer is from client side.
 	Client bool
 	// WireLength is the wire length of trailer.
+	//
+	// Deprecated: This field is never set. The length is not known when this
+	// message is emitted because the trailer fields are compressed with HPACK
+	// afterwards.
 	WireLength int
+	// Trailer contains the trailer metadata sent to the client. This
+	// field is only valid if this OutTrailer is from the server side.
+	Trailer metadata.MD
 }
 
 // IsClient indicates if this stats information is from client side.
@@ -166,7 +225,7 @@
 
 func (s *OutTrailer) isRPCStats() {}
 
-// End contains stats when an RPC ends.
+// End contains stats about RPC completion.
 type End struct {
 	// Client is true if this End is from client side.
 	Client bool
@@ -176,6 +235,7 @@
 	EndTime time.Time
 	// Trailer contains the trailer metadata received from the server. This
 	// field is only valid if this End is from the client side.
+	//
+	// Deprecated: use Trailer in InTrailer instead.
 	Trailer metadata.MD
 	// Error is the error the RPC ended with. It is an error generated from
 	// status.Status and can be converted back to status.Status using
@@ -195,7 +255,7 @@
 	IsClient() bool
 }
 
-// ConnBegin contains the stats of a connection when it is established.
+// ConnBegin contains stats about connection establishment.
 type ConnBegin struct {
 	// Client is true if this ConnBegin is from client side.
 	Client bool
@@ -206,7 +266,7 @@
 
 func (s *ConnBegin) isConnStats() {}
 
-// ConnEnd contains the stats of a connection when it ends.
+// ConnEnd contains stats about connection termination.
 type ConnEnd struct {
 	// Client is true if this ConnEnd is from client side.
 	Client bool
@@ -217,84 +277,42 @@
 
 func (s *ConnEnd) isConnStats() {}
 
-type incomingTagsKey struct{}
-type outgoingTagsKey struct{}
-
 // SetTags attaches stats tagging data to the context, which will be sent in
 // the outgoing RPC with the header grpc-tags-bin.  Subsequent calls to
 // SetTags will overwrite the values from earlier calls.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release.  New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-tags-bin` header in the metadata instead.
 func SetTags(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, outgoingTagsKey{}, b)
+	return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
 }
 
 // Tags returns the tags from the context for the inbound RPC.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release.  New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
 func Tags(ctx context.Context) []byte {
-	b, _ := ctx.Value(incomingTagsKey{}).([]byte)
-	return b
+	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
+	if len(traceValues) == 0 {
+		return nil
+	}
+	return []byte(traceValues[len(traceValues)-1])
 }
 
-// SetIncomingTags attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs).
-//
-// This is intended for gRPC-internal use ONLY.
-func SetIncomingTags(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, incomingTagsKey{}, b)
-}
-
-// OutgoingTags returns the tags from the context for the outbound RPC.
-//
-// This is intended for gRPC-internal use ONLY.
-func OutgoingTags(ctx context.Context) []byte {
-	b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
-	return b
-}
-
-type incomingTraceKey struct{}
-type outgoingTraceKey struct{}
-
 // SetTrace attaches stats tagging data to the context, which will be sent in
 // the outgoing RPC with the header grpc-trace-bin.  Subsequent calls to
 // SetTrace will overwrite the values from earlier calls.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release.  New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-trace-bin` header in the metadata instead.
 func SetTrace(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, outgoingTraceKey{}, b)
+	return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
 }
 
 // Trace returns the trace from the context for the inbound RPC.
 //
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release.  New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
 func Trace(ctx context.Context) []byte {
-	b, _ := ctx.Value(incomingTraceKey{}).([]byte)
-	return b
-}
-
-// SetIncomingTrace attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs).  It is intended for
-// gRPC-internal use.
-func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
-	return context.WithValue(ctx, incomingTraceKey{}, b)
-}
-
-// OutgoingTrace returns the trace from the context for the outbound RPC.  It is
-// intended for gRPC-internal use.
-func OutgoingTrace(ctx context.Context) []byte {
-	b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
-	return b
+	traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
+	if len(traceValues) == 0 {
+		return nil
+	}
+	return []byte(traceValues[len(traceValues)-1])
 }
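A hedged sketch of the migration path the deprecation notices point to: clients set the reserved header through the metadata package, and servers read it back the same way Trace now does (last value wins). The package name is hypothetical.

```go
package tracemeta

import (
	"context"

	"google.golang.org/grpc/metadata"
)

// attachTrace is an illustrative helper equivalent to the deprecated
// stats.SetTrace: it writes grpc-trace-bin as ordinary outgoing metadata.
func attachTrace(ctx context.Context, trace []byte) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(trace))
}

// readTrace mirrors stats.Trace on the receiving side: it returns the last
// grpc-trace-bin value from the incoming metadata, or nil if absent.
func readTrace(ctx context.Context) []byte {
	vals := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
	if len(vals) == 0 {
		return nil
	}
	return []byte(vals[len(vals)-1])
}
```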
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index a1348e9..a93360e 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -32,89 +32,25 @@
 	"errors"
 	"fmt"
 
-	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
 	spb "google.golang.org/genproto/googleapis/rpc/status"
+
 	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/internal"
+	"google.golang.org/grpc/internal/status"
 )
 
-func init() {
-	internal.StatusRawProto = statusRawProto
-}
-
-func statusRawProto(s *Status) *spb.Status { return s.s }
-
-// statusError is an alias of a status proto.  It implements error and Status,
-// and a nil statusError should never be returned by this package.
-type statusError spb.Status
-
-func (se *statusError) Error() string {
-	p := (*spb.Status)(se)
-	return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
-}
-
-func (se *statusError) GRPCStatus() *Status {
-	return &Status{s: (*spb.Status)(se)}
-}
-
-// Is implements future error.Is functionality.
-// A statusError is equivalent if the code and message are identical.
-func (se *statusError) Is(target error) bool {
-	tse, ok := target.(*statusError)
-	if !ok {
-		return false
-	}
-
-	return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
-}
-
-// Status represents an RPC status code, message, and details.  It is immutable
-// and should be created with New, Newf, or FromProto.
-type Status struct {
-	s *spb.Status
-}
-
-// Code returns the status code contained in s.
-func (s *Status) Code() codes.Code {
-	if s == nil || s.s == nil {
-		return codes.OK
-	}
-	return codes.Code(s.s.Code)
-}
-
-// Message returns the message contained in s.
-func (s *Status) Message() string {
-	if s == nil || s.s == nil {
-		return ""
-	}
-	return s.s.Message
-}
-
-// Proto returns s's status as an spb.Status proto message.
-func (s *Status) Proto() *spb.Status {
-	if s == nil {
-		return nil
-	}
-	return proto.Clone(s.s).(*spb.Status)
-}
-
-// Err returns an immutable error representing s; returns nil if s.Code() is
-// OK.
-func (s *Status) Err() error {
-	if s.Code() == codes.OK {
-		return nil
-	}
-	return (*statusError)(s.s)
-}
+// Status references google.golang.org/grpc/internal/status. It represents an
+// RPC status code, message, and details.  It is immutable and should be
+// created with New, Newf, or FromProto.
+// https://godoc.org/google.golang.org/grpc/internal/status
+type Status = status.Status
 
 // New returns a Status representing c and msg.
 func New(c codes.Code, msg string) *Status {
-	return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
+	return status.New(c, msg)
 }
 
 // Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...interface{}) *Status {
+func Newf(c codes.Code, format string, a ...any) *Status {
 	return New(c, fmt.Sprintf(format, a...))
 }
 
@@ -124,7 +60,7 @@
 }
 
 // Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...interface{}) error {
+func Errorf(c codes.Code, format string, a ...any) error {
 	return Error(c, fmt.Sprintf(format, a...))
 }
 
@@ -135,20 +71,57 @@
 
 // FromProto returns a Status representing s.
 func FromProto(s *spb.Status) *Status {
-	return &Status{s: proto.Clone(s).(*spb.Status)}
+	return status.FromProto(s)
 }
 
-// FromError returns a Status representing err if it was produced from this
-// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
-// Status is returned with codes.Unknown and the original error message.
+// FromError returns a Status representation of err.
+//
+//   - If err was produced by this package or implements the method `GRPCStatus()
+//     *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+//     satisfying this, the Status from `GRPCStatus()` is returned.  For wrapped
+//     errors, the message returned contains the entire err.Error() text and not
+//     just the wrapped status. In that case, ok is true.
+//
+//   - If err is nil, a Status is returned with codes.OK and no message, and ok
+//     is true.
+//
+//   - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+//     returns nil (which maps to codes.OK), or if err wraps a type
+//     satisfying this, a Status is returned with codes.Unknown and err's
+//     Error() message, and ok is false.
+//
+//   - Otherwise, err is an error not compatible with this package.  In this
+//     case, a Status is returned with codes.Unknown and err's Error() message,
+//     and ok is false.
 func FromError(err error) (s *Status, ok bool) {
 	if err == nil {
 		return nil, true
 	}
-	if se, ok := err.(interface {
-		GRPCStatus() *Status
-	}); ok {
-		return se.GRPCStatus(), true
+	type grpcstatus interface{ GRPCStatus() *Status }
+	if gs, ok := err.(grpcstatus); ok {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
+			// Error has status nil, which maps to codes.OK. There
+			// is no sensible behavior for this, so we turn it into
+			// an error with codes.Unknown and discard the existing
+			// status.
+			return New(codes.Unknown, err.Error()), false
+		}
+		return grpcStatus, true
+	}
+	var gs grpcstatus
+	if errors.As(err, &gs) {
+		grpcStatus := gs.GRPCStatus()
+		if grpcStatus == nil {
+			// Error wraps an error that has status nil, which maps
+			// to codes.OK.  There is no sensible behavior for this,
+			// so we turn it into an error with codes.Unknown and
+			// discard the existing status.
+			return New(codes.Unknown, err.Error()), false
+		}
+		p := grpcStatus.Proto()
+		p.Message = err.Error()
+		return status.FromProto(p), true
 	}
 	return New(codes.Unknown, err.Error()), false
 }
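A small illustration of the wrapped-error branch above (error text is made up): the returned Status keeps the NotFound code, but its message is the full err.Error() text rather than just the inner status message.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	base := status.Error(codes.NotFound, "user missing")
	wrapped := fmt.Errorf("lookup failed: %w", base)

	s, ok := status.FromError(wrapped)
	fmt.Println(ok, s.Code()) // true NotFound
	fmt.Println(s.Message())  // lookup failed: rpc error: code = NotFound desc = user missing
}
```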
@@ -160,69 +133,30 @@
 	return s
 }
 
-// WithDetails returns a new status with the provided details messages appended to the status.
-// If any errors are encountered, it returns nil and the first error encountered.
-func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
-	if s.Code() == codes.OK {
-		return nil, errors.New("no error details for status with code OK")
-	}
-	// s.Code() != OK implies that s.Proto() != nil.
-	p := s.Proto()
-	for _, detail := range details {
-		any, err := ptypes.MarshalAny(detail)
-		if err != nil {
-			return nil, err
-		}
-		p.Details = append(p.Details, any)
-	}
-	return &Status{s: p}, nil
-}
-
-// Details returns a slice of details messages attached to the status.
-// If a detail cannot be decoded, the error is returned in place of the detail.
-func (s *Status) Details() []interface{} {
-	if s == nil || s.s == nil {
-		return nil
-	}
-	details := make([]interface{}, 0, len(s.s.Details))
-	for _, any := range s.s.Details {
-		detail := &ptypes.DynamicAny{}
-		if err := ptypes.UnmarshalAny(any, detail); err != nil {
-			details = append(details, err)
-			continue
-		}
-		details = append(details, detail.Message)
-	}
-	return details
-}
-
-// Code returns the Code of the error if it is a Status error, codes.OK if err
-// is nil, or codes.Unknown otherwise.
+// Code returns the Code of the error if it is a Status error or if it wraps a
+// Status error. If that is not the case, it returns codes.OK if err is nil, or
+// codes.Unknown otherwise.
 func Code(err error) codes.Code {
 	// Don't use FromError to avoid allocation of OK status.
 	if err == nil {
 		return codes.OK
 	}
-	if se, ok := err.(interface {
-		GRPCStatus() *Status
-	}); ok {
-		return se.GRPCStatus().Code()
-	}
-	return codes.Unknown
+
+	return Convert(err).Code()
 }
 
-// FromContextError converts a context error into a Status.  It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
+// FromContextError converts a context error or wrapped context error into a
+// Status.  It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
 func FromContextError(err error) *Status {
-	switch err {
-	case nil:
+	if err == nil {
 		return nil
-	case context.DeadlineExceeded:
-		return New(codes.DeadlineExceeded, err.Error())
-	case context.Canceled:
-		return New(codes.Canceled, err.Error())
-	default:
-		return New(codes.Unknown, err.Error())
 	}
+	if errors.Is(err, context.DeadlineExceeded) {
+		return New(codes.DeadlineExceeded, err.Error())
+	}
+	if errors.Is(err, context.Canceled) {
+		return New(codes.Canceled, err.Error())
+	}
+	return New(codes.Unknown, err.Error())
 }
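Likewise for context errors, which callers no longer need to unwrap themselves; a quick sketch:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/status"
)

func main() {
	// A wrapped deadline error still maps to codes.DeadlineExceeded.
	err := fmt.Errorf("rpc aborted: %w", context.DeadlineExceeded)
	fmt.Println(status.FromContextError(err).Code()) // DeadlineExceeded
}
```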
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index bb99940..0a0af89 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -23,41 +23,56 @@
 	"errors"
 	"io"
 	"math"
+	rand "math/rand/v2"
 	"strconv"
 	"sync"
 	"time"
 
-	"golang.org/x/net/trace"
 	"google.golang.org/grpc/balancer"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/encoding"
-	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/internal"
 	"google.golang.org/grpc/internal/balancerload"
 	"google.golang.org/grpc/internal/binarylog"
 	"google.golang.org/grpc/internal/channelz"
-	"google.golang.org/grpc/internal/grpcrand"
+	"google.golang.org/grpc/internal/grpcutil"
+	imetadata "google.golang.org/grpc/internal/metadata"
+	iresolver "google.golang.org/grpc/internal/resolver"
+	"google.golang.org/grpc/internal/serviceconfig"
+	istatus "google.golang.org/grpc/internal/status"
 	"google.golang.org/grpc/internal/transport"
+	"google.golang.org/grpc/mem"
 	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/peer"
 	"google.golang.org/grpc/stats"
 	"google.golang.org/grpc/status"
 )
 
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
 // StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC. If a StreamHandler returns an error, it
-// should be produced by the status package, or else gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message
-// of the RPC.
-type StreamHandler func(srv interface{}, stream ServerStream) error
+// execution of a streaming RPC.
+//
+// If a StreamHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type StreamHandler func(srv any, stream ServerStream) error
 
-// StreamDesc represents a streaming RPC service's method specification.
+// StreamDesc represents a streaming RPC service's method specification.  Used
+// on the server when registering services and on the client when initiating
+// new streams.
 type StreamDesc struct {
-	StreamName string
-	Handler    StreamHandler
+	// StreamName and Handler are only used when registering handlers on a
+	// server.
+	StreamName string        // the name of the method excluding the service
+	Handler    StreamHandler // the handler called for the method
 
-	// At least one of these is true.
-	ServerStreams bool
-	ClientStreams bool
+	// ServerStreams and ClientStreams are used for registering handlers on a
+	// server as well as defining RPC behavior when passed to NewClientStream
+	// and ClientConn.NewStream.  At least one must be true.
+	ServerStreams bool // indicates the server can perform streaming sends
+	ClientStreams bool // indicates the client can perform streaming sends
 }
 
 // Stream defines the common interface a client or server stream has to satisfy.
@@ -67,9 +82,9 @@
 	// Deprecated: See ClientStream and ServerStream documentation instead.
 	Context() context.Context
 	// Deprecated: See ClientStream and ServerStream documentation instead.
-	SendMsg(m interface{}) error
+	SendMsg(m any) error
 	// Deprecated: See ClientStream and ServerStream documentation instead.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }
 
 // ClientStream defines the client-side behavior of a streaming RPC.
@@ -78,15 +93,17 @@
 // status package.
 type ClientStream interface {
 	// Header returns the header metadata received from the server if there
-	// is any. It blocks if the metadata is not ready to read.
+	// is any. It blocks if the metadata is not ready to read.  If the metadata
+	// is nil and the error is also nil, then the stream was terminated without
+	// headers, and the status can be discovered by calling RecvMsg.
 	Header() (metadata.MD, error)
 	// Trailer returns the trailer metadata from the server, if there is any.
 	// It must only be called after stream.CloseAndRecv has returned, or
 	// stream.Recv has returned a non-nil error (including io.EOF).
 	Trailer() metadata.MD
-	// CloseSend closes the send direction of the stream. It closes the stream
-	// when non-nil error is met. It is also not safe to call CloseSend
-	// concurrently with SendMsg.
+	// CloseSend closes the send direction of the stream. This method always
+	// returns a nil error. The status of the stream may be discovered using
+	// RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg.
 	CloseSend() error
 	// Context returns the context for this stream.
 	//
@@ -96,7 +113,9 @@
 	// SendMsg is generally called by generated code. On error, SendMsg aborts
 	// the stream. If the error was generated by the client, the status is
 	// returned directly; otherwise, io.EOF is returned and the status of
-	// the stream may be discovered using RecvMsg.
+	// the stream may be discovered using RecvMsg. For unary or server-streaming
+	// RPCs (StreamDesc.ClientStreams is false), a nil error is returned
+	// unconditionally.
 	//
 	// SendMsg blocks until:
 	//   - There is sufficient flow control to schedule m with the transport, or
@@ -111,7 +130,10 @@
 	// calling RecvMsg on the same stream at the same time, but it is not safe
 	// to call SendMsg on the same stream in different goroutines. It is also
 	// not safe to call CloseSend concurrently with SendMsg.
-	SendMsg(m interface{}) error
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m any) error
 	// RecvMsg blocks until it receives a message into m or the stream is
 	// done. It returns io.EOF when the stream completes successfully. On
 	// any other error, the stream is aborted and the error contains the RPC
@@ -120,7 +142,7 @@
 	// It is safe to have a goroutine calling SendMsg and another goroutine
 	// calling RecvMsg on the same stream at the same time, but it is not
 	// safe to call RecvMsg on the same stream in different goroutines.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }
 
 // NewStream creates a new Stream for the client side. This is typically
@@ -129,13 +151,13 @@
 // To ensure resources are not leaked due to the stream returned, one of the following
 // actions must be performed:
 //
-//      1. Call Close on the ClientConn.
-//      2. Cancel the context provided.
-//      3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
-//         client-streaming RPC, for instance, might use the helper function
-//         CloseAndRecv (note that CloseSend does not Recv, therefore is not
-//         guaranteed to release all resources).
-//      4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+//  1. Call Close on the ClientConn.
+//  2. Cancel the context provided.
+//  3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+//     client-streaming RPC, for instance, might use the helper function
+//     CloseAndRecv (note that CloseSend does not Recv, therefore is not
+//     guaranteed to release all resources).
+//  4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
 //
 // If none of the above happen, a goroutine and a context will be leaked, and grpc
 // will not call the optionally-configured stats handler with a stats.End message.
@@ -156,6 +178,30 @@
 }
 
 func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+	// Start tracking the RPC for idleness purposes. This is where a stream is
+	// created for both streaming and unary RPCs, and hence is a good place to
+	// track active RPC count.
+	if err := cc.idlenessMgr.OnCallBegin(); err != nil {
+		return nil, err
+	}
+	// Add a call option that decrements the active call count when the RPC
+	// completes.
+	opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
+
+	if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
+		// validate md
+		if err := imetadata.Validate(md); err != nil {
+			return nil, status.Error(codes.Internal, err.Error())
+		}
+		// validate added
+		for _, kvs := range added {
+			for i := 0; i < len(kvs); i += 2 {
+				if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
+					return nil, status.Error(codes.Internal, err.Error())
+				}
+			}
+		}
+	}
 	if channelz.IsOn() {
 		cc.incrCallsStarted()
 		defer func() {
@@ -164,15 +210,58 @@
 			}
 		}()
 	}
-	c := defaultCallInfo()
 	// Provide an opportunity for the first RPC to see the first service config
 	// provided by the resolver.
-	if err := cc.waitForResolvedAddrs(ctx); err != nil {
+	nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
+	if err != nil {
 		return nil, err
 	}
-	mc := cc.GetMethodConfig(method)
+
+	var mc serviceconfig.MethodConfig
+	var onCommit func()
+	newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+		return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
+	}
+
+	rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
+	rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
+	if err != nil {
+		if st, ok := status.FromError(err); ok {
+			// Restrict the code to the list allowed by gRFC A54.
+			if istatus.IsRestrictedControlPlaneCode(st) {
+				err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
+			}
+			return nil, err
+		}
+		return nil, toRPCErr(err)
+	}
+
+	if rpcConfig != nil {
+		if rpcConfig.Context != nil {
+			ctx = rpcConfig.Context
+		}
+		mc = rpcConfig.MethodConfig
+		onCommit = rpcConfig.OnCommitted
+		if rpcConfig.Interceptor != nil {
+			rpcInfo.Context = nil
+			ns := newStream
+			newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+				cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
+				if err != nil {
+					return nil, toRPCErr(err)
+				}
+				return cs, nil
+			}
+		}
+	}
+
+	return newStream(ctx, func() {})
+}
+
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
+	callInfo := defaultCallInfo()
 	if mc.WaitForReady != nil {
-		c.failFast = !*mc.WaitForReady
+		callInfo.failFast = !*mc.WaitForReady
 	}
 
 	// Possible context leak:
@@ -193,106 +282,94 @@
 	}()
 
 	for _, o := range opts {
-		if err := o.before(c); err != nil {
+		if err := o.before(callInfo); err != nil {
 			return nil, toRPCErr(err)
 		}
 	}
-	c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
-	c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
-	if err := setCallInfoCodec(c); err != nil {
+	callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize)
+	callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+	if err := setCallInfoCodec(callInfo); err != nil {
 		return nil, err
 	}
 
 	callHdr := &transport.CallHdr{
 		Host:           cc.authority,
 		Method:         method,
-		ContentSubtype: c.contentSubtype,
+		ContentSubtype: callInfo.contentSubtype,
+		DoneFunc:       doneFunc,
+		Authority:      callInfo.authority,
 	}
 
 	// Set our outgoing compression according to the UseCompressor CallOption, if
 	// set.  In that case, also find the compressor from the encoding package.
 	// Otherwise, use the compressor configured by the WithCompressor DialOption,
 	// if set.
-	var cp Compressor
-	var comp encoding.Compressor
-	if ct := c.compressorType; ct != "" {
+	var compressorV0 Compressor
+	var compressorV1 encoding.Compressor
+	if ct := callInfo.compressorName; ct != "" {
 		callHdr.SendCompress = ct
 		if ct != encoding.Identity {
-			comp = encoding.GetCompressor(ct)
-			if comp == nil {
+			compressorV1 = encoding.GetCompressor(ct)
+			if compressorV1 == nil {
 				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
 			}
 		}
-	} else if cc.dopts.cp != nil {
-		callHdr.SendCompress = cc.dopts.cp.Type()
-		cp = cc.dopts.cp
+	} else if cc.dopts.compressorV0 != nil {
+		callHdr.SendCompress = cc.dopts.compressorV0.Type()
+		compressorV0 = cc.dopts.compressorV0
 	}
-	if c.creds != nil {
-		callHdr.Creds = c.creds
-	}
-	var trInfo *traceInfo
-	if EnableTracing {
-		trInfo = &traceInfo{
-			tr: trace.New("grpc.Sent."+methodFamily(method), method),
-			firstLine: firstLine{
-				client: true,
-			},
-		}
-		if deadline, ok := ctx.Deadline(); ok {
-			trInfo.firstLine.deadline = time.Until(deadline)
-		}
-		trInfo.tr.LazyLog(&trInfo.firstLine, false)
-		ctx = trace.NewContext(ctx, trInfo.tr)
-	}
-	ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
-	sh := cc.dopts.copts.StatsHandler
-	var beginTime time.Time
-	if sh != nil {
-		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
-		beginTime = time.Now()
-		begin := &stats.Begin{
-			Client:    true,
-			BeginTime: beginTime,
-			FailFast:  c.failFast,
-		}
-		sh.HandleRPC(ctx, begin)
+	if callInfo.creds != nil {
+		callHdr.Creds = callInfo.creds
 	}
 
 	cs := &clientStream{
-		callHdr:      callHdr,
-		ctx:          ctx,
-		methodConfig: &mc,
-		opts:         opts,
-		callInfo:     c,
-		cc:           cc,
-		desc:         desc,
-		codec:        c.codec,
-		cp:           cp,
-		comp:         comp,
-		cancel:       cancel,
-		beginTime:    beginTime,
-		firstAttempt: true,
+		callHdr:             callHdr,
+		ctx:                 ctx,
+		methodConfig:        &mc,
+		opts:                opts,
+		callInfo:            callInfo,
+		cc:                  cc,
+		desc:                desc,
+		codec:               callInfo.codec,
+		compressorV0:        compressorV0,
+		compressorV1:        compressorV1,
+		cancel:              cancel,
+		firstAttempt:        true,
+		onCommit:            onCommit,
+		nameResolutionDelay: nameResolutionDelayed,
 	}
 	if !cc.dopts.disableRetry {
 		cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
 	}
-	cs.binlog = binarylog.GetMethodLogger(method)
+	if ml := binarylog.GetMethodLogger(method); ml != nil {
+		cs.binlogs = append(cs.binlogs, ml)
+	}
+	if cc.dopts.binaryLogger != nil {
+		if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
+			cs.binlogs = append(cs.binlogs, ml)
+		}
+	}
 
-	cs.callInfo.stream = cs
-	// Only this initial attempt has stats/tracing.
-	// TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
-	if err := cs.newAttemptLocked(sh, trInfo); err != nil {
-		cs.finish(err)
+	// Pick the transport to use and create a new stream on the transport.
+	// Assign cs.attempt upon success.
+	op := func(a *csAttempt) error {
+		if err := a.getTransport(); err != nil {
+			return err
+		}
+		if err := a.newStream(); err != nil {
+			return err
+		}
+		// Because this operation is always called either here (while creating
+		// the clientStream) or by the retry code while locked when replaying
+		// the operation, it is safe to access cs.attempt directly.
+		cs.attempt = a
+		return nil
+	}
+	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
 		return nil, err
 	}
 
-	op := func(a *csAttempt) error { return a.newStream() }
-	if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
-		cs.finish(err)
-		return nil, err
-	}
-
-	if cs.binlog != nil {
+	if len(cs.binlogs) != 0 {
 		md, _ := metadata.FromOutgoingContext(ctx)
 		logEntry := &binarylog.ClientHeader{
 			OnClientSide: true,
@@ -306,7 +383,9 @@
 				logEntry.Timeout = 0
 			}
 		}
-		cs.binlog.Log(logEntry)
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, logEntry)
+		}
 	}
 
 	if desc != unaryStreamDesc {
@@ -327,49 +406,130 @@
 	return cs, nil
 }
 
-// newAttemptLocked creates a new attempt with a transport.
-// If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
-	newAttempt := &csAttempt{
-		cs:           cs,
-		dc:           cs.cc.dopts.dc,
-		statsHandler: sh,
-		trInfo:       trInfo,
-	}
-	defer func() {
-		if retErr != nil {
-			// This attempt is not set in the clientStream, so it's finish won't
-			// be called. Call it here for stats and trace in case they are not
-			// nil.
-			newAttempt.finish(retErr)
-		}
-	}()
-
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
 	if err := cs.ctx.Err(); err != nil {
-		return toRPCErr(err)
+		return nil, toRPCErr(err)
 	}
-	t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+	if err := cs.cc.ctx.Err(); err != nil {
+		return nil, ErrClientConnClosing
+	}
+
+	ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
+	method := cs.callHdr.Method
+	var beginTime time.Time
+	shs := cs.cc.dopts.copts.StatsHandlers
+	for _, sh := range shs {
+		ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
+		beginTime = time.Now()
+		begin := &stats.Begin{
+			Client:                    true,
+			BeginTime:                 beginTime,
+			FailFast:                  cs.callInfo.failFast,
+			IsClientStream:            cs.desc.ClientStreams,
+			IsServerStream:            cs.desc.ServerStreams,
+			IsTransparentRetryAttempt: isTransparent,
+		}
+		sh.HandleRPC(ctx, begin)
+	}
+
+	var trInfo *traceInfo
+	if EnableTracing {
+		trInfo = &traceInfo{
+			tr: newTrace("grpc.Sent."+methodFamily(method), method),
+			firstLine: firstLine{
+				client: true,
+			},
+		}
+		if deadline, ok := ctx.Deadline(); ok {
+			trInfo.firstLine.deadline = time.Until(deadline)
+		}
+		trInfo.tr.LazyLog(&trInfo.firstLine, false)
+		ctx = newTraceContext(ctx, trInfo.tr)
+	}
+
+	if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
+		// Add extra metadata (metadata that will be added by transport) to context
+		// so the balancer can see them.
+		ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+			"content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+		))
+	}
+
+	return &csAttempt{
+		ctx:            ctx,
+		beginTime:      beginTime,
+		cs:             cs,
+		decompressorV0: cs.cc.dopts.dc,
+		statsHandlers:  shs,
+		trInfo:         trInfo,
+	}, nil
+}
+
+func (a *csAttempt) getTransport() error {
+	cs := a.cs
+
+	pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method}
+	pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo)
+	a.transport, a.pickResult = pick.transport, pick.result
 	if err != nil {
+		if de, ok := err.(dropError); ok {
+			err = de.error
+			a.drop = true
+		}
 		return err
 	}
-	if trInfo != nil {
-		trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+	if a.trInfo != nil {
+		a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
 	}
-	newAttempt.t = t
-	newAttempt.done = done
-	cs.attempt = newAttempt
+	if pick.blocked {
+		for _, sh := range a.statsHandlers {
+			sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
+		}
+	}
 	return nil
 }
 
 func (a *csAttempt) newStream() error {
 	cs := a.cs
 	cs.callHdr.PreviousAttempts = cs.numRetries
-	s, err := a.t.NewStream(cs.ctx, cs.callHdr)
-	if err != nil {
-		return toRPCErr(err)
+
+	// Merge metadata stored in PickResult, if any, with existing call metadata.
+	// It is safe to overwrite the csAttempt's context here, since all state
+	// maintained in it is local to the attempt. When the attempt has to be
+	// retried, a new instance of csAttempt will be created.
+	if a.pickResult.Metadata != nil {
+		// We currently do not have a function in the metadata package which
+		// merges given metadata with existing metadata in a context. The
+		// existing function `AppendToOutgoingContext()` takes a variadic
+		// argument of key-value pairs.
+		//
+		// TODO: Make it possible to retrieve key value pairs from metadata.MD
+		// in a form passable to AppendToOutgoingContext(), or create a version
+		// of AppendToOutgoingContext() that accepts a metadata.MD.
+		md, _ := metadata.FromOutgoingContext(a.ctx)
+		md = metadata.Join(md, a.pickResult.Metadata)
+		a.ctx = metadata.NewOutgoingContext(a.ctx, md)
 	}
-	cs.attempt.s = s
-	cs.attempt.p = &parser{r: s}
+
+	s, err := a.transport.NewStream(a.ctx, cs.callHdr)
+	if err != nil {
+		nse, ok := err.(*transport.NewStreamError)
+		if !ok {
+			// Unexpected.
+			return err
+		}
+
+		if nse.AllowTransparentRetry {
+			a.allowTransparentRetry = true
+		}
+
+		// Unwrap and convert error.
+		return toRPCErr(nse.Err)
+	}
+	a.transportStream = s
+	a.ctx = s.Context()
+	a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
 	return nil
 }
 
@@ -381,14 +541,15 @@
 	cc       *ClientConn
 	desc     *StreamDesc
 
-	codec baseCodec
-	cp    Compressor
-	comp  encoding.Compressor
+	codec        baseCodec
+	compressorV0 Compressor
+	compressorV1 encoding.Compressor
 
 	cancel context.CancelFunc // cancels all attempts
 
-	sentLast  bool // sent an end stream
-	beginTime time.Time
+	sentLast bool // sent an end stream
+
+	receivedFirstMsg bool // set after the first message is received
 
 	methodConfig *MethodConfig
 
@@ -396,7 +557,7 @@
 
 	retryThrottler *retryThrottler // The throttler active when the RPC began.
 
-	binlog *binarylog.MethodLogger // Binary logger, can be nil.
+	binlogs []binarylog.MethodLogger
 	// serverHeaderBinlogged is a boolean for whether server header has been
 	// logged. Server header will be logged when the first time one of those
 	// happens: stream.Header(), stream.Recv().
@@ -419,24 +580,34 @@
 	// place where we need to check if the attempt is nil.
 	attempt *csAttempt
 	// TODO(hedging): hedging will have multiple attempts simultaneously.
-	committed  bool                       // active attempt committed for retry?
-	buffer     []func(a *csAttempt) error // operations to replay on retry
-	bufferSize int                        // current size of buffer
+	committed        bool // active attempt committed for retry?
+	onCommit         func()
+	replayBuffer     []replayOp // operations to replay on retry
+	replayBufferSize int        // current size of replayBuffer
+	// nameResolutionDelay indicates if there was a delay in the name
+	// resolution. This field is only valid on the client side; it is always
+	// false on the server side.
+	nameResolutionDelay bool
+}
+
+type replayOp struct {
+	op      func(a *csAttempt) error
+	cleanup func()
 }
 
 // csAttempt implements a single transport stream attempt within a
 // clientStream.
 type csAttempt struct {
-	cs   *clientStream
-	t    transport.ClientTransport
-	s    *transport.Stream
-	p    *parser
-	done func(balancer.DoneInfo)
+	ctx             context.Context
+	cs              *clientStream
+	transport       transport.ClientTransport
+	transportStream *transport.ClientStream
+	parser          *parser
+	pickResult      balancer.PickResult
 
-	finished  bool
-	dc        Decompressor
-	decomp    encoding.Compressor
-	decompSet bool
+	finished        bool
+	decompressorV0  Decompressor
+	decompressorV1  encoding.Compressor
+	decompressorSet bool
 
 	mu sync.Mutex // guards trInfo.tr
 	// trInfo may be nil (if EnableTracing is false).
@@ -444,12 +615,26 @@
 	// and cleared when the finish method is called.
 	trInfo *traceInfo
 
-	statsHandler stats.Handler
+	statsHandlers []stats.Handler
+	beginTime     time.Time
+
+	// set for newStream errors that may be transparently retried
+	allowTransparentRetry bool
+	// set for pick errors that are returned as a status
+	drop bool
 }
 
 func (cs *clientStream) commitAttemptLocked() {
+	if !cs.committed && cs.onCommit != nil {
+		cs.onCommit()
+	}
 	cs.committed = true
-	cs.buffer = nil
+	for _, op := range cs.replayBuffer {
+		if op.cleanup != nil {
+			op.cleanup()
+		}
+	}
+	cs.replayBuffer = nil
 }
 
 func (cs *clientStream) commitAttempt() {
@@ -459,76 +644,76 @@
 }
 
 // shouldRetry returns nil if the RPC should be retried; otherwise it returns
-// the error that should be returned by the operation.
-func (cs *clientStream) shouldRetry(err error) error {
-	if cs.attempt.s == nil && !cs.callInfo.failFast {
-		// In the event of any error from NewStream (attempt.s == nil), we
-		// never attempted to write anything to the wire, so we can retry
-		// indefinitely for non-fail-fast RPCs.
-		return nil
+// the error that should be returned by the operation.  If the RPC should be
+// retried, the bool indicates whether it is being retried transparently.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+	cs := a.cs
+
+	if cs.finished || cs.committed || a.drop {
+		// RPC is finished or committed or was dropped by the picker; cannot retry.
+		return false, err
 	}
-	if cs.finished || cs.committed {
-		// RPC is finished or committed; cannot retry.
-		return err
+	if a.transportStream == nil && a.allowTransparentRetry {
+		return true, nil
 	}
 	// Wait for the trailers.
-	if cs.attempt.s != nil {
-		<-cs.attempt.s.Done()
+	unprocessed := false
+	if a.transportStream != nil {
+		<-a.transportStream.Done()
+		unprocessed = a.transportStream.Unprocessed()
 	}
-	if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+	if cs.firstAttempt && unprocessed {
 		// First attempt, stream unprocessed: transparently retry.
-		cs.firstAttempt = false
-		return nil
+		return true, nil
 	}
-	cs.firstAttempt = false
 	if cs.cc.dopts.disableRetry {
-		return err
+		return false, err
 	}
 
 	pushback := 0
 	hasPushback := false
-	if cs.attempt.s != nil {
-		if !cs.attempt.s.TrailersOnly() {
-			return err
+	if a.transportStream != nil {
+		if !a.transportStream.TrailersOnly() {
+			return false, err
 		}
 
 		// TODO(retry): Move down if the spec changes to not check server pushback
 		// before considering this a failure for throttling.
-		sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+		sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"]
 		if len(sps) == 1 {
 			var e error
 			if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
-				grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
+				channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
 				cs.retryThrottler.throttle() // This counts as a failure for throttling.
-				return err
+				return false, err
 			}
 			hasPushback = true
 		} else if len(sps) > 1 {
-			grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
+			channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
 			cs.retryThrottler.throttle() // This counts as a failure for throttling.
-			return err
+			return false, err
 		}
 	}
 
 	var code codes.Code
-	if cs.attempt.s != nil {
-		code = cs.attempt.s.Status().Code()
+	if a.transportStream != nil {
+		code = a.transportStream.Status().Code()
 	} else {
-		code = status.Convert(err).Code()
+		code = status.Code(err)
 	}
 
-	rp := cs.methodConfig.retryPolicy
-	if rp == nil || !rp.retryableStatusCodes[code] {
-		return err
+	rp := cs.methodConfig.RetryPolicy
+	if rp == nil || !rp.RetryableStatusCodes[code] {
+		return false, err
 	}
 
 	// Note: the ordering here is important; we count this as a failure
 	// only if the code matched a retryable code.
 	if cs.retryThrottler.throttle() {
-		return err
+		return false, err
 	}
-	if cs.numRetries+1 >= rp.maxAttempts {
-		return err
+	if cs.numRetries+1 >= rp.MaxAttempts {
+		return false, err
 	}
 
 	var dur time.Duration
@@ -536,12 +721,11 @@
 		dur = time.Millisecond * time.Duration(pushback)
 		cs.numRetriesSincePushback = 0
 	} else {
-		fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
-		cur := float64(rp.initialBackoff) * fact
-		if max := float64(rp.maxBackoff); cur > max {
-			cur = max
-		}
-		dur = time.Duration(grpcrand.Int63n(int64(cur)))
+		fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+		cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
+		// Apply jitter by multiplying by a random factor between 0.8 and 1.2.
+		cur *= 0.8 + 0.4*rand.Float64()
+		dur = time.Duration(int64(cur))
 		cs.numRetriesSincePushback++
 	}
 
@@ -551,25 +735,32 @@
 	select {
 	case <-t.C:
 		cs.numRetries++
-		return nil
+		return false, nil
 	case <-cs.ctx.Done():
 		t.Stop()
-		return status.FromContextError(cs.ctx.Err()).Err()
+		return false, status.FromContextError(cs.ctx.Err()).Err()
 	}
 }
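Annotation (not part of the vendored patch): the backoff above is exponential with a cap and a ±20% jitter. A minimal standalone sketch of the same arithmetic, assuming a hypothetical policy of 100ms initial backoff, 2x multiplier, and a 1s cap — the real values come from the method config's RetryPolicy:

```go
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

// retryBackoff mirrors the computation above: exponential growth from the
// initial backoff, capped at the maximum, then scaled by a random jitter
// factor in [0.8, 1.2).
func retryBackoff(initial, max time.Duration, multiplier float64, retries int) time.Duration {
	cur := math.Min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(max))
	cur *= 0.8 + 0.4*rand.Float64() // jitter
	return time.Duration(int64(cur))
}

func main() {
	// Hypothetical policy: 100ms initial backoff, 2x multiplier, 1s cap.
	for n := 0; n < 5; n++ {
		fmt.Println(retryBackoff(100*time.Millisecond, time.Second, 2, n))
	}
}
```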
 
 // Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
 	for {
-		cs.attempt.finish(lastErr)
-		if err := cs.shouldRetry(lastErr); err != nil {
+		attempt.finish(toRPCErr(lastErr))
+		isTransparent, err := attempt.shouldRetry(lastErr)
+		if err != nil {
 			cs.commitAttemptLocked()
 			return err
 		}
-		if err := cs.newAttemptLocked(nil, nil); err != nil {
+		cs.firstAttempt = false
+		attempt, err = cs.newAttemptLocked(isTransparent)
+		if err != nil {
+			// Only returns an error if the ClientConn is closed or the
+			// stream's context is canceled.
 			return err
 		}
-		if lastErr = cs.replayBufferLocked(); lastErr == nil {
+		// Note that the first op in replayBuffer always sets cs.attempt
+		// if it is able to pick a transport and create a stream.
+		if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
 			return nil
 		}
 	}
@@ -579,7 +770,10 @@
 	cs.commitAttempt()
 	// No need to lock before using attempt, since we know it is committed and
 	// cannot change.
-	return cs.attempt.s.Context()
+	if cs.attempt.transportStream != nil {
+		return cs.attempt.transportStream.Context()
+	}
+	return cs.ctx
 }
 
 func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
@@ -587,7 +781,23 @@
 	for {
 		if cs.committed {
 			cs.mu.Unlock()
-			return op(cs.attempt)
+			// toRPCErr is used in case the error from the attempt comes from
+			// NewClientStream, which intentionally doesn't return a status
+			// error to allow for further inspection; all other errors should
+			// already be status errors.
+			return toRPCErr(op(cs.attempt))
+		}
+		if len(cs.replayBuffer) == 0 {
+			// For the first op, which controls creation of the stream and
+			// assigns cs.attempt, we need to create a new attempt inline
+			// before executing the first op.  On subsequent ops, the attempt
+			// is created immediately before replaying the ops.
+			var err error
+			if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
+				cs.mu.Unlock()
+				cs.finish(err)
+				return err
+			}
 		}
 		a := cs.attempt
 		cs.mu.Unlock()
@@ -598,14 +808,14 @@
 			continue
 		}
 		if err == io.EOF {
-			<-a.s.Done()
+			<-a.transportStream.Done()
 		}
-		if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+		if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) {
 			onSuccess()
 			cs.mu.Unlock()
 			return err
 		}
-		if err := cs.retryLocked(err); err != nil {
+		if err := cs.retryLocked(a, err); err != nil {
 			cs.mu.Unlock()
 			return err
 		}
@@ -616,15 +826,24 @@
 	var m metadata.MD
 	err := cs.withRetry(func(a *csAttempt) error {
 		var err error
-		m, err = a.s.Header()
+		m, err = a.transportStream.Header()
 		return toRPCErr(err)
 	}, cs.commitAttemptLocked)
+
+	if m == nil && err == nil {
+		// The stream ended with success.  Finish the clientStream.
+		err = io.EOF
+	}
+
 	if err != nil {
 		cs.finish(err)
-		return nil, err
+		// Do not return the error.  The user should get it by calling Recv().
+		return nil, nil
 	}
-	if cs.binlog != nil && !cs.serverHeaderBinlogged {
-		// Only log if binary log is on and header has not been logged.
+
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
+		// Only log if binary log is on and header has not been logged, and
+		// there are actually headers to log.
 		logEntry := &binarylog.ServerHeader{
 			OnClientSide: true,
 			Header:       m,
@@ -633,10 +852,13 @@
 		if peer, ok := peer.FromContext(cs.Context()); ok {
 			logEntry.PeerAddr = peer.Addr
 		}
-		cs.binlog.Log(logEntry)
 		cs.serverHeaderBinlogged = true
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, logEntry)
+		}
 	}
-	return m, err
+
+	return m, nil
 }
 
 func (cs *clientStream) Trailer() metadata.MD {
@@ -648,36 +870,36 @@
 	// directions -- it will prevent races and should not meaningfully impact
 	// performance.
 	cs.commitAttempt()
-	if cs.attempt.s == nil {
+	if cs.attempt.transportStream == nil {
 		return nil
 	}
-	return cs.attempt.s.Trailer()
+	return cs.attempt.transportStream.Trailer()
 }
 
-func (cs *clientStream) replayBufferLocked() error {
-	a := cs.attempt
-	for _, f := range cs.buffer {
-		if err := f(a); err != nil {
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+	for _, f := range cs.replayBuffer {
+		if err := f.op(attempt); err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
 	// Note: we will still buffer if retry is disabled (for transparent retries).
 	if cs.committed {
 		return
 	}
-	cs.bufferSize += sz
-	if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+	cs.replayBufferSize += sz
+	if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
 		cs.commitAttemptLocked()
+		cleanup()
 		return
 	}
-	cs.buffer = append(cs.buffer, op)
+	cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
 }
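Annotation (not part of the vendored patch): the buffering above caps replay memory — once the cap is exceeded, the attempt is committed and retries are abandoned. A simplified sketch of that pattern with hypothetical types standing in for clientStream and csAttempt:

```go
package main

import "fmt"

// replayOp pairs a replayable operation with an optional cleanup, mirroring
// the replayOp struct used by clientStream above.
type replayOp struct {
	op      func() error
	cleanup func()
}

// replayBuffer is a simplified, hypothetical stand-in for the buffering
// fields on clientStream.
type replayBuffer struct {
	committed bool
	size, max int
	ops       []replayOp
}

// add records an op for replay on retry unless the stream is committed, or
// commits the attempt (abandoning retries) once the buffer exceeds its cap.
func (b *replayBuffer) add(sz int, op func() error, cleanup func()) {
	if b.committed {
		return
	}
	b.size += sz
	if b.size > b.max {
		b.committed = true
		if cleanup != nil {
			cleanup()
		}
		return
	}
	b.ops = append(b.ops, replayOp{op: op, cleanup: cleanup})
}

// replay re-executes every buffered op, in order, against a new attempt.
func (b *replayBuffer) replay() error {
	for _, o := range b.ops {
		if err := o.op(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	b := &replayBuffer{max: 16}
	b.add(8, func() error { fmt.Println("replay: send msg 1"); return nil }, nil)
	b.add(8, func() error { fmt.Println("replay: send msg 2"); return nil }, nil)
	_ = b.replay() // on retry, both sends are re-run in order
}
```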
 
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
+func (cs *clientStream) SendMsg(m any) (err error) {
 	defer func() {
 		if err != nil && err != io.EOF {
 			// Call finish on the client stream for errors generated by this SendMsg
@@ -696,95 +918,114 @@
 	}
 
 	// load hdr, payload, data
-	hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was performed, and therefore it is a
+		// different set of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > *cs.callInfo.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+	if payloadLen > *cs.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
 	}
-	msgBytes := data // Store the pointer before setting to nil. For binary logging.
+
+	// always take an extra ref in case data == payload (i.e. when the data isn't
+	// compressed). The original ref will always be freed by the deferred free above.
+	payload.Ref()
 	op := func(a *csAttempt) error {
-		err := a.sendMsg(m, hdr, payload, data)
-		// nil out the message and uncomp when replaying; they are only needed for
-		// stats which is disabled for subsequent attempts.
-		m, data = nil, nil
-		return err
+		return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
 	}
-	err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
-	if cs.binlog != nil && err == nil {
-		cs.binlog.Log(&binarylog.ClientMessage{
+
+	// onSuccess is invoked when the op is captured for a subsequent retry. If the
+	// stream was established by a previous message and therefore retries are
+	// disabled, onSuccess will not be invoked, and the extra payload reference
+	// can be freed immediately.
+	onSuccessCalled := false
+	err = cs.withRetry(op, func() {
+		cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+		onSuccessCalled = true
+	})
+	if !onSuccessCalled {
+		payload.Free()
+	}
+	if len(cs.binlogs) != 0 && err == nil {
+		cm := &binarylog.ClientMessage{
 			OnClientSide: true,
-			Message:      msgBytes,
-		})
+			Message:      data.Materialize(),
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, cm)
+		}
 	}
-	return
+	return err
 }
 
-func (cs *clientStream) RecvMsg(m interface{}) error {
-	if cs.binlog != nil && !cs.serverHeaderBinlogged {
+func (cs *clientStream) RecvMsg(m any) error {
+	if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
 		// Call Header() to binary log header if it's not already logged.
 		cs.Header()
 	}
 	var recvInfo *payloadInfo
-	if cs.binlog != nil {
+	if len(cs.binlogs) != 0 {
 		recvInfo = &payloadInfo{}
+		defer recvInfo.free()
 	}
 	err := cs.withRetry(func(a *csAttempt) error {
 		return a.recvMsg(m, recvInfo)
 	}, cs.commitAttemptLocked)
-	if cs.binlog != nil && err == nil {
-		cs.binlog.Log(&binarylog.ServerMessage{
+	if len(cs.binlogs) != 0 && err == nil {
+		sm := &binarylog.ServerMessage{
 			OnClientSide: true,
-			Message:      recvInfo.uncompressedBytes,
-		})
+			Message:      recvInfo.uncompressedBytes.Materialize(),
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, sm)
+		}
 	}
 	if err != nil || !cs.desc.ServerStreams {
 		// err != nil or non-server-streaming indicates end of stream.
 		cs.finish(err)
-
-		if cs.binlog != nil {
-			// finish will not log Trailer. Log Trailer here.
-			logEntry := &binarylog.ServerTrailer{
-				OnClientSide: true,
-				Trailer:      cs.Trailer(),
-				Err:          err,
-			}
-			if logEntry.Err == io.EOF {
-				logEntry.Err = nil
-			}
-			if peer, ok := peer.FromContext(cs.Context()); ok {
-				logEntry.PeerAddr = peer.Addr
-			}
-			cs.binlog.Log(logEntry)
-		}
 	}
 	return err
 }
 
 func (cs *clientStream) CloseSend() error {
 	if cs.sentLast {
-		// TODO: return an error and finish the stream instead, due to API misuse?
+		// Return a nil error on repeated calls to this method.
 		return nil
 	}
 	cs.sentLast = true
 	op := func(a *csAttempt) error {
-		a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+		a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
 		// Always return nil; io.EOF is the only error that might make sense
 		// instead, but there is no need to signal the client to call RecvMsg
 		// as the only use left for the stream after CloseSend is to call
 		// RecvMsg.  This also matches historical behavior.
 		return nil
 	}
-	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
-	if cs.binlog != nil {
-		cs.binlog.Log(&binarylog.ClientHalfClose{
+	cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
+	if len(cs.binlogs) != 0 {
+		chc := &binarylog.ClientHalfClose{
 			OnClientSide: true,
-		})
+		}
+		for _, binlog := range cs.binlogs {
+			binlog.Log(cs.ctx, chc)
+		}
 	}
-	// We never returned an error here for reasons.
+	// We don't return an error here as we expect users to read all messages
+	// from the stream and get the RPC status from RecvMsg().  Note that
+	// SendMsg() must return an error when one occurs so the application
+	// knows to stop sending messages, but that does not apply here.
 	return nil
 }
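Annotation (not part of the vendored patch): a hedged usage sketch of the contract documented above — CloseSend itself stays nil, and the RPC status is delivered through RecvMsg. stream and resp are hypothetical placeholders supplied by the caller:

```go
package main

import (
	"io"

	"google.golang.org/grpc"
)

// drainStream closes the send side and reads until the stream terminates;
// the final RPC status surfaces from RecvMsg, never from CloseSend.
func drainStream(stream grpc.ClientStream, resp any) error {
	if err := stream.CloseSend(); err != nil {
		return err // effectively unreachable per the comment above
	}
	for {
		if err := stream.RecvMsg(resp); err != nil {
			if err == io.EOF {
				return nil // stream terminated with an OK status
			}
			return err // the RPC status arrives here
		}
	}
}

func main() {}
```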
 
@@ -799,17 +1040,44 @@
 		return
 	}
 	cs.finished = true
+	for _, onFinish := range cs.callInfo.onFinish {
+		onFinish(err)
+	}
 	cs.commitAttemptLocked()
+	if cs.attempt != nil {
+		cs.attempt.finish(err)
+		// after functions all rely upon having a stream.
+		if cs.attempt.transportStream != nil {
+			for _, o := range cs.opts {
+				o.after(cs.callInfo, cs.attempt)
+			}
+		}
+	}
+
 	cs.mu.Unlock()
-	// For binary logging. only log cancel in finish (could be caused by RPC ctx
-	// canceled or ClientConn closed). Trailer will be logged in RecvMsg.
-	//
-	// Only one of cancel or trailer needs to be logged. In the cases where
-	// users don't call RecvMsg, users must have already canceled the RPC.
-	if cs.binlog != nil && status.Code(err) == codes.Canceled {
-		cs.binlog.Log(&binarylog.Cancel{
-			OnClientSide: true,
-		})
+	// Only one of cancel or trailer needs to be logged.
+	if len(cs.binlogs) != 0 {
+		switch err {
+		case errContextCanceled, errContextDeadline, ErrClientConnClosing:
+			c := &binarylog.Cancel{
+				OnClientSide: true,
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(cs.ctx, c)
+			}
+		default:
+			logEntry := &binarylog.ServerTrailer{
+				OnClientSide: true,
+				Trailer:      cs.Trailer(),
+				Err:          err,
+			}
+			if peer, ok := peer.FromContext(cs.Context()); ok {
+				logEntry.PeerAddr = peer.Addr
+			}
+			for _, binlog := range cs.binlogs {
+				binlog.Log(cs.ctx, logEntry)
+			}
+		}
 	}
 	if err == nil {
 		cs.retryThrottler.successfulRPC()
@@ -821,19 +1089,10 @@
 			cs.cc.incrCallsSucceeded()
 		}
 	}
-	if cs.attempt != nil {
-		cs.attempt.finish(err)
-		// after functions all rely upon having a stream.
-		if cs.attempt.s != nil {
-			for _, o := range cs.opts {
-				o.after(cs.callInfo)
-			}
-		}
-	}
 	cs.cancel()
 }
 
-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
 	cs := a.cs
 	if a.trInfo != nil {
 		a.mu.Lock()
@@ -842,7 +1101,7 @@
 		}
 		a.mu.Unlock()
 	}
-	if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+	if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
 		if !cs.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it.  finish is not called; RecvMsg()
@@ -851,47 +1110,52 @@
 		}
 		return io.EOF
 	}
-	if a.statsHandler != nil {
-		a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
-	}
-	if channelz.IsOn() {
-		a.t.IncrMsgSent()
+	if len(a.statsHandlers) != 0 {
+		for _, sh := range a.statsHandlers {
+			sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+		}
 	}
 	return nil
 }
 
-func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
 	cs := a.cs
-	if a.statsHandler != nil && payInfo == nil {
+	if len(a.statsHandlers) != 0 && payInfo == nil {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
 
-	if !a.decompSet {
+	if !a.decompressorSet {
 		// Block until we receive headers containing received message encoding.
-		if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
-			if a.dc == nil || a.dc.Type() != ct {
+		if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if a.decompressorV0 == nil || a.decompressorV0.Type() != ct {
 				// No configured decompressor, or it does not match the incoming
 				// message encoding; attempt to find a registered compressor that does.
-				a.dc = nil
-				a.decomp = encoding.GetCompressor(ct)
+				a.decompressorV0 = nil
+				a.decompressorV1 = encoding.GetCompressor(ct)
 			}
 		} else {
 			// No compression is used; disable our decompressor.
-			a.dc = nil
+			a.decompressorV0 = nil
 		}
 		// Only initialize this state once per stream.
-		a.decompSet = true
+		a.decompressorSet = true
 	}
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
-	if err != nil {
+	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
 		if err == io.EOF {
-			if statusErr := a.s.Status().Err(); statusErr != nil {
+			if statusErr := a.transportStream.Status().Err(); statusErr != nil {
 				return statusErr
 			}
+			// Received no message and an OK status for a non-server-streaming RPC.
+			if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
+				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+			}
 			return io.EOF // indicates successful end of stream.
 		}
+
 		return toRPCErr(err)
 	}
+	cs.receivedFirstMsg = true
 	if a.trInfo != nil {
 		a.mu.Lock()
 		if a.trInfo.tr != nil {
@@ -899,34 +1163,28 @@
 		}
 		a.mu.Unlock()
 	}
-	if a.statsHandler != nil {
-		a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
-			Client:   true,
-			RecvTime: time.Now(),
-			Payload:  m,
-			// TODO truncate large payload.
-			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength,
-			Length:     len(payInfo.uncompressedBytes),
+	for _, sh := range a.statsHandlers {
+		sh.HandleRPC(a.ctx, &stats.InPayload{
+			Client:           true,
+			RecvTime:         time.Now(),
+			Payload:          m,
+			WireLength:       payInfo.compressedLength + headerLen,
+			CompressedLength: payInfo.compressedLength,
+			Length:           payInfo.uncompressedBytes.Len(),
 		})
 	}
-	if channelz.IsOn() {
-		a.t.IncrMsgRecv()
-	}
 	if cs.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
 	}
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+		return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
 	}
-	if err == io.EOF {
-		return a.s.Status().Err() // non-server streaming Recv returns nil on success
-	}
-	return toRPCErr(err)
+	return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
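Annotation (not part of the vendored patch): the two recv calls above enforce unary-response cardinality — exactly one message, then EOF. A condensed sketch of that check, with next as a hypothetical stand-in for recv():

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// recvUnary expects exactly one message followed by io.EOF, mirroring the
// cardinality checks above.
func recvUnary(next func() (string, error)) (string, error) {
	msg, err := next()
	if err == io.EOF {
		return "", errors.New("cardinality violation: received no response message")
	}
	if err != nil {
		return "", err
	}
	if _, err := next(); err != io.EOF {
		return "", errors.New("cardinality violation: expected <EOF> after the first message")
	}
	return msg, nil
}

func main() {
	msgs := []string{"only-response"}
	i := 0
	next := func() (string, error) {
		if i >= len(msgs) {
			return "", io.EOF
		}
		i++
		return msgs[i-1], nil
	}
	fmt.Println(recvUnary(next))
}
```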
 
 func (a *csAttempt) finish(err error) {
@@ -941,33 +1199,33 @@
 		err = nil
 	}
 	var tr metadata.MD
-	if a.s != nil {
-		a.t.CloseStream(a.s, err)
-		tr = a.s.Trailer()
+	if a.transportStream != nil {
+		a.transportStream.Close(err)
+		tr = a.transportStream.Trailer()
 	}
 
-	if a.done != nil {
+	if a.pickResult.Done != nil {
 		br := false
-		if a.s != nil {
-			br = a.s.BytesReceived()
+		if a.transportStream != nil {
+			br = a.transportStream.BytesReceived()
 		}
-		a.done(balancer.DoneInfo{
+		a.pickResult.Done(balancer.DoneInfo{
 			Err:           err,
 			Trailer:       tr,
-			BytesSent:     a.s != nil,
+			BytesSent:     a.transportStream != nil,
 			BytesReceived: br,
 			ServerLoad:    balancerload.Parse(tr),
 		})
 	}
-	if a.statsHandler != nil {
+	for _, sh := range a.statsHandlers {
 		end := &stats.End{
 			Client:    true,
-			BeginTime: a.cs.beginTime,
+			BeginTime: a.beginTime,
 			EndTime:   time.Now(),
 			Trailer:   tr,
 			Error:     err,
 		}
-		a.statsHandler.HandleRPC(a.cs.ctx, end)
+		sh.HandleRPC(a.ctx, end)
 	}
 	if a.trInfo != nil && a.trInfo.tr != nil {
 		if err == nil {
@@ -982,12 +1240,12 @@
 	a.mu.Unlock()
 }
 
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
 // given addrConn.
 //
 // It's expected that the given transport is either the same one in addrConn, or
 // is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
 //
 // Main difference between this and ClientConn.NewStream:
 // - no retry
@@ -1036,7 +1294,7 @@
 	// if set.
 	var cp Compressor
 	var comp encoding.Compressor
-	if ct := c.compressorType; ct != "" {
+	if ct := c.compressorName; ct != "" {
 		callHdr.SendCompress = ct
 		if ct != encoding.Identity {
 			comp = encoding.GetCompressor(ct)
@@ -1044,9 +1302,9 @@
 				return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
 			}
 		}
-	} else if ac.cc.dopts.cp != nil {
-		callHdr.SendCompress = ac.cc.dopts.cp.Type()
-		cp = ac.cc.dopts.cp
+	} else if ac.cc.dopts.compressorV0 != nil {
+		callHdr.SendCompress = ac.cc.dopts.compressorV0.Type()
+		cp = ac.cc.dopts.compressorV0
 	}
 	if c.creds != nil {
 		callHdr.Creds = c.creds
@@ -1054,37 +1312,41 @@
 
 	// Use a special addrConnStream to avoid retry.
 	as := &addrConnStream{
-		callHdr:  callHdr,
-		ac:       ac,
-		ctx:      ctx,
-		cancel:   cancel,
-		opts:     opts,
-		callInfo: c,
-		desc:     desc,
-		codec:    c.codec,
-		cp:       cp,
-		comp:     comp,
-		t:        t,
+		callHdr:          callHdr,
+		ac:               ac,
+		ctx:              ctx,
+		cancel:           cancel,
+		opts:             opts,
+		callInfo:         c,
+		desc:             desc,
+		codec:            c.codec,
+		sendCompressorV0: cp,
+		sendCompressorV1: comp,
+		transport:        t,
 	}
 
-	as.callInfo.stream = as
-	s, err := as.t.NewStream(as.ctx, as.callHdr)
+	s, err := as.transport.NewStream(as.ctx, as.callHdr)
 	if err != nil {
 		err = toRPCErr(err)
 		return nil, err
 	}
-	as.s = s
-	as.p = &parser{r: s}
+	as.transportStream = s
+	as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
 	ac.incrCallsStarted()
 	if desc != unaryStreamDesc {
-		// Listen on cc and stream contexts to cleanup when the user closes the
-		// ClientConn or cancels the stream context.  In all other cases, an error
-		// should already be injected into the recv buffer by the transport, which
-		// the client will eventually receive, and then we will cancel the stream's
-		// context in clientStream.finish.
+		// Listen on stream context to cleanup when the stream context is
+		// canceled.  Also listen for the addrConn's context in case the
+		// addrConn is closed or reconnects to a different address.  In all
+		// other cases, an error should already be injected into the recv
+		// buffer by the transport, which the client will eventually receive,
+		// and then we will cancel the stream's context in
+		// addrConnStream.finish.
 		go func() {
+			ac.mu.Lock()
+			acCtx := ac.ctx
+			ac.mu.Unlock()
 			select {
-			case <-ac.ctx.Done():
+			case <-acCtx.Done():
 				as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
 			case <-ctx.Done():
 				as.finish(toRPCErr(ctx.Err()))
@@ -1095,29 +1357,32 @@
 }
 
 type addrConnStream struct {
-	s         *transport.Stream
-	ac        *addrConn
-	callHdr   *transport.CallHdr
-	cancel    context.CancelFunc
-	opts      []CallOption
-	callInfo  *callInfo
-	t         transport.ClientTransport
-	ctx       context.Context
-	sentLast  bool
-	desc      *StreamDesc
-	codec     baseCodec
-	cp        Compressor
-	comp      encoding.Compressor
-	decompSet bool
-	dc        Decompressor
-	decomp    encoding.Compressor
-	p         *parser
-	mu        sync.Mutex
-	finished  bool
+	transportStream  *transport.ClientStream
+	ac               *addrConn
+	callHdr          *transport.CallHdr
+	cancel           context.CancelFunc
+	opts             []CallOption
+	callInfo         *callInfo
+	transport        transport.ClientTransport
+	ctx              context.Context
+	sentLast         bool
+	receivedFirstMsg bool
+	desc             *StreamDesc
+	codec            baseCodec
+	sendCompressorV0 Compressor
+	sendCompressorV1 encoding.Compressor
+	decompressorSet  bool
+	decompressorV0   Decompressor
+	decompressorV1   encoding.Compressor
+	parser           *parser
+
+	// mu guards finished and is held for the entire finish method.
+	mu       sync.Mutex
+	finished bool
 }
 
 func (as *addrConnStream) Header() (metadata.MD, error) {
-	m, err := as.s.Header()
+	m, err := as.transportStream.Header()
 	if err != nil {
 		as.finish(toRPCErr(err))
 	}
@@ -1125,17 +1390,17 @@
 }
 
 func (as *addrConnStream) Trailer() metadata.MD {
-	return as.s.Trailer()
+	return as.transportStream.Trailer()
 }
 
 func (as *addrConnStream) CloseSend() error {
 	if as.sentLast {
-		// TODO: return an error and finish the stream instead, due to API misuse?
+		// Return a nil error on repeated calls to this method.
 		return nil
 	}
 	as.sentLast = true
 
-	as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+	as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
 	// Always return nil; io.EOF is the only error that might make sense
 	// instead, but there is no need to signal the client to call RecvMsg
 	// as the only use left for the stream after CloseSend is to call
@@ -1144,10 +1409,10 @@
 }
 
 func (as *addrConnStream) Context() context.Context {
-	return as.s.Context()
+	return as.transportStream.Context()
 }
 
-func (as *addrConnStream) SendMsg(m interface{}) (err error) {
+func (as *addrConnStream) SendMsg(m any) (err error) {
 	defer func() {
 		if err != nil && err != io.EOF {
 			// Call finish on the client stream for errors generated by this SendMsg
@@ -1166,17 +1431,26 @@
 	}
 
 	// load hdr, payload, data
-	hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was performed, and therefore it is a
+		// different set of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payld) > *as.callInfo.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+	if payload.Len() > *as.callInfo.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
 	}
 
-	if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+	if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
 		if !as.desc.ClientStreams {
 			// For non-client-streaming RPCs, we return nil instead of EOF on error
 			// because the generated code requires it.  finish is not called; RecvMsg()
@@ -1186,13 +1460,10 @@
 		return io.EOF
 	}
 
-	if channelz.IsOn() {
-		as.t.IncrMsgSent()
-	}
 	return nil
 }
 
-func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+func (as *addrConnStream) RecvMsg(m any) (err error) {
 	defer func() {
 		if err != nil || !as.desc.ServerStreams {
 			// err != nil or non-server-streaming indicates end of stream.
@@ -1200,36 +1471,37 @@
 		}
 	}()
 
-	if !as.decompSet {
+	if !as.decompressorSet {
 		// Block until we receive headers containing received message encoding.
-		if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
-			if as.dc == nil || as.dc.Type() != ct {
+		if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+			if as.decompressorV0 == nil || as.decompressorV0.Type() != ct {
 				// No configured decompressor, or it does not match the incoming
 				// message encoding; attempt to find a registered compressor that does.
-				as.dc = nil
-				as.decomp = encoding.GetCompressor(ct)
+				as.decompressorV0 = nil
+				as.decompressorV1 = encoding.GetCompressor(ct)
 			}
 		} else {
 			// No compression is used; disable our decompressor.
-			as.dc = nil
+			as.decompressorV0 = nil
 		}
 		// Only initialize this state once per stream.
-		as.decompSet = true
+		as.decompressorSet = true
 	}
-	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
-	if err != nil {
+	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
 		if err == io.EOF {
-			if statusErr := as.s.Status().Err(); statusErr != nil {
+			if statusErr := as.transportStream.Status().Err(); statusErr != nil {
 				return statusErr
 			}
+			// Received no message and an OK status for a non-server-streaming RPC.
+			if !as.desc.ServerStreams && !as.receivedFirstMsg {
+				return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+			}
 			return io.EOF // indicates successful end of stream.
 		}
 		return toRPCErr(err)
 	}
+	as.receivedFirstMsg = true
 
-	if channelz.IsOn() {
-		as.t.IncrMsgRecv()
-	}
 	if as.desc.ServerStreams {
 		// Subsequent messages should be received by subsequent RecvMsg calls.
 		return nil
@@ -1237,14 +1509,12 @@
 
 	// Special handling for non-server-stream rpcs.
 	// This recv expects EOF or errors, so we don't collect inPayload.
-	err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
-	if err == nil {
-		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+		return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
+	} else if err != nil {
+		return toRPCErr(err)
 	}
-	if err == io.EOF {
-		return as.s.Status().Err() // non-server streaming Recv returns nil on success
-	}
-	return toRPCErr(err)
+	return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
 }
 
 func (as *addrConnStream) finish(err error) {
@@ -1258,8 +1528,8 @@
 		// Ending a stream with EOF indicates a success.
 		err = nil
 	}
-	if as.s != nil {
-		as.t.CloseStream(as.s, err)
+	if as.transportStream != nil {
+		as.transportStream.Close(err)
 	}
 
 	if err != nil {
@@ -1273,8 +1543,10 @@
 
 // ServerStream defines the server-side behavior of a streaming RPC.
 //
-// All errors returned from ServerStream methods are compatible with the
-// status package.
+// Errors returned from ServerStream methods are compatible with the status
+// package.  However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
 type ServerStream interface {
 	// SetHeader sets the header metadata. It may be called multiple times.
 	// When called multiple times, all the provided metadata will be merged.
@@ -1306,7 +1578,10 @@
 	// It is safe to have a goroutine calling SendMsg and another goroutine
 	// calling RecvMsg on the same stream at the same time, but it is not safe
 	// to call SendMsg on the same stream in different goroutines.
-	SendMsg(m interface{}) error
+	//
+	// It is not safe to modify the message after calling SendMsg. Tracing
+	// libraries and stats handlers may use the message lazily.
+	SendMsg(m any) error
 	// RecvMsg blocks until it receives a message into m or the stream is
 	// done. It returns io.EOF when the client has performed a CloseSend. On
 	// any non-EOF error, the stream is aborted and the error contains the
@@ -1315,29 +1590,33 @@
 	// It is safe to have a goroutine calling SendMsg and another goroutine
 	// calling RecvMsg on the same stream at the same time, but it is not
 	// safe to call RecvMsg on the same stream in different goroutines.
-	RecvMsg(m interface{}) error
+	RecvMsg(m any) error
 }
 
 // serverStream implements a server side Stream.
 type serverStream struct {
 	ctx   context.Context
-	t     transport.ServerTransport
-	s     *transport.Stream
+	s     *transport.ServerStream
 	p     *parser
 	codec baseCodec
+	desc  *StreamDesc
 
-	cp     Compressor
-	dc     Decompressor
-	comp   encoding.Compressor
-	decomp encoding.Compressor
+	compressorV0   Compressor
+	compressorV1   encoding.Compressor
+	decompressorV0 Decompressor
+	decompressorV1 encoding.Compressor
+
+	sendCompressorName string
+
+	recvFirstMsg bool // set after the first message is received
 
 	maxReceiveMessageSize int
 	maxSendMessageSize    int
 	trInfo                *traceInfo
 
-	statsHandler stats.Handler
+	statsHandler []stats.Handler
 
-	binlog *binarylog.MethodLogger
+	binlogs []binarylog.MethodLogger
 	// serverHeaderBinlogged indicates whether server header has been logged. It
 	// will happen when one of the following two happens: stream.SendHeader(),
 	// stream.Send().
@@ -1357,17 +1636,29 @@
 	if md.Len() == 0 {
 		return nil
 	}
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
 	return ss.s.SetHeader(md)
 }
 
 func (ss *serverStream) SendHeader(md metadata.MD) error {
-	err := ss.t.WriteHeader(ss.s, md)
-	if ss.binlog != nil && !ss.serverHeaderBinlogged {
+	err := imetadata.Validate(md)
+	if err != nil {
+		return status.Error(codes.Internal, err.Error())
+	}
+
+	err = ss.s.SendHeader(md)
+	if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
 		h, _ := ss.s.Header()
-		ss.binlog.Log(&binarylog.ServerHeader{
+		sh := &binarylog.ServerHeader{
 			Header: h,
-		})
+		}
 		ss.serverHeaderBinlogged = true
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ss.ctx, sh)
+		}
 	}
 	return err
 }
@@ -1376,10 +1667,13 @@
 	if md.Len() == 0 {
 		return
 	}
+	if err := imetadata.Validate(md); err != nil {
+		logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+	}
 	ss.s.SetTrailer(md)
 }
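Annotation (not part of the vendored patch): a short sketch of how a handler exercises the three metadata entry points validated above; the metadata keys are hypothetical:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// annotate drives the server-side metadata calls; ss stands in for the
// stream passed to any generated streaming handler.
func annotate(ss grpc.ServerStream) error {
	// SetHeader merges; the header goes out with the first message or
	// SendHeader, whichever happens first.
	if err := ss.SetHeader(metadata.Pairs("x-stage", "start")); err != nil {
		return err // invalid metadata is rejected with codes.Internal
	}
	// SendHeader flushes the accumulated header immediately.
	if err := ss.SendHeader(metadata.Pairs("x-accepted", "true")); err != nil {
		return err
	}
	// Trailer metadata is sent when the handler returns.
	ss.SetTrailer(metadata.Pairs("x-done", "true"))
	return nil
}

func main() {}
```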
 
-func (ss *serverStream) SendMsg(m interface{}) (err error) {
+func (ss *serverStream) SendMsg(m any) (err error) {
 	defer func() {
 		if ss.trInfo != nil {
 			ss.mu.Lock()
@@ -1387,7 +1681,7 @@
 				if err == nil {
 					ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
 				} else {
-					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 					ss.trInfo.tr.SetError()
 				}
 			}
@@ -1395,7 +1689,7 @@
 		}
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
-			ss.t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			// A non-user-specified status was sent out. This should be an error
 			// case (e.g., a server-side cancellation).
 			//
@@ -1403,43 +1697,68 @@
 			// status from the service handler, we will log that error instead.
 			// This behavior is similar to an interceptor.
 		}
-		if channelz.IsOn() && err == nil {
-			ss.t.IncrMsgSent()
-		}
 	}()
 
+	// The server handler may have set a new compressor by calling
+	// SetSendCompressor. If so, use it to compress the outbound message.
+	if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
+		ss.compressorV1 = encoding.GetCompressor(sendCompressorsName)
+		ss.sendCompressorName = sendCompressorsName
+	}
+
 	// load hdr, payload, data
-	hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+	hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool)
 	if err != nil {
 		return err
 	}
 
+	defer func() {
+		data.Free()
+		// only free payload if compression was performed, and therefore it is a
+		// different set of buffers from data.
+		if pf.isCompressed() {
+			payload.Free()
+		}
+	}()
+
+	dataLen := data.Len()
+	payloadLen := payload.Len()
+
 	// TODO(dfawley): should we be checking len(data) instead?
-	if len(payload) > ss.maxSendMessageSize {
-		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+	if payloadLen > ss.maxSendMessageSize {
+		return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
 	}
-	if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+	if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
 		return toRPCErr(err)
 	}
-	if ss.binlog != nil {
+
+	if len(ss.binlogs) != 0 {
 		if !ss.serverHeaderBinlogged {
 			h, _ := ss.s.Header()
-			ss.binlog.Log(&binarylog.ServerHeader{
+			sh := &binarylog.ServerHeader{
 				Header: h,
-			})
+			}
 			ss.serverHeaderBinlogged = true
+			for _, binlog := range ss.binlogs {
+				binlog.Log(ss.ctx, sh)
+			}
 		}
-		ss.binlog.Log(&binarylog.ServerMessage{
-			Message: data,
-		})
+		sm := &binarylog.ServerMessage{
+			Message: data.Materialize(),
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ss.ctx, sm)
+		}
 	}
-	if ss.statsHandler != nil {
-		ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
+		}
 	}
 	return nil
 }
 
-func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+func (ss *serverStream) RecvMsg(m any) (err error) {
 	defer func() {
 		if ss.trInfo != nil {
 			ss.mu.Lock()
@@ -1447,7 +1766,7 @@
 				if err == nil {
 					ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
 				} else if err != io.EOF {
-					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+					ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
 					ss.trInfo.tr.SetError()
 				}
 			}
@@ -1455,7 +1774,7 @@
 		}
 		if err != nil && err != io.EOF {
 			st, _ := status.FromError(toRPCErr(err))
-			ss.t.WriteStatus(ss.s, st)
+			ss.s.WriteStatus(st)
 			// A non-user-specified status was sent out. This should be an error
 			// case (e.g., a server-side cancellation).
 			//
@@ -1463,42 +1782,64 @@
 			// status from the service handler, we will log that error instead.
 			// This behavior is similar to an interceptor.
 		}
-		if channelz.IsOn() && err == nil {
-			ss.t.IncrMsgRecv()
-		}
 	}()
 	var payInfo *payloadInfo
-	if ss.statsHandler != nil || ss.binlog != nil {
+	if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
 		payInfo = &payloadInfo{}
+		defer payInfo.free()
 	}
-	if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+	if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
 		if err == io.EOF {
-			if ss.binlog != nil {
-				ss.binlog.Log(&binarylog.ClientHalfClose{})
+			if len(ss.binlogs) != 0 {
+				chc := &binarylog.ClientHalfClose{}
+				for _, binlog := range ss.binlogs {
+					binlog.Log(ss.ctx, chc)
+				}
+			}
+			// Received no request message for a non-client-streaming RPC.
+			if !ss.desc.ClientStreams && !ss.recvFirstMsg {
+				return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC")
 			}
 			return err
 		}
 		if err == io.ErrUnexpectedEOF {
-			err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+			err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
 		}
 		return toRPCErr(err)
 	}
-	if ss.statsHandler != nil {
-		ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
-			RecvTime: time.Now(),
-			Payload:  m,
-			// TODO truncate large payload.
-			Data:       payInfo.uncompressedBytes,
-			WireLength: payInfo.wireLength,
-			Length:     len(payInfo.uncompressedBytes),
-		})
+	ss.recvFirstMsg = true
+	if len(ss.statsHandler) != 0 {
+		for _, sh := range ss.statsHandler {
+			sh.HandleRPC(ss.s.Context(), &stats.InPayload{
+				RecvTime:         time.Now(),
+				Payload:          m,
+				Length:           payInfo.uncompressedBytes.Len(),
+				WireLength:       payInfo.compressedLength + headerLen,
+				CompressedLength: payInfo.compressedLength,
+			})
+		}
 	}
-	if ss.binlog != nil {
-		ss.binlog.Log(&binarylog.ClientMessage{
-			Message: payInfo.uncompressedBytes,
-		})
+	if len(ss.binlogs) != 0 {
+		cm := &binarylog.ClientMessage{
+			Message: payInfo.uncompressedBytes.Materialize(),
+		}
+		for _, binlog := range ss.binlogs {
+			binlog.Log(ss.ctx, cm)
+		}
 	}
-	return nil
+
+	if ss.desc.ClientStreams {
+		// Subsequent messages should be received by subsequent RecvMsg calls.
+		return nil
+	}
+	// Special handling for non-client-stream rpcs.
+	// This recv expects EOF or errors, so we don't collect inPayload.
+	if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
+		return nil
+	} else if err != nil {
+		return err
+	}
+	return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC")
 }
 
 // MethodFromServerStream returns the method string for the input stream.
@@ -1507,23 +1848,26 @@
 	return Method(stream.Context())
 }
 
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned payloadFormat indicates whether
+// compression was performed and therefore whether the payload needs to be
+// freed in addition to the returned data. Freeing the payload when it was not
+// compressed can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
 	if preparedMsg, ok := m.(*PreparedMsg); ok {
-		return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+		return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
 	}
 	// The input interface is not a prepared msg.
 	// Marshal and Compress the data at this point
 	data, err = encode(codec, m)
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, nil, 0, err
 	}
-	compData, err := compress(data, cp, comp)
+	compData, pf, err := compress(data, cp, comp, pool)
 	if err != nil {
-		return nil, nil, nil, err
+		data.Free()
+		return nil, nil, nil, 0, err
 	}
-	hdr, payload = msgHeader(data, compData)
-	return hdr, payload, data, nil
+	hdr, payload = msgHeader(data, compData, pf)
+	return hdr, data, payload, pf, nil
 }
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
new file mode 100644
index 0000000..0037fee
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -0,0 +1,238 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// ServerStreamingClient represents the client side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+type ServerStreamingClient[Res any] interface {
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream.  If
+	// io.EOF is returned, the stream has terminated with an OK status.  Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
+	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality.  No other methods in the ClientStream should be called
+	// directly.
+	ClientStream
+}
+
+// ServerStreamingServer represents the server side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
+type ServerStreamingServer[Res any] interface {
+	// Send sends a response message to the client.  The server handler may
+	// call Send multiple times to send multiple messages to the client.  An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
+	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality.  No other methods in the ServerStream should
+	// be called directly.
+	ServerStream
+}
+
+// ClientStreamingClient represents the client side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+type ClientStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server.  The client may call Send
+	// multiple times to send multiple messages to the server.  On error, Send
+	// aborts the stream.  If the error was generated by the client, the status
+	// is returned directly.  Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using CloseAndRecv().
+	Send(*Req) error
+
+	// CloseAndRecv closes the request stream and waits for the server's
+	// response.  This method must be called once and only once after sending
+	// all request messages.  Any error returned is implemented by the status
+	// package.
+	CloseAndRecv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, and Trailer
+	// functionality.  No other methods in the ClientStream should be called
+	// directly.
+	ClientStream
+}
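Annotation (not part of the vendored patch): a hedged sketch of driving this interface; int and string stand in for generated request/response message types:

```go
package main

import "google.golang.org/grpc"

// sum sends a fixed request stream and then collects the unary response.
func sum(stream grpc.ClientStreamingClient[int, string]) (string, error) {
	for _, v := range []int{1, 2, 3} {
		if err := stream.Send(&v); err != nil {
			return "", err // on io.EOF, CloseAndRecv reports the real status
		}
	}
	res, err := stream.CloseAndRecv()
	if err != nil {
		return "", err
	}
	return *res, nil
}

func main() {}
```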
+
+// ClientStreamingServer represents the server side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
+type ClientStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client.  The server may
+	// repeatedly call Recv to read messages from the request stream.  If
+	// io.EOF is returned, it indicates the client called CloseAndRecv on its
+	// ClientStreamingClient.  Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
+	Recv() (*Req, error)
+
+	// SendAndClose sends a single response message to the client and closes
+	// the stream.  This method must be called once and only once after all
+	// request messages have been processed.  Recv should not be called after
+	// calling SendAndClose.
+	SendAndClose(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality.  No other methods in the ServerStream should
+	// be called directly.
+	ServerStream
+}
+
+// BidiStreamingClient represents the client side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+type BidiStreamingClient[Req any, Res any] interface {
+	// Send sends a request message to the server.  The client may call Send
+	// multiple times to send multiple messages to the server.  On error, Send
+	// aborts the stream.  If the error was generated by the client, the status
+	// is returned directly.  Otherwise, io.EOF is returned, and the status of
+	// the stream may be discovered using Recv().
+	Send(*Req) error
+
+	// Recv receives the next response message from the server. The client may
+	// repeatedly call Recv to read messages from the response stream.  If
+	// io.EOF is returned, the stream has terminated with an OK status.  Any
+	// other error is compatible with the status package and indicates the
+	// RPC's status code and message.
+	Recv() (*Res, error)
+
+	// ClientStream is embedded to provide Context, Header, Trailer, and
+	// CloseSend functionality.  No other methods in the ClientStream should be
+	// called directly.
+	ClientStream
+}
+
+// BidiStreamingServer represents the server side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
+type BidiStreamingServer[Req any, Res any] interface {
+	// Recv receives the next request message from the client.  The server may
+	// repeatedly call Recv to read messages from the request stream.  If
+	// io.EOF is returned, it indicates the client called CloseSend on its
+	// BidiStreamingClient.  Any other error indicates the stream was
+	// terminated unexpectedly, and the handler method should return, as the
+	// stream is no longer usable.
+	Recv() (*Req, error)
+
+	// Send sends a response message to the client.  The server handler may
+	// call Send multiple times to send multiple messages to the client.  An
+	// error is returned if the stream was terminated unexpectedly, and the
+	// handler method should return, as the stream is no longer usable.
+	Send(*Res) error
+
+	// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+	// SetTrailer functionality.  No other methods in the ServerStream should
+	// be called directly.
+	ServerStream
+}
+
+// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
+// and BidiStreamingClient interfaces. It is used in generated code.
+type GenericClientStream[Req any, Res any] struct {
+	ClientStream
+}
+
+var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
+var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+
+// Send pushes one message into the stream of requests to be consumed by the
+// server. The type of message which can be sent is determined by the Req type
+// parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of responses generated by the server.
+// The type of the message returned is determined by the Res type parameter
+// of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
+	m := new(Res)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// CloseAndRecv closes the sending side of the stream, then receives the unary
+// response from the server. The type of message which it returns is determined
+// by the Res type parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(Res)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
+// and BidiStreamingServer interfaces. It is used in generated code.
+type GenericServerStream[Req any, Res any] struct {
+	ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+	m := new(Req)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
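Annotation (not part of the vendored patch): a usage sketch tying the generic server interfaces above together — an echo handler against BidiStreamingServer, with string standing in for a generated message type:

```go
package main

import (
	"io"

	"google.golang.org/grpc"
)

// echo reads requests until the client half-closes and mirrors each one back.
func echo(stream grpc.BidiStreamingServer[string, string]) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil // client called CloseSend; end the RPC with OK
		}
		if err != nil {
			return err
		}
		if err := stream.Send(req); err != nil {
			return err
		}
	}
}

func main() {}
```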
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index 584360f..07f0125 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -17,11 +17,18 @@
  */
 
 // Package tap defines the function handles which are executed on the transport
-// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
+// layer of gRPC-Go and related information.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
 package tap
 
 import (
 	"context"
+
+	"google.golang.org/grpc/metadata"
 )
 
 // Info defines the relevant information needed by the handles.
@@ -29,19 +36,23 @@
 	// FullMethodName is the string of grpc method (in the format of
 	// /package.service/method).
 	FullMethodName string
+
+	// Header contains the header metadata received.
+	Header metadata.MD
+
 	// TODO: More to be added.
 }
 
-// ServerInHandle defines the function which runs before a new stream is created
-// on the server side. If it returns a non-nil error, the stream will not be
-// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
-// The client will receive an RPC error "code = Unavailable, desc = stream
-// terminated by RST_STREAM with error code: REFUSED_STREAM".
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client.  If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
 //
 // It's intended to be used in situations where you don't want to waste the
-// resources to accept the new stream (e.g. rate-limiting). And the content of
-// the error will be ignored and won't be sent back to the client. For other
-// general usages, please use interceptors.
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
 //
 // Note that it is executed in the per-connection I/O goroutine(s) instead of
 // per-RPC goroutine. Therefore, users should NOT have any
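
A minimal rate-limiting tap sketch matching the updated ServerInHandle contract above; it assumes golang.org/x/time/rate is available, and the limiter numbers are illustrative.

package main

import (
	"context"

	"golang.org/x/time/rate"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/grpc/tap"
)

var limiter = rate.NewLimiter(100, 200) // 100 new streams/s, burst of 200

func rateLimitTap(ctx context.Context, info *tap.Info) (context.Context, error) {
	if !limiter.Allow() {
		// Under the updated contract, a status error's code and message are
		// propagated to the client instead of a bare REFUSED_STREAM.
		return nil, status.Error(codes.ResourceExhausted, "too many streams")
	}
	return ctx, nil
}

func main() {
	_ = grpc.NewServer(grpc.InTapHandle(rateLimitTap))
}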
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index 0a57b99..10f4f79 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -26,8 +26,6 @@
 	"strings"
 	"sync"
 	"time"
-
-	"golang.org/x/net/trace"
 )
 
 // EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
@@ -41,15 +39,34 @@
 	if i := strings.Index(m, "/"); i >= 0 {
 		m = m[:i] // remove everything from second slash
 	}
-	if i := strings.LastIndex(m, "."); i >= 0 {
-		m = m[i+1:] // cut down to last dotted component
-	}
 	return m
 }
 
+// traceEventLog mirrors golang.org/x/net/trace.EventLog.
+//
+// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
+type traceEventLog interface {
+	Printf(format string, a ...any)
+	Errorf(format string, a ...any)
+	Finish()
+}
+
+// traceLog mirrors golang.org/x/net/trace.Trace.
+//
+// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
+type traceLog interface {
+	LazyLog(x fmt.Stringer, sensitive bool)
+	LazyPrintf(format string, a ...any)
+	SetError()
+	SetRecycler(f func(any))
+	SetTraceInfo(traceID, spanID uint64)
+	SetMaxEvents(m int)
+	Finish()
+}
+
 // traceInfo contains tracing information for an RPC.
 type traceInfo struct {
-	tr        trace.Trace
+	tr        traceLog
 	firstLine firstLine
 }
 
@@ -100,8 +117,8 @@
 
 // payload represents an RPC request or response payload.
 type payload struct {
-	sent bool        // whether this is an outgoing payload
-	msg  interface{} // e.g. a proto.Message
+	sent bool // whether this is an outgoing payload
+	msg  any  // e.g. a proto.Message
 	// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
 }
 
@@ -114,7 +131,7 @@
 
 type fmtStringer struct {
 	format string
-	a      []interface{}
+	a      []any
 }
 
 func (f *fmtStringer) String() string {
diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go
new file mode 100644
index 0000000..1da3a23
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace_notrace.go
@@ -0,0 +1,52 @@
+//go:build grpcnotrace
+
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// grpcnotrace can be used to avoid importing golang.org/x/net/trace, which in
+// turn enables binaries using gRPC-Go for dead code elimination, which can
+// yield 10-15% improvements in binary size when tracing is not needed.
+
+import (
+	"context"
+	"fmt"
+)
+
+type notrace struct{}
+
+func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {}
+func (notrace) LazyPrintf(format string, a ...any)     {}
+func (notrace) SetError()                              {}
+func (notrace) SetRecycler(f func(any))                {}
+func (notrace) SetTraceInfo(traceID, spanID uint64)    {}
+func (notrace) SetMaxEvents(m int)                     {}
+func (notrace) Finish()                                {}
+
+func newTrace(family, title string) traceLog {
+	return notrace{}
+}
+
+func newTraceContext(ctx context.Context, tr traceLog) context.Context {
+	return ctx
+}
+
+func newTraceEventLog(family, title string) traceEventLog {
+	return nil
+}
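
Note for consumers of this vendored tree: per the package comment above, building with go build -tags grpcnotrace selects these stubs instead of trace_withtrace.go, so golang.org/x/net/trace is never imported and dead-code elimination can deliver the quoted 10-15% binary-size win.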
diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go
new file mode 100644
index 0000000..88d6e85
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace_withtrace.go
@@ -0,0 +1,39 @@
+//go:build !grpcnotrace
+
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+	"context"
+
+	t "golang.org/x/net/trace"
+)
+
+func newTrace(family, title string) traceLog {
+	return t.New(family, title)
+}
+
+func newTraceContext(ctx context.Context, tr traceLog) context.Context {
+	return t.NewContext(ctx, tr)
+}
+
+func newTraceEventLog(family, title string) traceEventLog {
+	return t.NewEventLog(family, title)
+}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 40af096..76f2e0d 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
 package grpc
 
 // Version is the current grpc version.
-const Version = "1.25.1"
+const Version = "1.76.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
deleted file mode 100644
index f324be5..0000000
--- a/vendor/google.golang.org/grpc/vet.sh
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/bash
-
-if [[ `uname -a` = *"Darwin"* ]]; then
-  echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
-  exit 1
-fi
-
-set -ex  # Exit on error; debugging enabled.
-set -o pipefail  # Fail a pipe if any sub-command fails.
-
-die() {
-  echo "$@" >&2
-  exit 1
-}
-
-fail_on_output() {
-  tee /dev/stderr | (! read)
-}
-
-# Check to make sure it's safe to modify the user's git repo.
-git status --porcelain | fail_on_output
-
-# Undo any edits made by this script.
-cleanup() {
-  git reset --hard HEAD
-}
-trap cleanup EXIT
-
-PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
-
-if [[ "$1" = "-install" ]]; then
-  # Check for module support
-  if go help mod >& /dev/null; then
-    # Install the pinned versions as defined in module tools.
-    pushd ./test/tools
-    go install \
-      golang.org/x/lint/golint \
-      golang.org/x/tools/cmd/goimports \
-      honnef.co/go/tools/cmd/staticcheck \
-      github.com/client9/misspell/cmd/misspell \
-      github.com/golang/protobuf/protoc-gen-go
-    popd
-  else
-    # Ye olde `go get` incantation.
-    # Note: this gets the latest version of all tools (vs. the pinned versions
-    # with Go modules).
-    go get -u \
-      golang.org/x/lint/golint \
-      golang.org/x/tools/cmd/goimports \
-      honnef.co/go/tools/cmd/staticcheck \
-      github.com/client9/misspell/cmd/misspell \
-      github.com/golang/protobuf/protoc-gen-go
-  fi
-  if [[ -z "${VET_SKIP_PROTO}" ]]; then
-    if [[ "${TRAVIS}" = "true" ]]; then
-      PROTOBUF_VERSION=3.3.0
-      PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
-      pushd /home/travis
-      wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
-      unzip ${PROTOC_FILENAME}
-      bin/protoc --version
-      popd
-    elif ! which protoc > /dev/null; then
-      die "Please install protoc into your path"
-    fi
-  fi
-  exit 0
-elif [[ "$#" -ne 0 ]]; then
-  die "Unknown argument(s): $*"
-fi
-
-# - Ensure all source files contain a copyright message.
-(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go')
-
-# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
-(! grep 'func Test[^(]' *_test.go)
-(! grep 'func Test[^(]' test/*.go)
-
-# - Do not import x/net/context.
-(! git grep -l 'x/net/context' -- "*.go")
-
-# - Do not import math/rand for real library code.  Use internal/grpcrand for
-#   thread safety.
-git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test')
-
-# - Ensure all ptypes proto packages are renamed when importing.
-(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go")
-
-# - Check imports that are illegal in appengine (until Go 1.11).
-# TODO: Remove when we drop Go 1.10 support
-go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
-
-# - gofmt, goimports, golint (with exceptions for generated code), go vet.
-gofmt -s -d -l . 2>&1 | fail_on_output
-goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") | fail_on_output
-golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
-go vet -all .
-
-# - Check that generated proto files are up to date.
-if [[ -z "${VET_SKIP_PROTO}" ]]; then
-  PATH="/home/travis/bin:${PATH}" make proto && \
-    git status --porcelain 2>&1 | fail_on_output || \
-    (git status; git --no-pager diff; exit 1)
-fi
-
-# - Check that our module is tidy.
-if go help mod >& /dev/null; then
-  go mod tidy && \
-    git status --porcelain 2>&1 | fail_on_output || \
-    (git status; git --no-pager diff; exit 1)
-fi
-
-# - Collection of static analysis checks
-# TODO(dfawley): don't use deprecated functions in examples.
-staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore '
-google.golang.org/grpc/balancer.go:SA1019
-google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019
-google.golang.org/grpc/balancer/grpclb/grpclb_test.go:SA1019
-google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019
-google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019
-google.golang.org/grpc/xds/internal/resolver/xds_resolver.go:SA1019
-google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019
-google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019
-google.golang.org/grpc/balancer_conn_wrappers.go:SA1019
-google.golang.org/grpc/balancer_test.go:SA1019
-google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
-google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019
-google.golang.org/grpc/clientconn.go:S1024
-google.golang.org/grpc/clientconn_state_transition_test.go:SA1019
-google.golang.org/grpc/clientconn_test.go:SA1019
-google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019
-google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019
-google.golang.org/grpc/internal/transport/handler_server.go:SA1019
-google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019
-google.golang.org/grpc/internal/resolver/dns/dns_resolver.go:SA1019
-google.golang.org/grpc/stats/stats_test.go:SA1019
-google.golang.org/grpc/test/balancer_test.go:SA1019
-google.golang.org/grpc/test/channelz_test.go:SA1019
-google.golang.org/grpc/test/end2end_test.go:SA1019
-google.golang.org/grpc/test/healthcheck_test.go:SA1019
-' ./...
-misspell -error .
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
new file mode 100644
index 0000000..2ef36bb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodelim marshals and unmarshals varint size-delimited messages.
+package protodelim
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/proto"
+)
+
+// MarshalOptions is a configurable varint size-delimited marshaler.
+type MarshalOptions struct{ proto.MarshalOptions }
+
+// MarshalTo writes a varint size-delimited wire-format message to w.
+// If w returns an error, MarshalTo returns it unchanged.
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
+	msgBytes, err := o.MarshalOptions.Marshal(m)
+	if err != nil {
+		return 0, err
+	}
+
+	sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
+	sizeWritten, err := w.Write(sizeBytes)
+	if err != nil {
+		return sizeWritten, err
+	}
+	msgWritten, err := w.Write(msgBytes)
+	if err != nil {
+		return sizeWritten + msgWritten, err
+	}
+	return sizeWritten + msgWritten, nil
+}
+
+// MarshalTo writes a varint size-delimited wire-format message to w
+// with the default options.
+//
+// See the documentation for [MarshalOptions.MarshalTo].
+func MarshalTo(w io.Writer, m proto.Message) (int, error) {
+	return MarshalOptions{}.MarshalTo(w, m)
+}
+
+// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
+type UnmarshalOptions struct {
+	proto.UnmarshalOptions
+
+	// MaxSize is the maximum size in wire-format bytes of a single message.
+	// Unmarshaling a message larger than MaxSize will return an error.
+	// A zero MaxSize will default to 4 MiB.
+	// Setting MaxSize to -1 disables the limit.
+	MaxSize int64
+}
+
+const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
+
+// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
+// that is larger than its configured [UnmarshalOptions.MaxSize].
+type SizeTooLargeError struct {
+	// Size is the varint size of the message encountered
+	// that was larger than the provided MaxSize.
+	Size uint64
+
+	// MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
+	MaxSize uint64
+}
+
+func (e *SizeTooLargeError) Error() string {
+	return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
+}
+
+// Reader is the interface expected by [UnmarshalFrom].
+// It is implemented by *[bufio.Reader].
+type Reader interface {
+	io.Reader
+	io.ByteReader
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// The returned error is [io.EOF] only if no bytes are read.
+// If an EOF happens after reading some but not all the bytes,
+// UnmarshalFrom returns a non-io.EOF error.
+// In particular if r returns a non-io.EOF error, UnmarshalFrom returns it unchanged,
+// and if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
+func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
+	var sizeArr [binary.MaxVarintLen64]byte
+	sizeBuf := sizeArr[:0]
+	for i := range sizeArr {
+		b, err := r.ReadByte()
+		if err != nil {
+			// Immediate EOF is unexpected.
+			if err == io.EOF && i != 0 {
+				break
+			}
+			return err
+		}
+		sizeBuf = append(sizeBuf, b)
+		if b < 0x80 {
+			break
+		}
+	}
+	size, n := protowire.ConsumeVarint(sizeBuf)
+	if n < 0 {
+		return protowire.ParseError(n)
+	}
+
+	maxSize := o.MaxSize
+	if maxSize == 0 {
+		maxSize = defaultMaxSize
+	}
+	if maxSize != -1 && size > uint64(maxSize) {
+		return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
+	}
+
+	var b []byte
+	var err error
+	if br, ok := r.(*bufio.Reader); ok {
+		// Use the []byte from the bufio.Reader instead of having to allocate one.
+		// This reduces CPU usage and allocated bytes.
+		b, err = br.Peek(int(size))
+		if err == nil {
+			defer br.Discard(int(size))
+		} else {
+			b = nil
+		}
+	}
+	if b == nil {
+		b = make([]byte, size)
+		_, err = io.ReadFull(r, b)
+	}
+
+	if err == io.EOF {
+		return io.ErrUnexpectedEOF
+	}
+	if err != nil {
+		return err
+	}
+	if err := o.Unmarshal(b, m); err != nil {
+		return err
+	}
+	return nil
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r with the default options.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the documentation for [UnmarshalOptions.UnmarshalFrom].
+func UnmarshalFrom(r Reader, m proto.Message) error {
+	return UnmarshalOptions{}.UnmarshalFrom(r, m)
+}
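
A round-trip sketch for protodelim under an assumed generated message type pb.Event with a Name field; everything else uses only the API defined above.

package main

import (
	"bufio"
	"bytes"
	"errors"
	"io"
	"log"

	"google.golang.org/protobuf/encoding/protodelim"

	pb "example.com/gen/eventpb" // hypothetical generated package
)

func main() {
	var buf bytes.Buffer
	// Write two size-delimited messages back to back.
	for _, name := range []string{"a", "b"} {
		if _, err := protodelim.MarshalTo(&buf, &pb.Event{Name: name}); err != nil {
			log.Fatal(err)
		}
	}
	// A *bufio.Reader lets UnmarshalFrom Peek/Discard instead of allocating.
	r := bufio.NewReader(&buf)
	for {
		var ev pb.Event
		if err := protodelim.UnmarshalFrom(r, &ev); errors.Is(err, io.EOF) {
			break // clean EOF: no bytes of a further message were read
		} else if err != nil {
			log.Fatal(err)
		}
		log.Printf("read %q", ev.GetName())
	}
}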
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc9..743bfb8 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@
 func SizeVarint(v uint64) int {
 	// This computes 1 + (bits.Len64(v)-1)/7.
 	// 9/64 is a good enough approximation of 1/7
-	return int(9*uint32(bits.Len64(v))+64) / 64
+	//
+	// The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+	// instruction, which is very fast on CPUs from the last few years. The
+	// specific way of expressing the calculation matches C++ Protobuf, see
+	// https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+	// optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+	// By OR'ing v with 1, we guarantee that v is never 0, without changing the
+	// result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
+	// needs to add extra instructions to handle that case.
+	//
+	// The Go compiler currently (go1.24.4) does not make use of this knowledge.
+	// This opportunity (removing the XOR instruction, which handles the 0 case)
+	// results in a small (1%) performance win across CPU architectures.
+	//
+	// Independently of avoiding the 0 case, we need the v |= 1 line because
+	// it allows the Go compiler to eliminate an extra XCHGL barrier.
+	v |= 1
+
+	// It would be clearer to write log2value := 63 - uint32(...), but
+	// writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+	// Proof of identity for our value range [0..63]:
+	// https://go.dev/play/p/Pdn9hEWYakX
+	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+	return int((log2value*9 + (64 + 9)) / 64)
 }
 
 // AppendFixed32 appends v to b as a little-endian uint32.
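
The closed form above is easy to cross-check against the naive varint size loop; a small standalone sketch (not part of the diff) that exercises every bit length:

package main

import (
	"fmt"
	"math/bits"
)

// naiveSize counts varint bytes the obvious way: one byte per 7 bits.
func naiveSize(v uint64) int {
	n := 1
	for v >= 0x80 {
		v >>= 7
		n++
	}
	return n
}

// closedForm mirrors the SizeVarint implementation above.
func closedForm(v uint64) int {
	v |= 1 // sidestep the zero case, as in the real code
	log2 := uint32(bits.LeadingZeros64(v)) ^ 63
	return int((log2*9 + (64 + 9)) / 64)
}

func main() {
	for shift := 0; shift < 64; shift++ {
		v := uint64(1) << shift
		for _, x := range []uint64{v - 1, v, v + 1} {
			if naiveSize(x) != closedForm(x) {
				fmt.Printf("mismatch at %d\n", x)
				return
			}
		}
	}
	fmt.Println("ok")
}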
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 5a57ef6..0469635 100644
--- a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
+++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
Binary files differ
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
index bf1aba0..7b9f01a 100644
--- a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -9,7 +9,7 @@
 
 const (
 	Minimum = descriptorpb.Edition_EDITION_PROTO2
-	Maximum = descriptorpb.Edition_EDITION_2023
+	Maximum = descriptorpb.Edition_EDITION_2024
 
 	// MaximumKnown is the maximum edition that is known to Go Protobuf, but not
 	// declared as supported. In other words: end users cannot use it, but
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 688aabe..dbcf90b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -72,9 +72,10 @@
 		EditionFeatures EditionFeatures
 	}
 	FileL2 struct {
-		Options   func() protoreflect.ProtoMessage
-		Imports   FileImports
-		Locations SourceLocations
+		Options       func() protoreflect.ProtoMessage
+		Imports       FileImports
+		OptionImports func() protoreflect.FileImports
+		Locations     SourceLocations
 	}
 
 	// EditionFeatures is a frequently-instantiated struct, so please take care
@@ -126,12 +127,9 @@
 func (fd *File) Parent() protoreflect.Descriptor         { return nil }
 func (fd *File) Index() int                              { return 0 }
 func (fd *File) Syntax() protoreflect.Syntax             { return fd.L1.Syntax }
-
-// Not exported and just used to reconstruct the original FileDescriptor proto
-func (fd *File) Edition() int32                  { return int32(fd.L1.Edition) }
-func (fd *File) Name() protoreflect.Name         { return fd.L1.Package.Name() }
-func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
-func (fd *File) IsPlaceholder() bool             { return false }
+func (fd *File) Name() protoreflect.Name                 { return fd.L1.Package.Name() }
+func (fd *File) FullName() protoreflect.FullName         { return fd.L1.Package }
+func (fd *File) IsPlaceholder() bool                     { return false }
 func (fd *File) Options() protoreflect.ProtoMessage {
 	if f := fd.lazyInit().Options; f != nil {
 		return f()
@@ -150,6 +148,16 @@
 func (fd *File) ProtoType(protoreflect.FileDescriptor)         {}
 func (fd *File) ProtoInternal(pragma.DoNotImplement)           {}
 
+// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
+// the original FileDescriptor proto.
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) OptionImports() protoreflect.FileImports {
+	if f := fd.lazyInit().OptionImports; f != nil {
+		return f()
+	}
+	return emptyFiles
+}
+
 func (fd *File) lazyInit() *FileL2 {
 	if atomic.LoadUint32(&fd.once) == 0 {
 		fd.lazyInitOnce()
@@ -182,9 +190,9 @@
 		L2 *EnumL2 // protected by fileDesc.once
 	}
 	EnumL1 struct {
-		eagerValues bool // controls whether EnumL2.Values is already populated
-
 		EditionFeatures EditionFeatures
+		Visibility      int32
+		eagerValues     bool // controls whether EnumL2.Values is already populated
 	}
 	EnumL2 struct {
 		Options        func() protoreflect.ProtoMessage
@@ -219,6 +227,11 @@
 func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
 func (ed *Enum) Format(s fmt.State, r rune)              { descfmt.FormatDesc(s, r, ed) }
 func (ed *Enum) ProtoType(protoreflect.EnumDescriptor)   {}
+
+// This is not part of the EnumDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
+
 func (ed *Enum) lazyInit() *EnumL2 {
 	ed.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return ed.L2
@@ -244,13 +257,13 @@
 		L2 *MessageL2 // protected by fileDesc.once
 	}
 	MessageL1 struct {
-		Enums        Enums
-		Messages     Messages
-		Extensions   Extensions
-		IsMapEntry   bool // promoted from google.protobuf.MessageOptions
-		IsMessageSet bool // promoted from google.protobuf.MessageOptions
-
+		Enums           Enums
+		Messages        Messages
+		Extensions      Extensions
 		EditionFeatures EditionFeatures
+		Visibility      int32
+		IsMapEntry      bool // promoted from google.protobuf.MessageOptions
+		IsMessageSet    bool // promoted from google.protobuf.MessageOptions
 	}
 	MessageL2 struct {
 		Options               func() protoreflect.ProtoMessage
@@ -319,6 +332,11 @@
 func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
 func (md *Message) ProtoType(protoreflect.MessageDescriptor)      {}
 func (md *Message) Format(s fmt.State, r rune)                    { descfmt.FormatDesc(s, r, md) }
+
+// This is not part of the MessageDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (md *Message) Visibility() int32 { return md.L1.Visibility }
+
 func (md *Message) lazyInit() *MessageL2 {
 	md.L0.ParentFile.lazyInit() // implicitly initializes L2
 	return md.L2
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index d2f5494..e91860f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -284,6 +284,13 @@
 			case genid.EnumDescriptorProto_Value_field_number:
 				numValues++
 			}
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.EnumDescriptorProto_Visibility_field_number:
+				ed.L1.Visibility = int32(v)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
@@ -365,6 +372,13 @@
 				md.unmarshalSeedOptions(v)
 			}
 			prevField = num
+		case protowire.VarintType:
+			v, m := protowire.ConsumeVarint(b)
+			b = b[m:]
+			switch num {
+			case genid.DescriptorProto_Visibility_field_number:
+				md.L1.Visibility = int32(v)
+			}
 		default:
 			m := protowire.ConsumeFieldValue(num, typ, b)
 			b = b[m:]
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index d4c9445..dd31faa 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -134,6 +134,7 @@
 
 	var enumIdx, messageIdx, extensionIdx, serviceIdx int
 	var rawOptions []byte
+	var optionImports []string
 	fd.L2 = new(FileL2)
 	for len(b) > 0 {
 		num, typ, n := protowire.ConsumeTag(b)
@@ -157,6 +158,8 @@
 					imp = PlaceholderFile(path)
 				}
 				fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
+			case genid.FileDescriptorProto_OptionDependency_field_number:
+				optionImports = append(optionImports, sb.MakeString(v))
 			case genid.FileDescriptorProto_EnumType_field_number:
 				fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
 				enumIdx++
@@ -178,6 +181,23 @@
 		}
 	}
 	fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+	if len(optionImports) > 0 {
+		var imps FileImports
+		var once sync.Once
+		fd.L2.OptionImports = func() protoreflect.FileImports {
+			once.Do(func() {
+				imps = make(FileImports, len(optionImports))
+				for i, path := range optionImports {
+					imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+					if imp == nil {
+						imp = PlaceholderFile(path)
+					}
+					imps[i] = protoreflect.FileImport{FileDescriptor: imp}
+				}
+			})
+			return &imps
+		}
+	}
 }
 
 func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
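
The OptionImports hook built above follows a common Go memoization shape: capture the raw inputs, resolve them at most once under sync.Once, and hand back the cached result thereafter. A standalone sketch of that pattern (names here are illustrative, not from the diff):

// lazyResolver resolves a list of paths exactly once and returns the cached
// result on every later call; sync.Once makes it safe for concurrent callers.
func lazyResolver(paths []string, lookup func(string) string) func() []string {
	var (
		once    sync.Once
		results []string
	)
	return func() []string {
		once.Do(func() {
			results = make([]string, len(paths))
			for i, p := range paths {
				results[i] = lookup(p)
			}
		})
		return results
	}
}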
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 10132c9..66ba906 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -13,8 +13,10 @@
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
-var defaultsCache = make(map[Edition]EditionFeatures)
-var defaultsKeys = []Edition{}
+var (
+	defaultsCache = make(map[Edition]EditionFeatures)
+	defaultsKeys  = []Edition{}
+)
 
 func init() {
 	unmarshalEditionDefaults(editiondefaults.Defaults)
@@ -41,7 +43,7 @@
 			b = b[m:]
 			parent.StripEnumPrefix = int(v)
 		default:
-			panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
 		}
 	}
 	return parent
@@ -69,8 +71,14 @@
 				parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
 			case genid.FeatureSet_JsonFormat_field_number:
 				parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+			case genid.FeatureSet_EnforceNamingStyle_field_number:
+				// EnforceNamingStyle is enforced in protoc, languages other than C++
+				// are not supposed to do anything with this feature.
+			case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+				// DefaultSymbolVisibility is enforced in protoc, runtimes should not
+				// inspect this value.
 			default:
-				panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
+				panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
 			}
 		case protowire.BytesType:
 			v, m := protowire.ConsumeBytes(b)
@@ -144,7 +152,7 @@
 			_, m := protowire.ConsumeVarint(b)
 			b = b[m:]
 		default:
-			panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num))
+			panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num))
 		}
 	}
 }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 0000000..a12ec97
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		// Oneof fields never use the presence bitmap.
+		//
+		// Synthetic oneofs are an exception: Those are used to implement proto3
+		// optional fields and hence should follow non-oneof field semantics.
+		return false, false
+
+	case fd.IsMap():
+		// Map-typed fields never use the presence bitmap.
+		return false, false
+
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		// Lazy fields always use the presence bitmap (only messages can be lazy).
+		isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+		return isLazy, isLazy
+
+	default:
+		// If the field has presence, use the presence bitmap.
+		return fd.HasPresence(), false
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f918..3ceb6fa 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@
 	Api_SourceContext_field_name protoreflect.Name = "source_context"
 	Api_Mixins_field_name        protoreflect.Name = "mixins"
 	Api_Syntax_field_name        protoreflect.Name = "syntax"
+	Api_Edition_field_name       protoreflect.Name = "edition"
 
 	Api_Name_field_fullname          protoreflect.FullName = "google.protobuf.Api.name"
 	Api_Methods_field_fullname       protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@
 	Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
 	Api_Mixins_field_fullname        protoreflect.FullName = "google.protobuf.Api.mixins"
 	Api_Syntax_field_fullname        protoreflect.FullName = "google.protobuf.Api.syntax"
+	Api_Edition_field_fullname       protoreflect.FullName = "google.protobuf.Api.edition"
 )
 
 // Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@
 	Api_SourceContext_field_number protoreflect.FieldNumber = 5
 	Api_Mixins_field_number        protoreflect.FieldNumber = 6
 	Api_Syntax_field_number        protoreflect.FieldNumber = 7
+	Api_Edition_field_number       protoreflect.FieldNumber = 8
 )
 
 // Names for google.protobuf.Method.
@@ -63,6 +66,7 @@
 	Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
 	Method_Options_field_name           protoreflect.Name = "options"
 	Method_Syntax_field_name            protoreflect.Name = "syntax"
+	Method_Edition_field_name           protoreflect.Name = "edition"
 
 	Method_Name_field_fullname              protoreflect.FullName = "google.protobuf.Method.name"
 	Method_RequestTypeUrl_field_fullname    protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@
 	Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
 	Method_Options_field_fullname           protoreflect.FullName = "google.protobuf.Method.options"
 	Method_Syntax_field_fullname            protoreflect.FullName = "google.protobuf.Method.syntax"
+	Method_Edition_field_fullname           protoreflect.FullName = "google.protobuf.Method.edition"
 )
 
 // Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@
 	Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
 	Method_Options_field_number           protoreflect.FieldNumber = 6
 	Method_Syntax_field_number            protoreflect.FieldNumber = 7
+	Method_Edition_field_number           protoreflect.FieldNumber = 8
 )
 
 // Names for google.protobuf.Mixin.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index f30ab6b..950a6a3 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -34,6 +34,19 @@
 	Edition_EDITION_MAX_enum_value             = 2147483647
 )
 
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+	SymbolVisibility_enum_name     = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_VISIBILITY_UNSET_enum_value  = 0
+	SymbolVisibility_VISIBILITY_LOCAL_enum_value  = 1
+	SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
 // Names for google.protobuf.FileDescriptorSet.
 const (
 	FileDescriptorSet_message_name     protoreflect.Name     = "FileDescriptorSet"
@@ -65,6 +78,7 @@
 	FileDescriptorProto_Dependency_field_name       protoreflect.Name = "dependency"
 	FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
 	FileDescriptorProto_WeakDependency_field_name   protoreflect.Name = "weak_dependency"
+	FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
 	FileDescriptorProto_MessageType_field_name      protoreflect.Name = "message_type"
 	FileDescriptorProto_EnumType_field_name         protoreflect.Name = "enum_type"
 	FileDescriptorProto_Service_field_name          protoreflect.Name = "service"
@@ -79,6 +93,7 @@
 	FileDescriptorProto_Dependency_field_fullname       protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
 	FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
 	FileDescriptorProto_WeakDependency_field_fullname   protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+	FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
 	FileDescriptorProto_MessageType_field_fullname      protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
 	FileDescriptorProto_EnumType_field_fullname         protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
 	FileDescriptorProto_Service_field_fullname          protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -96,6 +111,7 @@
 	FileDescriptorProto_Dependency_field_number       protoreflect.FieldNumber = 3
 	FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
 	FileDescriptorProto_WeakDependency_field_number   protoreflect.FieldNumber = 11
+	FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
 	FileDescriptorProto_MessageType_field_number      protoreflect.FieldNumber = 4
 	FileDescriptorProto_EnumType_field_number         protoreflect.FieldNumber = 5
 	FileDescriptorProto_Service_field_number          protoreflect.FieldNumber = 6
@@ -124,6 +140,7 @@
 	DescriptorProto_Options_field_name        protoreflect.Name = "options"
 	DescriptorProto_ReservedRange_field_name  protoreflect.Name = "reserved_range"
 	DescriptorProto_ReservedName_field_name   protoreflect.Name = "reserved_name"
+	DescriptorProto_Visibility_field_name     protoreflect.Name = "visibility"
 
 	DescriptorProto_Name_field_fullname           protoreflect.FullName = "google.protobuf.DescriptorProto.name"
 	DescriptorProto_Field_field_fullname          protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -135,6 +152,7 @@
 	DescriptorProto_Options_field_fullname        protoreflect.FullName = "google.protobuf.DescriptorProto.options"
 	DescriptorProto_ReservedRange_field_fullname  protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
 	DescriptorProto_ReservedName_field_fullname   protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+	DescriptorProto_Visibility_field_fullname     protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.DescriptorProto.
@@ -149,6 +167,7 @@
 	DescriptorProto_Options_field_number        protoreflect.FieldNumber = 7
 	DescriptorProto_ReservedRange_field_number  protoreflect.FieldNumber = 9
 	DescriptorProto_ReservedName_field_number   protoreflect.FieldNumber = 10
+	DescriptorProto_Visibility_field_number     protoreflect.FieldNumber = 11
 )
 
 // Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@
 	EnumDescriptorProto_Options_field_name       protoreflect.Name = "options"
 	EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
 	EnumDescriptorProto_ReservedName_field_name  protoreflect.Name = "reserved_name"
+	EnumDescriptorProto_Visibility_field_name    protoreflect.Name = "visibility"
 
 	EnumDescriptorProto_Name_field_fullname          protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
 	EnumDescriptorProto_Value_field_fullname         protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
 	EnumDescriptorProto_Options_field_fullname       protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
 	EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
 	EnumDescriptorProto_ReservedName_field_fullname  protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+	EnumDescriptorProto_Visibility_field_fullname    protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@
 	EnumDescriptorProto_Options_field_number       protoreflect.FieldNumber = 3
 	EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
 	EnumDescriptorProto_ReservedName_field_number  protoreflect.FieldNumber = 5
+	EnumDescriptorProto_Visibility_field_number    protoreflect.FieldNumber = 6
 )
 
 // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,29 +1030,35 @@
 
 // Field names for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
-	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
-	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
-	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
+	FeatureSet_FieldPresence_field_name           protoreflect.Name = "field_presence"
+	FeatureSet_EnumType_field_name                protoreflect.Name = "enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_name   protoreflect.Name = "repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_name          protoreflect.Name = "utf8_validation"
+	FeatureSet_MessageEncoding_field_name         protoreflect.Name = "message_encoding"
+	FeatureSet_JsonFormat_field_name              protoreflect.Name = "json_format"
+	FeatureSet_EnforceNamingStyle_field_name      protoreflect.Name = "enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
 
-	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
-	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
-	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
-	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_FieldPresence_field_fullname           protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+	FeatureSet_EnumType_field_fullname                protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_fullname   protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_fullname          protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+	FeatureSet_MessageEncoding_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+	FeatureSet_JsonFormat_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_EnforceNamingStyle_field_fullname      protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
 )
 
 // Field numbers for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
-	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
-	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
-	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
-	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
-	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
+	FeatureSet_FieldPresence_field_number           protoreflect.FieldNumber = 1
+	FeatureSet_EnumType_field_number                protoreflect.FieldNumber = 2
+	FeatureSet_RepeatedFieldEncoding_field_number   protoreflect.FieldNumber = 3
+	FeatureSet_Utf8Validation_field_number          protoreflect.FieldNumber = 4
+	FeatureSet_MessageEncoding_field_number         protoreflect.FieldNumber = 5
+	FeatureSet_JsonFormat_field_number              protoreflect.FieldNumber = 6
+	FeatureSet_EnforceNamingStyle_field_number      protoreflect.FieldNumber = 7
+	FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
 )
 
 // Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1112,6 +1140,40 @@
 	FeatureSet_LEGACY_BEST_EFFORT_enum_value  = 2
 )
 
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+	FeatureSet_EnforceNamingStyle_enum_name     = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+	FeatureSet_STYLE2024_enum_value                    = 1
+	FeatureSet_STYLE_LEGACY_enum_value                 = 2
+)
+
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+	FeatureSet_VisibilityFeature_message_name     protoreflect.Name     = "VisibilityFeature"
+	FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name     = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+	FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value                        = 1
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value                  = 2
+	FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value                         = 3
+	FeatureSet_VisibilityFeature_STRICT_enum_value                            = 4
+)
+
 // Names for google.protobuf.FeatureSetDefaults.
 const (
 	FeatureSetDefaults_message_name     protoreflect.Name     = "FeatureSetDefaults"
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74..bdad12a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@
 		// permit us to skip over definitely-unset fields at marshal time.
 
 		var hasPresence bool
-		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+		hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
 
 		if hasPresence {
 			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e..5a439da 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@
 	"strings"
 	"sync/atomic"
 
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
@@ -53,7 +54,7 @@
 		fd := fds.Get(i)
 		fs := si.fieldsByNumber[fd.Number()]
 		var fi fieldInfo
-		usePresence, _ := usePresenceForField(si, fd)
+		usePresence, _ := filedesc.UsePresenceForField(fd)
 
 		switch {
 		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@
 			if p.IsNil() {
 				return false
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return false
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			return rv.Elem().Len() > 0
 		},
 		clear: func(p pointer) {
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if !sp.IsNil() {
-				rv := sp.AsValueOf(fs.Type.Elem())
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if !rv.IsNil() {
 				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
 			}
 		},
@@ -361,11 +360,10 @@
 			if p.IsNil() {
 				return conv.Zero()
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return conv.Zero()
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			if rv.Elem().Len() == 0 {
 				return conv.Zero()
 			}
@@ -598,30 +596,3 @@
 func (mi *MessageInfo) present(p pointer, index uint32) bool {
 	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
 }
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field.  The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit.  Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
-	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
-	// Non-oneof scalar fields with explicit field presence use the presence array.
-	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
-	switch {
-	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-		return false, false
-	case fd.IsMap():
-		return false, false
-	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
-		return hasLazyField, hasLazyField
-	default:
-		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
-	}
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1d..443afe8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@
 
 // Present checks for the presence of a specific field number in a presence set.
 func (p presence) Present(num uint32) bool {
-	if p.P == nil {
-		return false
-	}
 	return Export{}.Present(p.toElem(num), num)
 }
 
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
new file mode 100644
index 0000000..42dd6f7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strs
+
+import (
+	"unsafe"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) string {
+	return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// UnsafeBytes returns an unsafe bytes slice reference of s.
+// The caller must treat the returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) []byte {
+	return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+	buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
+	n := len(prefix) + len(".") + len(name)
+	if len(prefix) == 0 {
+		n -= len(".")
+	}
+	sb.grow(n)
+	sb.buf = append(sb.buf, prefix...)
+	sb.buf = append(sb.buf, '.')
+	sb.buf = append(sb.buf, name...)
+	return protoreflect.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+	sb.grow(len(b))
+	sb.buf = append(sb.buf, b...)
+	return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+	if cap(sb.buf)-len(sb.buf) >= n {
+		return
+	}
+
+	// Unlike strings.Builder, we do not need to copy over the contents
+	// of the old buffer since our builder provides no API for
+	// retrieving previously created strings.
+	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+	return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
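
For illustration only, a sketch of the shared-lifetime idea: every result is carved out of one backing buffer instead of costing its own allocation. Since strs is internal to google.golang.org/protobuf, this would only compile inside the module itself.

package strs_test // hypothetical in-module test; strs cannot be imported externally

import (
	"fmt"

	"google.golang.org/protobuf/internal/strs"
)

func Example_builder() {
	var sb strs.Builder
	// Both results alias sub-slices of the builder's shared buffer.
	name := sb.AppendFullName("google.protobuf", "FileDescriptorProto")
	raw := sb.MakeString([]byte("payload"))
	fmt.Println(name, raw)
	// Output: google.protobuf.FileDescriptorProto payload
}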
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index 832a798..0000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package strs
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len  int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len  int
-		Cap  int
-	}
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
-	src := (*sliceHeader)(unsafe.Pointer(&b))
-	dst := (*stringHeader)(unsafe.Pointer(&s))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
-	src := (*stringHeader)(unsafe.Pointer(&s))
-	dst := (*sliceHeader)(unsafe.Pointer(&b))
-	dst.Data = src.Data
-	dst.Len = src.Len
-	dst.Cap = src.Len
-	return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
-	buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
-	n := len(prefix) + len(".") + len(name)
-	if len(prefix) == 0 {
-		n -= len(".")
-	}
-	sb.grow(n)
-	sb.buf = append(sb.buf, prefix...)
-	sb.buf = append(sb.buf, '.')
-	sb.buf = append(sb.buf, name...)
-	return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
-	sb.grow(len(b))
-	sb.buf = append(sb.buf, b...)
-	return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
-	if cap(sb.buf)-len(sb.buf) >= n {
-		return
-	}
-
-	// Unlike strings.Builder, we do not need to copy over the contents
-	// of the old buffer since our builder provides no API for
-	// retrieving previously created strings.
-	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
-	return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
deleted file mode 100644
index 1ffddf6..0000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-
-package strs
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) string {
-	return unsafe.String(unsafe.SliceData(b), len(b))
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) []byte {
-	return unsafe.Slice(unsafe.StringData(s), len(s))
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
-	buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
-	n := len(prefix) + len(".") + len(name)
-	if len(prefix) == 0 {
-		n -= len(".")
-	}
-	sb.grow(n)
-	sb.buf = append(sb.buf, prefix...)
-	sb.buf = append(sb.buf, '.')
-	sb.buf = append(sb.buf, name...)
-	return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
-	sb.grow(len(b))
-	sb.buf = append(sb.buf, b...)
-	return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
-	if cap(sb.buf)-len(sb.buf) >= n {
-		return
-	}
-
-	// Unlike strings.Builder, we do not need to copy over the contents
-	// of the old buffer since our builder provides no API for
-	// retrieving previously created strings.
-	sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
-	return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 01efc33..77de0f2 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@
 const (
 	Major      = 1
 	Minor      = 36
-	Patch      = 5
+	Patch      = 10
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe57..ef55b97 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@
 	return dst.Interface()
 }
 
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+	return Clone(m).(M)
+}
+
 // mergeOptions provides a namespace for merge functions, and can be
 // exported in the future if we add user-visible merge options.
 type mergeOptions struct{}
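
The new generic CloneOf is a thin, type-preserving wrapper over Clone, sparing callers the type assertion. A short usage sketch (wrapperspb chosen here purely as a convenient concrete message type):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	orig := wrapperspb.String("hello")
	dup := proto.CloneOf(orig) // dup is already *wrapperspb.StringValue
	dup.Value = "world"
	fmt.Println(orig.GetValue(), dup.GetValue()) // hello world
}
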
diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go
new file mode 100644
index 0000000..ea276d1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoadapt bridges the original and new proto APIs.
+package protoadapt
+
+import (
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type.
+type MessageV1 = protoiface.MessageV1
+
+// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the
+// current [google.golang.org/protobuf] module, adding support for reflection.
+type MessageV2 = proto.Message
+
+// MessageV1Of converts a v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1Of(m MessageV2) MessageV1 {
+	return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2Of converts a v1 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2Of(m MessageV1) MessageV2 {
+	return protoimpl.X.ProtoMessageV2Of(m)
+}
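
These two converters are the supported bridge between the legacy github.com/golang/protobuf message interface and the current API. A round-trip sketch (emptypb used only as a handy concrete message):

package main

import (
	"fmt"

	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	v2 := &emptypb.Empty{}
	v1 := protoadapt.MessageV1Of(v2) // pass to APIs that still take the v1 interface
	rt := protoadapt.MessageV2Of(v1) // convert back; no copy is made
	fmt.Printf("%T -> %T\n", v1, rt) // both *emptypb.Empty for v2-native messages
}
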
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 823dbf3..9196288 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -152,6 +152,28 @@
 		imp := &f.L2.Imports[i]
 		imps.importPublic(imp.Imports())
 	}
+	if len(fd.GetOptionDependency()) > 0 {
+		optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency()))
+		for i, path := range fd.GetOptionDependency() {
+			imp := &optionImports[i]
+			f, err := r.FindFileByPath(path)
+			if err == protoregistry.NotFound {
+				// We always allow option imports to be unresolvable.
+				f = filedesc.PlaceholderFile(path)
+			} else if err != nil {
+				return nil, errors.New("could not resolve import %q: %v", path, err)
+			}
+			imp.FileDescriptor = f
+
+			if imps[imp.Path()] {
+				return nil, errors.New("already imported %q", path)
+			}
+			imps[imp.Path()] = true
+		}
+		f.L2.OptionImports = func() protoreflect.FileImports {
+			return &optionImports
+		}
+	}
 
 	// Handle source locations.
 	f.L2.Locations.File = f
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index 9da3499..c826ad0 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -29,6 +29,7 @@
 			e.L2.Options = func() protoreflect.ProtoMessage { return opts }
 		}
 		e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures())
+		e.L1.Visibility = int32(ed.GetVisibility())
 		for _, s := range ed.GetReservedName() {
 			e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
 		}
@@ -70,6 +71,7 @@
 			return nil, err
 		}
 		m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
+		m.L1.Visibility = int32(md.GetVisibility())
 		if opts := md.GetOptions(); opts != nil {
 			opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
 			m.L2.Options = func() protoreflect.ProtoMessage { return opts }
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index 9b880aa..6f91074 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -70,16 +70,27 @@
 	if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
 		p.Syntax = proto.String(file.Syntax().String())
 	}
+	desc := file
+	if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
+		desc = fileImportDesc.FileDescriptor
+	}
 	if file.Syntax() == protoreflect.Editions {
-		desc := file
-		if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
-			desc = fileImportDesc.FileDescriptor
-		}
-
 		if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
 			p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
 		}
 	}
+	type hasOptionImports interface {
+		OptionImports() protoreflect.FileImports
+	}
+	if opts, ok := desc.(hasOptionImports); ok {
+		if optionImports := opts.OptionImports(); optionImports.Len() > 0 {
+			optionDeps := make([]string, optionImports.Len())
+			for i := range optionImports.Len() {
+				optionDeps[i] = optionImports.Get(i).Path()
+			}
+			p.OptionDependency = optionDeps
+		}
+	}
 	return p
 }
 
@@ -123,6 +134,14 @@
 	for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
 		p.ReservedName = append(p.ReservedName, string(names.Get(i)))
 	}
+	type hasVisibility interface {
+		Visibility() int32
+	}
+	if vis, ok := message.(hasVisibility); ok {
+		if visibility := vis.Visibility(); visibility > 0 {
+			p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
+		}
+	}
 	return p
 }
 
@@ -216,6 +235,14 @@
 	for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
 		p.ReservedName = append(p.ReservedName, string(names.Get(i)))
 	}
+	type hasVisibility interface {
+		Visibility() int32
+	}
+	if vis, ok := enum.(hasVisibility); ok {
+		if visibility := vis.Visibility(); visibility > 0 {
+			p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
+		}
+	}
 	return p
 }
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index ea154ee..730331e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@
 		b = p.appendRepeatedField(b, "public_dependency", nil)
 	case 11:
 		b = p.appendRepeatedField(b, "weak_dependency", nil)
+	case 15:
+		b = p.appendRepeatedField(b, "option_dependency", nil)
 	case 4:
 		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
 	case 5:
@@ -66,6 +68,8 @@
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
 	case 10:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 11:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -85,6 +89,8 @@
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
 	case 5:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 6:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -398,6 +404,10 @@
 		b = p.appendSingularField(b, "message_encoding", nil)
 	case 6:
 		b = p.appendSingularField(b, "json_format", nil)
+	case 7:
+		b = p.appendSingularField(b, "enforce_naming_style", nil)
+	case 8:
+		b = p.appendSingularField(b, "default_symbol_visibility", nil)
 	}
 	return b
 }
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
new file mode 100644
index 0000000..fe17f37
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -0,0 +1,84 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+	"unsafe"
+
+	"google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+	ifaceHeader struct {
+		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+		Type unsafe.Pointer
+		Data unsafe.Pointer
+	}
+)
+
+var (
+	nilType     = typeOf(nil)
+	boolType    = typeOf(*new(bool))
+	int32Type   = typeOf(*new(int32))
+	int64Type   = typeOf(*new(int64))
+	uint32Type  = typeOf(*new(uint32))
+	uint64Type  = typeOf(*new(uint64))
+	float32Type = typeOf(*new(float32))
+	float64Type = typeOf(*new(float64))
+	stringType  = typeOf(*new(string))
+	bytesType   = typeOf(*new([]byte))
+	enumType    = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t any) unsafe.Pointer {
+	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24 bytes on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+	pragma.DoNotCompare // 0B
+
+	// typ stores the type of the value as a pointer to the Go type.
+	typ unsafe.Pointer // 8B
+
+	// ptr stores the data pointer for a String, Bytes, or interface value.
+	ptr unsafe.Pointer // 8B
+
+	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+	// Enum value as a raw uint64.
+	//
+	// It is also used to store the length of a String or Bytes value;
+	// the capacity is ignored.
+	num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+	return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
+}
+func valueOfIface(v any) Value {
+	p := (*ifaceHeader)(unsafe.Pointer(&v))
+	return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() string {
+	return unsafe.String((*byte)(v.ptr), v.num)
+}
+func (v Value) getBytes() []byte {
+	return unsafe.Slice((*byte)(v.ptr), v.num)
+}
+func (v Value) getIface() (x any) {
+	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+	return x
+}
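
The consolidated file keeps the ifaceHeader trick: the first word of an interface value points at the runtime's unique type descriptor, so comparing those pointers compares types without importing reflect. A standalone sketch of the idea (it leans on unspecified runtime layout, exactly as the vendored code does):

package main

import (
	"fmt"
	"unsafe"
)

// ifaceHeader mirrors the runtime layout of an empty interface.
type ifaceHeader struct {
	Type unsafe.Pointer
	Data unsafe.Pointer
}

// typeOf extracts the type-descriptor pointer from an any value.
func typeOf(t any) unsafe.Pointer {
	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
}

func main() {
	fmt.Println(typeOf(int32(0)) == typeOf(*new(int32))) // true: same type, same descriptor
	fmt.Println(typeOf(int32(0)) == typeOf(int64(0)))    // false: distinct types
}
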
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index 0015fcb..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.21
-
-package protoreflect
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/internal/pragma"
-)
-
-type (
-	stringHeader struct {
-		Data unsafe.Pointer
-		Len  int
-	}
-	sliceHeader struct {
-		Data unsafe.Pointer
-		Len  int
-		Cap  int
-	}
-	ifaceHeader struct {
-		Type unsafe.Pointer
-		Data unsafe.Pointer
-	}
-)
-
-var (
-	nilType     = typeOf(nil)
-	boolType    = typeOf(*new(bool))
-	int32Type   = typeOf(*new(int32))
-	int64Type   = typeOf(*new(int64))
-	uint32Type  = typeOf(*new(uint32))
-	uint64Type  = typeOf(*new(uint64))
-	float32Type = typeOf(*new(float32))
-	float64Type = typeOf(*new(float64))
-	stringType  = typeOf(*new(string))
-	bytesType   = typeOf(*new([]byte))
-	enumType    = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
-	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	// typ stores the type of the value as a pointer to the Go type.
-	typ unsafe.Pointer // 8B
-
-	// ptr stores the data pointer for a String, Bytes, or interface value.
-	ptr unsafe.Pointer // 8B
-
-	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
-	// Enum value as a raw uint64.
-	//
-	// It is also used to store the length of a String or Bytes value;
-	// the capacity is ignored.
-	num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
-	p := (*stringHeader)(unsafe.Pointer(&v))
-	return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
-	p := (*sliceHeader)(unsafe.Pointer(&v))
-	return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
-	p := (*ifaceHeader)(unsafe.Pointer(&v))
-	return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
-	*(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
-	return x
-}
-func (v Value) getBytes() (x []byte) {
-	*(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
-	return x
-}
-func (v Value) getIface() (x any) {
-	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
-	return x
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
deleted file mode 100644
index 479527b..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-
-package protoreflect
-
-import (
-	"unsafe"
-
-	"google.golang.org/protobuf/internal/pragma"
-)
-
-type (
-	ifaceHeader struct {
-		_    [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
-		Type unsafe.Pointer
-		Data unsafe.Pointer
-	}
-)
-
-var (
-	nilType     = typeOf(nil)
-	boolType    = typeOf(*new(bool))
-	int32Type   = typeOf(*new(int32))
-	int64Type   = typeOf(*new(int64))
-	uint32Type  = typeOf(*new(uint32))
-	uint64Type  = typeOf(*new(uint64))
-	float32Type = typeOf(*new(float32))
-	float64Type = typeOf(*new(float64))
-	stringType  = typeOf(*new(string))
-	bytesType   = typeOf(*new([]byte))
-	enumType    = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t any) unsafe.Pointer {
-	return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
-	pragma.DoNotCompare // 0B
-
-	// typ stores the type of the value as a pointer to the Go type.
-	typ unsafe.Pointer // 8B
-
-	// ptr stores the data pointer for a String, Bytes, or interface value.
-	ptr unsafe.Pointer // 8B
-
-	// num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
-	// Enum value as a raw uint64.
-	//
-	// It is also used to store the length of a String or Bytes value;
-	// the capacity is ignored.
-	num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
-	return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
-	return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
-}
-func valueOfIface(v any) Value {
-	p := (*ifaceHeader)(unsafe.Pointer(&v))
-	return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() string {
-	return unsafe.String((*byte)(v.ptr), v.num)
-}
-func (v Value) getBytes() []byte {
-	return unsafe.Slice((*byte)(v.ptr), v.num)
-}
-func (v Value) getIface() (x any) {
-	*(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
-	return x
-}
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a516337..4eacb52 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
 }
 
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not
+// prevent it (e.g. local symbols cannot be imported). Visibility modifiers
+// can only be set on `message` and `enum`, as they are the only types that
+// can be referenced from other files.
+type SymbolVisibility int32
+
+const (
+	SymbolVisibility_VISIBILITY_UNSET  SymbolVisibility = 0
+	SymbolVisibility_VISIBILITY_LOCAL  SymbolVisibility = 1
+	SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+	SymbolVisibility_name = map[int32]string{
+		0: "VISIBILITY_UNSET",
+		1: "VISIBILITY_LOCAL",
+		2: "VISIBILITY_EXPORT",
+	}
+	SymbolVisibility_value = map[string]int32{
+		"VISIBILITY_UNSET":  0,
+		"VISIBILITY_LOCAL":  1,
+		"VISIBILITY_EXPORT": 2,
+	}
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+	p := new(SymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x SymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = SymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
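
SymbolVisibility backs the editions-2024 export/local keywords; it is stored via the new Visibility fields added to DescriptorProto and EnumDescriptorProto further down. A hedged usage sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Mark an enum `local` so other .proto files cannot import it
	// (field 6 on EnumDescriptorProto, added in this bump).
	ed := &descriptorpb.EnumDescriptorProto{
		Name:       proto.String("Status"),
		Visibility: descriptorpb.SymbolVisibility_VISIBILITY_LOCAL.Enum(),
	}
	fmt.Println(ed.GetVisibility()) // VISIBILITY_LOCAL
}
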
 // The verification state of the extension range.
 type ExtensionRangeOptions_VerificationState int32
 
@@ -183,11 +247,11 @@
 }
 
 func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
 }
 
 func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
 }
 
 func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@
 }
 
 func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
 }
 
 func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
 }
 
 func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@
 }
 
 func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
 }
 
 func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
 }
 
 func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@
 }
 
 func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
 }
 
 func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
 }
 
 func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@
 }
 
 func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
 }
 
 func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return &file_google_protobuf_descriptor_proto_enumTypes[6]
 }
 
 func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@
 }
 
 func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
 }
 
 func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[6]
+	return &file_google_protobuf_descriptor_proto_enumTypes[7]
 }
 
 func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@
 }
 
 func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
 }
 
 func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[7]
+	return &file_google_protobuf_descriptor_proto_enumTypes[8]
 }
 
 func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@
 }
 
 func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
 }
 
 func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[8]
+	return &file_google_protobuf_descriptor_proto_enumTypes[9]
 }
 
 func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@
 }
 
 func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
 }
 
 func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[9]
+	return &file_google_protobuf_descriptor_proto_enumTypes[10]
 }
 
 func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@
 }
 
 func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
 }
 
 func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[10]
+	return &file_google_protobuf_descriptor_proto_enumTypes[11]
 }
 
 func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@
 }
 
 func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
 }
 
 func (FeatureSet_EnumType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[11]
+	return &file_google_protobuf_descriptor_proto_enumTypes[12]
 }
 
 func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[12]
+	return &file_google_protobuf_descriptor_proto_enumTypes[13]
 }
 
 func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@
 }
 
 func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
 }
 
 func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[13]
+	return &file_google_protobuf_descriptor_proto_enumTypes[14]
 }
 
 func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@
 }
 
 func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
 }
 
 func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[14]
+	return &file_google_protobuf_descriptor_proto_enumTypes[15]
 }
 
 func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@
 }
 
 func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
 }
 
 func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[15]
+	return &file_google_protobuf_descriptor_proto_enumTypes[16]
 }
 
 func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1139,6 +1203,136 @@
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
 }
 
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+	FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+	FeatureSet_STYLE2024                    FeatureSet_EnforceNamingStyle = 1
+	FeatureSet_STYLE_LEGACY                 FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+	FeatureSet_EnforceNamingStyle_name = map[int32]string{
+		0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+		1: "STYLE2024",
+		2: "STYLE_LEGACY",
+	}
+	FeatureSet_EnforceNamingStyle_value = map[string]int32{
+		"ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+		"STYLE2024":                    1,
+		"STYLE_LEGACY":                 2,
+	}
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+	p := new(FeatureSet_EnforceNamingStyle)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[17]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_EnforceNamingStyle(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+	// Default prior to EDITION_2024: all UNSET visibilities are treated as export.
+	FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+	// All top-level symbols default to export; nested symbols default to local.
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+	// All symbols default to local.
+	FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+	// All symbols are local by default, and nested types cannot be exported,
+	// with a special-case carve-out for message { enum {} reserved 1 to max; }.
+	// This is the recommended setting for new protos.
+	FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+		0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+		1: "EXPORT_ALL",
+		2: "EXPORT_TOP_LEVEL",
+		3: "LOCAL_ALL",
+		4: "STRICT",
+	}
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+		"DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+		"EXPORT_ALL":                        1,
+		"EXPORT_TOP_LEVEL":                  2,
+		"LOCAL_ALL":                         3,
+		"STRICT":                            4,
+	}
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
 // Represents the identified object's effect on the element in the original
 // .proto file.
 type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1177,11 +1371,11 @@
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[16]
+	return &file_google_protobuf_descriptor_proto_enumTypes[19]
 }
 
 func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1262,6 +1456,9 @@
 	// Indexes of the weak imported files in the dependency list.
 	// For Google-internal migration only. Do not use.
 	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// Names of files imported by this file purely for the purpose of providing
+	// option extensions. These are excluded from the dependency list above.
+	OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
 	// All top-level definitions in this file.
 	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
 	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1277,8 +1474,14 @@
 	// The supported values are "proto2", "proto3", and "editions".
 	//
 	// If `edition` is present, this value must be "editions".
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
 	// The edition of the proto file.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Edition       *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
@@ -1349,6 +1552,13 @@
 	return nil
 }
 
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+	if x != nil {
+		return x.OptionDependency
+	}
+	return nil
+}
+
 func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
 	if x != nil {
 		return x.MessageType
@@ -1419,7 +1629,9 @@
 	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on messages.
+	Visibility    *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1524,6 +1736,13 @@
 	return nil
 }
 
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 type ExtensionRangeOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -1836,7 +2055,9 @@
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility    *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1906,6 +2127,13 @@
 	return nil
 }
 
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
 	state         protoimpl.MessageState `protogen:"open.v1"`
@@ -2212,6 +2440,9 @@
 	// determining the ruby package.
 	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here.
 	// See the documentation for the "Options" section above.
@@ -2482,6 +2713,9 @@
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2639,7 +2873,10 @@
 	// for accessors, or it will be completely ignored; in the very least, this
 	// is a formalization for deprecating fields.
 	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// DEPRECATED. DO NOT USE!
 	// For Google-internal migration only. Do not use.
+	//
+	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
 	// Indicate that the field value should not be printed out when using debug
 	// formats, e.g. when the field contains sensitive credentials.
@@ -2648,6 +2885,9 @@
 	Targets         []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
 	EditionDefaults []*FieldOptions_EditionDefault  `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features       *FeatureSet                  `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
 	FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -2740,6 +2980,7 @@
 	return Default_FieldOptions_Deprecated
 }
 
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 func (x *FieldOptions) GetWeak() bool {
 	if x != nil && x.Weak != nil {
 		return *x.Weak
@@ -2799,6 +3040,9 @@
 type OneofOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2871,6 +3115,9 @@
 	// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
 	DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -2958,6 +3205,9 @@
 	// this is a formalization for deprecating enum values.
 	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
 	// Indicate that fields annotated with this enum value should not be printed
 	// out when using debug formats, e.g. when the field contains sensitive
@@ -3046,6 +3296,9 @@
 type ServiceOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
 	// Is this service deprecated?
 	// Depending on the target platform, this can emit Deprecated annotations
@@ -3124,6 +3377,9 @@
 	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
 	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
 	// Any features defined in the specific edition.
+	// WARNING: This field should only be used by protobuf plugins or special
+	// cases like the proto compiler. Other uses are discouraged and
+	// developers should rely on the protoreflect APIs for their client language.
 	Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
 	// The parser stores options it doesn't recognize here. See above.
 	UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
@@ -3303,16 +3559,18 @@
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state                 protoimpl.MessageState            `protogen:"open.v1"`
-	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
-	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
-	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
-	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
-	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
-	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-	extensionFields       protoimpl.ExtensionFields
-	unknownFields         protoimpl.UnknownFields
-	sizeCache             protoimpl.SizeCache
+	state                   protoimpl.MessageState                                `protogen:"open.v1"`
+	FieldPresence           *FeatureSet_FieldPresence                             `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+	EnumType                *FeatureSet_EnumType                                  `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+	RepeatedFieldEncoding   *FeatureSet_RepeatedFieldEncoding                     `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+	Utf8Validation          *FeatureSet_Utf8Validation                            `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+	MessageEncoding         *FeatureSet_MessageEncoding                           `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+	JsonFormat              *FeatureSet_JsonFormat                                `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	EnforceNamingStyle      *FeatureSet_EnforceNamingStyle                        `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+	DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+	extensionFields         protoimpl.ExtensionFields
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
@@ -3387,6 +3645,20 @@
 	return FeatureSet_JSON_FORMAT_UNKNOWN
 }
 
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+	if x != nil && x.EnforceNamingStyle != nil {
+		return *x.EnforceNamingStyle
+	}
+	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	if x != nil && x.DefaultSymbolVisibility != nil {
+		return *x.DefaultSymbolVisibility
+	}
+	return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
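
Like every generated accessor, the two new getters are nil-receiver safe and fall back to the zero enum value, which is what lets feature-resolution code chain them without nil checks:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	var fs *descriptorpb.FeatureSet         // nil receiver is fine for getters
	fmt.Println(fs.GetEnforceNamingStyle()) // ENFORCE_NAMING_STYLE_UNKNOWN
	fs = &descriptorpb.FeatureSet{EnforceNamingStyle: descriptorpb.FeatureSet_STYLE2024.Enum()}
	fmt.Println(fs.GetEnforceNamingStyle()) // STYLE2024
}
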
 // A compiled specification for the defaults of a set of features.  These
 // messages are generated from FeatureSet extensions and can be used to seed
 // feature resolution. The resolution with this object becomes a simple search
@@ -4047,6 +4319,42 @@
 	return false
 }
 
+type FeatureSet_VisibilityFeature struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+	*x = FeatureSet_VisibilityFeature{}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
 // A map from every known edition with a unique set of defaults to its
 // defaults. Not all editions may be contained here.  For a given edition,
 // the defaults at the closest matching edition ordered at or before it should
@@ -4064,7 +4372,7 @@
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4076,7 +4384,7 @@
 func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4212,7 +4520,7 @@
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4224,7 +4532,7 @@
 func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4296,7 +4604,7 @@
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4308,7 +4616,7 @@
 func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4361,777 +4669,389 @@
 
 var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
-	0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x22, 0x5b, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
-	0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
-	0x6c, 0x65, 0x2a, 0x0c, 0x08, 0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01,
-	0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
-	0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07,
-	0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70,
-	0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64,
-	0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63,
-	0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20, 0x03, 0x28,
-	0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65,
-	0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65, 0x70, 0x65,
-	0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e, 0x77, 0x65,
-	0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43, 0x0a, 0x0c,
-	0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54, 0x79, 0x70,
-	0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d,
-	0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18,
-	0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44,
-	0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x07,
-	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x63,
-	0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52,
-	0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
-	0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06, 0x0a, 0x0f,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
-	0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72,
-	0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f,
-	0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73,
-	0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a, 0x6e, 0x65,
-	0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d,
-	0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e,
-	0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a, 0x0f, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x05,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
-	0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x64,
-	0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
-	0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
-	0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
-	0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d,
-	0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a,
-	0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61,
-	0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e,
-	0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x37,
-	0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
-	0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
-	0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78, 0x74, 0x65,
-	0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a, 0x0b, 0x64,
-	0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67,
-	0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61,
-	0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
-	0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
-	0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x56, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x3a,
-	0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88, 0x01, 0x02,
-	0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x94,
-	0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16,
-	0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06,
-	0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c, 0x6c, 0x4e,
-	0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73, 0x65, 0x72,
-	0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x18,
-	0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x4a,
-	0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63,
-	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x44, 0x45,
-	0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x55,
-	0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a, 0x05, 0x6c,
-	0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x3e,
-	0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
-	0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b,
-	0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x65,
-	0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66, 0x61, 0x75,
-	0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
-	0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20, 0x01, 0x28,
-	0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1b, 0x0a,
-	0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6, 0x02, 0x0a,
-	0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x44, 0x4f,
-	0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46,
-	0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49,
-	0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55,
-	0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
-	0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59,
-	0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a, 0x0a, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a, 0x0c, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12, 0x0e, 0x0a,
-	0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12, 0x0f, 0x0a,
-	0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d, 0x12, 0x0d,
-	0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12, 0x11, 0x0a,
-	0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x0f,
-	0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36,
-	0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e, 0x54,
-	0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49, 0x4e,
-	0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x12,
-	0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c,
-	0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f,
-	0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14, 0x4f, 0x6e,
-	0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22,
-	0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
-	0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75,
-	0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36, 0x0a, 0x07,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
-	0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
-	0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52,
-	0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61,
-	0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f,
-	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65,
-	0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e, 0x75, 0x6d,
-	0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74,
-	0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61,
-	0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x3b,
-	0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
-	0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x16,
-	0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
-	0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6d, 0x65,
-	0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
-	0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
-	0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
-	0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
-	0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12,
-	0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e,
-	0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74, 0x79, 0x70,
-	0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x54,
-	0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04,
-	0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x30, 0x0a,
-	0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0f,
-	0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
-	0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
-	0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
-	0x67, 0x22, 0xad, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50, 0x61, 0x63,
-	0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f, 0x75, 0x74,
-	0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01,
-	0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x61,
-	0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6d,
-	0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x0a, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61, 0x76, 0x61,
-	0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x44, 0x0a,
-	0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x65,
-	0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x14,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65,
-	0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e, 0x64, 0x48,
-	0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x69,
-	0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18, 0x1b, 0x20,
-	0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61,
-	0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66, 0x38, 0x12,
-	0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x18,
-	0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65,
-	0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a,
-	0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61, 0x63, 0x6b,
-	0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69,
-	0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72,
-	0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15, 0x6a, 0x61,
-	0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
-	0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72,
-	0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65, 0x6e, 0x65,
-	0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12, 0x20, 0x01,
-	0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47, 0x65, 0x6e,
-	0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a,
-	0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
-	0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65,
-	0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74,
-	0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x72, 0x65,
-	0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c, 0x61, 0x73,
-	0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f,
-	0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
-	0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70,
-	0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68, 0x61, 0x72,
-	0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x77,
-	0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01, 0x28, 0x09,
-	0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a,
-	0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69,
-	0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c, 0x61, 0x73,
-	0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70, 0x5f, 0x6e,
-	0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
-	0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x16,
-	0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e, 0x61, 0x6d,
-	0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x70, 0x68,
-	0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
-	0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
-	0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79, 0x50, 0x61,
-	0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
-	0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
-	0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70, 0x74, 0x69,
-	0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50, 0x45, 0x45,
-	0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49, 0x5a, 0x45,
-	0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49,
-	0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a,
-	0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x52, 0x14, 0x70, 0x68, 0x70,
-	0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
-	0x73, 0x22, 0xf4, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f,
-	0x73, 0x65, 0x74, 0x5f, 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65,
-	0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d,
-	0x61, 0x74, 0x12, 0x4c, 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72,
-	0x64, 0x5f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63,
-	0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x1c, 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65,
-	0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65,
-	0x6e, 0x74, 0x72, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45,
-	0x6e, 0x74, 0x72, 0x79, 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
-	0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66,
-	0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b,
-	0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63,
-	0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69,
-	0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08,
-	0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07,
-	0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e,
-	0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05,
-	0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08,
-	0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x9d, 0x0d, 0x0a, 0x0c, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79,
-	0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53,
-	0x54, 0x52, 0x49, 0x4e, 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06,
-	0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61,
-	0x63, 0x6b, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06,
-	0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
-	0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a,
-	0x04, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
-	0x73, 0x65, 0x52, 0x04, 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65,
-	0x72, 0x69, 0x66, 0x69, 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28,
-	0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69,
-	0x66, 0x69, 0x65, 0x64, 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x19, 0x0a, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
-	0x61, 0x6c, 0x73, 0x65, 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65,
-	0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08,
-	0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65,
-	0x64, 0x61, 0x63, 0x74, 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74,
-	0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f,
-	0x6e, 0x12, 0x48, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03,
-	0x28, 0x0e, 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
-	0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79,
-	0x70, 0x65, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65,
-	0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18,
-	0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x52, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x55, 0x0a,
-	0x0f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74,
-	0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
-	0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70,
-	0x70, 0x6f, 0x72, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20,
-	0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
-	0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5a,
-	0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74,
-	0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69,
-	0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x96, 0x02, 0x0a, 0x0e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x47, 0x0a,
-	0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x74, 0x72, 0x6f, 0x64, 0x75,
-	0x63, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x74, 0x72,
-	0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x12, 0x47, 0x0a, 0x12, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x5f, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x77,
-	0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65,
-	0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67,
-	0x12, 0x41, 0x0a, 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74,
-	0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x6d, 0x6f,
-	0x76, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06,
-	0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44,
-	0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45,
-	0x43, 0x45, 0x10, 0x02, 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d,
-	0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a,
-	0x09, 0x4a, 0x53, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09,
-	0x4a, 0x53, 0x5f, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15,
-	0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49,
-	0x4f, 0x4e, 0x5f, 0x52, 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10,
-	0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45,
-	0x10, 0x02, 0x22, 0x8c, 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72,
-	0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00,
-	0x12, 0x14, 0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54,
-	0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f,
-	0x52, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03,
-	0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45,
-	0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14,
-	0x0a, 0x10, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e,
-	0x55, 0x4d, 0x10, 0x06, 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54,
-	0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07,
-	0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52,
-	0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10,
-	0x09, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04,
-	0x10, 0x05, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65,
-	0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65,
-	0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28,
-	0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72,
-	0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8,
-	0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
-	0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c,
-	0x6c, 0x6f, 0x77, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72,
-	0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
-	0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12,
-	0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65,
-	0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
-	0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42,
-	0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c,
-	0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f,
-	0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
-	0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
-	0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
-	0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10,
-	0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0xd8, 0x02, 0x0a, 0x10,
-	0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
-	0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64,
-	0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x55, 0x0a, 0x0f, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x04, 0x20,
-	0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x52, 0x0e, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72,
-	0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
-	0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
-	0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e, 0x53, 0x65, 0x72, 0x76, 0x69,
-	0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
-	0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64,
-	0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
-	0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
-	0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
-	0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
-	0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x99,
-	0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
-	0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21,
-	0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11, 0x69, 0x64, 0x65, 0x6d, 0x70,
-	0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x22, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
-	0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65,
-	0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f,
-	0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65,
-	0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
-	0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
-	0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
-	0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
-	0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x50, 0x0a,
-	0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65,
-	0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x43, 0x59,
-	0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f,
-	0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43, 0x54, 0x53, 0x10, 0x01, 0x12,
-	0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x2a,
-	0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0x9a, 0x03, 0x0a, 0x13, 0x55,
-	0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
-	0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
-	0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
-	0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x52,
-	0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66,
-	0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x10, 0x70, 0x6f,
-	0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c,
-	0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x6e, 0x65, 0x67, 0x61,
-	0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c,
-	0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01,
-	0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12,
-	0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x5f,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x61, 0x67, 0x67,
-	0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x4a, 0x0a, 0x08, 0x4e,
-	0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x5f,
-	0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x61, 0x6d, 0x65,
-	0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e,
-	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52, 0x0b, 0x69, 0x73, 0x45, 0x78,
-	0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa7, 0x0a, 0x0a, 0x0a, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64,
-	0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
-	0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x46, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x3f, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43,
-	0x49, 0x54, 0x18, 0xe8, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65,
-	0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x6c, 0x0a, 0x09, 0x65, 0x6e,
-	0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54,
-	0x79, 0x70, 0x65, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01,
-	0x0b, 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x09, 0x12,
-	0x04, 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08,
-	0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70,
-	0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
-	0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64,
-	0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x2d, 0x88,
-	0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43,
-	0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x15, 0x72, 0x65,
-	0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x7e, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76, 0x61, 0x6c, 0x69,
-	0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
-	0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66, 0x38, 0x56, 0x61,
-	0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x29, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04,
-	0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18, 0x84, 0x07, 0xa2,
-	0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03,
-	0x08, 0xe8, 0x07, 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
-	0x69, 0x6f, 0x6e, 0x12, 0x7e, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65,
-	0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
-	0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x26, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48,
-	0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08,
-	0xe8, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64,
-	0x69, 0x6e, 0x67, 0x12, 0x82, 0x01, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72,
-	0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61,
-	0x74, 0x42, 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2,
-	0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f,
-	0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c,
-	0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x0a, 0x6a, 0x73,
-	0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
-	0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
-	0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
-	0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
-	0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
-	0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
-	0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
-	0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
-	0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
-	0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
-	0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
-	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
-	0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
-	0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
-	0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x49, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
-	0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
-	0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
-	0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
-	0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x04, 0x08, 0x01,
-	0x10, 0x01, 0x22, 0x53, 0x0a, 0x0f, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63,
-	0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45,
-	0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44, 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57,
-	0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
-	0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49,
-	0x4d, 0x49, 0x54, 0x45, 0x44, 0x10, 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46,
-	0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f,
-	0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09,
-	0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47,
-	0x41, 0x43, 0x59, 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10,
-	0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0x8b, 0x4e, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90,
-	0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e, 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8,
-	0x07, 0x22, 0xef, 0x03, 0x0a, 0x12, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
-	0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61,
-	0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f,
-	0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
-	0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64,
-	0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
-	0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75,
-	0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xf8, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61,
-	0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
-	0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
-	0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4e, 0x0a, 0x14, 0x6f, 0x76, 0x65,
-	0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
-	0x65, 0x53, 0x65, 0x74, 0x52, 0x13, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x61, 0x62, 0x6c,
-	0x65, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x42, 0x0a, 0x0e, 0x66, 0x69, 0x78,
-	0x65, 0x64, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x0d,
-	0x66, 0x69, 0x78, 0x65, 0x64, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x4a, 0x04, 0x08,
-	0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
-	0x72, 0x65, 0x73, 0x22, 0xb5, 0x02, 0x0a, 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
-	0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a,
-	0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74,
-	0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42,
-	0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70, 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61,
-	0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d,
-	0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67,
-	0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74,
-	0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06,
-	0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74,
-	0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x2a, 0x0c, 0x08,
-	0x80, 0xec, 0xca, 0xff, 0x01, 0x10, 0x81, 0xec, 0xca, 0xff, 0x01, 0x22, 0xd0, 0x02, 0x0a, 0x11,
-	0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
-	0x6f, 0x12, 0x4d, 0x0a, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
-	0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
-	0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
-	0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
-	0x1a, 0xeb, 0x01, 0x0a, 0x0a, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12,
-	0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10,
-	0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63,
-	0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f,
-	0x75, 0x72, 0x63, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69,
-	0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10,
-	0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64,
-	0x12, 0x52, 0x0a, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0e, 0x32, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f,
-	0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f,
-	0x6e, 0x2e, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61,
-	0x6e, 0x74, 0x69, 0x63, 0x22, 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63,
-	0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45,
-	0x54, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0xa7,
-	0x02, 0x0a, 0x07, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44,
-	0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
-	0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4c, 0x45, 0x47, 0x41, 0x43,
-	0x59, 0x10, 0x84, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49,
-	0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11,
-	0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8,
-	0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32,
-	0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a,
-	0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f,
-	0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f,
-	0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
-	0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
-	0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59,
-	0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
-	0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10,
-	0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d,
-	0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
-	0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
-	0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
-	0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f,
-	0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
-	0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
-	0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-})
+const file_google_protobuf_descriptor_proto_rawDesc = "" +
+	"\n" +
+	" google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
+	"\x11FileDescriptorSet\x128\n" +
+	"\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
+	"\x13FileDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
+	"\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
+	"\n" +
+	"dependency\x18\x03 \x03(\tR\n" +
+	"dependency\x12+\n" +
+	"\x11public_dependency\x18\n" +
+	" \x03(\x05R\x10publicDependency\x12'\n" +
+	"\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+	"\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
+	"\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
+	"\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
+	"\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
+	"\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
+	"\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
+	"\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
+	"\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
+	"\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
+	"\x0fDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
+	"\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
+	"\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
+	"\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
+	"nestedType\x12A\n" +
+	"\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
+	"\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
+	"\n" +
+	"oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
+	"\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
+	"\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
+	"\rreserved_name\x18\n" +
+	" \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1az\n" +
+	"\x0eExtensionRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
+	"\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
+	"\rReservedRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
+	"\x15ExtensionRangeOptions\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
+	"\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
+	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
+	"\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
+	"UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
+	"\vDeclaration\x12\x16\n" +
+	"\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
+	"\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
+	"\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
+	"\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
+	"\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
+	"\x11VerificationState\x12\x0f\n" +
+	"\vDECLARATION\x10\x00\x12\x0e\n" +
+	"\n" +
+	"UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
+	"\x14FieldDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+	"\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
+	"\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
+	"\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
+	"\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
+	"\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
+	"\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
+	"\voneof_index\x18\t \x01(\x05R\n" +
+	"oneofIndex\x12\x1b\n" +
+	"\tjson_name\x18\n" +
+	" \x01(\tR\bjsonName\x127\n" +
+	"\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
+	"\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
+	"\x04Type\x12\x0f\n" +
+	"\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
+	"\n" +
+	"TYPE_FLOAT\x10\x02\x12\x0e\n" +
+	"\n" +
+	"TYPE_INT64\x10\x03\x12\x0f\n" +
+	"\vTYPE_UINT64\x10\x04\x12\x0e\n" +
+	"\n" +
+	"TYPE_INT32\x10\x05\x12\x10\n" +
+	"\fTYPE_FIXED64\x10\x06\x12\x10\n" +
+	"\fTYPE_FIXED32\x10\a\x12\r\n" +
+	"\tTYPE_BOOL\x10\b\x12\x0f\n" +
+	"\vTYPE_STRING\x10\t\x12\x0e\n" +
+	"\n" +
+	"TYPE_GROUP\x10\n" +
+	"\x12\x10\n" +
+	"\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
+	"\n" +
+	"TYPE_BYTES\x10\f\x12\x0f\n" +
+	"\vTYPE_UINT32\x10\r\x12\r\n" +
+	"\tTYPE_ENUM\x10\x0e\x12\x11\n" +
+	"\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
+	"\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
+	"\vTYPE_SINT32\x10\x11\x12\x0f\n" +
+	"\vTYPE_SINT64\x10\x12\"C\n" +
+	"\x05Label\x12\x12\n" +
+	"\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
+	"\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
+	"\x0eLABEL_REQUIRED\x10\x02\"c\n" +
+	"\x14OneofDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
+	"\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
+	"\x13EnumDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
+	"\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
+	"\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
+	"\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
+	"\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1a;\n" +
+	"\x11EnumReservedRange\x12\x14\n" +
+	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+	"\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
+	"\x18EnumValueDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+	"\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
+	"\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+	"\x16ServiceDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
+	"\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
+	"\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+	"\x15MethodDescriptorProto\x12\x12\n" +
+	"\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
+	"\n" +
+	"input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
+	"\voutput_type\x18\x03 \x01(\tR\n" +
+	"outputType\x128\n" +
+	"\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
+	"\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
+	"\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
+	"\vFileOptions\x12!\n" +
+	"\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
+	"\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
+	"\x13java_multiple_files\x18\n" +
+	" \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
+	"\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
+	"\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
+	"\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
+	"\n" +
+	"go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
+	"\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
+	"\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
+	"\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
+	"\n" +
+	"deprecated\x18\x17 \x01(\b:\x05falseR\n" +
+	"deprecated\x12.\n" +
+	"\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
+	"\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
+	"\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
+	"\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
+	"\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
+	"\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
+	"\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
+	"\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
+	"\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
+	"\fOptimizeMode\x12\t\n" +
+	"\x05SPEED\x10\x01\x12\r\n" +
+	"\tCODE_SIZE\x10\x02\x12\x10\n" +
+	"\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
+	"\x0eMessageOptions\x12<\n" +
+	"\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
+	"\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12\x1b\n" +
+	"\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
+	"&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+	"\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
+	"\"\xa1\r\n" +
+	"\fFieldOptions\x12A\n" +
+	"\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
+	"\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
+	"\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
+	"\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
+	"\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12\x1d\n" +
+	"\x04weak\x18\n" +
+	" \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
+	"\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
+	"\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
+	"\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
+	"\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
+	"\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
+	"\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
+	"\x0eEditionDefault\x122\n" +
+	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
+	"\x0eFeatureSupport\x12G\n" +
+	"\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
+	"\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
+	"\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
+	"\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
+	"\x05CType\x12\n" +
+	"\n" +
+	"\x06STRING\x10\x00\x12\b\n" +
+	"\x04CORD\x10\x01\x12\x10\n" +
+	"\fSTRING_PIECE\x10\x02\"5\n" +
+	"\x06JSType\x12\r\n" +
+	"\tJS_NORMAL\x10\x00\x12\r\n" +
+	"\tJS_STRING\x10\x01\x12\r\n" +
+	"\tJS_NUMBER\x10\x02\"U\n" +
+	"\x0fOptionRetention\x12\x15\n" +
+	"\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
+	"\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
+	"\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
+	"\x10OptionTargetType\x12\x17\n" +
+	"\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
+	"\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
+	"\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
+	"\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
+	"\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
+	"\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
+	"\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
+	"\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
+	"\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
+	"\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
+	"\fOneofOptions\x127\n" +
+	"\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
+	"\vEnumOptions\x12\x1f\n" +
+	"\vallow_alias\x18\x02 \x01(\bR\n" +
+	"allowAlias\x12%\n" +
+	"\n" +
+	"deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+	"deprecated\x12V\n" +
+	"&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+	"\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
+	"\x10EnumValueOptions\x12%\n" +
+	"\n" +
+	"deprecated\x18\x01 \x01(\b:\x05falseR\n" +
+	"deprecated\x127\n" +
+	"\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
+	"\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
+	"\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
+	"\x0eServiceOptions\x127\n" +
+	"\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
+	"\n" +
+	"deprecated\x18! \x01(\b:\x05falseR\n" +
+	"deprecated\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
+	"\rMethodOptions\x12%\n" +
+	"\n" +
+	"deprecated\x18! \x01(\b:\x05falseR\n" +
+	"deprecated\x12q\n" +
+	"\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
+	"\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+	"\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
+	"\x10IdempotencyLevel\x12\x17\n" +
+	"\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
+	"\n" +
+	"IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
+	"\x13UninterpretedOption\x12A\n" +
+	"\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
+	"\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
+	"\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
+	"\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
+	"\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
+	"\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
+	"\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
+	"\bNamePart\x12\x1b\n" +
+	"\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
+	"\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
+	"\n" +
+	"FeatureSet\x12\x91\x01\n" +
+	"\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
+	"\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
+	"\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
+	"\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
+	"\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
+	"\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
+	"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
+	"jsonFormat\x12\xab\x01\n" +
+	"\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+	"\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+	"EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+	"\x11VisibilityFeature\"\x81\x01\n" +
+	"\x17DefaultSymbolVisibility\x12%\n" +
+	"!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+	"\n" +
+	"EXPORT_ALL\x10\x01\x12\x14\n" +
+	"\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+	"\tLOCAL_ALL\x10\x03\x12\n" +
+	"\n" +
+	"\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
+	"\rFieldPresence\x12\x1a\n" +
+	"\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
+	"\bEXPLICIT\x10\x01\x12\f\n" +
+	"\bIMPLICIT\x10\x02\x12\x13\n" +
+	"\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
+	"\bEnumType\x12\x15\n" +
+	"\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
+	"\x04OPEN\x10\x01\x12\n" +
+	"\n" +
+	"\x06CLOSED\x10\x02\"V\n" +
+	"\x15RepeatedFieldEncoding\x12#\n" +
+	"\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
+	"\n" +
+	"\x06PACKED\x10\x01\x12\f\n" +
+	"\bEXPANDED\x10\x02\"I\n" +
+	"\x0eUtf8Validation\x12\x1b\n" +
+	"\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
+	"\n" +
+	"\x06VERIFY\x10\x02\x12\b\n" +
+	"\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
+	"\x0fMessageEncoding\x12\x1c\n" +
+	"\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
+	"\tDELIMITED\x10\x02\"H\n" +
+	"\n" +
+	"JsonFormat\x12\x17\n" +
+	"\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
+	"\x05ALLOW\x10\x01\x12\x16\n" +
+	"\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
+	"\x12EnforceNamingStyle\x12 \n" +
+	"\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
+	"\tSTYLE2024\x10\x01\x12\x10\n" +
+	"\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
+	"\x12FeatureSetDefaults\x12X\n" +
+	"\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
+	"\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
+	"\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
+	"\x18FeatureSetEditionDefault\x122\n" +
+	"\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
+	"\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
+	"\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
+	"\x0eSourceCodeInfo\x12D\n" +
+	"\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
+	"\bLocation\x12\x16\n" +
+	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
+	"\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
+	"\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
+	"\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
+	"\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
+	"\x11GeneratedCodeInfo\x12M\n" +
+	"\n" +
+	"annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
+	"annotation\x1a\xeb\x01\n" +
+	"\n" +
+	"Annotation\x12\x16\n" +
+	"\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
+	"\vsource_file\x18\x02 \x01(\tR\n" +
+	"sourceFile\x12\x14\n" +
+	"\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
+	"\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
+	"\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
+	"\bSemantic\x12\b\n" +
+	"\x04NONE\x10\x00\x12\a\n" +
+	"\x03SET\x10\x01\x12\t\n" +
+	"\x05ALIAS\x10\x02*\xa7\x02\n" +
+	"\aEdition\x12\x13\n" +
+	"\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
+	"\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
+	"\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
+	"\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
+	"\fEDITION_2023\x10\xe8\a\x12\x11\n" +
+	"\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+	"\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
+	"\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
+	"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
+	"\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
+	"\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
+	"\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+	"\x10SymbolVisibility\x12\x14\n" +
+	"\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+	"\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+	"\x11VISIBILITY_EXPORT\x10\x02B~\n" +
+	"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
 
 var (
 	file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
@@ -5145,143 +5065,151 @@
 	return file_google_protobuf_descriptor_proto_rawDescData
 }
 
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
 var file_google_protobuf_descriptor_proto_goTypes = []any{
-	(Edition)(0), // 0: google.protobuf.Edition
-	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
-	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
-	(FieldDescriptorProto_Label)(0),                     // 3: google.protobuf.FieldDescriptorProto.Label
-	(FileOptions_OptimizeMode)(0),                       // 4: google.protobuf.FileOptions.OptimizeMode
-	(FieldOptions_CType)(0),                             // 5: google.protobuf.FieldOptions.CType
-	(FieldOptions_JSType)(0),                            // 6: google.protobuf.FieldOptions.JSType
-	(FieldOptions_OptionRetention)(0),                   // 7: google.protobuf.FieldOptions.OptionRetention
-	(FieldOptions_OptionTargetType)(0),                  // 8: google.protobuf.FieldOptions.OptionTargetType
-	(MethodOptions_IdempotencyLevel)(0),                 // 9: google.protobuf.MethodOptions.IdempotencyLevel
-	(FeatureSet_FieldPresence)(0),                       // 10: google.protobuf.FeatureSet.FieldPresence
-	(FeatureSet_EnumType)(0),                            // 11: google.protobuf.FeatureSet.EnumType
-	(FeatureSet_RepeatedFieldEncoding)(0),               // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
-	(FeatureSet_Utf8Validation)(0),                      // 13: google.protobuf.FeatureSet.Utf8Validation
-	(FeatureSet_MessageEncoding)(0),                     // 14: google.protobuf.FeatureSet.MessageEncoding
-	(FeatureSet_JsonFormat)(0),                          // 15: google.protobuf.FeatureSet.JsonFormat
-	(GeneratedCodeInfo_Annotation_Semantic)(0),          // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	(*FileDescriptorSet)(nil),                           // 17: google.protobuf.FileDescriptorSet
-	(*FileDescriptorProto)(nil),                         // 18: google.protobuf.FileDescriptorProto
-	(*DescriptorProto)(nil),                             // 19: google.protobuf.DescriptorProto
-	(*ExtensionRangeOptions)(nil),                       // 20: google.protobuf.ExtensionRangeOptions
-	(*FieldDescriptorProto)(nil),                        // 21: google.protobuf.FieldDescriptorProto
-	(*OneofDescriptorProto)(nil),                        // 22: google.protobuf.OneofDescriptorProto
-	(*EnumDescriptorProto)(nil),                         // 23: google.protobuf.EnumDescriptorProto
-	(*EnumValueDescriptorProto)(nil),                    // 24: google.protobuf.EnumValueDescriptorProto
-	(*ServiceDescriptorProto)(nil),                      // 25: google.protobuf.ServiceDescriptorProto
-	(*MethodDescriptorProto)(nil),                       // 26: google.protobuf.MethodDescriptorProto
-	(*FileOptions)(nil),                                 // 27: google.protobuf.FileOptions
-	(*MessageOptions)(nil),                              // 28: google.protobuf.MessageOptions
-	(*FieldOptions)(nil),                                // 29: google.protobuf.FieldOptions
-	(*OneofOptions)(nil),                                // 30: google.protobuf.OneofOptions
-	(*EnumOptions)(nil),                                 // 31: google.protobuf.EnumOptions
-	(*EnumValueOptions)(nil),                            // 32: google.protobuf.EnumValueOptions
-	(*ServiceOptions)(nil),                              // 33: google.protobuf.ServiceOptions
-	(*MethodOptions)(nil),                               // 34: google.protobuf.MethodOptions
-	(*UninterpretedOption)(nil),                         // 35: google.protobuf.UninterpretedOption
-	(*FeatureSet)(nil),                                  // 36: google.protobuf.FeatureSet
-	(*FeatureSetDefaults)(nil),                          // 37: google.protobuf.FeatureSetDefaults
-	(*SourceCodeInfo)(nil),                              // 38: google.protobuf.SourceCodeInfo
-	(*GeneratedCodeInfo)(nil),                           // 39: google.protobuf.GeneratedCodeInfo
-	(*DescriptorProto_ExtensionRange)(nil),              // 40: google.protobuf.DescriptorProto.ExtensionRange
-	(*DescriptorProto_ReservedRange)(nil),               // 41: google.protobuf.DescriptorProto.ReservedRange
-	(*ExtensionRangeOptions_Declaration)(nil),           // 42: google.protobuf.ExtensionRangeOptions.Declaration
-	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
-	(*FieldOptions_EditionDefault)(nil),                 // 44: google.protobuf.FieldOptions.EditionDefault
-	(*FieldOptions_FeatureSupport)(nil),                 // 45: google.protobuf.FieldOptions.FeatureSupport
-	(*UninterpretedOption_NamePart)(nil),                // 46: google.protobuf.UninterpretedOption.NamePart
-	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 47: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	(*SourceCodeInfo_Location)(nil),                     // 48: google.protobuf.SourceCodeInfo.Location
-	(*GeneratedCodeInfo_Annotation)(nil),                // 49: google.protobuf.GeneratedCodeInfo.Annotation
+	(Edition)(0),          // 0: google.protobuf.Edition
+	(SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+	(ExtensionRangeOptions_VerificationState)(0),              // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+	(FieldDescriptorProto_Type)(0),                            // 3: google.protobuf.FieldDescriptorProto.Type
+	(FieldDescriptorProto_Label)(0),                           // 4: google.protobuf.FieldDescriptorProto.Label
+	(FileOptions_OptimizeMode)(0),                             // 5: google.protobuf.FileOptions.OptimizeMode
+	(FieldOptions_CType)(0),                                   // 6: google.protobuf.FieldOptions.CType
+	(FieldOptions_JSType)(0),                                  // 7: google.protobuf.FieldOptions.JSType
+	(FieldOptions_OptionRetention)(0),                         // 8: google.protobuf.FieldOptions.OptionRetention
+	(FieldOptions_OptionTargetType)(0),                        // 9: google.protobuf.FieldOptions.OptionTargetType
+	(MethodOptions_IdempotencyLevel)(0),                       // 10: google.protobuf.MethodOptions.IdempotencyLevel
+	(FeatureSet_FieldPresence)(0),                             // 11: google.protobuf.FeatureSet.FieldPresence
+	(FeatureSet_EnumType)(0),                                  // 12: google.protobuf.FeatureSet.EnumType
+	(FeatureSet_RepeatedFieldEncoding)(0),                     // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+	(FeatureSet_Utf8Validation)(0),                            // 14: google.protobuf.FeatureSet.Utf8Validation
+	(FeatureSet_MessageEncoding)(0),                           // 15: google.protobuf.FeatureSet.MessageEncoding
+	(FeatureSet_JsonFormat)(0),                                // 16: google.protobuf.FeatureSet.JsonFormat
+	(FeatureSet_EnforceNamingStyle)(0),                        // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+	(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	(GeneratedCodeInfo_Annotation_Semantic)(0),                // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	(*FileDescriptorSet)(nil),                                 // 20: google.protobuf.FileDescriptorSet
+	(*FileDescriptorProto)(nil),                               // 21: google.protobuf.FileDescriptorProto
+	(*DescriptorProto)(nil),                                   // 22: google.protobuf.DescriptorProto
+	(*ExtensionRangeOptions)(nil),                             // 23: google.protobuf.ExtensionRangeOptions
+	(*FieldDescriptorProto)(nil),                              // 24: google.protobuf.FieldDescriptorProto
+	(*OneofDescriptorProto)(nil),                              // 25: google.protobuf.OneofDescriptorProto
+	(*EnumDescriptorProto)(nil),                               // 26: google.protobuf.EnumDescriptorProto
+	(*EnumValueDescriptorProto)(nil),                          // 27: google.protobuf.EnumValueDescriptorProto
+	(*ServiceDescriptorProto)(nil),                            // 28: google.protobuf.ServiceDescriptorProto
+	(*MethodDescriptorProto)(nil),                             // 29: google.protobuf.MethodDescriptorProto
+	(*FileOptions)(nil),                                       // 30: google.protobuf.FileOptions
+	(*MessageOptions)(nil),                                    // 31: google.protobuf.MessageOptions
+	(*FieldOptions)(nil),                                      // 32: google.protobuf.FieldOptions
+	(*OneofOptions)(nil),                                      // 33: google.protobuf.OneofOptions
+	(*EnumOptions)(nil),                                       // 34: google.protobuf.EnumOptions
+	(*EnumValueOptions)(nil),                                  // 35: google.protobuf.EnumValueOptions
+	(*ServiceOptions)(nil),                                    // 36: google.protobuf.ServiceOptions
+	(*MethodOptions)(nil),                                     // 37: google.protobuf.MethodOptions
+	(*UninterpretedOption)(nil),                               // 38: google.protobuf.UninterpretedOption
+	(*FeatureSet)(nil),                                        // 39: google.protobuf.FeatureSet
+	(*FeatureSetDefaults)(nil),                                // 40: google.protobuf.FeatureSetDefaults
+	(*SourceCodeInfo)(nil),                                    // 41: google.protobuf.SourceCodeInfo
+	(*GeneratedCodeInfo)(nil),                                 // 42: google.protobuf.GeneratedCodeInfo
+	(*DescriptorProto_ExtensionRange)(nil),                    // 43: google.protobuf.DescriptorProto.ExtensionRange
+	(*DescriptorProto_ReservedRange)(nil),                     // 44: google.protobuf.DescriptorProto.ReservedRange
+	(*ExtensionRangeOptions_Declaration)(nil),                 // 45: google.protobuf.ExtensionRangeOptions.Declaration
+	(*EnumDescriptorProto_EnumReservedRange)(nil),             // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+	(*FieldOptions_EditionDefault)(nil),                       // 47: google.protobuf.FieldOptions.EditionDefault
+	(*FieldOptions_FeatureSupport)(nil),                       // 48: google.protobuf.FieldOptions.FeatureSupport
+	(*UninterpretedOption_NamePart)(nil),                      // 49: google.protobuf.UninterpretedOption.NamePart
+	(*FeatureSet_VisibilityFeature)(nil),                      // 50: google.protobuf.FeatureSet.VisibilityFeature
+	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil),       // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	(*SourceCodeInfo_Location)(nil),                           // 52: google.protobuf.SourceCodeInfo.Location
+	(*GeneratedCodeInfo_Annotation)(nil),                      // 53: google.protobuf.GeneratedCodeInfo.Annotation
 }
 var file_google_protobuf_descriptor_proto_depIdxs = []int32{
-	18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
-	19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
-	23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
-	21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
-	38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+	21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+	22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+	26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+	24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+	41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
 	0,  // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
-	21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
-	21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
-	23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
-	22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
-	28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
-	41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
-	35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
-	36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
-	1,  // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
-	3,  // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
-	2,  // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
-	29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
-	30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
-	24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
-	31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
-	43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
-	32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
-	26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
-	33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
-	34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
-	4,  // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
-	36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	5,  // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
-	6,  // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
-	7,  // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
-	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
-	44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
-	36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
-	45, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	35, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
-	45, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	35, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	36, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	36, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
-	35, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	46, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
-	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
-	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
-	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
-	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
-	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
-	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
-	47, // 63: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	0,  // 64: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
-	0,  // 65: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
-	48, // 66: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	49, // 67: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	20, // 68: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	0,  // 69: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
-	0,  // 70: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
-	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
-	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
-	0,  // 73: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
-	36, // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
-	36, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
-	16, // 76: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	77, // [77:77] is the sub-list for method output_type
-	77, // [77:77] is the sub-list for method input_type
-	77, // [77:77] is the sub-list for extension type_name
-	77, // [77:77] is the sub-list for extension extendee
-	0,  // [0:77] is the sub-list for field type_name
+	24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+	24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+	26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+	25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+	31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+	44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+	1,  // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+	39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+	2,  // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+	4,  // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+	3,  // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+	32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+	33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+	27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+	34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+	46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+	1,  // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+	29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+	36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+	37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+	5,  // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+	39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	6,  // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+	7,  // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+	8,  // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+	9,  // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+	47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+	39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+	39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+	12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+	13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+	14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+	15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+	16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+	17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+	18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+	0,  // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+	0,  // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+	0,  // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+	39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+	19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	81, // [81:81] is the sub-list for method output_type
+	81, // [81:81] is the sub-list for method input_type
+	81, // [81:81] is the sub-list for extension type_name
+	81, // [81:81] is the sub-list for extension extendee
+	0,  // [0:81] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5294,8 +5222,8 @@
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
-			NumEnums:      17,
-			NumMessages:   33,
+			NumEnums:      20,
+			NumMessages:   34,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
new file mode 100644
index 0000000..1ba1dfa
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
@@ -0,0 +1,718 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dynamicpb creates protocol buffer messages using runtime type information.
+package dynamicpb
+
+import (
+	"math"
+
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// enum is a dynamic protoreflect.Enum.
+type enum struct {
+	num protoreflect.EnumNumber
+	typ protoreflect.EnumType
+}
+
+func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() }
+func (e enum) Type() protoreflect.EnumType             { return e.typ }
+func (e enum) Number() protoreflect.EnumNumber         { return e.num }
+
+// enumType is a dynamic protoreflect.EnumType.
+type enumType struct {
+	desc protoreflect.EnumDescriptor
+}
+
+// NewEnumType creates a new EnumType with the provided descriptor.
+//
+// EnumTypes created by this package are equal if their descriptors are equal.
+// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2).
+//
+// Enum values created by the EnumType are equal if their numbers are equal.
+func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType {
+	return enumType{desc}
+}
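+
+// As a usage sketch (assuming ed is a protoreflect.EnumDescriptor already
+// resolved, e.g. from a protoregistry.Files):
+//
+//	et := NewEnumType(ed)
+//	ev := et.New(1)     // dynamic enum value with Number() == 1
+//	_ = ev.Descriptor() // same descriptor: ed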
+
+func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} }
+func (et enumType) Descriptor() protoreflect.EnumDescriptor         { return et.desc }
+
+// extensionType is a dynamic protoreflect.ExtensionType.
+type extensionType struct {
+	desc extensionTypeDescriptor
+}
+
+// A Message is a dynamically constructed protocol buffer message.
+//
+// Message implements the [google.golang.org/protobuf/proto.Message] interface,
+// and may be used with all standard proto package functions
+// such as Marshal, Unmarshal, and so forth.
+//
+// Message also implements the [protoreflect.Message] interface.
+// See the [protoreflect] package documentation for that interface for how to
+// get and set fields and otherwise interact with the contents of a Message.
+//
+// Reflection API functions which construct messages, such as NewField,
+// return new dynamic messages of the appropriate type. Functions which take
+// messages, such as Set for a message-value field, will accept any message
+// with a compatible type.
+//
+// Operations which modify a Message are not safe for concurrent use.
+type Message struct {
+	typ     messageType
+	known   map[protoreflect.FieldNumber]protoreflect.Value
+	ext     map[protoreflect.FieldNumber]protoreflect.FieldDescriptor
+	unknown protoreflect.RawFields
+}
+
+var (
+	_ protoreflect.Message      = (*Message)(nil)
+	_ protoreflect.ProtoMessage = (*Message)(nil)
+	_ protoiface.MessageV1      = (*Message)(nil)
+)
+
+// NewMessage creates a new message with the provided descriptor.
+func NewMessage(desc protoreflect.MessageDescriptor) *Message {
+	return &Message{
+		typ:   messageType{desc},
+		known: make(map[protoreflect.FieldNumber]protoreflect.Value),
+		ext:   make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor),
+	}
+}
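+
+// A minimal usage sketch (assuming md is a protoreflect.MessageDescriptor
+// whose message has a string field named "name"; proto refers to the
+// google.golang.org/protobuf/proto package):
+//
+//	m := NewMessage(md)
+//	fd := md.Fields().ByName("name")
+//	m.Set(fd, protoreflect.ValueOfString("hello"))
+//	b, err := proto.Marshal(m) // standard wire-format bytes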
+
+// ProtoMessage implements the legacy message interface.
+func (m *Message) ProtoMessage() {}
+
+// ProtoReflect implements the [protoreflect.ProtoMessage] interface.
+func (m *Message) ProtoReflect() protoreflect.Message {
+	return m
+}
+
+// String returns a string representation of a message.
+func (m *Message) String() string {
+	return protoimpl.X.MessageStringOf(m)
+}
+
+// Reset clears the message to be empty, but preserves the dynamic message type.
+func (m *Message) Reset() {
+	m.known = make(map[protoreflect.FieldNumber]protoreflect.Value)
+	m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor)
+	m.unknown = nil
+}
+
+// Descriptor returns the message descriptor.
+func (m *Message) Descriptor() protoreflect.MessageDescriptor {
+	return m.typ.desc
+}
+
+// Type returns the message type.
+func (m *Message) Type() protoreflect.MessageType {
+	return m.typ
+}
+
+// New returns a newly allocated empty message with the same descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) New() protoreflect.Message {
+	return m.Type().New()
+}
+
+// Interface returns the message.
+// See [protoreflect.Message] for details.
+func (m *Message) Interface() protoreflect.ProtoMessage {
+	return m
+}
+
+// ProtoMethods is an internal detail of the [protoreflect.Message] interface.
+// Users should never call this directly.
+func (m *Message) ProtoMethods() *protoiface.Methods {
+	return nil
+}
+
+// Range visits every populated field in undefined order.
+// See [protoreflect.Message] for details.
+func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+	for num, v := range m.known {
+		fd := m.ext[num]
+		if fd == nil {
+			fd = m.Descriptor().Fields().ByNumber(num)
+		}
+		if !isSet(fd, v) {
+			continue
+		}
+		if !f(fd, v) {
+			return
+		}
+	}
+}
+
+// Has reports whether a field is populated.
+// See [protoreflect.Message] for details.
+func (m *Message) Has(fd protoreflect.FieldDescriptor) bool {
+	m.checkField(fd)
+	if fd.IsExtension() && m.ext[fd.Number()] != fd {
+		return false
+	}
+	v, ok := m.known[fd.Number()]
+	if !ok {
+		return false
+	}
+	return isSet(fd, v)
+}
+
+// Clear clears a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Clear(fd protoreflect.FieldDescriptor) {
+	m.checkField(fd)
+	num := fd.Number()
+	delete(m.known, num)
+	delete(m.ext, num)
+}
+
+// Get returns the value of a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	num := fd.Number()
+	if fd.IsExtension() {
+		if fd != m.ext[num] {
+			return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero()
+		}
+		return m.known[num]
+	}
+	if v, ok := m.known[num]; ok {
+		switch {
+		case fd.IsMap():
+			if v.Map().Len() > 0 {
+				return v
+			}
+		case fd.IsList():
+			if v.List().Len() > 0 {
+				return v
+			}
+		default:
+			return v
+		}
+	}
+	switch {
+	case fd.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{desc: fd})
+	case fd.IsList():
+		return protoreflect.ValueOfList(emptyList{desc: fd})
+	case fd.Message() != nil:
+		return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}})
+	case fd.Kind() == protoreflect.BytesKind:
+		return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...))
+	default:
+		return fd.Default()
+	}
+}
+
+// Mutable returns a mutable reference to a repeated, map, or message field.
+// See [protoreflect.Message] for details.
+func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	if !fd.IsMap() && !fd.IsList() && fd.Message() == nil {
+		panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName()))
+	}
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", fd.FullName()))
+	}
+	num := fd.Number()
+	if fd.IsExtension() {
+		if fd != m.ext[num] {
+			m.ext[num] = fd
+			m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+		}
+		return m.known[num]
+	}
+	if v, ok := m.known[num]; ok {
+		return v
+	}
+	m.clearOtherOneofFields(fd)
+	m.known[num] = m.NewField(fd)
+	if fd.IsExtension() {
+		m.ext[num] = fd
+	}
+	return m.known[num]
+}
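+
+// A sketch of Mutable on a message-typed field (assuming fd.Message() != nil
+// and nameFd describes a string field of that sub-message):
+//
+//	sub := m.Mutable(fd).Message() // allocates and stores an empty sub-message
+//	sub.Set(nameFd, protoreflect.ValueOfString("x")) // edit is visible through m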
+
+// Set stores a value in a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	m.checkField(fd)
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", fd.FullName()))
+	}
+	if fd.IsExtension() {
+		isValid := true
+		switch {
+		case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v):
+			isValid = false
+		case fd.IsList():
+			isValid = v.List().IsValid()
+		case fd.IsMap():
+			isValid = v.Map().IsValid()
+		case fd.Message() != nil:
+			isValid = v.Message().IsValid()
+		}
+		if !isValid {
+			panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()))
+		}
+		m.ext[fd.Number()] = fd
+	} else {
+		typecheck(fd, v)
+	}
+	m.clearOtherOneofFields(fd)
+	m.known[fd.Number()] = v
+}
+
+func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) {
+	od := fd.ContainingOneof()
+	if od == nil {
+		return
+	}
+	num := fd.Number()
+	for i := 0; i < od.Fields().Len(); i++ {
+		if n := od.Fields().Get(i).Number(); n != num {
+			delete(m.known, n)
+		}
+	}
+}
+
+// NewField returns a new value that is assignable to the field of the given descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	m.checkField(fd)
+	switch {
+	case fd.IsExtension():
+		return fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+	case fd.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{
+			desc: fd,
+			mapv: make(map[any]protoreflect.Value),
+		})
+	case fd.IsList():
+		return protoreflect.ValueOfList(&dynamicList{desc: fd})
+	case fd.Message() != nil:
+		return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+	default:
+		return fd.Default()
+	}
+}
+
+// WhichOneof reports which field in a oneof is populated, returning nil if none are populated.
+// See [protoreflect.Message] for details.
+func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+	for i := 0; i < od.Fields().Len(); i++ {
+		fd := od.Fields().Get(i)
+		if m.Has(fd) {
+			return fd
+		}
+	}
+	return nil
+}
+
+// GetUnknown returns the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) GetUnknown() protoreflect.RawFields {
+	return m.unknown
+}
+
+// SetUnknown sets the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) SetUnknown(r protoreflect.RawFields) {
+	if m.known == nil {
+		panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName()))
+	}
+	m.unknown = r
+}
+
+// IsValid reports whether the message is valid.
+// See [protoreflect.Message] for details.
+func (m *Message) IsValid() bool {
+	return m.known != nil
+}
+
+func (m *Message) checkField(fd protoreflect.FieldDescriptor) {
+	if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() {
+		if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok {
+			panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName()))
+		}
+		return
+	}
+	if fd.Parent() == m.Descriptor() {
+		return
+	}
+	fields := m.Descriptor().Fields()
+	index := fd.Index()
+	if index >= fields.Len() || fields.Get(index) != fd {
+		panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName()))
+	}
+}
+
+type messageType struct {
+	desc protoreflect.MessageDescriptor
+}
+
+// NewMessageType creates a new MessageType with the provided descriptor.
+//
+// MessageTypes created by this package are equal if their descriptors are equal.
+// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2).
+func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType {
+	return messageType{desc}
+}
+
+func (mt messageType) New() protoreflect.Message                  { return NewMessage(mt.desc) }
+func (mt messageType) Zero() protoreflect.Message                 { return &Message{typ: messageType{mt.desc}} }
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc }
+func (mt messageType) Enum(i int) protoreflect.EnumType {
+	if ed := mt.desc.Fields().Get(i).Enum(); ed != nil {
+		return NewEnumType(ed)
+	}
+	return nil
+}
+func (mt messageType) Message(i int) protoreflect.MessageType {
+	if md := mt.desc.Fields().Get(i).Message(); md != nil {
+		return NewMessageType(md)
+	}
+	return nil
+}
+
+type emptyList struct {
+	desc protoreflect.FieldDescriptor
+}
+
+func (x emptyList) Len() int                     { return 0 }
+func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) }
+func (x emptyList) Set(n int, v protoreflect.Value) {
+	panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) }
+func (x emptyList) AppendMutable() protoreflect.Value {
+	panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Truncate(n int)                 { panic(errors.New("modification of immutable list")) }
+func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) }
+func (x emptyList) IsValid() bool                  { return false }
+
+type dynamicList struct {
+	desc protoreflect.FieldDescriptor
+	list []protoreflect.Value
+}
+
+func (x *dynamicList) Len() int {
+	return len(x.list)
+}
+
+func (x *dynamicList) Get(n int) protoreflect.Value {
+	return x.list[n]
+}
+
+func (x *dynamicList) Set(n int, v protoreflect.Value) {
+	typecheckSingular(x.desc, v)
+	x.list[n] = v
+}
+
+func (x *dynamicList) Append(v protoreflect.Value) {
+	typecheckSingular(x.desc, v)
+	x.list = append(x.list, v)
+}
+
+func (x *dynamicList) AppendMutable() protoreflect.Value {
+	if x.desc.Message() == nil {
+		panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName()))
+	}
+	v := x.NewElement()
+	x.Append(v)
+	return v
+}
+
+func (x *dynamicList) Truncate(n int) {
+	// Zero truncated elements to avoid keeping data live.
+	for i := n; i < len(x.list); i++ {
+		x.list[i] = protoreflect.Value{}
+	}
+	x.list = x.list[:n]
+}
+
+func (x *dynamicList) NewElement() protoreflect.Value {
+	return newListEntry(x.desc)
+}
+
+func (x *dynamicList) IsValid() bool {
+	return true
+}
+
+type dynamicMap struct {
+	desc protoreflect.FieldDescriptor
+	mapv map[any]protoreflect.Value
+}
+
+func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] }
+func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) {
+	typecheckSingular(x.desc.MapKey(), k.Value())
+	typecheckSingular(x.desc.MapValue(), v)
+	x.mapv[k.Interface()] = v
+}
+func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() }
+func (x *dynamicMap) Clear(k protoreflect.MapKey)    { delete(x.mapv, k.Interface()) }
+func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value {
+	if x.desc.MapValue().Message() == nil {
+		panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName()))
+	}
+	v := x.Get(k)
+	if !v.IsValid() {
+		v = x.NewValue()
+		x.Set(k, v)
+	}
+	return v
+}
+func (x *dynamicMap) Len() int { return len(x.mapv) }
+func (x *dynamicMap) NewValue() protoreflect.Value {
+	if md := x.desc.MapValue().Message(); md != nil {
+		return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect())
+	}
+	return x.desc.MapValue().Default()
+}
+func (x *dynamicMap) IsValid() bool {
+	return x.mapv != nil
+}
+
+func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
+	for k, v := range x.mapv {
+		if !f(protoreflect.ValueOf(k).MapKey(), v) {
+			return
+		}
+	}
+}
+
+func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+	switch {
+	case fd.IsMap():
+		return v.Map().Len() > 0
+	case fd.IsList():
+		return v.List().Len() > 0
+	case fd.ContainingOneof() != nil:
+		return true
+	case !fd.HasPresence() && !fd.IsExtension():
+		switch fd.Kind() {
+		case protoreflect.BoolKind:
+			return v.Bool()
+		case protoreflect.EnumKind:
+			return v.Enum() != 0
+		case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+			return v.Int() != 0
+		case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+			return v.Uint() != 0
+		case protoreflect.FloatKind, protoreflect.DoubleKind:
+			return v.Float() != 0 || math.Signbit(v.Float())
+		case protoreflect.StringKind:
+			return v.String() != ""
+		case protoreflect.BytesKind:
+			return len(v.Bytes()) > 0
+		}
+	}
+	return true
+}
+
+func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	if err := typeIsValid(fd, v); err != nil {
+		panic(err)
+	}
+}
+
+func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+	switch {
+	case !v.IsValid():
+		return errors.New("%v: assigning invalid value", fd.FullName())
+	case fd.IsMap():
+		if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() {
+			return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+		}
+		return nil
+	case fd.IsList():
+		switch list := v.Interface().(type) {
+		case *dynamicList:
+			if list.desc == fd && list.IsValid() {
+				return nil
+			}
+		case emptyList:
+			if list.desc == fd && list.IsValid() {
+				return nil
+			}
+		}
+		return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+	default:
+		return singularTypeIsValid(fd, v)
+	}
+}
+
+func typecheckSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+	if err := singularTypeIsValid(fd, v); err != nil {
+		panic(err)
+	}
+}
+
+func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+	vi := v.Interface()
+	var ok bool
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		_, ok = vi.(bool)
+	case protoreflect.EnumKind:
+		// We could check against the valid set of enum values, but do not.
+		_, ok = vi.(protoreflect.EnumNumber)
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		_, ok = vi.(int32)
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		_, ok = vi.(uint32)
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		_, ok = vi.(int64)
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		_, ok = vi.(uint64)
+	case protoreflect.FloatKind:
+		_, ok = vi.(float32)
+	case protoreflect.DoubleKind:
+		_, ok = vi.(float64)
+	case protoreflect.StringKind:
+		_, ok = vi.(string)
+	case protoreflect.BytesKind:
+		_, ok = vi.([]byte)
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		var m protoreflect.Message
+		m, ok = vi.(protoreflect.Message)
+		if ok && m.Descriptor().FullName() != fd.Message().FullName() {
+			return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName())
+		}
+		if dm, ok := vi.(*Message); ok && dm.known == nil {
+			return errors.New("%v: assigning invalid zero-value message", fd.FullName())
+		}
+	}
+	if !ok {
+		return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+	}
+	return nil
+}
+
+func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value {
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		return protoreflect.ValueOfBool(false)
+	case protoreflect.EnumKind:
+		return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		return protoreflect.ValueOfInt32(0)
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		return protoreflect.ValueOfUint32(0)
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		return protoreflect.ValueOfInt64(0)
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		return protoreflect.ValueOfUint64(0)
+	case protoreflect.FloatKind:
+		return protoreflect.ValueOfFloat32(0)
+	case protoreflect.DoubleKind:
+		return protoreflect.ValueOfFloat64(0)
+	case protoreflect.StringKind:
+		return protoreflect.ValueOfString("")
+	case protoreflect.BytesKind:
+		return protoreflect.ValueOfBytes(nil)
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+	}
+	panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind()))
+}
+
+// NewExtensionType creates a new ExtensionType with the provided descriptor.
+//
+// Dynamic ExtensionTypes with the same descriptor compare as equal. That is,
+// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2).
+//
+// The InterfaceOf and ValueOf methods of the extension type are defined as:
+//
+//	func (xt extensionType) ValueOf(iv any) protoreflect.Value {
+//		return protoreflect.ValueOf(iv)
+//	}
+//
+//	func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
+//		return v.Interface()
+//	}
+//
+// The Go type used by the proto.GetExtension and proto.SetExtension functions
+// is determined by these methods, and is therefore equivalent to the Go type
+// used to represent a protoreflect.Value. See the protoreflect.Value
+// documentation for more details.
+func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+	if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok {
+		desc = xt.Descriptor()
+	}
+	return extensionType{extensionTypeDescriptor{desc}}
+}
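+
+// A usage sketch (assuming xd is an extension descriptor for an int32
+// extension field and msg is a message of the extended type):
+//
+//	xt := NewExtensionType(xd)
+//	proto.SetExtension(msg, xt, int32(42))
+//	v := proto.GetExtension(msg, xt).(int32)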
+
+func (xt extensionType) New() protoreflect.Value {
+	switch {
+	case xt.desc.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{
+			desc: xt.desc,
+			mapv: make(map[any]protoreflect.Value),
+		})
+	case xt.desc.IsList():
+		return protoreflect.ValueOfList(&dynamicList{desc: xt.desc})
+	case xt.desc.Message() != nil:
+		return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message()))
+	default:
+		return xt.desc.Default()
+	}
+}
+
+func (xt extensionType) Zero() protoreflect.Value {
+	switch {
+	case xt.desc.IsMap():
+		return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc})
+	case xt.desc.Cardinality() == protoreflect.Repeated:
+		return protoreflect.ValueOfList(emptyList{desc: xt.desc})
+	case xt.desc.Message() != nil:
+		return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}})
+	default:
+		return xt.desc.Default()
+	}
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+	return xt.desc
+}
+
+func (xt extensionType) ValueOf(iv any) protoreflect.Value {
+	v := protoreflect.ValueOf(iv)
+	typecheck(xt.desc, v)
+	return v
+}
+
+func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
+	typecheck(xt.desc, v)
+	return v.Interface()
+}
+
+func (xt extensionType) IsValidInterface(iv any) bool {
+	return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil
+}
+
+func (xt extensionType) IsValidValue(v protoreflect.Value) bool {
+	return typeIsValid(xt.desc, v) == nil
+}
+
+type extensionTypeDescriptor struct {
+	protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+	return extensionType{xt}
+}
+
+func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+	return xt.ExtensionDescriptor
+}
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
new file mode 100644
index 0000000..8e759fc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
@@ -0,0 +1,180 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dynamicpb
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"google.golang.org/protobuf/internal/errors"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+type extField struct {
+	name   protoreflect.FullName
+	number protoreflect.FieldNumber
+}
+
+// A Types is a collection of dynamically constructed descriptors.
+// Its methods are safe for concurrent use.
+//
+// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver].
+// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver].
+type Types struct {
+	// atomicExtFiles is used with sync/atomic and hence must be the first word
+	// of the struct to guarantee 64-bit alignment.
+	atomicExtFiles atomic.Uint64
+	extMu          sync.Mutex
+
+	files *protoregistry.Files
+
+	extensionsByMessage map[extField]protoreflect.ExtensionDescriptor
+}
+
+// NewTypes creates a new Types registry with the provided files.
+// The Files registry is retained, and changes to Files will be reflected in Types.
+// It is not safe to concurrently change the Files while calling Types methods.
+func NewTypes(f *protoregistry.Files) *Types {
+	return &Types{
+		files: f,
+	}
+}
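+
+// A usage sketch (assuming files is a *protoregistry.Files populated from a
+// descriptor set, and data/msg are the bytes and target of an unmarshal):
+//
+//	types := NewTypes(files)
+//	opts := proto.UnmarshalOptions{Resolver: types}
+//	err := opts.Unmarshal(data, msg) // extensions resolved dynamically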
+
+// FindEnumByName looks up an enum by its full name;
+// e.g., "google.protobuf.Field.Kind".
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) {
+	d, err := t.files.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	ed, ok := d.(protoreflect.EnumDescriptor)
+	if !ok {
+		return nil, errors.New("found wrong type: got %v, want enum", descName(d))
+	}
+	return NewEnumType(ed), nil
+}
+
+// FindExtensionByName looks up an extension field by the field's full name.
+// Note that this is the full name of the field as determined by
+// where the extension is declared and is unrelated to the full name of the
+// message being extended.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	d, err := t.files.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	xd, ok := d.(protoreflect.ExtensionDescriptor)
+	if !ok {
+		return nil, errors.New("found wrong type: got %v, want extension", descName(d))
+	}
+	return NewExtensionType(xd), nil
+}
+
+// FindExtensionByNumber looks up an extension field by the field number
+// within some parent message, identified by full name.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	// Construct the extension number map lazily, since not every user will need it.
+	// Update the map if new files are added to the registry.
+	if t.atomicExtFiles.Load() != uint64(t.files.NumFiles()) {
+		t.updateExtensions()
+	}
+	xd := t.extensionsByMessage[extField{message, field}]
+	if xd == nil {
+		return nil, protoregistry.NotFound
+	}
+	return NewExtensionType(xd), nil
+}
+
+// FindMessageByName looks up a message by its full name;
+// e.g. "google.protobuf.Any".
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) {
+	d, err := t.files.FindDescriptorByName(name)
+	if err != nil {
+		return nil, err
+	}
+	md, ok := d.(protoreflect.MessageDescriptor)
+	if !ok {
+		return nil, errors.New("found wrong type: got %v, want message", descName(d))
+	}
+	return NewMessageType(md), nil
+}
+
+// FindMessageByURL looks up a message by a URL identifier.
+// See documentation on google.protobuf.Any.type_url for the URL format.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+	// This function is similar to FindMessageByName but
+	// truncates anything before and including '/' in the URL.
+	message := protoreflect.FullName(url)
+	if i := strings.LastIndexByte(url, '/'); i >= 0 {
+		message = message[i+len("/"):]
+	}
+	return t.FindMessageByName(message)
+}
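+
+// For example, both of the following resolve the same message type, since
+// everything up to and including the last '/' is ignored:
+//
+//	t.FindMessageByURL("type.googleapis.com/google.protobuf.Duration")
+//	t.FindMessageByURL("google.protobuf.Duration")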
+
+func (t *Types) updateExtensions() {
+	t.extMu.Lock()
+	defer t.extMu.Unlock()
+	if t.atomicExtFiles.Load() == uint64(t.files.NumFiles()) {
+		return
+	}
+	defer t.atomicExtFiles.Store(uint64(t.files.NumFiles()))
+	t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+		t.registerExtensions(fd.Extensions())
+		t.registerExtensionsInMessages(fd.Messages())
+		return true
+	})
+}
+
+func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) {
+	count := mds.Len()
+	for i := 0; i < count; i++ {
+		md := mds.Get(i)
+		t.registerExtensions(md.Extensions())
+		t.registerExtensionsInMessages(md.Messages())
+	}
+}
+
+func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) {
+	count := xds.Len()
+	for i := 0; i < count; i++ {
+		xd := xds.Get(i)
+		field := xd.Number()
+		message := xd.ContainingMessage().FullName()
+		if t.extensionsByMessage == nil {
+			t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor)
+		}
+		t.extensionsByMessage[extField{message, field}] = xd
+	}
+}
+
+func descName(d protoreflect.Descriptor) string {
+	switch d.(type) {
+	case protoreflect.EnumDescriptor:
+		return "enum"
+	case protoreflect.EnumValueDescriptor:
+		return "enum value"
+	case protoreflect.MessageDescriptor:
+		return "message"
+	case protoreflect.ExtensionDescriptor:
+		return "extension"
+	case protoreflect.ServiceDescriptor:
+		return "service"
+	default:
+		return fmt.Sprintf("%T", d)
+	}
+}
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index 28d24ba..37e712b 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -228,63 +228,29 @@
 
 var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_go_features_proto_rawDesc = string([]byte{
-	0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
-	0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xab, 0x05, 0x0a, 0x0a, 0x47, 0x6f,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0xbe, 0x01, 0x0a, 0x1a, 0x6c, 0x65, 0x67,
-	0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c, 0x5f, 0x6a, 0x73,
-	0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x42, 0x80, 0x01,
-	0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72,
-	0x75, 0x65, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18,
-	0xe7, 0x07, 0xb2, 0x01, 0x5b, 0x08, 0xe8, 0x07, 0x10, 0xe8, 0x07, 0x1a, 0x53, 0x54, 0x68, 0x65,
-	0x20, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x20, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x53, 0x4f, 0x4e, 0x20, 0x41, 0x50, 0x49, 0x20, 0x69, 0x73, 0x20, 0x64, 0x65, 0x70,
-	0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c,
-	0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x61,
-	0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x20, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e,
-	0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61,
-	0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x12, 0x74, 0x0a, 0x09, 0x61, 0x70, 0x69,
-	0x5f, 0x6c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x70,
-	0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x41, 0x50, 0x49,
-	0x4c, 0x65, 0x76, 0x65, 0x6c, 0x42, 0x3e, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98, 0x01, 0x01,
-	0xa2, 0x01, 0x1a, 0x12, 0x15, 0x41, 0x50, 0x49, 0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55,
-	0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x18, 0x84, 0x07, 0xa2, 0x01, 0x0f,
-	0x12, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45, 0x18, 0xe9, 0x07, 0xb2,
-	0x01, 0x03, 0x08, 0xe8, 0x07, 0x52, 0x08, 0x61, 0x70, 0x69, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
-	0x7c, 0x0a, 0x11, 0x73, 0x74, 0x72, 0x69, 0x70, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x70, 0x72,
-	0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x70, 0x62, 0x2e,
-	0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x70,
-	0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x42, 0x30, 0x88, 0x01, 0x01, 0x98,
-	0x01, 0x06, 0x98, 0x01, 0x07, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x1b, 0x12, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x18, 0x84, 0x07, 0xb2, 0x01, 0x03, 0x08, 0xe9, 0x07, 0x52, 0x0f, 0x73, 0x74,
-	0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x22, 0x53, 0x0a,
-	0x08, 0x41, 0x50, 0x49, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x19, 0x0a, 0x15, 0x41, 0x50, 0x49,
-	0x5f, 0x4c, 0x45, 0x56, 0x45, 0x4c, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
-	0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x45, 0x4e,
-	0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x48, 0x59, 0x42, 0x52, 0x49, 0x44,
-	0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x50, 0x49, 0x5f, 0x4f, 0x50, 0x41, 0x51, 0x55, 0x45,
-	0x10, 0x03, 0x22, 0x92, 0x01, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x69, 0x70, 0x45, 0x6e, 0x75, 0x6d,
-	0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x21, 0x0a, 0x1d, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f,
-	0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x55, 0x4e, 0x53, 0x50,
-	0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1a, 0x0a, 0x16, 0x53, 0x54, 0x52,
-	0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x4b,
-	0x45, 0x45, 0x50, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x53, 0x54, 0x52, 0x49, 0x50, 0x5f, 0x45,
-	0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f, 0x47, 0x45, 0x4e, 0x45, 0x52,
-	0x41, 0x54, 0x45, 0x5f, 0x42, 0x4f, 0x54, 0x48, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x53, 0x54,
-	0x52, 0x49, 0x50, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x5f,
-	0x53, 0x54, 0x52, 0x49, 0x50, 0x10, 0x03, 0x3a, 0x3c, 0x0a, 0x02, 0x67, 0x6f, 0x12, 0x1b, 0x2e,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07, 0x20, 0x01, 0x28,
-	0x0b, 0x32, 0x0e, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
-	0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
-	0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
-})
+const file_google_protobuf_go_features_proto_rawDesc = "" +
+	"\n" +
+	"!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
+	"\n" +
+	"GoFeatures\x12\xbe\x01\n" +
+	"\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
+	"\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
+	"\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
+	"API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
+	"\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
+	"\bAPILevel\x12\x19\n" +
+	"\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
+	"\bAPI_OPEN\x10\x01\x12\x0e\n" +
+	"\n" +
+	"API_HYBRID\x10\x02\x12\x0e\n" +
+	"\n" +
+	"API_OPAQUE\x10\x03\"\x92\x01\n" +
+	"\x0fStripEnumPrefix\x12!\n" +
+	"\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
+	"\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
+	"\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
+	"\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
+	"\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
 
 var (
 	file_google_protobuf_go_features_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 497da66..1ff0d14 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -412,23 +412,13 @@
 
 var File_google_protobuf_any_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_any_proto_rawDesc = string([]byte{
-	0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
-	0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
-	0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
-	0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
-	0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_any_proto_rawDesc = "" +
+	"\n" +
+	"\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+	"\x03Any\x12\x19\n" +
+	"\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+	"\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+	"\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_any_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index 193880d..ca2e7b3 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -289,24 +289,13 @@
 
 var File_google_protobuf_duration_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_duration_proto_rawDesc = string([]byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
-	0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
-	0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
-	0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
-	0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
-	0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
-	0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
-	0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
-	0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_duration_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
+	"\bDuration\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
+	"\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_duration_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index a5b8657..1d7ee3b 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -86,20 +86,12 @@
 
 var File_google_protobuf_empty_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_empty_proto_rawDesc = string([]byte{
-	0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
-	0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
-	0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
-	0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
-	0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
-	0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_empty_proto_rawDesc = "" +
+	"\n" +
+	"\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" +
+	"\x05EmptyB}\n" +
+	"\x13com.google.protobufB\n" +
+	"EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_empty_proto_rawDescOnce sync.Once
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index ecdd31a..30411b7 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -672,55 +672,31 @@
 
 var File_google_protobuf_struct_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_struct_proto_rawDesc = string([]byte{
-	0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
-	0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
-	0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
-	0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
-	0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
-	0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
-	0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
-	0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
-	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
-	0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
-	0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
-	0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
-	0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
-	0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
-	0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
-	0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
-	0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
-	0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
-	0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
-	0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
-	0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
-	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
-	0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
-	0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
-	0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
-	0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
-	0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
-	0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
-	0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
-	0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x33,
-})
+const file_google_protobuf_struct_proto_rawDesc = "" +
+	"\n" +
+	"\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
+	"\x06Struct\x12;\n" +
+	"\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
+	"\vFieldsEntry\x12\x10\n" +
+	"\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
+	"\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
+	"\x05Value\x12;\n" +
+	"\n" +
+	"null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
+	"\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
+	"\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
+	"\n" +
+	"bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
+	"\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
+	"\n" +
+	"list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
+	"\x04kind\";\n" +
+	"\tListValue\x12.\n" +
+	"\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
+	"\tNullValue\x12\x0e\n" +
+	"\n" +
+	"NULL_VALUE\x10\x00B\x7f\n" +
+	"\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_struct_proto_rawDescOnce sync.Once
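The structpb descriptor switch above is purely representational; the package's public API is unchanged. For reference, a small usage sketch of `structpb.NewStruct`, which builds a `google.protobuf.Struct` from a JSON-like Go map (field names and values are illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// NewStruct accepts JSON-like Go values: nil, bool, numbers,
	// string, []interface{}, and map[string]interface{}.
	s, err := structpb.NewStruct(map[string]interface{}{
		"device": "olt-1",
		"ports":  16,
		"active": true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Fields["device"].GetStringValue()) // olt-1
}
```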
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 00ac835..06d584c 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -298,24 +298,13 @@
 
 var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{
-	0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
-	0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
-	0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
-	0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
-	0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
-	0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
-	0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
-	0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
-	0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
-	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
-	0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
-	0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
-	0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
-	0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
-	0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+	"\n" +
+	"\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+	"\tTimestamp\x12\x18\n" +
+	"\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+	"\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+	"\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
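Likewise for timestamppb: only the descriptor encoding changes, and the usual conversion helpers are untouched. A quick round-trip sketch between `time.Time` and the well-known `Timestamp` message (the date is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// Convert a time.Time into a Timestamp message and back.
	ts := timestamppb.New(time.Date(2024, time.January, 2, 3, 4, 5, 0, time.UTC))
	fmt.Println(ts.IsValid(), ts.AsTime().Format(time.RFC3339)) // true 2024-01-02T03:04:05Z
}
```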
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 5de5301..b7c2d06 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -28,10 +28,17 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
+// Wrappers for primitive (non-message) types. These types were needed
+// for legacy reasons and are not recommended for use in new APIs.
+//
+// Historically these wrappers were useful for adding presence to proto3
+// primitive fields, but proto3 syntax has since been updated to support the
+// `optional` keyword. Using that keyword is now the strongly preferred way
+// to add presence to proto3 primitive fields.
+//
+// A secondary use case was to embed primitives in the `google.protobuf.Any`
+// type: it is now recommended that you embed your value in your own wrapper
+// message, which can be specifically documented.
 //
 // These wrappers have no meaningful use within repeated fields as they lack
 // the ability to detect presence on individual elements.
@@ -54,6 +61,9 @@
 // Wrapper message for `double`.
 //
 // The JSON representation for `DoubleValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type DoubleValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The double value.
@@ -107,6 +117,9 @@
 // Wrapper message for `float`.
 //
 // The JSON representation for `FloatValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type FloatValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The float value.
@@ -160,6 +173,9 @@
 // Wrapper message for `int64`.
 //
 // The JSON representation for `Int64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type Int64Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int64 value.
@@ -213,6 +229,9 @@
 // Wrapper message for `uint64`.
 //
 // The JSON representation for `UInt64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type UInt64Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint64 value.
@@ -266,6 +285,9 @@
 // Wrapper message for `int32`.
 //
 // The JSON representation for `Int32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type Int32Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The int32 value.
@@ -319,6 +341,9 @@
 // Wrapper message for `uint32`.
 //
 // The JSON representation for `UInt32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type UInt32Value struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The uint32 value.
@@ -372,6 +397,9 @@
 // Wrapper message for `bool`.
 //
 // The JSON representation for `BoolValue` is JSON `true` and `false`.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type BoolValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bool value.
@@ -425,6 +453,9 @@
 // Wrapper message for `string`.
 //
 // The JSON representation for `StringValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type StringValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The string value.
@@ -478,6 +509,9 @@
 // Wrapper message for `bytes`.
 //
 // The JSON representation for `BytesValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs;
+// there is no plan to remove it.
 type BytesValue struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The bytes value.
@@ -530,41 +564,32 @@
 
 var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
 
-var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{
-	0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-	0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
-	0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
-	0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
-	0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
-	0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
-	0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
-	0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
-	0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
-	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
-	0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
-	0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
-	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
-	0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
-	0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
-	0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
-	0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
-	0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
-	0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
-	0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
-	0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
-	0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
-	0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
-	0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
-	0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
-	0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
-	0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
-	0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
-	0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
-	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-})
+const file_google_protobuf_wrappers_proto_rawDesc = "" +
+	"\n" +
+	"\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
+	"\vDoubleValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
+	"\n" +
+	"FloatValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
+	"\n" +
+	"Int64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
+	"\vUInt64Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
+	"\n" +
+	"Int32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
+	"\vUInt32Value\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
+	"\tBoolValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
+	"\vStringValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
+	"\n" +
+	"BytesValue\x12\x14\n" +
+	"\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
+	"\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
 
 var (
 	file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
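The comment changes in wrappers.pb.go above steer new schemas away from the wrapper messages and toward proto3 `optional` fields. A minimal sketch of the legacy pattern those comments describe, using only the real `wrapperspb` constructors (values illustrative):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Legacy presence via wrapper messages: a nil *DoubleValue means
	// "unset", a non-nil one carries a value. Generated getters are
	// nil-safe, so reading an unset wrapper yields the zero value.
	var unset *wrapperspb.DoubleValue
	set := wrapperspb.Double(3.5)

	fmt.Println(unset.GetValue(), set.GetValue()) // 0 3.5
	// In new schemas, `optional double x = 1;` gives the same presence
	// semantics with a plain *float64 in the generated Go struct.
}
```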
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b6dc866..7ece124 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,48 +1,32 @@
-# github.com/Shopify/sarama v1.29.1
-## explicit; go 1.13
-github.com/Shopify/sarama
+# github.com/IBM/sarama v1.46.2
+## explicit; go 1.24.0
+github.com/IBM/sarama
 # github.com/aead/cmac v0.0.0-20160719120800-7af84192f0b1
 ## explicit
 github.com/aead/cmac
 github.com/aead/cmac/aes
+# github.com/beorn7/perks v1.0.1
+## explicit; go 1.11
+github.com/beorn7/perks/quantile
 # github.com/boguslaw-wojcik/crc32a v1.0.0
 ## explicit
 github.com/boguslaw-wojcik/crc32a
-# github.com/bsm/sarama-cluster v2.1.15+incompatible
-## explicit
-github.com/bsm/sarama-cluster
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/bufbuild/protocompile v0.14.1
+## explicit; go 1.21
+github.com/bufbuild/protocompile/internal/editions
+github.com/bufbuild/protocompile/protoutil
+# github.com/cespare/xxhash/v2 v2.3.0
 ## explicit; go 1.11
 github.com/cespare/xxhash/v2
-# github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73
-## explicit
+# github.com/cevaris/ordered_map v0.0.0-20220813181356-34664b69742b
+## explicit; go 1.19
 github.com/cevaris/ordered_map
-# github.com/coreos/etcd v3.3.25+incompatible
-## explicit
-github.com/coreos/etcd/auth/authpb
-github.com/coreos/etcd/clientv3/balancer
-github.com/coreos/etcd/clientv3/balancer/connectivity
-github.com/coreos/etcd/clientv3/balancer/picker
-github.com/coreos/etcd/clientv3/balancer/resolver/endpoint
-github.com/coreos/etcd/clientv3/credentials
-github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes
-github.com/coreos/etcd/etcdserver/etcdserverpb
-github.com/coreos/etcd/mvcc/mvccpb
-github.com/coreos/etcd/pkg/logutil
-github.com/coreos/etcd/pkg/systemd
-github.com/coreos/etcd/pkg/types
-github.com/coreos/etcd/raft
-github.com/coreos/etcd/raft/raftpb
-github.com/coreos/etcd/version
-# github.com/coreos/go-semver v0.3.0
-## explicit
+# github.com/coreos/go-semver v0.3.1
+## explicit; go 1.8
 github.com/coreos/go-semver/semver
-# github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
-## explicit
-github.com/coreos/go-systemd/journal
-# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
-## explicit
-github.com/coreos/pkg/capnslog
+# github.com/coreos/go-systemd/v22 v22.5.0
+## explicit; go 1.12
+github.com/coreos/go-systemd/v22/journal
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
@@ -52,20 +36,21 @@
 # github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
 ## explicit
 github.com/dgryski/go-rendezvous
-# github.com/eapache/go-resiliency v1.2.0
-## explicit
+# github.com/eapache/go-resiliency v1.7.0
+## explicit; go 1.13
 github.com/eapache/go-resiliency/breaker
-# github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
-## explicit
+# github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3
+## explicit; go 1.17
 github.com/eapache/go-xerial-snappy
 # github.com/eapache/queue v1.1.0
 ## explicit
 github.com/eapache/queue
-# github.com/go-redis/redis/v8 v8.3.4
-## explicit; go 1.11
+# github.com/go-redis/redis/v8 v8.11.5
+## explicit; go 1.17
 github.com/go-redis/redis/v8
 github.com/go-redis/redis/v8/internal
 github.com/go-redis/redis/v8/internal/hashtag
+github.com/go-redis/redis/v8/internal/hscan
 github.com/go-redis/redis/v8/internal/pool
 github.com/go-redis/redis/v8/internal/proto
 github.com/go-redis/redis/v8/internal/rand
@@ -79,14 +64,9 @@
 ## explicit; go 1.17
 github.com/golang/protobuf/jsonpb
 github.com/golang/protobuf/proto
-github.com/golang/protobuf/protoc-gen-go/descriptor
-github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/empty
-github.com/golang/protobuf/ptypes/struct
 github.com/golang/protobuf/ptypes/timestamp
-github.com/golang/protobuf/ptypes/wrappers
 # github.com/golang/snappy v0.0.4
 ## explicit
 github.com/golang/snappy
@@ -94,10 +74,10 @@
 ## explicit; go 1.12
 github.com/google/gopacket
 github.com/google/gopacket/layers
-# github.com/google/uuid v1.3.0
+# github.com/google/uuid v1.6.0
 ## explicit
 github.com/google/uuid
-# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+# github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 ## explicit; go 1.14
 github.com/grpc-ecosystem/go-grpc-middleware
 github.com/grpc-ecosystem/go-grpc-middleware/retry
@@ -105,7 +85,13 @@
 github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing
 github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
 github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
-# github.com/hashicorp/go-uuid v1.0.2
+# github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
+## explicit
+github.com/grpc-ecosystem/go-grpc-prometheus
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+## explicit; go 1.23.0
+github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options
+# github.com/hashicorp/go-uuid v1.0.3
 ## explicit
 github.com/hashicorp/go-uuid
 # github.com/jcmturner/aescts/v2 v2.0.0
@@ -114,12 +100,12 @@
 # github.com/jcmturner/dnsutils/v2 v2.0.0
 ## explicit; go 1.13
 github.com/jcmturner/dnsutils/v2
-# github.com/jcmturner/gofork v1.0.0
-## explicit
+# github.com/jcmturner/gofork v1.7.6
+## explicit; go 1.7
 github.com/jcmturner/gofork/encoding/asn1
 github.com/jcmturner/gofork/x/crypto/pbkdf2
-# github.com/jcmturner/gokrb5/v8 v8.4.2
-## explicit; go 1.14
+# github.com/jcmturner/gokrb5/v8 v8.4.4
+## explicit; go 1.16
 github.com/jcmturner/gokrb5/v8/asn1tools
 github.com/jcmturner/gokrb5/v8/client
 github.com/jcmturner/gokrb5/v8/config
@@ -154,35 +140,42 @@
 ## explicit; go 1.13
 github.com/jcmturner/rpc/v2/mstypes
 github.com/jcmturner/rpc/v2/ndr
-# github.com/jhump/protoreflect v1.10.2
-## explicit; go 1.13
+# github.com/jhump/protoreflect v1.17.0
+## explicit; go 1.21
 github.com/jhump/protoreflect/codec
 github.com/jhump/protoreflect/desc
 github.com/jhump/protoreflect/desc/internal
+github.com/jhump/protoreflect/desc/sourceinfo
 github.com/jhump/protoreflect/dynamic
 github.com/jhump/protoreflect/dynamic/grpcdynamic
 github.com/jhump/protoreflect/grpcreflect
 github.com/jhump/protoreflect/internal
 github.com/jhump/protoreflect/internal/codec
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.18.0
+## explicit; go 1.22
 github.com/klauspost/compress
+github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
+github.com/klauspost/compress/gzip
 github.com/klauspost/compress/huff0
 github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/le
 github.com/klauspost/compress/internal/snapref
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
 # github.com/looplab/fsm v0.2.0
 ## explicit; go 1.13
 github.com/looplab/fsm
+# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
+## explicit
+github.com/munnerz/goautoneg
 # github.com/opencord/omci-lib-go/v2 v2.2.4
 ## explicit; go 1.16
 github.com/opencord/omci-lib-go/v2
 github.com/opencord/omci-lib-go/v2/generated
 github.com/opencord/omci-lib-go/v2/meframe
-# github.com/opencord/voltha-lib-go/v7 v7.6.6
-## explicit; go 1.16
+# github.com/opencord/voltha-lib-go/v7 v7.7.2
+## explicit; go 1.24.5
 github.com/opencord/voltha-lib-go/v7/pkg/config
 github.com/opencord/voltha-lib-go/v7/pkg/db
 github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore
@@ -220,23 +213,43 @@
 github.com/opentracing/opentracing-go
 github.com/opentracing/opentracing-go/ext
 github.com/opentracing/opentracing-go/log
-# github.com/pierrec/lz4 v2.6.0+incompatible
-## explicit
-github.com/pierrec/lz4
-github.com/pierrec/lz4/internal/xxh32
+# github.com/pierrec/lz4/v4 v4.1.22
+## explicit; go 1.14
+github.com/pierrec/lz4/v4
+github.com/pierrec/lz4/v4/internal/lz4block
+github.com/pierrec/lz4/v4/internal/lz4errors
+github.com/pierrec/lz4/v4/internal/lz4stream
+github.com/pierrec/lz4/v4/internal/xxh32
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 ## explicit
 github.com/pmezard/go-difflib/difflib
-# github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
+# github.com/prometheus/client_golang v1.23.2
+## explicit; go 1.23.0
+github.com/prometheus/client_golang/prometheus
+github.com/prometheus/client_golang/prometheus/internal
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
+github.com/prometheus/client_model/go
+# github.com/prometheus/common v0.66.1
+## explicit; go 1.23.0
+github.com/prometheus/common/expfmt
+github.com/prometheus/common/model
+# github.com/prometheus/procfs v0.16.1
+## explicit; go 1.23.0
+github.com/prometheus/procfs
+github.com/prometheus/procfs/internal/fs
+github.com/prometheus/procfs/internal/util
+# github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
 ## explicit
 github.com/rcrowley/go-metrics
-# github.com/stretchr/testify v1.8.1
-## explicit; go 1.13
+# github.com/stretchr/testify v1.11.1
+## explicit; go 1.17
 github.com/stretchr/testify/assert
-# github.com/uber/jaeger-client-go v2.29.1+incompatible
+github.com/stretchr/testify/assert/yaml
+# github.com/uber/jaeger-client-go v2.30.0+incompatible
 ## explicit
 github.com/uber/jaeger-client-go
 github.com/uber/jaeger-client-go/config
@@ -259,115 +272,154 @@
 # github.com/uber/jaeger-lib v2.4.1+incompatible
 ## explicit
 github.com/uber/jaeger-lib/metrics
-# go.etcd.io/etcd v3.3.25+incompatible
-## explicit
-go.etcd.io/etcd/clientv3
-go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
-# go.opentelemetry.io/otel v0.13.0
-## explicit; go 1.14
-go.opentelemetry.io/otel
-go.opentelemetry.io/otel/api/global
-go.opentelemetry.io/otel/api/global/internal
-go.opentelemetry.io/otel/api/metric
-go.opentelemetry.io/otel/api/metric/registry
-go.opentelemetry.io/otel/api/trace
-go.opentelemetry.io/otel/codes
-go.opentelemetry.io/otel/internal
-go.opentelemetry.io/otel/internal/baggage
-go.opentelemetry.io/otel/internal/trace/noop
-go.opentelemetry.io/otel/label
-go.opentelemetry.io/otel/unit
+# go.etcd.io/etcd/api/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/api/v3/authpb
+go.etcd.io/etcd/api/v3/etcdserverpb
+go.etcd.io/etcd/api/v3/membershippb
+go.etcd.io/etcd/api/v3/mvccpb
+go.etcd.io/etcd/api/v3/v3rpc/rpctypes
+go.etcd.io/etcd/api/v3/version
+go.etcd.io/etcd/api/v3/versionpb
+# go.etcd.io/etcd/client/pkg/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/client/pkg/v3/fileutil
+go.etcd.io/etcd/client/pkg/v3/logutil
+go.etcd.io/etcd/client/pkg/v3/systemd
+go.etcd.io/etcd/client/pkg/v3/tlsutil
+go.etcd.io/etcd/client/pkg/v3/transport
+go.etcd.io/etcd/client/pkg/v3/types
+go.etcd.io/etcd/client/pkg/v3/verify
+# go.etcd.io/etcd/client/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/client/v3
+go.etcd.io/etcd/client/v3/credentials
+go.etcd.io/etcd/client/v3/internal/endpoint
+go.etcd.io/etcd/client/v3/internal/resolver
 # go.uber.org/atomic v1.7.0
 ## explicit; go 1.13
 go.uber.org/atomic
-# go.uber.org/multierr v1.6.0
-## explicit; go 1.12
+# go.uber.org/multierr v1.11.0
+## explicit; go 1.19
 go.uber.org/multierr
-# go.uber.org/zap v1.18.1
-## explicit; go 1.13
+# go.uber.org/zap v1.27.0
+## explicit; go 1.19
 go.uber.org/zap
 go.uber.org/zap/buffer
+go.uber.org/zap/internal
 go.uber.org/zap/internal/bufferpool
 go.uber.org/zap/internal/color
 go.uber.org/zap/internal/exit
+go.uber.org/zap/internal/pool
+go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.32.0
-## explicit; go 1.20
+go.uber.org/zap/zapgrpc
+# go.yaml.in/yaml/v2 v2.4.2
+## explicit; go 1.15
+go.yaml.in/yaml/v2
+# golang.org/x/crypto v0.43.0
+## explicit; go 1.24.0
 golang.org/x/crypto/md4
 golang.org/x/crypto/pbkdf2
-# golang.org/x/net v0.33.0
-## explicit; go 1.18
+# golang.org/x/net v0.46.0
+## explicit; go 1.24.0
 golang.org/x/net/context
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
+golang.org/x/net/internal/httpcommon
 golang.org/x/net/internal/socks
 golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
-# golang.org/x/sys v0.29.0
-## explicit; go 1.18
+# golang.org/x/sys v0.37.0
+## explicit; go 1.24.0
 golang.org/x/sys/unix
-# golang.org/x/text v0.21.0
-## explicit; go 1.18
+golang.org/x/sys/windows
+# golang.org/x/text v0.30.0
+## explicit; go 1.24.0
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# google.golang.org/genproto v0.0.0-20230629202037-9506855d4529
-## explicit; go 1.19
-google.golang.org/genproto/internal
-# google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b
+## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/api
 google.golang.org/genproto/googleapis/api/annotations
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b
+## explicit; go 1.23.0
 google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.56.3 => google.golang.org/grpc v1.25.1
-## explicit; go 1.11
+# google.golang.org/grpc v1.76.0
+## explicit; go 1.24.0
 google.golang.org/grpc
+google.golang.org/grpc/attributes
 google.golang.org/grpc/backoff
 google.golang.org/grpc/balancer
 google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/endpointsharding
+google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
 google.golang.org/grpc/balancer/roundrobin
 google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
 google.golang.org/grpc/codes
 google.golang.org/grpc/connectivity
 google.golang.org/grpc/credentials
-google.golang.org/grpc/credentials/internal
+google.golang.org/grpc/credentials/insecure
 google.golang.org/grpc/encoding
 google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
 google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
 google.golang.org/grpc/internal
 google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
 google.golang.org/grpc/internal/balancerload
 google.golang.org/grpc/internal/binarylog
 google.golang.org/grpc/internal/buffer
 google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
 google.golang.org/grpc/internal/envconfig
-google.golang.org/grpc/internal/grpcrand
+google.golang.org/grpc/internal/grpclog
 google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/idle
+google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/proxyattributes
+google.golang.org/grpc/internal/resolver
+google.golang.org/grpc/internal/resolver/delegatingresolver
 google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
 google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/resolver/unix
+google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
+google.golang.org/grpc/internal/status
 google.golang.org/grpc/internal/syscall
 google.golang.org/grpc/internal/transport
+google.golang.org/grpc/internal/transport/networktype
 google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
 google.golang.org/grpc/metadata
-google.golang.org/grpc/naming
 google.golang.org/grpc/peer
 google.golang.org/grpc/reflection
+google.golang.org/grpc/reflection/grpc_reflection_v1
 google.golang.org/grpc/reflection/grpc_reflection_v1alpha
+google.golang.org/grpc/reflection/internal
 google.golang.org/grpc/resolver
 google.golang.org/grpc/resolver/dns
-google.golang.org/grpc/resolver/passthrough
+google.golang.org/grpc/resolver/manual
 google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.5
-## explicit; go 1.21
+# google.golang.org/protobuf v1.36.10
+## explicit; go 1.23
+google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire
@@ -394,12 +446,14 @@
 google.golang.org/protobuf/internal/strs
 google.golang.org/protobuf/internal/version
 google.golang.org/protobuf/proto
+google.golang.org/protobuf/protoadapt
 google.golang.org/protobuf/reflect/protodesc
 google.golang.org/protobuf/reflect/protoreflect
 google.golang.org/protobuf/reflect/protoregistry
 google.golang.org/protobuf/runtime/protoiface
 google.golang.org/protobuf/runtime/protoimpl
 google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/dynamicpb
 google.golang.org/protobuf/types/gofeaturespb
 google.golang.org/protobuf/types/known/anypb
 google.golang.org/protobuf/types/known/durationpb
@@ -410,6 +464,3 @@
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# github.com/coreos/bbolt v1.3.4 => go.etcd.io/bbolt v1.3.4
-# go.etcd.io/bbolt v1.3.4 => github.com/coreos/bbolt v1.3.4
-# google.golang.org/grpc => google.golang.org/grpc v1.25.1
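The modules.txt changes above capture the headline moves of this update: github.com/Shopify/sarama is replaced by its maintained fork github.com/IBM/sarama, the deprecated github.com/coreos/etcd packages give way to the modular go.etcd.io/etcd/client/v3 tree, and the old google.golang.org/grpc v1.25.1 pin is dropped. For sarama, callers generally only need an import-path change; a minimal sketch of building a producer against the new module (broker address and Kafka version are illustrative):

```go
package main

import (
	"log"

	"github.com/IBM/sarama" // previously: github.com/Shopify/sarama
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_6_0_0 // match the version your brokers actually run
	cfg.Producer.Return.Successes = true

	producer, err := sarama.NewSyncProducer([]string{"127.0.0.1:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close()
}
```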